/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * ibdm.c
 *
 * This file contains the InfiniBand Device Manager (IBDM) support functions.
 * The IB nexus driver is the only client of the IBDM module.
 *
 * IBDM registers with IBTF for HCA arrival/removal notification.
 * IBDM registers with SA access to send DM MADs to discover the IOCs behind
 * the IOUs.
 *
 * The IB nexus driver registers with IBDM to find information about the
 * HCAs and IOCs (behind the IOUs) present on the IB fabric.
 */

#include <sys/systm.h>
#include <sys/taskq.h>
#include <sys/ib/mgt/ibdm/ibdm_impl.h>
#include <sys/ib/mgt/ibmf/ibmf_impl.h>
#include <sys/modctl.h>

/* Function Prototype declarations */
static int	ibdm_free_iou_info(ibdm_dp_gidinfo_t *, ibdm_iou_info_t **);
static int	ibdm_fini(void);
static int	ibdm_init(void);
static int	ibdm_get_reachable_ports(ibdm_port_attr_t *,
		    ibdm_hca_list_t *);
static ibdm_dp_gidinfo_t	*ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t);
static ibdm_dp_gidinfo_t	*ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *);
static boolean_t	ibdm_is_cisco(ib_guid_t);
static boolean_t	ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *);
static void	ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *);
static int	ibdm_set_classportinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_send_classportinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *);
static int	ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t,
		    ib_guid_t *, ib_guid_t *);
static int	ibdm_retry_command(ibdm_timeout_cb_args_t *);
static int	ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int);
static int	ibdm_verify_mad_status(ib_mad_hdr_t *);
static int	ibdm_handle_redirection(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_wait_probe_completion(void);
static void	ibdm_sweep_fabric(int);
static void	ibdm_probe_gid_thread(void *);
static void	ibdm_wakeup_probe_gid_cv(void);
static void	ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int);
static int	ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int);
static void	ibdm_update_port_attr(ibdm_port_attr_t *);
static void	ibdm_handle_hca_attach(ib_guid_t);
static void	ibdm_handle_srventry_mad(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_recv_incoming_mad(void *);
static void	ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_pkt_timeout_hdlr(void *arg);
static void	ibdm_initialize_port(ibdm_port_attr_t *);
static void	ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static void	ibdm_probe_gid(ibdm_dp_gidinfo_t *);
static void	ibdm_alloc_send_buffers(ibmf_msg_t *);
static void	ibdm_free_send_buffers(ibmf_msg_t *);
static void	ibdm_handle_hca_detach(ib_guid_t);
static int	ibdm_fini_port(ibdm_port_attr_t *);
static int	ibdm_uninit_hca(ibdm_hca_list_t *);
static void	ibdm_handle_setclassportinfo(ibmf_handle_t, ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_handle_iounitinfo(ibmf_handle_t,
		    ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static void	ibdm_handle_ioc_profile(ibmf_handle_t,
		    ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static void	ibdm_event_hdlr(void *, ibt_hca_hdl_t,
		    ibt_async_code_t, ibt_async_event_t *);
static void	ibdm_handle_classportinfo(ibmf_handle_t,
		    ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static void	ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *,
		    ibdm_dp_gidinfo_t *);

static ibdm_hca_list_t	*ibdm_dup_hca_attr(ibdm_hca_list_t *);
static ibdm_ioc_info_t	*ibdm_dup_ioc_info(ibdm_ioc_info_t *,
			    ibdm_dp_gidinfo_t *gid_list);
static void	ibdm_probe_ioc(ib_guid_t, ib_guid_t, int);
static ibdm_ioc_info_t	*ibdm_is_ioc_present(ib_guid_t,
			    ibdm_dp_gidinfo_t *, int *);
static ibdm_port_attr_t	*ibdm_get_port_attr(ibt_async_event_t *,
			    ibdm_hca_list_t **);
static sa_node_record_t	*ibdm_get_node_records(ibmf_saa_handle_t,
			    size_t *, ib_guid_t);
static int	ibdm_get_node_record_by_port(ibmf_saa_handle_t,
		    ib_guid_t, sa_node_record_t **, size_t *);
static sa_portinfo_record_t	*ibdm_get_portinfo(ibmf_saa_handle_t, size_t *,
				    ib_lid_t);
static ibdm_dp_gidinfo_t	*ibdm_create_gid_info(ibdm_port_attr_t *,
				    ib_gid_t, ib_gid_t);
static ibdm_dp_gidinfo_t	*ibdm_find_gid(ib_guid_t, ib_guid_t);
static int	ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t);
static ibdm_ioc_info_t	*ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int);
static void	ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t,
		    ibmf_saa_event_details_t *, void *);
static void	ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *,
		    ibdm_dp_gidinfo_t *);
static ibdm_dp_gidinfo_t	*ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *);
static void	ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *,
		    ibdm_dp_gidinfo_t *);
static void	ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *);
static void	ibdm_free_gid_list(ibdm_gid_t *);
static void	ibdm_rescan_gidlist(ib_guid_t *ioc_guid);
static void	ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *);
static void	ibdm_saa_event_taskq(void *);
static void	ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *);
static void	ibdm_get_next_port(ibdm_hca_list_t **,
		    ibdm_port_attr_t **, int);
static void	ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *,
		    ibdm_dp_gidinfo_t *);
static void	ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *,
		    ibdm_hca_list_t *);
static void	ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *);
static void	ibdm_saa_handle_new_gid(void *);
static void	ibdm_reset_all_dgids(ibmf_saa_handle_t);
static void	ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *);
static void	ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *);
static void	ibdm_fill_srv_attr_mod(ib_mad_hdr_t *,
		    ibdm_timeout_cb_args_t *);
static void	ibdm_bump_transactionID(ibdm_dp_gidinfo_t *);
static ibdm_ioc_info_t	*ibdm_handle_prev_iou();
static int	ibdm_serv_cmp(ibdm_srvents_info_t *, ibdm_srvents_info_t *,
		    int);
static ibdm_ioc_info_t	*ibdm_get_ioc_info_with_gid(ib_guid_t,
			    ibdm_dp_gidinfo_t **);

int	ibdm_dft_timeout	= IBDM_DFT_TIMEOUT;
int	ibdm_dft_retry_cnt	= IBDM_DFT_NRETRIES;
#ifdef DEBUG
int	ibdm_ignore_saa_event = 0;
#endif

/* Modload support */
static struct modlmisc ibdm_modlmisc	= {
	&mod_miscops,
	"InfiniBand Device Manager",
};

struct modlinkage ibdm_modlinkage = {
	MODREV_1,
	(void *)&ibdm_modlmisc,
	NULL
};

static ibt_clnt_modinfo_t ibdm_ibt_modinfo = {
	IBTI_V2,
	IBT_DM,
	ibdm_event_hdlr,
	NULL,
	"ibdm"
};

/* Global variables */
ibdm_t	ibdm;
int	ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING;
char	*ibdm_string = "ibdm";

_NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv",
    ibdm.ibdm_dp_gidlist_head))

/*
 * _init
 *	Loadable module init, called before any other module routine.
 *	Initialize mutexes
 *	Register with IBTF
 */
int
_init(void)
{
	int		err;

	IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm);

	if ((err = ibdm_init()) != IBDM_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err);
		(void) ibdm_fini();
		return (DDI_FAILURE);
	}

	if ((err = mod_install(&ibdm_modlinkage)) != 0) {
		IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err);
		(void) ibdm_fini();
	}
	return (err);
}


int
_fini(void)
{
	int err;

	if ((err = ibdm_fini()) != IBDM_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err);
		(void) ibdm_init();
		return (EBUSY);
	}

	if ((err = mod_remove(&ibdm_modlinkage)) != 0) {
		IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err);
		(void) ibdm_init();
	}
	return (err);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&ibdm_modlinkage, modinfop));
}


/*
 * ibdm_init():
 *	Register with IBTF
 *	Allocate memory for the HCAs
 *	Allocate minor-nodes for the HCAs
 */
static int
ibdm_init(void)
{
	int			i, hca_count;
	ib_guid_t		*hca_guids;
	ibt_status_t		status;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_init:");
	if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) {
		mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL);
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED;
	}

	if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) {
		if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL,
		    (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach "
			    "failed %x", status);
			mutex_exit(&ibdm.ibdm_mutex);
			return (IBDM_FAILURE);
		}

		ibdm.ibdm_state |= IBDM_IBT_ATTACHED;
		mutex_exit(&ibdm.ibdm_mutex);
	}


	if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) {
		hca_count = ibt_get_hca_list(&hca_guids);
		IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count);
		for (i = 0; i < hca_count; i++)
			(void) ibdm_handle_hca_attach(hca_guids[i]);
		if (hca_count)
			ibt_free_hca_list(hca_guids, hca_count);

		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_state |= IBDM_HCA_ATTACHED;
		mutex_exit(&ibdm.ibdm_mutex);
	}

	if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) {
		cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL);
		cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL);
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_state |= IBDM_CVS_ALLOCED;
		mutex_exit(&ibdm.ibdm_mutex);
	}
	return (IBDM_SUCCESS);
}


static int
ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info, ibdm_iou_info_t **ioup)
{
	int			ii, k, niocs;
	size_t			size;
	ibdm_gid_t		*delete, *head;
	timeout_id_t		timeout_id;
	ibdm_ioc_info_t		*ioc;
	ibdm_iou_info_t		*gl_iou = *ioup;

	ASSERT(mutex_owned(&gid_info->gl_mutex));
	if (gl_iou == NULL) {
		IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU");
		return (0);
	}

	niocs = gl_iou->iou_info.iou_num_ctrl_slots;
	IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d",
	    gid_info, niocs);

	for (ii = 0; ii < niocs; ii++) {
		ioc = (ibdm_ioc_info_t *)&gl_iou->iou_ioc_info[ii];

		/* handle the case where an ioc_timeout_id is scheduled */
		if (ioc->ioc_timeout_id) {
			timeout_id = ioc->ioc_timeout_id;
			ioc->ioc_timeout_id = 0;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
			    "ioc_timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
				    "untimeout ioc_timeout_id failed");
				mutex_enter(&gid_info->gl_mutex);
				return (-1);
			}
			mutex_enter(&gid_info->gl_mutex);
		}

		/* handle the case where an ioc_dc_timeout_id is scheduled */
		if (ioc->ioc_dc_timeout_id) {
			timeout_id = ioc->ioc_dc_timeout_id;
			ioc->ioc_dc_timeout_id = 0;
			mutex_exit(&gid_info->gl_mutex);
			IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
			    "ioc_dc_timeout_id = 0x%x", timeout_id);
			if (untimeout(timeout_id) == -1) {
				IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
				    "untimeout ioc_dc_timeout_id failed");
				mutex_enter(&gid_info->gl_mutex);
				return (-1);
			}
			mutex_enter(&gid_info->gl_mutex);
		}

		/* handle the case where serv[k].se_timeout_id is scheduled */
		for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) {
			if (ioc->ioc_serv[k].se_timeout_id) {
				timeout_id = ioc->ioc_serv[k].se_timeout_id;
				ioc->ioc_serv[k].se_timeout_id = 0;
				mutex_exit(&gid_info->gl_mutex);
				IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
				    "ioc->ioc_serv[%d].se_timeout_id = 0x%x",
				    k, timeout_id);
				if (untimeout(timeout_id) == -1) {
					IBTF_DPRINTF_L2("ibdm", "free_iou_info:"
					    " untimeout se_timeout_id failed");
					mutex_enter(&gid_info->gl_mutex);
					return (-1);
				}
				mutex_enter(&gid_info->gl_mutex);
			}
		}

		/* delete GID list in IOC */
		head = ioc->ioc_gid_list;
		while (head) {
			IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: "
			    "Deleting gid_list struct %p", head);
			delete = head;
			head = head->gid_next;
			kmem_free(delete, sizeof (ibdm_gid_t));
		}
		ioc->ioc_gid_list = NULL;

		/* delete ioc_serv */
		size = ioc->ioc_profile.ioc_service_entries *
		    sizeof (ibdm_srvents_info_t);
		if (ioc->ioc_serv && size) {
			kmem_free(ioc->ioc_serv, size);
			ioc->ioc_serv = NULL;
		}
	}
	/*
	 * Clear the IBDM_CISCO_PROBE_DONE flag to get the IO Unit information
	 * via the switch during the probe process.
	 */
	gid_info->gl_flag &= ~IBDM_CISCO_PROBE_DONE;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC");
	size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t);
	kmem_free(gl_iou, size);
	*ioup = NULL;
	return (0);
}


/*
 * ibdm_fini():
 *	Un-register with IBTF
 *	Deallocate memory for the GID info
 */
static int
ibdm_fini()
{
	int			ii;
	ibdm_hca_list_t		*hca_list, *temp;
	ibdm_dp_gidinfo_t	*gid_info, *tmp;
	ibdm_gid_t		*head, *delete;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_fini");

	mutex_enter(&ibdm.ibdm_hl_mutex);
	if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) {
		if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			return (IBDM_FAILURE);
		}
		ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED;
		ibdm.ibdm_ibt_clnt_hdl = NULL;
	}

	hca_list = ibdm.ibdm_hca_list_head;
	IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count);
	for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
		temp = hca_list;
		hca_list = hca_list->hl_next;
		IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp);
		if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: "
			    "uninit_hca %p failed", temp);
			mutex_exit(&ibdm.ibdm_hl_mutex);
			return (IBDM_FAILURE);
		}
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

	mutex_enter(&ibdm.ibdm_mutex);
	if (ibdm.ibdm_state & IBDM_HCA_ATTACHED)
		ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED;

	gid_info = ibdm.ibdm_dp_gidlist_head;
	while (gid_info) {
		mutex_enter(&gid_info->gl_mutex);
		(void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);

		tmp = gid_info;
		gid_info = gid_info->gl_next;
		mutex_destroy(&tmp->gl_mutex);
		head = tmp->gl_gid;
		while (head) {
			IBTF_DPRINTF_L4("ibdm",
			    "\tibdm_fini: Deleting gid structs");
			delete = head;
			head = head->gid_next;
			kmem_free(delete, sizeof (ibdm_gid_t));
		}
		kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t));
	}
	mutex_exit(&ibdm.ibdm_mutex);

	if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) {
		ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED;
		mutex_destroy(&ibdm.ibdm_mutex);
		mutex_destroy(&ibdm.ibdm_hl_mutex);
		mutex_destroy(&ibdm.ibdm_ibnex_mutex);
	}
	if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) {
		ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED;
		cv_destroy(&ibdm.ibdm_probe_cv);
		cv_destroy(&ibdm.ibdm_busy_cv);
	}
	return (IBDM_SUCCESS);
}

/*
 * ibdm_event_hdlr()
 *
 *	IBDM registers this asynchronous event handler at the time of
 *	ibt_attach. IBDM handles the following async events; all other
 *	events are simply ignored.
 *	IBT_HCA_ATTACH_EVENT:
 *		Retrieves the information about all the ports that are
 *		present on this HCA, allocates the port attributes
 *		structure and calls IB nexus callback routine with
 *		the port attributes structure as an input argument.
 *	IBT_HCA_DETACH_EVENT:
 *		Retrieves the information about all the ports that are
 *		present on this HCA and calls IB nexus callback with
 *		port guid as an argument
 *	IBT_EVENT_PORT_UP:
 *		Register with IBMF and SA access
 *		Setup IBMF receive callback routine
 *	IBT_ERROR_PORT_DOWN:
 *		Un-Register with IBMF and SA access
 *		Teardown IBMF receive callback routine
 */
/*ARGSUSED*/
static void
ibdm_event_hdlr(void *clnt_hdl,
    ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event)
{
	ibdm_hca_list_t		*hca_list;
	ibdm_port_attr_t	*port;
	ibmf_saa_handle_t	port_sa_hdl;

	IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code);

	switch (code) {
	case IBT_HCA_ATTACH_EVENT:	/* New HCA registered with IBTF */
		ibdm_handle_hca_attach(event->ev_hca_guid);
		break;

	case IBT_HCA_DETACH_EVENT:	/* HCA unregistered with IBTF */
		ibdm_handle_hca_detach(event->ev_hca_guid);
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)
			    &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
		break;

	case IBT_EVENT_PORT_UP:
		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP");
		mutex_enter(&ibdm.ibdm_hl_mutex);
		port = ibdm_get_port_attr(event, &hca_list);
		if (port == NULL) {
			IBTF_DPRINTF_L2("ibdm",
			    "\tevent_hdlr: HCA not present");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			break;
		}
		ibdm_initialize_port(port);
		hca_list->hl_nports_active++;
		mutex_exit(&ibdm.ibdm_hl_mutex);
		break;

	case IBT_ERROR_PORT_DOWN:
		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN");
		mutex_enter(&ibdm.ibdm_hl_mutex);
		port = ibdm_get_port_attr(event, &hca_list);
		if (port == NULL) {
			IBTF_DPRINTF_L2("ibdm",
			    "\tevent_hdlr: HCA not present");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			break;
		}
		hca_list->hl_nports_active--;
		port_sa_hdl = port->pa_sa_hdl;
		(void) ibdm_fini_port(port);
		port->pa_state = IBT_PORT_DOWN;
		mutex_exit(&ibdm.ibdm_hl_mutex);
		ibdm_reset_all_dgids(port_sa_hdl);
		break;

	default:		/* Ignore all other events/errors */
		break;
	}
}

/*
 * ibdm_initialize_port()
 *	Register with IBMF
 *	Register with SA access
 *	Register a receive callback routine with IBMF. IBMF invokes
 *	this routine whenever a MAD arrives at this port.
 *	Update the port attributes
 */
static void
ibdm_initialize_port(ibdm_port_attr_t *port)
{
	int				ii;
	uint_t				nports, size;
	uint_t				pkey_idx;
	ib_pkey_t			pkey;
	ibt_hca_portinfo_t		*pinfop;
	ibmf_register_info_t		ibmf_reg;
	ibmf_saa_subnet_event_args_t	event_args;

	IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:");
	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));

	/* Check whether the port is active */
	if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
	    NULL) != IBT_SUCCESS)
		return;

	if (port->pa_sa_hdl != NULL)
		return;

	if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
	    &pinfop, &nports, &size) != IBT_SUCCESS) {
		/* This should not occur */
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		return;
	}
	port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;

	port->pa_state = pinfop->p_linkstate;
	port->pa_npkeys = pinfop->p_pkey_tbl_sz;
	port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
	    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);

	for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++)
		port->pa_pkey_tbl[pkey_idx].pt_pkey =
		    pinfop->p_pkey_tbl[pkey_idx];

	ibt_free_portinfo(pinfop, size);

	event_args.is_event_callback = ibdm_saa_event_cb;
	event_args.is_event_callback_arg = port;
	if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args,
	    IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
		    "sa access registration failed");
		return;
	}
	ibmf_reg.ir_ci_guid		= port->pa_hca_guid;
	ibmf_reg.ir_port_num		= port->pa_port_num;
	ibmf_reg.ir_client_class	= DEV_MGT_MANAGER;

	if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL,
	    &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
		    "IBMF registration failed");
		(void) ibdm_fini_port(port);
		return;
	}
	if (ibmf_setup_async_cb(port->pa_ibmf_hdl, IBMF_QP_HANDLE_DEFAULT,
	    ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
		    "IBMF setup recv cb failed");
		(void) ibdm_fini_port(port);
		return;
	}

	for (ii = 0; ii < port->pa_npkeys; ii++) {
		pkey = port->pa_pkey_tbl[ii].pt_pkey;
		if (IBDM_INVALID_PKEY(pkey)) {
			port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
			continue;
		}
		ibdm_port_attr_ibmf_init(port, pkey, ii);
	}
}


/*
 * ibdm_port_attr_ibmf_init:
 *	With IBMF - Alloc QP Handle and Setup Async callback
 */
static void
ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii)
{
	int ret;

	if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY,
	    IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) !=
	    IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
		    "IBMF failed to alloc qp %d", ret);
		port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
		return;
	}

	IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p",
	    port->pa_pkey_tbl[ii].pt_qp_hdl);

	if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl,
	    port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) !=
	    IBMF_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
		    "IBMF setup recv cb failed %d", ret);
		(void) ibmf_free_qp(port->pa_ibmf_hdl,
		    &port->pa_pkey_tbl[ii].pt_qp_hdl, 0);
		port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
	}
}
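
/*
 * Note: ibdm_port_attr_ibmf_init() and ibdm_port_attr_ibmf_fini() are the
 * paired per-P_Key helpers used by ibdm_initialize_port()/ibdm_fini_port();
 * each sets up (or tears down) one alternate QP and its receive callback
 * for a single entry of the port's P_Key table.
 */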

/*
 * ibdm_get_port_attr()
 *	Get port attributes from HCA guid and port number
 *	Return pointer to ibdm_port_attr_t on Success
 *	and NULL on failure
 */
static ibdm_port_attr_t *
ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval)
{
	ibdm_hca_list_t		*hca_list;
	ibdm_port_attr_t	*port_attr;
	int			ii;

	IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port);
	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
	hca_list = ibdm.ibdm_hca_list_head;
	while (hca_list) {
		if (hca_list->hl_hca_guid == event->ev_hca_guid) {
			for (ii = 0; ii < hca_list->hl_nports; ii++) {
				port_attr = &hca_list->hl_port_attr[ii];
				if (port_attr->pa_port_num == event->ev_port) {
					*retval = hca_list;
					return (port_attr);
				}
			}
		}
		hca_list = hca_list->hl_next;
	}
	return (NULL);
}


/*
 * ibdm_update_port_attr()
 *	Update the port attributes
 */
static void
ibdm_update_port_attr(ibdm_port_attr_t *port)
{
	uint_t			nports, size;
	uint_t			pkey_idx;
	ibt_hca_portinfo_t	*portinfop;

	IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin");
	if (ibt_query_hca_ports(port->pa_hca_hdl,
	    port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) {
		/* This should not occur */
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		return;
	}
	port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix;

	port->pa_state = portinfop->p_linkstate;

	/*
	 * PKey information in portinfo valid only if port is
	 * ACTIVE. Bail out if not.
	 */
	if (port->pa_state != IBT_PORT_ACTIVE) {
		port->pa_npkeys = 0;
		port->pa_pkey_tbl = NULL;
		ibt_free_portinfo(portinfop, size);
		return;
	}

	port->pa_npkeys = portinfop->p_pkey_tbl_sz;
	port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
	    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);

	for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) {
		port->pa_pkey_tbl[pkey_idx].pt_pkey =
		    portinfop->p_pkey_tbl[pkey_idx];
	}
	ibt_free_portinfo(portinfop, size);
}


/*
 * ibdm_handle_hca_attach()
 */
static void
ibdm_handle_hca_attach(ib_guid_t hca_guid)
{
	uint_t			size;
	uint_t			ii, nports;
	ibt_status_t		status;
	ibt_hca_hdl_t		hca_hdl;
	ibt_hca_attr_t		*hca_attr;
	ibdm_hca_list_t		*hca_list, *temp;
	ibdm_port_attr_t	*port_attr;
	ibt_hca_portinfo_t	*portinfop;

	IBTF_DPRINTF_L4("ibdm",
	    "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid);

	/* open the HCA first */
	if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid,
	    &hca_hdl)) != IBT_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
		    "open_hca failed, status 0x%x", status);
		return;
	}

	hca_attr = (ibt_hca_attr_t *)
	    kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP);
	/* ibt_query_hca always returns IBT_SUCCESS */
	(void) ibt_query_hca(hca_hdl, hca_attr);

	IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x,"
	    " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id,
	    hca_attr->hca_version_id, hca_attr->hca_nports);

	if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports,
	    &size)) != IBT_SUCCESS) {
		IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
		    "ibt_query_hca_ports failed, status 0x%x", status);
		kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
		(void) ibt_close_hca(hca_hdl);
		return;
	}
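
	/*
	 * From here on, build the per-HCA list entry: one ibdm_port_attr_t
	 * for each physical port, plus a pseudo port attribute that
	 * describes the HCA node itself (hl_hca_port_attr below).
	 */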
	hca_list = (ibdm_hca_list_t *)
	    kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP);
	hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
	    (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP);
	hca_list->hl_hca_guid = hca_attr->hca_node_guid;
	hca_list->hl_nports = hca_attr->hca_nports;
	hca_list->hl_attach_time = ddi_get_time();
	hca_list->hl_hca_hdl = hca_hdl;

	/*
	 * Init a dummy port attribute for the HCA node.
	 * This is the per-HCA node entry. Initialize port_attr:
	 *	hca_guid & port_guid -> hca_guid
	 *	npkeys, pkey_tbl is NULL
	 *	port_num, sn_prefix is 0
	 *	vendorid, product_id, dev_version from HCA
	 *	pa_state is IBT_PORT_ACTIVE
	 */
	hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
	    sizeof (ibdm_port_attr_t), KM_SLEEP);
	port_attr = hca_list->hl_hca_port_attr;
	port_attr->pa_vendorid = hca_attr->hca_vendor_id;
	port_attr->pa_productid = hca_attr->hca_device_id;
	port_attr->pa_dev_version = hca_attr->hca_version_id;
	port_attr->pa_hca_guid = hca_attr->hca_node_guid;
	port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
	port_attr->pa_port_guid = hca_attr->hca_node_guid;
	port_attr->pa_state = IBT_PORT_ACTIVE;


	for (ii = 0; ii < nports; ii++) {
		port_attr = &hca_list->hl_port_attr[ii];
		port_attr->pa_vendorid = hca_attr->hca_vendor_id;
		port_attr->pa_productid = hca_attr->hca_device_id;
		port_attr->pa_dev_version = hca_attr->hca_version_id;
		port_attr->pa_hca_guid = hca_attr->hca_node_guid;
		port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
		port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid;
		port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix;
		port_attr->pa_port_num = portinfop[ii].p_port_num;
		port_attr->pa_state = portinfop[ii].p_linkstate;

		/*
		 * Register with IBMF and SA access when the port is in
		 * ACTIVE state. Also register a callback routine
		 * with IBMF to receive incoming DM MADs.
		 * The IBDM event handler takes care of registration of
		 * ports which are not active.
		 */
		IBTF_DPRINTF_L4("ibdm",
		    "\thandle_hca_attach: port guid %llx Port state 0x%x",
		    port_attr->pa_port_guid, portinfop[ii].p_linkstate);

		if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) {
			mutex_enter(&ibdm.ibdm_hl_mutex);
			hca_list->hl_nports_active++;
			ibdm_initialize_port(port_attr);
			mutex_exit(&ibdm.ibdm_hl_mutex);
		}
	}
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) {
		if (temp->hl_hca_guid == hca_guid) {
			IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX "
			    "already seen by IBDM", hca_guid);
			mutex_exit(&ibdm.ibdm_hl_mutex);
			(void) ibdm_uninit_hca(hca_list);
			return;
		}
	}
	ibdm.ibdm_hca_count++;
	if (ibdm.ibdm_hca_list_head == NULL) {
		ibdm.ibdm_hca_list_head = hca_list;
		ibdm.ibdm_hca_list_tail = hca_list;
	} else {
		ibdm.ibdm_hca_list_tail->hl_next = hca_list;
		ibdm.ibdm_hca_list_tail = hca_list;
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);
	mutex_enter(&ibdm.ibdm_ibnex_mutex);
	if (ibdm.ibdm_ibnex_callback != NULL) {
		(*ibdm.ibdm_ibnex_callback)((void *)
		    &hca_guid, IBDM_EVENT_HCA_ADDED);
	}
	mutex_exit(&ibdm.ibdm_ibnex_mutex);

	kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
	ibt_free_portinfo(portinfop, size);
}


/*
 * ibdm_handle_hca_detach()
 */
static void
ibdm_handle_hca_detach(ib_guid_t hca_guid)
{
	ibdm_hca_list_t		*head, *prev = NULL;
	size_t			len;
	ibdm_dp_gidinfo_t	*gidinfo;

	IBTF_DPRINTF_L4("ibdm",
	    "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid);

	/* Make sure no probes are running */
	mutex_enter(&ibdm.ibdm_mutex);
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;
	mutex_exit(&ibdm.ibdm_mutex);

	mutex_enter(&ibdm.ibdm_hl_mutex);
	head = ibdm.ibdm_hca_list_head;
	while (head) {
		if (head->hl_hca_guid == hca_guid) {
			if (prev == NULL)
				ibdm.ibdm_hca_list_head = head->hl_next;
			else
				prev->hl_next = head->hl_next;
			ibdm.ibdm_hca_count--;
			break;
		}
		prev = head;
		head = head->hl_next;
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);
	if (ibdm_uninit_hca(head) != IBDM_SUCCESS)
		(void) ibdm_handle_hca_attach(hca_guid);

	/*
	 * Now clean up the HCA lists in the gidlist.
	 */
	for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo =
	    gidinfo->gl_next) {
		prev = NULL;
		head = gidinfo->gl_hca_list;
		while (head) {
			if (head->hl_hca_guid == hca_guid) {
				if (prev == NULL)
					gidinfo->gl_hca_list =
					    head->hl_next;
				else
					prev->hl_next = head->hl_next;

				len = sizeof (ibdm_hca_list_t) +
				    (head->hl_nports *
				    sizeof (ibdm_port_attr_t));
				kmem_free(head, len);

				break;
			}
			prev = head;
			head = head->hl_next;
		}
	}

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	mutex_exit(&ibdm.ibdm_mutex);
}


static int
ibdm_uninit_hca(ibdm_hca_list_t *head)
{
	int			ii;
	ibdm_port_attr_t	*port_attr;

	for (ii = 0; ii < head->hl_nports; ii++) {
		port_attr = &head->hl_port_attr[ii];
		if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x "
			    "ibdm_fini_port() failed", head, ii);
			return (IBDM_FAILURE);
		}
	}
	if (head->hl_hca_hdl)
		if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS)
			return (IBDM_FAILURE);
	kmem_free(head->hl_port_attr,
	    head->hl_nports * sizeof (ibdm_port_attr_t));
	kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t));
	kmem_free(head, sizeof (ibdm_hca_list_t));
	return (IBDM_SUCCESS);
}


/*
 * For each port on the HCA,
 *	1) Teardown IBMF receive callback function
 *	2) Unregister with IBMF
 *	3) Unregister with SA access
 */
static int
ibdm_fini_port(ibdm_port_attr_t *port_attr)
{
	int	ii, ibmf_status;

	for (ii = 0; ii < port_attr->pa_npkeys; ii++) {
		if (port_attr->pa_pkey_tbl == NULL)
			break;
		if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl)
			continue;
		if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
			    "ibdm_port_attr_ibmf_fini failed for "
			    "port pkey 0x%x", ii);
			return (IBDM_FAILURE);
		}
	}

	if (port_attr->pa_ibmf_hdl) {
		ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
		    IBMF_QP_HANDLE_DEFAULT, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
			    "ibmf_tear_down_async_cb failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}

		ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
			    "ibmf_unregister failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}

		port_attr->pa_ibmf_hdl = NULL;
	}

	if (port_attr->pa_sa_hdl) {
		ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
			    "ibmf_sa_session_close failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}
		port_attr->pa_sa_hdl = NULL;
	}

	if (port_attr->pa_pkey_tbl != NULL) {
		kmem_free(port_attr->pa_pkey_tbl,
		    port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
		port_attr->pa_pkey_tbl = NULL;
		port_attr->pa_npkeys = 0;
	}

	return (IBDM_SUCCESS);
}


/*
 * ibdm_port_attr_ibmf_fini:
 *	With IBMF - Tear down Async callback and free QP Handle
 */
static int
ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii)
{
	int ibmf_status;

	IBTF_DPRINTF_L5("ibdm", "\tport_attr_ibmf_fini:");

	if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) {
		ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
		    port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
			    "ibmf_tear_down_async_cb failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}
		ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl,
		    &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
		if (ibmf_status != IBMF_SUCCESS) {
			IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
			    "ibmf_free_qp failed %d", ibmf_status);
			return (IBDM_FAILURE);
		}
		port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
	}
	return (IBDM_SUCCESS);
}


/*
 * ibdm_gid_decr_pending:
 *	Decrement gl_pending_cmds. If it reaches zero, wake up
 *	sleeping threads.
 */
static void
ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo)
{
	mutex_enter(&ibdm.ibdm_mutex);
	mutex_enter(&gidinfo->gl_mutex);
	if (--gidinfo->gl_pending_cmds == 0) {
		/*
		 * Handle DGID getting removed.
		 */
		if (gidinfo->gl_disconnected) {
			mutex_exit(&gidinfo->gl_mutex);
			mutex_exit(&ibdm.ibdm_mutex);

			IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: "
			    "gidinfo %p hot removal", gidinfo);
			ibdm_delete_gidinfo(gidinfo);

			mutex_enter(&ibdm.ibdm_mutex);
			ibdm.ibdm_ngid_probes_in_progress--;
			ibdm_wait_probe_completion();
			mutex_exit(&ibdm.ibdm_mutex);
			return;
		}
		mutex_exit(&gidinfo->gl_mutex);
		mutex_exit(&ibdm.ibdm_mutex);
		ibdm_notify_newgid_iocs(gidinfo);
		mutex_enter(&ibdm.ibdm_mutex);
		mutex_enter(&gidinfo->gl_mutex);

		ibdm.ibdm_ngid_probes_in_progress--;
		ibdm_wait_probe_completion();
	}
	mutex_exit(&gidinfo->gl_mutex);
	mutex_exit(&ibdm.ibdm_mutex);
}


/*
 * ibdm_wait_probe_completion:
 *	wait for probing to complete
 */
static void
ibdm_wait_probe_completion(void)
{
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
	if (ibdm.ibdm_ngid_probes_in_progress) {
		IBTF_DPRINTF_L4("ibdm", "\twait for probe complete");
		ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS;
		while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS)
			cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex);
	}
}


/*
 * ibdm_wait_cisco_probe_completion:
 *	Wait for the reply from the Cisco FC GW switch after a
 *	setclassportinfo request is sent. This wait is performed
 *	per GID.
 */
static void
ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *gidinfo)
{
	ASSERT(MUTEX_HELD(&gidinfo->gl_mutex));
	IBTF_DPRINTF_L4("ibdm", "\twait for cisco probe complete");
	gidinfo->gl_flag |= IBDM_CISCO_PROBE;
	while (gidinfo->gl_flag & IBDM_CISCO_PROBE)
		cv_wait(&gidinfo->gl_probe_cv, &gidinfo->gl_mutex);
}


/*
 * ibdm_wakeup_probe_gid_cv:
 *	wakeup waiting threads (based on ibdm_ngid_probes_in_progress)
 */
static void
ibdm_wakeup_probe_gid_cv(void)
{
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
	if (!ibdm.ibdm_ngid_probes_in_progress) {
		IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup");
		ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
		cv_broadcast(&ibdm.ibdm_probe_cv);
	}

}

/*
 * ibdm_sweep_fabric(reprobe_flag)
 *	Find all possible Managed IOUs and their IOCs that are visible
 *	to the host. The algorithm used is as follows:
 *
 *	Send a "bus walk" request for each port on the host HCA to SA access
 *	SA returns the complete set of GIDs that are reachable from the
 *	source port. This is done in parallel.
 *
 *	Initialize GID state to IBDM_GID_PROBE_NOT_DONE
 *
 *	Sort the GID list and eliminate duplicate GIDs
 *		1) Use DGID for sorting
 *		2) Use PortGuid for sorting
 *			Send SA query to retrieve NodeRecord and
 *			extract PortGuid from that.
 *
 *	Set GID state to IBDM_GID_PROBE_FAILED for all the ports that don't
 *	support DM MADs
 *		Send a "Portinfo" query to get the port capabilities and
 *		then check for DM MAD support
 *
 *	Send "ClassPortInfo" request for all the GIDs in parallel,
 *	set the GID state to IBDM_GET_CLASSPORTINFO and wait on the
 *	cv_signal to complete.
 *
 *	When the DM agent on the remote GID sends back the response, IBMF
 *	invokes the DM callback routine.
 *
 *	If the response is proper, send "IOUnitInfo" request and set
 *	GID state to IBDM_GET_IOUNITINFO.
 *
 *	If the response is proper, send "IocProfileInfo" request to
 *	all the IOCs simultaneously and set GID state to IBDM_GET_IOC_DETAILS.
 *
 *	Send requests to get Service entries simultaneously
 *
 *	Signal the waiting thread when responses for all the commands have
 *	been received.
 *
 *	Set the GID state to IBDM_GID_PROBE_FAILED when an error response
 *	is received during the probing period.
 *
 *	Note:
 *	ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds
 *	keep track of the number of commands in progress at any point of time.
 *	The MAD transaction ID is used to identify a particular GID.
 *	TBD: Consider registering the IBMF receive callback on demand
 *
 *	Note: This routine must be called with ibdm.ibdm_mutex held
 *	TBD: Re-probe the failed GIDs (for certain failures) when the fabric
 *	is swept the next time.
 *
 *	Parameters : If reprobe_flag is set, all IOCs will be reprobed.
 */
static void
ibdm_sweep_fabric(int reprobe_flag)
{
	int			ii;
	int			new_paths = 0;
	uint8_t			niocs;
	taskqid_t		tid;
	ibdm_ioc_info_t		*ioc;
	ibdm_hca_list_t		*hca_list = NULL;
	ibdm_port_attr_t	*port = NULL;
	ibdm_dp_gidinfo_t	*gid_info;

	IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter");
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));

	/*
	 * Check whether a sweep is already in progress. If so, just
	 * wait for the fabric sweep to complete.
	 */
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;
	mutex_exit(&ibdm.ibdm_mutex);

	ibdm_dump_sweep_fabric_timestamp(0);

	/* Rescan the GID list for any removed GIDs for reprobe */
	if (reprobe_flag)
		ibdm_rescan_gidlist(NULL);

	/*
	 * Get the list of all the ports reachable from the local known HCA
	 * ports which are active
	 */
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (ibdm_get_next_port(&hca_list, &port, 1); port;
	    ibdm_get_next_port(&hca_list, &port, 1)) {
		/*
		 * Get PATHS to all the reachable ports from
		 * SGID and update the global ibdm structure.
		 */
		new_paths = ibdm_get_reachable_ports(port, hca_list);
		ibdm.ibdm_ngids += new_paths;
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids;
	mutex_exit(&ibdm.ibdm_mutex);

	/* Send a request to probe GIDs asynchronously. */
	for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
	    gid_info = gid_info->gl_next) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_reprobe_flag = reprobe_flag;
		mutex_exit(&gid_info->gl_mutex);

		/* process newly encountered GIDs */
		tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread,
		    (void *)gid_info, TQ_NOSLEEP);
		IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p"
		    " taskq_id = %x", gid_info, tid);
		/* taskq failed to dispatch; call it directly */
		if (tid == NULL)
			ibdm_probe_gid_thread((void *)gid_info);
	}

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm_wait_probe_completion();

	/*
	 * Update the properties, if reprobe_flag is set.
	 * Skip if gl_reprobe_flag is set; this will be
	 * a re-inserted / new GID, for which notifications
	 * have already been sent.
	 */
	if (reprobe_flag) {
		for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
		    gid_info = gid_info->gl_next) {
			if (gid_info->gl_iou == NULL)
				continue;
			if (gid_info->gl_reprobe_flag) {
				gid_info->gl_reprobe_flag = 0;
				continue;
			}

			niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
			for (ii = 0; ii < niocs; ii++) {
				ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
				if (ioc)
					ibdm_reprobe_update_port_srv(ioc,
					    gid_info);
			}
		}
	} else if (ibdm.ibdm_prev_iou) {
		ibdm_ioc_info_t	*ioc_list;

		/*
		 * Get the list of IOCs which have changed.
		 * If any IOCs have changed, notify IB nexus.
		 */
		ibdm.ibdm_prev_iou = 0;
		ioc_list = ibdm_handle_prev_iou();
		if (ioc_list) {
			if (ibdm.ibdm_ibnex_callback != NULL) {
				(*ibdm.ibdm_ibnex_callback)(
				    (void *)ioc_list,
				    IBDM_EVENT_IOC_PROP_UPDATE);
			}
		}
	}

	ibdm_dump_sweep_fabric_timestamp(1);

	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT");
}


/*
 * ibdm_is_cisco:
 *	Check if this is a Cisco device or not.
 */
static boolean_t
ibdm_is_cisco(ib_guid_t guid)
{
	if ((guid >> IBDM_OUI_GUID_SHIFT) == IBDM_CISCO_COMPANY_ID)
		return (B_TRUE);
	return (B_FALSE);
}
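
/*
 * Note: a node/port GUID is an EUI-64 value whose most significant 24 bits
 * carry the vendor OUI, so the shift by IBDM_OUI_GUID_SHIFT above isolates
 * the company id that is compared against IBDM_CISCO_COMPANY_ID.
 */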

/*
 * ibdm_is_cisco_switch:
 *	Check if this switch is a Cisco switch or not.
 *	Note that if this switch has already been activated,
 *	ibdm_is_cisco_switch() returns B_FALSE so that it is not
 *	activated again.
 */
static boolean_t
ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *gid_info)
{
	int company_id, device_id;
	ASSERT(gid_info != 0);
	ASSERT(MUTEX_HELD(&gid_info->gl_mutex));

	/*
	 * If this switch is already activated, don't re-activate it.
	 */
	if (gid_info->gl_flag & IBDM_CISCO_PROBE_DONE)
		return (B_FALSE);

	/*
	 * Check if this switch is a Cisco FC GW or not.
	 * Use the node guid (the OUI part) instead of the vendor id
	 * since the vendor id is zero in practice.
	 */
	company_id = gid_info->gl_nodeguid >> IBDM_OUI_GUID_SHIFT;
	device_id = gid_info->gl_devid;

	if (company_id == IBDM_CISCO_COMPANY_ID &&
	    device_id == IBDM_CISCO_DEVICE_ID)
		return (B_TRUE);
	return (B_FALSE);
}


/*
 * ibdm_probe_gid_thread:
 *	thread that does the actual work for sweeping the fabric
 *	for a given GID
 */
static void
ibdm_probe_gid_thread(void *args)
{
	int			reprobe_flag;
	ib_guid_t		node_guid;
	ib_guid_t		port_guid;
	ibdm_dp_gidinfo_t	*gid_info;

	gid_info = (ibdm_dp_gidinfo_t *)args;
	reprobe_flag = gid_info->gl_reprobe_flag;
	IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d",
	    gid_info, reprobe_flag);
	ASSERT(gid_info != NULL);
	ASSERT(gid_info->gl_pending_cmds == 0);

	if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE &&
	    reprobe_flag == 0) {
		/*
		 * This GID may have already been probed. Send
		 * in a CLP to check whether the IOUnitInfo changed.
		 * Explicitly set gl_reprobe_flag to 0 so that
		 * IBnex is not notified on completion
		 */
		if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) {
			IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: "
			    "get new IOCs information");
			mutex_enter(&gid_info->gl_mutex);
			gid_info->gl_pending_cmds++;
			gid_info->gl_state = IBDM_GET_IOUNITINFO;
			gid_info->gl_reprobe_flag = 0;
			mutex_exit(&gid_info->gl_mutex);
			if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) {
				mutex_enter(&gid_info->gl_mutex);
				--gid_info->gl_pending_cmds;
				mutex_exit(&gid_info->gl_mutex);
				mutex_enter(&ibdm.ibdm_mutex);
				--ibdm.ibdm_ngid_probes_in_progress;
				ibdm_wakeup_probe_gid_cv();
				mutex_exit(&ibdm.ibdm_mutex);
			}
		} else {
			mutex_enter(&ibdm.ibdm_mutex);
			--ibdm.ibdm_ngid_probes_in_progress;
			ibdm_wakeup_probe_gid_cv();
			mutex_exit(&ibdm.ibdm_mutex);
		}
		return;
	} else if (reprobe_flag && gid_info->gl_state ==
	    IBDM_GID_PROBING_COMPLETE) {
		/*
		 * Reprobe all IOCs for the GID which has completed
		 * probe. Skip other port GIDs to same IOU.
		 * Explicitly set gl_reprobe_flag to 0 so that
		 * IBnex is not notified on completion
		 */
		ibdm_ioc_info_t	*ioc_info;
		uint8_t		niocs, ii;

		ASSERT(gid_info->gl_iou);
		mutex_enter(&gid_info->gl_mutex);
		niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
		gid_info->gl_state = IBDM_GET_IOC_DETAILS;
		gid_info->gl_pending_cmds += niocs;
		gid_info->gl_reprobe_flag = 0;
		mutex_exit(&gid_info->gl_mutex);
		for (ii = 0; ii < niocs; ii++) {
			uchar_t			slot_info;
			ib_dm_io_unitinfo_t	*giou_info;

			/*
			 * Check whether an IOC is present in the slot.
			 * A series of nibbles (in the field
			 * iou_ctrl_list) represents the slots in the
			 * IOU.
			 * Byte format: 76543210
			 * Bits 0-3 of the first byte represent Slot 2,
			 * bits 4-7 of the first byte represent slot 1,
			 * bits 0-3 of the second byte represent slot 4
			 * and so on.
			 * Each 4-bit nibble has the following meaning
			 * 0x0 : IOC not installed
			 * 0x1 : IOC is present
			 * 0xf : Slot does not exist
			 * and all other values are reserved.
			 */
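			/*
			 * Illustrative example (not taken from real IOU
			 * data): if iou_ctrl_list[0] were 0x1f, the high
			 * nibble (0x1) would mean slot 1 holds an IOC and
			 * the low nibble (0xf) would mean slot 2 does not
			 * exist. The even/odd test below selects the high
			 * or low nibble for slot (ii + 1).
			 */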
			ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
			giou_info = &gid_info->gl_iou->iou_info;
			slot_info = giou_info->iou_ctrl_list[(ii/2)];
			if ((ii % 2) == 0)
				slot_info = (slot_info >> 4);

			if ((slot_info & 0xf) != 1) {
				ioc_info->ioc_state =
				    IBDM_IOC_STATE_PROBE_FAILED;
				ibdm_gid_decr_pending(gid_info);
				continue;
			}

			if (ibdm_send_ioc_profile(gid_info, ii) !=
			    IBDM_SUCCESS) {
				ibdm_gid_decr_pending(gid_info);
			}
		}

		return;
	} else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) {
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/*
	 * Check whether the destination GID supports DM agents. If
	 * not, stop probing the GID and continue with the next GID
	 * in the list.
	 */
	if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/* Get the nodeguid and portguid of the port */
	if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid,
	    &node_guid, &port_guid) != IBDM_SUCCESS) {
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/*
	 * Check whether we already know about this NodeGuid.
	 * If so, do not probe the GID and continue with the
	 * next GID in the gid list. Set the GID state to
	 * probing done.
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	gid_info->gl_nodeguid = node_guid;
	gid_info->gl_portguid = port_guid;
	if (ibdm_check_dest_nodeguid(gid_info) != NULL) {
		mutex_exit(&ibdm.ibdm_mutex);
		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_SKIPPED;
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);
		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}
	ibdm_add_to_gl_gid(gid_info, gid_info);
	mutex_exit(&ibdm.ibdm_mutex);

	/*
	 * New or reinserted GID : Enable notification to IBnex
	 */
	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_reprobe_flag = 1;

	/*
	 * A Cisco FC GW needs special handling to get IOUnitInfo.
	 */
	if (ibdm_is_cisco_switch(gid_info)) {
		gid_info->gl_pending_cmds++;
		gid_info->gl_state = IBDM_SET_CLASSPORTINFO;
		mutex_exit(&gid_info->gl_mutex);

		if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) {
			mutex_enter(&gid_info->gl_mutex);
			gid_info->gl_state = IBDM_GID_PROBING_FAILED;
			--gid_info->gl_pending_cmds;
			mutex_exit(&gid_info->gl_mutex);

			/* free the hca_list on this gid_info */
			ibdm_delete_glhca_list(gid_info);

			mutex_enter(&ibdm.ibdm_mutex);
			--ibdm.ibdm_ngid_probes_in_progress;
			ibdm_wakeup_probe_gid_cv();
			mutex_exit(&ibdm.ibdm_mutex);

			return;
		}

		mutex_enter(&gid_info->gl_mutex);
		ibdm_wait_cisco_probe_completion(gid_info);

		IBTF_DPRINTF_L4("ibdm", "\tibdm_probe_gid_thread: "
		    "CISCO Wakeup signal received");
	}

	/* move on to the 'GET_CLASSPORTINFO' stage */
	gid_info->gl_pending_cmds++;
	gid_info->gl_state = IBDM_GET_CLASSPORTINFO;
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L3(ibdm_string, "\tibdm_probe_gid_thread: "
	    "%d: gid_info %p gl_state %d pending_cmds %d",
	    __LINE__, gid_info, gid_info->gl_state,
	    gid_info->gl_pending_cmds);

	/*
	 * Send ClassPortInfo request to the GID asynchronously.
	 */
	if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) {

		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		--gid_info->gl_pending_cmds;
		mutex_exit(&gid_info->gl_mutex);

		/* free the hca_list on this gid_info */
		ibdm_delete_glhca_list(gid_info);

		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);

		return;
	}
}


/*
 * ibdm_check_dest_nodeguid
 *	Searches for the NodeGuid in the GID list
 *	Returns the matching gid_info if found, otherwise NULL
 *
 *	This function is called to handle new GIDs discovered
 *	during device sweep / probe or for GID_AVAILABLE event.
 *
 *	Parameter :
 *		gid_info	GID to check
 */
static ibdm_dp_gidinfo_t *
ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info)
{
	ibdm_dp_gidinfo_t	*gid_list;
	ibdm_gid_t		*tmp;

	IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid");

	gid_list = ibdm.ibdm_dp_gidlist_head;
	while (gid_list) {
		if ((gid_list != gid_info) &&
		    (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) {
			IBTF_DPRINTF_L4("ibdm",
			    "\tcheck_dest_nodeguid: NodeGuid is present");

			/* Add to gid_list */
			tmp = kmem_zalloc(sizeof (ibdm_gid_t),
			    KM_SLEEP);
			tmp->gid_dgid_hi = gid_info->gl_dgid_hi;
			tmp->gid_dgid_lo = gid_info->gl_dgid_lo;
			tmp->gid_next = gid_list->gl_gid;
			gid_list->gl_gid = tmp;
			gid_list->gl_ngids++;
			return (gid_list);
		}

		gid_list = gid_list->gl_next;
	}

	return (NULL);
}

/*
 * ibdm_is_dev_mgt_supported
 *	Get the PortInfo attribute (SA Query)
 *	Check the "CapabilityMask" field in the Portinfo.
 *	Return IBDM_SUCCESS if DM MADs are supported by the port
 *	(bit 19 set), otherwise IBDM_FAILURE
 */
static int
ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info)
{
	int			ret;
	size_t			length = 0;
	sa_portinfo_record_t	req, *resp = NULL;
	ibmf_saa_access_args_t	qargs;

	bzero(&req, sizeof (sa_portinfo_record_t));
	req.EndportLID = gid_info->gl_dlid;

	qargs.sq_attr_id	= SA_PORTINFORECORD_ATTRID;
	qargs.sq_access_type	= IBMF_SAA_RETRIEVE;
	qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID;
	qargs.sq_template	= &req;
	qargs.sq_callback	= NULL;
	qargs.sq_callback_arg	= NULL;

	ret = ibmf_sa_access(gid_info->gl_sa_hdl,
	    &qargs, 0, &length, (void **)&resp);

	if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
		IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:"
		    "failed to get PORTINFO attribute %d", ret);
		return (IBDM_FAILURE);
	}

	if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) {
		IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!");
		ret = IBDM_SUCCESS;
	} else {
		IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: "
		    "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask);
		ret = IBDM_FAILURE;
	}
	kmem_free(resp, length);
	return (ret);
}


/*
 * ibdm_get_node_port_guids()
 *	Get the NodeInfoRecord of the port
 *	Return NodeGuid and PortGUID via the out arguments; the caller
 *	saves them in the GID list structure.
 *	Return IBDM_SUCCESS/IBDM_FAILURE
 */
static int
ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid,
    ib_guid_t *node_guid, ib_guid_t *port_guid)
{
	int			ret;
	size_t			length = 0;
	sa_node_record_t	req, *resp = NULL;
	ibmf_saa_access_args_t	qargs;

	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids");

	bzero(&req, sizeof (sa_node_record_t));
	req.LID = dlid;

	qargs.sq_attr_id	= SA_NODERECORD_ATTRID;
	qargs.sq_access_type	= IBMF_SAA_RETRIEVE;
	qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID;
	qargs.sq_template	= &req;
	qargs.sq_callback	= NULL;
	qargs.sq_callback_arg	= NULL;

	ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp);
	if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
		IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:"
		    " SA Retrieve Failed: %d", ret);
		return (IBDM_FAILURE);
	}
	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port"
	    "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID);

	*node_guid = resp->NodeInfo.NodeGUID;
	*port_guid = resp->NodeInfo.PortGUID;
	kmem_free(resp, length);
	return (IBDM_SUCCESS);
}
1798 * 1799 * Returns Number paths 1800 */ 1801 static int 1802 ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca) 1803 { 1804 uint_t ii, jj, nrecs; 1805 uint_t npaths = 0; 1806 size_t length; 1807 ib_gid_t sgid; 1808 ibdm_pkey_tbl_t *pkey_tbl; 1809 sa_path_record_t *result; 1810 sa_path_record_t *precp; 1811 ibdm_dp_gidinfo_t *gid_info; 1812 1813 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 1814 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo); 1815 1816 sgid.gid_prefix = portinfo->pa_sn_prefix; 1817 sgid.gid_guid = portinfo->pa_port_guid; 1818 1819 /* get reversible paths */ 1820 if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl, 1821 sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result) 1822 != IBMF_SUCCESS) { 1823 IBTF_DPRINTF_L2("ibdm", 1824 "\tget_reachable_ports: Getting path records failed"); 1825 return (0); 1826 } 1827 1828 for (ii = 0; ii < nrecs; ii++) { 1829 sa_node_record_t *nrec; 1830 size_t length; 1831 1832 precp = &result[ii]; 1833 if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid, 1834 precp->DGID.gid_prefix)) != NULL) { 1835 IBTF_DPRINTF_L5("ibdm", "\tget_reachable_ports: " 1836 "Already exists nrecs %d, ii %d", nrecs, ii); 1837 ibdm_addto_glhcalist(gid_info, hca); 1838 continue; 1839 } 1840 /* 1841 * This is a new GID. Allocate a GID structure and 1842 * initialize the structure 1843 * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0) 1844 * by kmem_zalloc call 1845 */ 1846 gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 1847 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 1848 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL); 1849 gid_info->gl_dgid_hi = precp->DGID.gid_prefix; 1850 gid_info->gl_dgid_lo = precp->DGID.gid_guid; 1851 gid_info->gl_sgid_hi = precp->SGID.gid_prefix; 1852 gid_info->gl_sgid_lo = precp->SGID.gid_guid; 1853 gid_info->gl_p_key = precp->P_Key; 1854 gid_info->gl_sa_hdl = portinfo->pa_sa_hdl; 1855 gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl; 1856 gid_info->gl_slid = precp->SLID; 1857 gid_info->gl_dlid = precp->DLID; 1858 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 1859 << IBDM_GID_TRANSACTIONID_SHIFT; 1860 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 1861 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 1862 << IBDM_GID_TRANSACTIONID_SHIFT; 1863 gid_info->gl_SL = precp->SL; 1864 1865 /* 1866 * get the node record with this guid if the destination 1867 * device is a Cisco one. 1868 */ 1869 if (ibdm_is_cisco(precp->DGID.gid_guid) && 1870 (gid_info->gl_nodeguid == 0 || gid_info->gl_devid == 0) && 1871 ibdm_get_node_record_by_port(portinfo->pa_sa_hdl, 1872 precp->DGID.gid_guid, &nrec, &length) == IBDM_SUCCESS) { 1873 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 1874 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 1875 kmem_free(nrec, length); 1876 } 1877 1878 ibdm_addto_glhcalist(gid_info, hca); 1879 1880 ibdm_dump_path_info(precp); 1881 1882 gid_info->gl_qp_hdl = NULL; 1883 ASSERT(portinfo->pa_pkey_tbl != NULL && 1884 portinfo->pa_npkeys != 0); 1885 1886 for (jj = 0; jj < portinfo->pa_npkeys; jj++) { 1887 pkey_tbl = &portinfo->pa_pkey_tbl[jj]; 1888 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 1889 (pkey_tbl->pt_qp_hdl != NULL)) { 1890 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 1891 break; 1892 } 1893 } 1894 1895 /* 1896 * QP handle for GID not initialized. No matching Pkey 1897 * was found!! ibdm should *not* hit this case. Flag an 1898 * error and drop the GID if ibdm does encounter this. 
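 * (For example, if a path record carries P_Key 0x8002 but the local port's pa_pkey_tbl only has a QP handle plumbed for P_Key 0xFFFF, no alternate QP exists for that partition and the GID is dropped below; the P_Key values are illustrative only.)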
1899 */ 1900 if (gid_info->gl_qp_hdl == NULL) { 1901 IBTF_DPRINTF_L2(ibdm_string, 1902 "\tget_reachable_ports: No matching Pkey"); 1903 ibdm_delete_gidinfo(gid_info); 1904 continue; 1905 } 1906 if (ibdm.ibdm_dp_gidlist_head == NULL) { 1907 ibdm.ibdm_dp_gidlist_head = gid_info; 1908 ibdm.ibdm_dp_gidlist_tail = gid_info; 1909 } else { 1910 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 1911 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 1912 ibdm.ibdm_dp_gidlist_tail = gid_info; 1913 } 1914 npaths++; 1915 } 1916 kmem_free(result, length); 1917 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths); 1918 return (npaths); 1919 } 1920 1921 1922 /* 1923 * ibdm_check_dgid() 1924 * Look in the global list to check whether we know this DGID already 1925 * Return IBDM_GID_PRESENT/IBDM_GID_NOT_PRESENT 1926 */ 1927 static ibdm_dp_gidinfo_t * 1928 ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix) 1929 { 1930 ibdm_dp_gidinfo_t *gid_list; 1931 1932 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 1933 gid_list = gid_list->gl_next) { 1934 if ((guid == gid_list->gl_dgid_lo) && 1935 (prefix == gid_list->gl_dgid_hi)) { 1936 break; 1937 } 1938 } 1939 return (gid_list); 1940 } 1941 1942 1943 /* 1944 * ibdm_find_gid() 1945 * Look in the global list to find a GID entry with matching 1946 * port & node GUID. 1947 * Return pointer to gidinfo if found, else return NULL 1948 */ 1949 static ibdm_dp_gidinfo_t * 1950 ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid) 1951 { 1952 ibdm_dp_gidinfo_t *gid_list; 1953 1954 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n", 1955 nodeguid, portguid); 1956 1957 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 1958 gid_list = gid_list->gl_next) { 1959 if ((portguid == gid_list->gl_portguid) && 1960 (nodeguid == gid_list->gl_nodeguid)) { 1961 break; 1962 } 1963 } 1964 1965 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n", 1966 gid_list); 1967 return (gid_list); 1968 } 1969 1970 1971 /* 1972 * ibdm_set_classportinfo() 1973 * ibdm_set_classportinfo() is a function to activate a Cisco FC GW 1974 * by sending the setClassPortInfo request with the trapLID, trapGID 1975 * and etc. to the gateway since the gateway doesn't provide the IO 1976 * Unit Information othewise. This behavior is the Cisco specific one, 1977 * and this function is called to a Cisco FC GW only. 1978 * Returns IBDM_SUCCESS/IBDM_FAILURE 1979 */ 1980 static int 1981 ibdm_set_classportinfo(ibdm_dp_gidinfo_t *gid_info) 1982 { 1983 ibmf_msg_t *msg; 1984 ib_mad_hdr_t *hdr; 1985 ibdm_timeout_cb_args_t *cb_args; 1986 void *data; 1987 ib_mad_classportinfo_t *cpi; 1988 1989 IBTF_DPRINTF_L4("ibdm", 1990 "\tset_classportinfo: gid info 0x%p", gid_info); 1991 1992 /* 1993 * Send command to set classportinfo attribute. Allocate a IBMF 1994 * packet and initialize the packet. 
1995 */ 1996 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 1997 &msg) != IBMF_SUCCESS) { 1998 IBTF_DPRINTF_L4("ibdm", "\tset_classportinfo: pkt alloc fail"); 1999 return (IBDM_FAILURE); 2000 } 2001 2002 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2003 ibdm_alloc_send_buffers(msg); 2004 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2005 2006 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2007 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2008 msg->im_local_addr.ia_remote_qno = 1; 2009 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2010 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2011 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2012 2013 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2014 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2015 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2016 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2017 hdr->R_Method = IB_DM_DEVMGT_METHOD_SET; 2018 hdr->Status = 0; 2019 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2020 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2021 hdr->AttributeModifier = 0; 2022 2023 data = msg->im_msgbufs_send.im_bufs_cl_data; 2024 cpi = (ib_mad_classportinfo_t *)data; 2025 2026 /* 2027 * Set the classportinfo values to activate this Cisco FC GW. 2028 */ 2029 cpi->TrapGID_hi = h2b64(gid_info->gl_sgid_hi); 2030 cpi->TrapGID_lo = h2b64(gid_info->gl_sgid_lo); 2031 cpi->TrapLID = h2b16(gid_info->gl_slid); 2032 cpi->TrapSL = gid_info->gl_SL; 2033 cpi->TrapP_Key = h2b16(gid_info->gl_p_key); 2034 cpi->TrapQP = h2b32((((ibmf_alt_qp_t *)gid_info->gl_qp_hdl)->isq_qpn)); 2035 cpi->TrapQ_Key = h2b32((((ibmf_alt_qp_t *) 2036 gid_info->gl_qp_hdl)->isq_qkey)); 2037 2038 cb_args = &gid_info->gl_cpi_cb_args; 2039 cb_args->cb_gid_info = gid_info; 2040 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2041 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2042 2043 mutex_enter(&gid_info->gl_mutex); 2044 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2045 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2046 mutex_exit(&gid_info->gl_mutex); 2047 2048 IBTF_DPRINTF_L5("ibdm", "\tset_classportinfo: " 2049 "timeout id %x", gid_info->gl_timeout_id); 2050 2051 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2052 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2053 IBTF_DPRINTF_L2("ibdm", 2054 "\tset_classportinfo: ibmf send failed"); 2055 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2056 } 2057 2058 return (IBDM_SUCCESS); 2059 } 2060 2061 2062 /* 2063 * ibdm_send_classportinfo() 2064 * Send classportinfo request. When the request is completed 2065 * IBMF calls ibdm_classportinfo_cb routine to inform about 2066 * the completion. 2067 * Returns IBDM_SUCCESS/IBDM_FAILURE 2068 */ 2069 static int 2070 ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info) 2071 { 2072 ibmf_msg_t *msg; 2073 ib_mad_hdr_t *hdr; 2074 ibdm_timeout_cb_args_t *cb_args; 2075 2076 IBTF_DPRINTF_L4("ibdm", 2077 "\tsend_classportinfo: gid info 0x%p", gid_info); 2078 2079 /* 2080 * Send command to get classportinfo attribute. Allocate a IBMF 2081 * packet and initialize the packet. 
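 * The request follows the pattern used for every DM MAD in this file: fill in the timeout callback args, arm timeout(ibdm_pkt_timeout_hdlr, cb_args, ...) before posting, then send with ibmf_msg_transport(); the response handler (or the retry/timeout path) later cancels the timer with untimeout().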
2082 */ 2083 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2084 &msg) != IBMF_SUCCESS) { 2085 IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail"); 2086 return (IBDM_FAILURE); 2087 } 2088 2089 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2090 ibdm_alloc_send_buffers(msg); 2091 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2092 2093 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2094 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2095 msg->im_local_addr.ia_remote_qno = 1; 2096 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2097 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2098 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2099 2100 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2101 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2102 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2103 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2104 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2105 hdr->Status = 0; 2106 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2107 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2108 hdr->AttributeModifier = 0; 2109 2110 cb_args = &gid_info->gl_cpi_cb_args; 2111 cb_args->cb_gid_info = gid_info; 2112 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2113 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2114 2115 mutex_enter(&gid_info->gl_mutex); 2116 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2117 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2118 mutex_exit(&gid_info->gl_mutex); 2119 2120 IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: " 2121 "timeout id %x", gid_info->gl_timeout_id); 2122 2123 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2124 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2125 IBTF_DPRINTF_L2("ibdm", 2126 "\tsend_classportinfo: ibmf send failed"); 2127 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2128 } 2129 2130 return (IBDM_SUCCESS); 2131 } 2132 2133 2134 /* 2135 * ibdm_handle_setclassportinfo() 2136 * Invoked by the IBMF when setClassPortInfo request is completed. 2137 */ 2138 static void 2139 ibdm_handle_setclassportinfo(ibmf_handle_t ibmf_hdl, 2140 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2141 { 2142 void *data; 2143 timeout_id_t timeout_id; 2144 ib_mad_classportinfo_t *cpi; 2145 2146 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo:ibmf hdl " 2147 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2148 2149 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2150 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo: " 2151 "Not a ClassPortInfo resp"); 2152 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2153 return; 2154 } 2155 2156 /* 2157 * Verify whether timeout handler is created/active. 
2158 * If created/ active, cancel the timeout handler 2159 */ 2160 mutex_enter(&gid_info->gl_mutex); 2161 if (gid_info->gl_state != IBDM_SET_CLASSPORTINFO) { 2162 IBTF_DPRINTF_L2("ibdm", "\thandle_setclassportinfo:DUP resp"); 2163 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2164 mutex_exit(&gid_info->gl_mutex); 2165 return; 2166 } 2167 ibdm_bump_transactionID(gid_info); 2168 2169 gid_info->gl_iou_cb_args.cb_req_type = 0; 2170 if (gid_info->gl_timeout_id) { 2171 timeout_id = gid_info->gl_timeout_id; 2172 mutex_exit(&gid_info->gl_mutex); 2173 IBTF_DPRINTF_L5("ibdm", "handle_setlassportinfo: " 2174 "gl_timeout_id = 0x%x", timeout_id); 2175 if (untimeout(timeout_id) == -1) { 2176 IBTF_DPRINTF_L2("ibdm", "handle_setclassportinfo: " 2177 "untimeout gl_timeout_id failed"); 2178 } 2179 mutex_enter(&gid_info->gl_mutex); 2180 gid_info->gl_timeout_id = 0; 2181 } 2182 mutex_exit(&gid_info->gl_mutex); 2183 2184 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2185 cpi = (ib_mad_classportinfo_t *)data; 2186 2187 ibdm_dump_classportinfo(cpi); 2188 } 2189 2190 2191 /* 2192 * ibdm_handle_classportinfo() 2193 * Invoked by the IBMF when the classportinfo request is completed. 2194 */ 2195 static void 2196 ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl, 2197 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2198 { 2199 void *data; 2200 timeout_id_t timeout_id; 2201 ib_mad_hdr_t *hdr; 2202 ib_mad_classportinfo_t *cpi; 2203 2204 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl " 2205 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2206 2207 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2208 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: " 2209 "Not a ClassPortInfo resp"); 2210 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2211 return; 2212 } 2213 2214 /* 2215 * Verify whether timeout handler is created/active. 2216 * If created/ active, cancel the timeout handler 2217 */ 2218 mutex_enter(&gid_info->gl_mutex); 2219 ibdm_bump_transactionID(gid_info); 2220 if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) { 2221 IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp"); 2222 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2223 mutex_exit(&gid_info->gl_mutex); 2224 return; 2225 } 2226 gid_info->gl_iou_cb_args.cb_req_type = 0; 2227 if (gid_info->gl_timeout_id) { 2228 timeout_id = gid_info->gl_timeout_id; 2229 mutex_exit(&gid_info->gl_mutex); 2230 IBTF_DPRINTF_L5("ibdm", "handle_ioclassportinfo: " 2231 "gl_timeout_id = 0x%x", timeout_id); 2232 if (untimeout(timeout_id) == -1) { 2233 IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: " 2234 "untimeout gl_timeout_id failed"); 2235 } 2236 mutex_enter(&gid_info->gl_mutex); 2237 gid_info->gl_timeout_id = 0; 2238 } 2239 gid_info->gl_state = IBDM_GET_IOUNITINFO; 2240 gid_info->gl_pending_cmds++; 2241 mutex_exit(&gid_info->gl_mutex); 2242 2243 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2244 cpi = (ib_mad_classportinfo_t *)data; 2245 2246 /* 2247 * Cache the "RespTimeValue" and redirection information in the 2248 * global gid list data structure. This cached information will 2249 * be used to send any further requests to the GID. 2250 */ 2251 gid_info->gl_resp_timeout = 2252 (b2h32(cpi->RespTimeValue) & 0x1F); 2253 2254 gid_info->gl_redirected = ((IBDM_IN_IBMFMSG_STATUS(msg) & 2255 MAD_STATUS_REDIRECT_REQUIRED) ? 
B_TRUE: B_FALSE); 2256 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 2257 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 2258 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 2259 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 2260 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 2261 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 2262 gid_info->gl_redirectSL = cpi->RedirectSL; 2263 2264 ibdm_dump_classportinfo(cpi); 2265 2266 /* 2267 * Send IOUnitInfo request 2268 * Reuse previously allocated IBMF packet for sending ClassPortInfo 2269 * Check whether DM agent on the remote node requested redirection 2270 * If so, send the request to the redirect DGID/DLID/PKEY/QP. 2271 */ 2272 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2273 ibdm_alloc_send_buffers(msg); 2274 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2275 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2276 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2277 2278 if (gid_info->gl_redirected == B_TRUE) { 2279 if (gid_info->gl_redirect_dlid != 0) { 2280 msg->im_local_addr.ia_remote_lid = 2281 gid_info->gl_redirect_dlid; 2282 } 2283 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 2284 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 2285 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 2286 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 2287 } else { 2288 msg->im_local_addr.ia_remote_qno = 1; 2289 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2290 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2291 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2292 } 2293 2294 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2295 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2296 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2297 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2298 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2299 hdr->Status = 0; 2300 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2301 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2302 hdr->AttributeModifier = 0; 2303 2304 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2305 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2306 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2307 2308 mutex_enter(&gid_info->gl_mutex); 2309 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2310 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2311 mutex_exit(&gid_info->gl_mutex); 2312 2313 IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:" 2314 "timeout %x", gid_info->gl_timeout_id); 2315 2316 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL, 2317 ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) { 2318 IBTF_DPRINTF_L2("ibdm", 2319 "\thandle_classportinfo: msg transport failed"); 2320 ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args); 2321 } 2322 (*flag) |= IBDM_IBMF_PKT_REUSED; 2323 } 2324 2325 2326 /* 2327 * ibdm_send_iounitinfo: 2328 * Sends a DM request to get IOU unitinfo. 2329 */ 2330 static int 2331 ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info) 2332 { 2333 ibmf_msg_t *msg; 2334 ib_mad_hdr_t *hdr; 2335 2336 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info); 2337 2338 /* 2339 * Send command to get iounitinfo attribute. Allocate a IBMF 2340 * packet and initialize the packet. 
2341 */ 2342 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) != 2343 IBMF_SUCCESS) { 2344 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail"); 2345 return (IBDM_FAILURE); 2346 } 2347 2348 mutex_enter(&gid_info->gl_mutex); 2349 ibdm_bump_transactionID(gid_info); 2350 mutex_exit(&gid_info->gl_mutex); 2351 2352 2353 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2354 ibdm_alloc_send_buffers(msg); 2355 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2356 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2357 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2358 msg->im_local_addr.ia_remote_qno = 1; 2359 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2360 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2361 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2362 2363 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2364 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2365 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2366 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2367 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2368 hdr->Status = 0; 2369 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2370 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2371 hdr->AttributeModifier = 0; 2372 2373 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2374 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2375 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2376 2377 mutex_enter(&gid_info->gl_mutex); 2378 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2379 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2380 mutex_exit(&gid_info->gl_mutex); 2381 2382 IBTF_DPRINTF_L5("ibdm", "send_iouunitinfo:" 2383 "timeout %x", gid_info->gl_timeout_id); 2384 2385 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 2386 NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != 2387 IBMF_SUCCESS) { 2388 IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed"); 2389 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, 2390 msg, &gid_info->gl_iou_cb_args); 2391 } 2392 return (IBDM_SUCCESS); 2393 } 2394 2395 /* 2396 * ibdm_handle_iounitinfo() 2397 * Invoked by the IBMF when IO Unitinfo request is completed. 
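 * In outline: validate the response and cancel the pending timeout, compare the reported iou_changeid against the cached copy, rebuild gl_iou if it has changed, and then send one IOCControllerProfile request for each populated controller slot.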
2398 */ 2399 static void 2400 ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl, 2401 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2402 { 2403 int ii, first = B_TRUE; 2404 int num_iocs; 2405 size_t size; 2406 uchar_t slot_info; 2407 timeout_id_t timeout_id; 2408 ib_mad_hdr_t *hdr; 2409 ibdm_ioc_info_t *ioc_info; 2410 ib_dm_io_unitinfo_t *iou_info; 2411 ib_dm_io_unitinfo_t *giou_info; 2412 ibdm_timeout_cb_args_t *cb_args; 2413 2414 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo:" 2415 " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info); 2416 2417 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) { 2418 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: " 2419 "Unexpected response"); 2420 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2421 return; 2422 } 2423 2424 mutex_enter(&gid_info->gl_mutex); 2425 if (gid_info->gl_state != IBDM_GET_IOUNITINFO) { 2426 IBTF_DPRINTF_L4("ibdm", 2427 "\thandle_iounitinfo: DUP resp"); 2428 mutex_exit(&gid_info->gl_mutex); 2429 (*flag) = IBDM_IBMF_PKT_DUP_RESP; 2430 return; 2431 } 2432 gid_info->gl_iou_cb_args.cb_req_type = 0; 2433 if (gid_info->gl_timeout_id) { 2434 timeout_id = gid_info->gl_timeout_id; 2435 mutex_exit(&gid_info->gl_mutex); 2436 IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: " 2437 "gl_timeout_id = 0x%x", timeout_id); 2438 if (untimeout(timeout_id) == -1) { 2439 IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: " 2440 "untimeout gl_timeout_id failed"); 2441 } 2442 mutex_enter(&gid_info->gl_mutex); 2443 gid_info->gl_timeout_id = 0; 2444 } 2445 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 2446 2447 iou_info = IBDM_IN_IBMFMSG2IOU(msg); 2448 ibdm_dump_iounitinfo(iou_info); 2449 num_iocs = iou_info->iou_num_ctrl_slots; 2450 /* 2451 * check if number of IOCs reported is zero? if yes, return. 2452 * when num_iocs are reported zero internal IOC database needs 2453 * to be updated. To ensure that save the number of IOCs in 2454 * the new field "gl_num_iocs". Use a new field instead of 2455 * "giou_info->iou_num_ctrl_slots" as that would prevent 2456 * an unnecessary kmem_alloc/kmem_free when num_iocs is 0. 2457 */ 2458 if (num_iocs == 0 && gid_info->gl_num_iocs == 0) { 2459 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's"); 2460 mutex_exit(&gid_info->gl_mutex); 2461 return; 2462 } 2463 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs); 2464 2465 /* 2466 * if there is an existing gl_iou (IOU has been probed before) 2467 * check if the "iou_changeid" is same as saved entry in 2468 * "giou_info->iou_changeid". 2469 * (note: this logic can prevent IOC enumeration if a given 2470 * vendor doesn't support setting iou_changeid field for its IOU) 2471 * 2472 * if there is an existing gl_iou and iou_changeid has changed : 2473 * free up existing gl_iou info and its related structures. 2474 * reallocate gl_iou info all over again. 2475 * if we donot free this up; then this leads to memory leaks 2476 */ 2477 if (gid_info->gl_iou) { 2478 giou_info = &gid_info->gl_iou->iou_info; 2479 if (b2h16(iou_info->iou_changeid) == 2480 giou_info->iou_changeid) { 2481 IBTF_DPRINTF_L3("ibdm", 2482 "\thandle_iounitinfo: no IOCs changed"); 2483 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 2484 mutex_exit(&gid_info->gl_mutex); 2485 return; 2486 } 2487 2488 /* 2489 * Store the iou info as prev_iou to be used after 2490 * sweep is done. 
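 * For example, if the cached gl_iou was built when the IOU reported iou_changeid 0x0007 and this response carries 0x0008, the old gl_iou is parked in gl_prev_iou (and ibdm_prev_iou is flagged) so it can be reconciled after the sweep, while a fresh gl_iou is allocated below.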
2491 */ 2492 ASSERT(gid_info->gl_prev_iou == NULL); 2493 IBTF_DPRINTF_L4(ibdm_string, 2494 "\thandle_iounitinfo: setting gl_prev_iou %p", 2495 gid_info->gl_prev_iou); 2496 gid_info->gl_prev_iou = gid_info->gl_iou; 2497 ibdm.ibdm_prev_iou = 1; 2498 gid_info->gl_iou = NULL; 2499 } 2500 2501 size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t); 2502 gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP); 2503 giou_info = &gid_info->gl_iou->iou_info; 2504 gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *) 2505 ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t)); 2506 2507 giou_info->iou_num_ctrl_slots = gid_info->gl_num_iocs = num_iocs; 2508 giou_info->iou_flag = iou_info->iou_flag; 2509 bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128); 2510 giou_info->iou_changeid = b2h16(iou_info->iou_changeid); 2511 gid_info->gl_pending_cmds++; /* for diag code */ 2512 mutex_exit(&gid_info->gl_mutex); 2513 2514 if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) { 2515 mutex_enter(&gid_info->gl_mutex); 2516 gid_info->gl_pending_cmds--; 2517 mutex_exit(&gid_info->gl_mutex); 2518 } 2519 /* 2520 * Parallelize getting IOC controller profiles from here. 2521 * Allocate IBMF packets and send commands to get IOC profile for 2522 * each IOC present on the IOU. 2523 */ 2524 for (ii = 0; ii < num_iocs; ii++) { 2525 /* 2526 * Check whether IOC is present in the slot 2527 * Series of nibbles (in the field iou_ctrl_list) represents 2528 * a slot in the IOU. 2529 * Byte format: 76543210 2530 * Bits 0-3 of first byte represent Slot 2 2531 * bits 4-7 of first byte represent slot 1, 2532 * bits 0-3 of second byte represent slot 4 and so on 2533 * Each 4-bit nibble has the following meaning 2534 * 0x0 : IOC not installed 2535 * 0x1 : IOC is present 2536 * 0xf : Slot does not exist 2537 * and all other values are reserved. 2538 */ 2539 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 2540 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 2541 if ((ii % 2) == 0) 2542 slot_info = (slot_info >> 4); 2543 2544 if ((slot_info & 0xf) != 1) { 2545 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2546 "No IOC is present in the slot = %d", ii); 2547 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 2548 continue; 2549 } 2550 2551 mutex_enter(&gid_info->gl_mutex); 2552 ibdm_bump_transactionID(gid_info); 2553 mutex_exit(&gid_info->gl_mutex); 2554 2555 /* 2556 * Re use the already allocated packet (for IOUnitinfo) to 2557 * send the first IOC controller attribute. 
Allocate new 2558 * IBMF packets for the rest of the IOC's 2559 */ 2560 if (first != B_TRUE) { 2561 msg = NULL; 2562 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2563 &msg) != IBMF_SUCCESS) { 2564 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2565 "IBMF packet allocation failed"); 2566 continue; 2567 } 2568 2569 } 2570 2571 /* allocate send buffers for all messages */ 2572 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2573 ibdm_alloc_send_buffers(msg); 2574 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2575 2576 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2577 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2578 if (gid_info->gl_redirected == B_TRUE) { 2579 if (gid_info->gl_redirect_dlid != 0) { 2580 msg->im_local_addr.ia_remote_lid = 2581 gid_info->gl_redirect_dlid; 2582 } 2583 msg->im_local_addr.ia_remote_qno = 2584 gid_info->gl_redirect_QP; 2585 msg->im_local_addr.ia_p_key = 2586 gid_info->gl_redirect_pkey; 2587 msg->im_local_addr.ia_q_key = 2588 gid_info->gl_redirect_qkey; 2589 msg->im_local_addr.ia_service_level = 2590 gid_info->gl_redirectSL; 2591 } else { 2592 msg->im_local_addr.ia_remote_qno = 1; 2593 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2594 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2595 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2596 } 2597 2598 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2599 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2600 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2601 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2602 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2603 hdr->Status = 0; 2604 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2605 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 2606 hdr->AttributeModifier = h2b32(ii + 1); 2607 2608 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_INVALID; 2609 cb_args = &ioc_info->ioc_cb_args; 2610 cb_args->cb_gid_info = gid_info; 2611 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2612 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 2613 cb_args->cb_ioc_num = ii; 2614 2615 mutex_enter(&gid_info->gl_mutex); 2616 gid_info->gl_pending_cmds++; /* for diag code */ 2617 2618 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2619 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2620 mutex_exit(&gid_info->gl_mutex); 2621 2622 IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:" 2623 "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii); 2624 2625 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2626 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2627 IBTF_DPRINTF_L2("ibdm", 2628 "\thandle_iounitinfo: msg transport failed"); 2629 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2630 } 2631 (*flag) |= IBDM_IBMF_PKT_REUSED; 2632 first = B_FALSE; 2633 gid_info->gl_iou->iou_niocs_probe_in_progress++; 2634 } 2635 } 2636 2637 2638 /* 2639 * ibdm_handle_ioc_profile() 2640 * Invoked by the IBMF when the IOCControllerProfile request 2641 * gets completed 2642 */ 2643 static void 2644 ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl, 2645 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2646 { 2647 int first = B_TRUE, reprobe = 0; 2648 uint_t ii, ioc_no, srv_start; 2649 uint_t nserv_entries; 2650 timeout_id_t timeout_id; 2651 ib_mad_hdr_t *hdr; 2652 ibdm_ioc_info_t *ioc_info; 2653 ibdm_timeout_cb_args_t *cb_args; 2654 ib_dm_ioc_ctrl_profile_t *ioc, *gioc; 2655 2656 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2657 " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2658 2659 ioc = IBDM_IN_IBMFMSG2IOC(msg); 2660 /* 2661 * Check whether we know this IOC already 2662 * This 
will return NULL if a reprobe is in progress, in which 2663 * case IBDM_IOC_STATE_REPROBE_PROGRESS will be set. 2664 * Do not hold mutexes here. 2665 */ 2666 if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) { 2667 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2668 "IOC guid %llx is present", ioc->ioc_guid); 2669 return; 2670 } 2671 ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2672 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1); 2673 2674 /* Make sure that IOC index is within the valid range */ 2675 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 2676 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: " 2677 "IOC index Out of range, index %d", ioc_no); 2678 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2679 return; 2680 } 2681 ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1]; 2682 ioc_info->ioc_iou_info = gid_info->gl_iou; 2683 2684 mutex_enter(&gid_info->gl_mutex); 2685 if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) { 2686 reprobe = 1; 2687 ioc_info->ioc_prev_serv = ioc_info->ioc_serv; 2688 ioc_info->ioc_serv = NULL; 2689 ioc_info->ioc_prev_serv_cnt = 2690 ioc_info->ioc_profile.ioc_service_entries; 2691 } else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) { 2692 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response" 2693 "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state); 2694 mutex_exit(&gid_info->gl_mutex); 2695 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2696 return; 2697 } 2698 ioc_info->ioc_cb_args.cb_req_type = 0; 2699 if (ioc_info->ioc_timeout_id) { 2700 timeout_id = ioc_info->ioc_timeout_id; 2701 ioc_info->ioc_timeout_id = 0; 2702 mutex_exit(&gid_info->gl_mutex); 2703 IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: " 2704 "ioc_timeout_id = 0x%x", timeout_id); 2705 if (untimeout(timeout_id) == -1) { 2706 IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: " 2707 "untimeout ioc_timeout_id failed"); 2708 } 2709 mutex_enter(&gid_info->gl_mutex); 2710 } 2711 2712 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS; 2713 if (reprobe == 0) { 2714 ioc_info->ioc_iou_guid = gid_info->gl_nodeguid; 2715 ioc_info->ioc_nodeguid = gid_info->gl_nodeguid; 2716 } 2717 2718 /* 2719 * Save all the IOC information in the global structures. 2720 * Note that the wire format is big endian and the Sparc processor is 2721 * also big endian, so there is no need to convert the data fields. 2722 * The conversion routines used below are therefore no-ops on Sparc 2723 * machines, whereas they byte-swap on little endian 2724 * machines such as Intel processors.
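 * For example, a 16-bit field whose wire bytes are 0x12 0x34 already reads as 0x1234 on a big endian host, so b2h16() is a no-op there; on a little endian host the same bytes read as 0x3412 until b2h16() swaps them back to 0x1234.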
2725 */ 2726 gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile; 2727 2728 /* 2729 * Restrict updates to only port GIDs and service entries during reprobe 2730 */ 2731 if (reprobe == 0) { 2732 gioc->ioc_guid = b2h64(ioc->ioc_guid); 2733 gioc->ioc_vendorid = 2734 ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK) 2735 >> IB_DM_VENDORID_SHIFT); 2736 gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid); 2737 gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver); 2738 gioc->ioc_subsys_vendorid = 2739 ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK) 2740 >> IB_DM_VENDORID_SHIFT); 2741 gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id); 2742 gioc->ioc_io_class = b2h16(ioc->ioc_io_class); 2743 gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass); 2744 gioc->ioc_protocol = b2h16(ioc->ioc_protocol); 2745 gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver); 2746 gioc->ioc_send_msg_qdepth = 2747 b2h16(ioc->ioc_send_msg_qdepth); 2748 gioc->ioc_rdma_read_qdepth = 2749 b2h16(ioc->ioc_rdma_read_qdepth); 2750 gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz); 2751 gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz); 2752 gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask; 2753 bcopy(ioc->ioc_id_string, gioc->ioc_id_string, 2754 IB_DM_IOC_ID_STRING_LEN); 2755 2756 ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode; 2757 ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid; 2758 ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK & 2759 gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE; 2760 2761 if (ioc_info->ioc_diagdeviceid == B_TRUE) { 2762 gid_info->gl_pending_cmds++; 2763 IBTF_DPRINTF_L3(ibdm_string, 2764 "\tibdm_handle_ioc_profile: " 2765 "%d: gid_info %p gl_state %d pending_cmds %d", 2766 __LINE__, gid_info, gid_info->gl_state, 2767 gid_info->gl_pending_cmds); 2768 } 2769 } 2770 gioc->ioc_service_entries = ioc->ioc_service_entries; 2771 mutex_exit(&gid_info->gl_mutex); 2772 2773 ibdm_dump_ioc_profile(gioc); 2774 2775 if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) { 2776 if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) { 2777 mutex_enter(&gid_info->gl_mutex); 2778 gid_info->gl_pending_cmds--; 2779 mutex_exit(&gid_info->gl_mutex); 2780 } 2781 } 2782 ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc( 2783 (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)), 2784 KM_SLEEP); 2785 2786 /* 2787 * In one single request, the maximum number of service entries that 2788 * can be obtained is 4. If the number of service entries is more than 2789 * four, calculate the number of requests needed and send them in parallel.
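 * For example, an IOC reporting 6 service entries is fetched with two requests, one for entries 0-3 and one for entries 4-5. The attribute modifier built by ibdm_fill_srv_attr_mod() is decoded in ibdm_handle_srventry_mad() as (IOC number << 16) | (last entry << 8) | (first entry), so the second request for IOC number 1 would carry 0x00010504.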
2790 */ 2791 nserv_entries = ioc->ioc_service_entries; 2792 ii = 0; 2793 while (nserv_entries) { 2794 mutex_enter(&gid_info->gl_mutex); 2795 gid_info->gl_pending_cmds++; 2796 ibdm_bump_transactionID(gid_info); 2797 mutex_exit(&gid_info->gl_mutex); 2798 2799 if (first != B_TRUE) { 2800 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2801 &msg) != IBMF_SUCCESS) { 2802 continue; 2803 } 2804 2805 } 2806 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2807 ibdm_alloc_send_buffers(msg); 2808 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2809 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2810 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2811 if (gid_info->gl_redirected == B_TRUE) { 2812 if (gid_info->gl_redirect_dlid != 0) { 2813 msg->im_local_addr.ia_remote_lid = 2814 gid_info->gl_redirect_dlid; 2815 } 2816 msg->im_local_addr.ia_remote_qno = 2817 gid_info->gl_redirect_QP; 2818 msg->im_local_addr.ia_p_key = 2819 gid_info->gl_redirect_pkey; 2820 msg->im_local_addr.ia_q_key = 2821 gid_info->gl_redirect_qkey; 2822 msg->im_local_addr.ia_service_level = 2823 gid_info->gl_redirectSL; 2824 } else { 2825 msg->im_local_addr.ia_remote_qno = 1; 2826 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2827 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2828 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2829 } 2830 2831 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2832 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2833 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2834 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2835 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2836 hdr->Status = 0; 2837 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2838 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 2839 2840 srv_start = ii * 4; 2841 cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args; 2842 cb_args->cb_gid_info = gid_info; 2843 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2844 cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS; 2845 cb_args->cb_srvents_start = srv_start; 2846 cb_args->cb_ioc_num = ioc_no - 1; 2847 2848 if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) { 2849 nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ; 2850 cb_args->cb_srvents_end = (cb_args->cb_srvents_start + 2851 IBDM_MAX_SERV_ENTRIES_PER_REQ - 1); 2852 } else { 2853 cb_args->cb_srvents_end = 2854 (cb_args->cb_srvents_start + nserv_entries - 1); 2855 nserv_entries = 0; 2856 } 2857 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 2858 ibdm_fill_srv_attr_mod(hdr, cb_args); 2859 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 2860 2861 mutex_enter(&gid_info->gl_mutex); 2862 ioc_info->ioc_serv[srv_start].se_timeout_id = timeout( 2863 ibdm_pkt_timeout_hdlr, cb_args, 2864 IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2865 mutex_exit(&gid_info->gl_mutex); 2866 2867 IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:" 2868 "timeout %x, ioc %d srv %d", 2869 ioc_info->ioc_serv[srv_start].se_timeout_id, 2870 ioc_no - 1, srv_start); 2871 2872 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2873 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2874 IBTF_DPRINTF_L2("ibdm", 2875 "\thandle_ioc_profile: msg send failed"); 2876 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2877 } 2878 (*flag) |= IBDM_IBMF_PKT_REUSED; 2879 first = B_FALSE; 2880 ii++; 2881 } 2882 } 2883 2884 2885 /* 2886 * ibdm_handle_srventry_mad() 2887 */ 2888 static void 2889 ibdm_handle_srventry_mad(ibmf_msg_t *msg, 2890 ibdm_dp_gidinfo_t *gid_info, int *flag) 2891 { 2892 uint_t ii, ioc_no, attrmod; 2893 uint_t nentries, start, end; 2894 timeout_id_t timeout_id; 2895 ib_dm_srv_t *srv_ents; 2896 
ibdm_ioc_info_t *ioc_info; 2897 ibdm_srvents_info_t *gsrv_ents; 2898 2899 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:" 2900 " IBMF msg %p gid info %p", msg, gid_info); 2901 2902 srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg); 2903 /* 2904 * Get the start and end index of the service entries 2905 * Upper 16 bits identify the IOC 2906 * Lower 16 bits specify the range of service entries 2907 * LSB specifies (Big endian) end of the range 2908 * MSB specifies (Big endian) start of the range 2909 */ 2910 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2911 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 2912 end = ((attrmod >> 8) & IBDM_8_BIT_MASK); 2913 start = (attrmod & IBDM_8_BIT_MASK); 2914 2915 /* Make sure that IOC index is with the valid range */ 2916 if ((ioc_no < 1) | 2917 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) { 2918 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2919 "IOC index Out of range, index %d", ioc_no); 2920 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2921 return; 2922 } 2923 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 2924 2925 /* 2926 * Make sure that the "start" and "end" service indexes are 2927 * with in the valid range 2928 */ 2929 nentries = ioc_info->ioc_profile.ioc_service_entries; 2930 if ((start > end) | (start >= nentries) | (end >= nentries)) { 2931 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2932 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries); 2933 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2934 return; 2935 } 2936 gsrv_ents = &ioc_info->ioc_serv[start]; 2937 mutex_enter(&gid_info->gl_mutex); 2938 if (gsrv_ents->se_state != IBDM_SE_INVALID) { 2939 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2940 "already known, ioc %d, srv %d, se_state %x", 2941 ioc_no - 1, start, gsrv_ents->se_state); 2942 mutex_exit(&gid_info->gl_mutex); 2943 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2944 return; 2945 } 2946 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0; 2947 if (ioc_info->ioc_serv[start].se_timeout_id) { 2948 IBTF_DPRINTF_L2("ibdm", 2949 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start); 2950 timeout_id = ioc_info->ioc_serv[start].se_timeout_id; 2951 ioc_info->ioc_serv[start].se_timeout_id = 0; 2952 mutex_exit(&gid_info->gl_mutex); 2953 IBTF_DPRINTF_L5("ibdm", "handle_srverntry_mad: " 2954 "se_timeout_id = 0x%x", timeout_id); 2955 if (untimeout(timeout_id) == -1) { 2956 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: " 2957 "untimeout se_timeout_id failed"); 2958 } 2959 mutex_enter(&gid_info->gl_mutex); 2960 } 2961 2962 gsrv_ents->se_state = IBDM_SE_VALID; 2963 mutex_exit(&gid_info->gl_mutex); 2964 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) { 2965 gsrv_ents->se_attr.srv_id = b2h64(srv_ents->srv_id); 2966 bcopy(srv_ents->srv_name, 2967 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN); 2968 ibdm_dump_service_entries(&gsrv_ents->se_attr); 2969 } 2970 } 2971 2972 2973 /* 2974 * ibdm_get_diagcode: 2975 * Send request to get IOU/IOC diag code 2976 * Returns IBDM_SUCCESS/IBDM_FAILURE 2977 */ 2978 static int 2979 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr) 2980 { 2981 ibmf_msg_t *msg; 2982 ib_mad_hdr_t *hdr; 2983 ibdm_ioc_info_t *ioc; 2984 ibdm_timeout_cb_args_t *cb_args; 2985 timeout_id_t *timeout_id; 2986 2987 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d", 2988 gid_info, attr); 2989 2990 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2991 &msg) != IBMF_SUCCESS) { 2992 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail"); 2993 return (IBDM_FAILURE); 2994 } 2995 2996 
_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2997 ibdm_alloc_send_buffers(msg); 2998 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2999 3000 mutex_enter(&gid_info->gl_mutex); 3001 ibdm_bump_transactionID(gid_info); 3002 mutex_exit(&gid_info->gl_mutex); 3003 3004 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3005 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3006 if (gid_info->gl_redirected == B_TRUE) { 3007 if (gid_info->gl_redirect_dlid != 0) { 3008 msg->im_local_addr.ia_remote_lid = 3009 gid_info->gl_redirect_dlid; 3010 } 3011 3012 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3013 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3014 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3015 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3016 } else { 3017 msg->im_local_addr.ia_remote_qno = 1; 3018 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 3019 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3020 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 3021 } 3022 3023 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3024 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3025 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3026 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3027 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3028 hdr->Status = 0; 3029 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3030 3031 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3032 hdr->AttributeModifier = h2b32(attr); 3033 3034 if (attr == 0) { 3035 cb_args = &gid_info->gl_iou_cb_args; 3036 gid_info->gl_iou->iou_dc_valid = B_FALSE; 3037 cb_args->cb_ioc_num = 0; 3038 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE; 3039 timeout_id = &gid_info->gl_timeout_id; 3040 } else { 3041 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1)); 3042 ioc->ioc_dc_valid = B_FALSE; 3043 cb_args = &ioc->ioc_dc_cb_args; 3044 cb_args->cb_ioc_num = attr - 1; 3045 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE; 3046 timeout_id = &ioc->ioc_dc_timeout_id; 3047 } 3048 cb_args->cb_gid_info = gid_info; 3049 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 3050 cb_args->cb_srvents_start = 0; 3051 3052 mutex_enter(&gid_info->gl_mutex); 3053 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3054 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3055 mutex_exit(&gid_info->gl_mutex); 3056 3057 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:" 3058 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num); 3059 3060 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3061 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3062 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed"); 3063 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3064 } 3065 return (IBDM_SUCCESS); 3066 } 3067 3068 /* 3069 * ibdm_handle_diagcode: 3070 * Process the DiagCode MAD response and update local DM 3071 * data structure. 
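 * An attribute modifier of 0 identifies the IOU DiagCode; a non-zero value N identifies the DiagCode of IOC number N (slot index N - 1), matching the encoding used by ibdm_get_diagcode() above.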
3072 */ 3073 static void 3074 ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg, 3075 ibdm_dp_gidinfo_t *gid_info, int *flag) 3076 { 3077 uint16_t attrmod, *diagcode; 3078 ibdm_iou_info_t *iou; 3079 ibdm_ioc_info_t *ioc; 3080 timeout_id_t timeout_id; 3081 ibdm_timeout_cb_args_t *cb_args; 3082 3083 diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data; 3084 3085 mutex_enter(&gid_info->gl_mutex); 3086 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg); 3087 iou = gid_info->gl_iou; 3088 if (attrmod == 0) { 3089 if (iou->iou_dc_valid != B_FALSE) { 3090 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3091 IBTF_DPRINTF_L4("ibdm", 3092 "\thandle_diagcode: Duplicate IOU DiagCode"); 3093 mutex_exit(&gid_info->gl_mutex); 3094 return; 3095 } 3096 cb_args = &gid_info->gl_iou_cb_args; 3097 cb_args->cb_req_type = 0; 3098 iou->iou_diagcode = b2h16(*diagcode); 3099 iou->iou_dc_valid = B_TRUE; 3100 if (gid_info->gl_timeout_id) { 3101 timeout_id = gid_info->gl_timeout_id; 3102 mutex_exit(&gid_info->gl_mutex); 3103 IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: " 3104 "gl_timeout_id = 0x%x", timeout_id); 3105 if (untimeout(timeout_id) == -1) { 3106 IBTF_DPRINTF_L2("ibdm", "handle_diagcode: " 3107 "untimeout gl_timeout_id failed"); 3108 } 3109 mutex_enter(&gid_info->gl_mutex); 3110 gid_info->gl_timeout_id = 0; 3111 } 3112 } else { 3113 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1)); 3114 if (ioc->ioc_dc_valid != B_FALSE) { 3115 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3116 IBTF_DPRINTF_L4("ibdm", 3117 "\thandle_diagcode: Duplicate IOC DiagCode"); 3118 mutex_exit(&gid_info->gl_mutex); 3119 return; 3120 } 3121 cb_args = &ioc->ioc_dc_cb_args; 3122 cb_args->cb_req_type = 0; 3123 ioc->ioc_diagcode = b2h16(*diagcode); 3124 ioc->ioc_dc_valid = B_TRUE; 3125 timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id; 3126 if (timeout_id) { 3127 iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0; 3128 mutex_exit(&gid_info->gl_mutex); 3129 IBTF_DPRINTF_L5("ibdm", "handle_diagcode: " 3130 "timeout_id = 0x%x", timeout_id); 3131 if (untimeout(timeout_id) == -1) { 3132 IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: " 3133 "untimeout ioc_dc_timeout_id failed"); 3134 } 3135 mutex_enter(&gid_info->gl_mutex); 3136 } 3137 } 3138 mutex_exit(&gid_info->gl_mutex); 3139 3140 IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x" 3141 "attrmod : 0x%x", b2h16(*diagcode), attrmod); 3142 } 3143 3144 3145 /* 3146 * ibdm_is_ioc_present() 3147 * Return ibdm_ioc_info_t if IOC guid is found in the global gid list 3148 */ 3149 static ibdm_ioc_info_t * 3150 ibdm_is_ioc_present(ib_guid_t ioc_guid, 3151 ibdm_dp_gidinfo_t *gid_info, int *flag) 3152 { 3153 int ii; 3154 ibdm_ioc_info_t *ioc; 3155 ibdm_dp_gidinfo_t *head; 3156 ib_dm_io_unitinfo_t *iou; 3157 3158 mutex_enter(&ibdm.ibdm_mutex); 3159 head = ibdm.ibdm_dp_gidlist_head; 3160 while (head) { 3161 mutex_enter(&head->gl_mutex); 3162 if (head->gl_iou == NULL) { 3163 mutex_exit(&head->gl_mutex); 3164 head = head->gl_next; 3165 continue; 3166 } 3167 iou = &head->gl_iou->iou_info; 3168 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 3169 ioc = IBDM_GIDINFO2IOCINFO(head, ii); 3170 if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) && 3171 (ioc->ioc_profile.ioc_guid == ioc_guid)) { 3172 if (gid_info == head) { 3173 *flag |= IBDM_IBMF_PKT_DUP_RESP; 3174 } else if (ibdm_check_dgid(head->gl_dgid_lo, 3175 head->gl_dgid_hi) != NULL) { 3176 IBTF_DPRINTF_L4("ibdm", "\tis_ioc_" 3177 "present: gid not present"); 3178 ibdm_add_to_gl_gid(gid_info, head); 3179 } 3180 mutex_exit(&head->gl_mutex); 3181 
mutex_exit(&ibdm.ibdm_mutex); 3182 return (ioc); 3183 } 3184 } 3185 mutex_exit(&head->gl_mutex); 3186 head = head->gl_next; 3187 } 3188 mutex_exit(&ibdm.ibdm_mutex); 3189 return (NULL); 3190 } 3191 3192 3193 /* 3194 * ibdm_ibmf_send_cb() 3195 * IBMF invokes this callback routine after posting the DM MAD to 3196 * the HCA. 3197 */ 3198 /*ARGSUSED*/ 3199 static void 3200 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg) 3201 { 3202 ibdm_dump_ibmf_msg(ibmf_msg, 1); 3203 ibdm_free_send_buffers(ibmf_msg); 3204 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) { 3205 IBTF_DPRINTF_L4("ibdm", 3206 "\tibmf_send_cb: IBMF free msg failed"); 3207 } 3208 } 3209 3210 3211 /* 3212 * ibdm_ibmf_recv_cb() 3213 * Invoked by the IBMF when a response to the one of the DM requests 3214 * is received. 3215 */ 3216 /*ARGSUSED*/ 3217 static void 3218 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3219 { 3220 ibdm_taskq_args_t *taskq_args; 3221 3222 /* 3223 * If the taskq enable is set then dispatch a taskq to process 3224 * the MAD, otherwise just process it on this thread 3225 */ 3226 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) { 3227 ibdm_process_incoming_mad(ibmf_hdl, msg, arg); 3228 return; 3229 } 3230 3231 /* 3232 * create a taskq and dispatch it to process the incoming MAD 3233 */ 3234 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP); 3235 if (taskq_args == NULL) { 3236 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for" 3237 "taskq_args"); 3238 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3239 IBTF_DPRINTF_L4("ibmf_recv_cb", 3240 "\tibmf_recv_cb: IBMF free msg failed"); 3241 } 3242 return; 3243 } 3244 taskq_args->tq_ibmf_handle = ibmf_hdl; 3245 taskq_args->tq_ibmf_msg = msg; 3246 taskq_args->tq_args = arg; 3247 3248 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args, 3249 TQ_NOSLEEP) == 0) { 3250 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed"); 3251 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3252 IBTF_DPRINTF_L4("ibmf_recv_cb", 3253 "\tibmf_recv_cb: IBMF free msg failed"); 3254 } 3255 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3256 return; 3257 } 3258 3259 /* taskq_args are deleted in ibdm_recv_incoming_mad() */ 3260 } 3261 3262 3263 void 3264 ibdm_recv_incoming_mad(void *args) 3265 { 3266 ibdm_taskq_args_t *taskq_args; 3267 3268 taskq_args = (ibdm_taskq_args_t *)args; 3269 3270 IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: " 3271 "Processing incoming MAD via taskq"); 3272 3273 ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle, 3274 taskq_args->tq_ibmf_msg, taskq_args->tq_args); 3275 3276 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3277 } 3278 3279 3280 /* 3281 * Calls ibdm_process_incoming_mad with all function arguments extracted 3282 * from args 3283 */ 3284 /*ARGSUSED*/ 3285 static void 3286 ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3287 { 3288 int flag = 0; 3289 int ret; 3290 uint64_t transaction_id; 3291 ib_mad_hdr_t *hdr; 3292 ibdm_dp_gidinfo_t *gid_info = NULL; 3293 3294 IBTF_DPRINTF_L4("ibdm", 3295 "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg); 3296 ibdm_dump_ibmf_msg(msg, 0); 3297 3298 /* 3299 * IBMF calls this routine for every DM MAD that arrives at this port. 3300 * But we handle only the responses for requests we sent. We drop all 3301 * the DM packets that does not have response bit set in the MAD 3302 * header(this eliminates all the requests sent to this port). 
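 * Each response is matched back to the GID that issued it by masking its TransactionID with IBDM_GID_TRANSACTIONID_MASK and comparing against gl_transactionID (see the lookup loop below); packets that match no GID are dropped.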
3303 * We handle only DM class version 1 MAD's 3304 */ 3305 hdr = IBDM_IN_IBMFMSG_MADHDR(msg); 3306 if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) { 3307 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3308 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3309 "IBMF free msg failed DM request drop it"); 3310 } 3311 return; 3312 } 3313 3314 transaction_id = b2h64(hdr->TransactionID); 3315 3316 mutex_enter(&ibdm.ibdm_mutex); 3317 gid_info = ibdm.ibdm_dp_gidlist_head; 3318 while (gid_info) { 3319 if ((gid_info->gl_transactionID & 3320 IBDM_GID_TRANSACTIONID_MASK) == 3321 (transaction_id & IBDM_GID_TRANSACTIONID_MASK)) 3322 break; 3323 gid_info = gid_info->gl_next; 3324 } 3325 mutex_exit(&ibdm.ibdm_mutex); 3326 3327 if (gid_info == NULL) { 3328 /* Drop the packet */ 3329 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID" 3330 " does not match: 0x%llx", transaction_id); 3331 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3332 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3333 "IBMF free msg failed DM request drop it"); 3334 } 3335 return; 3336 } 3337 3338 /* Handle redirection for all the MAD's, except ClassPortInfo */ 3339 if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) && 3340 (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) { 3341 ret = ibdm_handle_redirection(msg, gid_info, &flag); 3342 if (ret == IBDM_SUCCESS) { 3343 return; 3344 } 3345 } else { 3346 uint_t gl_state; 3347 3348 mutex_enter(&gid_info->gl_mutex); 3349 gl_state = gid_info->gl_state; 3350 mutex_exit(&gid_info->gl_mutex); 3351 3352 switch (gl_state) { 3353 3354 case IBDM_SET_CLASSPORTINFO: 3355 ibdm_handle_setclassportinfo( 3356 ibmf_hdl, msg, gid_info, &flag); 3357 break; 3358 3359 case IBDM_GET_CLASSPORTINFO: 3360 ibdm_handle_classportinfo( 3361 ibmf_hdl, msg, gid_info, &flag); 3362 break; 3363 3364 case IBDM_GET_IOUNITINFO: 3365 ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag); 3366 break; 3367 3368 case IBDM_GET_IOC_DETAILS: 3369 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3370 3371 case IB_DM_ATTR_SERVICE_ENTRIES: 3372 ibdm_handle_srventry_mad(msg, gid_info, &flag); 3373 break; 3374 3375 case IB_DM_ATTR_IOC_CTRL_PROFILE: 3376 ibdm_handle_ioc_profile( 3377 ibmf_hdl, msg, gid_info, &flag); 3378 break; 3379 3380 case IB_DM_ATTR_DIAG_CODE: 3381 ibdm_handle_diagcode(msg, gid_info, &flag); 3382 break; 3383 3384 default: 3385 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3386 "Error state, wrong attribute :-("); 3387 (void) ibmf_free_msg(ibmf_hdl, &msg); 3388 return; 3389 } 3390 break; 3391 default: 3392 IBTF_DPRINTF_L2("ibdm", 3393 "process_incoming_mad: Dropping the packet" 3394 " gl_state %x", gl_state); 3395 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3396 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3397 "IBMF free msg failed DM request drop it"); 3398 } 3399 return; 3400 } 3401 } 3402 3403 if ((flag & IBDM_IBMF_PKT_DUP_RESP) || 3404 (flag & IBDM_IBMF_PKT_UNEXP_RESP)) { 3405 IBTF_DPRINTF_L2("ibdm", 3406 "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag); 3407 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3408 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3409 "IBMF free msg failed DM request drop it"); 3410 } 3411 return; 3412 } 3413 3414 mutex_enter(&gid_info->gl_mutex); 3415 if (gid_info->gl_pending_cmds < 1) { 3416 IBTF_DPRINTF_L2("ibdm", 3417 "\tprocess_incoming_mad: pending commands negative"); 3418 } 3419 if (--gid_info->gl_pending_cmds) { 3420 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: " 3421 "gid_info %p pending cmds %d", 3422 
gid_info, gid_info->gl_pending_cmds); 3423 mutex_exit(&gid_info->gl_mutex); 3424 } else { 3425 uint_t prev_state; 3426 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: Probing DONE"); 3427 prev_state = gid_info->gl_state; 3428 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 3429 if (prev_state == IBDM_SET_CLASSPORTINFO) { 3430 IBTF_DPRINTF_L4("ibdm", 3431 "\tprocess_incoming_mad: " 3432 "Setclassportinfo for Cisco FC GW is done."); 3433 gid_info->gl_flag &= ~IBDM_CISCO_PROBE; 3434 gid_info->gl_flag |= IBDM_CISCO_PROBE_DONE; 3435 mutex_exit(&gid_info->gl_mutex); 3436 cv_broadcast(&gid_info->gl_probe_cv); 3437 } else { 3438 mutex_exit(&gid_info->gl_mutex); 3439 ibdm_notify_newgid_iocs(gid_info); 3440 mutex_enter(&ibdm.ibdm_mutex); 3441 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3442 IBTF_DPRINTF_L4("ibdm", 3443 "\tprocess_incoming_mad: Wakeup"); 3444 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3445 cv_broadcast(&ibdm.ibdm_probe_cv); 3446 } 3447 mutex_exit(&ibdm.ibdm_mutex); 3448 } 3449 } 3450 3451 /* 3452 * Do not deallocate the IBMF packet if atleast one request 3453 * is posted. IBMF packet is reused. 3454 */ 3455 if (!(flag & IBDM_IBMF_PKT_REUSED)) { 3456 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3457 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3458 "IBMF free msg failed DM request drop it"); 3459 } 3460 } 3461 } 3462 3463 3464 /* 3465 * ibdm_verify_mad_status() 3466 * Verifies the MAD status 3467 * Returns IBDM_SUCCESS if status is correct 3468 * Returns IBDM_FAILURE for bogus MAD status 3469 */ 3470 static int 3471 ibdm_verify_mad_status(ib_mad_hdr_t *hdr) 3472 { 3473 int ret = 0; 3474 3475 if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) || 3476 (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) { 3477 return (IBDM_FAILURE); 3478 } 3479 3480 if (b2h16(hdr->Status) == 0) 3481 ret = IBDM_SUCCESS; 3482 else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED) 3483 ret = IBDM_SUCCESS; 3484 else { 3485 IBTF_DPRINTF_L2("ibdm", 3486 "\tverify_mad_status: Status : 0x%x", b2h16(hdr->Status)); 3487 ret = IBDM_FAILURE; 3488 } 3489 return (ret); 3490 } 3491 3492 3493 3494 /* 3495 * ibdm_handle_redirection() 3496 * Returns IBDM_SUCCESS/IBDM_FAILURE 3497 */ 3498 static int 3499 ibdm_handle_redirection(ibmf_msg_t *msg, 3500 ibdm_dp_gidinfo_t *gid_info, int *flag) 3501 { 3502 int attrmod, ioc_no, start; 3503 void *data; 3504 timeout_id_t *timeout_id; 3505 ib_mad_hdr_t *hdr; 3506 ibdm_ioc_info_t *ioc = NULL; 3507 ibdm_timeout_cb_args_t *cb_args; 3508 ib_mad_classportinfo_t *cpi; 3509 3510 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter"); 3511 mutex_enter(&gid_info->gl_mutex); 3512 switch (gid_info->gl_state) { 3513 case IBDM_GET_IOUNITINFO: 3514 cb_args = &gid_info->gl_iou_cb_args; 3515 timeout_id = &gid_info->gl_timeout_id; 3516 break; 3517 3518 case IBDM_GET_IOC_DETAILS: 3519 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3520 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3521 3522 case IB_DM_ATTR_DIAG_CODE: 3523 if (attrmod == 0) { 3524 cb_args = &gid_info->gl_iou_cb_args; 3525 timeout_id = &gid_info->gl_timeout_id; 3526 break; 3527 } 3528 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3529 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3530 "IOC# Out of range %d", attrmod); 3531 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3532 mutex_exit(&gid_info->gl_mutex); 3533 return (IBDM_FAILURE); 3534 } 3535 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3536 cb_args = &ioc->ioc_dc_cb_args; 3537 timeout_id = &ioc->ioc_dc_timeout_id; 3538 break; 3539 3540 case 
IB_DM_ATTR_IOC_CTRL_PROFILE: 3541 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3542 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3543 "IOC# Out of range %d", attrmod); 3544 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3545 mutex_exit(&gid_info->gl_mutex); 3546 return (IBDM_FAILURE); 3547 } 3548 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3549 cb_args = &ioc->ioc_cb_args; 3550 timeout_id = &ioc->ioc_timeout_id; 3551 break; 3552 3553 case IB_DM_ATTR_SERVICE_ENTRIES: 3554 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3555 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 3556 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3557 "IOC# Out of range %d", ioc_no); 3558 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3559 mutex_exit(&gid_info->gl_mutex); 3560 return (IBDM_FAILURE); 3561 } 3562 start = (attrmod & IBDM_8_BIT_MASK); 3563 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3564 if (start > ioc->ioc_profile.ioc_service_entries) { 3565 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3566 " SE index Out of range %d", start); 3567 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3568 mutex_exit(&gid_info->gl_mutex); 3569 return (IBDM_FAILURE); 3570 } 3571 cb_args = &ioc->ioc_serv[start].se_cb_args; 3572 timeout_id = &ioc->ioc_serv[start].se_timeout_id; 3573 break; 3574 3575 default: 3576 /* ERROR State */ 3577 IBTF_DPRINTF_L2("ibdm", 3578 "\thandle_redirection: wrong attribute :-("); 3579 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3580 mutex_exit(&gid_info->gl_mutex); 3581 return (IBDM_FAILURE); 3582 } 3583 break; 3584 default: 3585 /* ERROR State */ 3586 IBTF_DPRINTF_L2("ibdm", 3587 "\thandle_redirection: Error state :-("); 3588 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3589 mutex_exit(&gid_info->gl_mutex); 3590 return (IBDM_FAILURE); 3591 } 3592 if ((*timeout_id) != 0) { 3593 mutex_exit(&gid_info->gl_mutex); 3594 if (untimeout(*timeout_id) == -1) { 3595 IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: " 3596 "untimeout failed %x", *timeout_id); 3597 } else { 3598 IBTF_DPRINTF_L5("ibdm", 3599 "\thandle_redirection: timeout %x", *timeout_id); 3600 } 3601 mutex_enter(&gid_info->gl_mutex); 3602 *timeout_id = 0; 3603 } 3604 3605 data = msg->im_msgbufs_recv.im_bufs_cl_data; 3606 cpi = (ib_mad_classportinfo_t *)data; 3607 3608 gid_info->gl_resp_timeout = 3609 (b2h32(cpi->RespTimeValue) & 0x1F); 3610 3611 gid_info->gl_redirected = B_TRUE; 3612 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 3613 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 3614 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 3615 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 3616 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 3617 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 3618 gid_info->gl_redirectSL = cpi->RedirectSL; 3619 3620 if (gid_info->gl_redirect_dlid != 0) { 3621 msg->im_local_addr.ia_remote_lid = 3622 gid_info->gl_redirect_dlid; 3623 } 3624 ibdm_bump_transactionID(gid_info); 3625 mutex_exit(&gid_info->gl_mutex); 3626 3627 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg, *hdr)) 3628 ibdm_alloc_send_buffers(msg); 3629 3630 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3631 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3632 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3633 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3634 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3635 hdr->Status = 0; 3636 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3637 hdr->AttributeID = 3638 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID; 3639 hdr->AttributeModifier = 3640 
    msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier;
    _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg, *hdr))

    msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
    msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
    msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
    msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;

    mutex_enter(&gid_info->gl_mutex);
    *timeout_id = timeout(ibdm_pkt_timeout_hdlr,
        cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
    mutex_exit(&gid_info->gl_mutex);

    IBTF_DPRINTF_L5("ibdm", "\thandle_redirect:"
        "timeout %x", *timeout_id);

    if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
        msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
        IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:"
            "message transport failed");
        ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
    }
    (*flag) |= IBDM_IBMF_PKT_REUSED;
    IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit");
    return (IBDM_SUCCESS);
}


/*
 * ibdm_pkt_timeout_hdlr
 *	This timeout handler is registered for every IBMF packet that is
 *	sent through the IBMF.  It gets called when no response is received
 *	within the specified time for the packet.  If retries remain for the
 *	command, it is re-sent via ibdm_retry_command(); otherwise the failed
 *	IBMF packet is dropped and the count of pending commands is updated.
 */
static void
ibdm_pkt_timeout_hdlr(void *arg)
{
    ibdm_iou_info_t *iou;
    ibdm_ioc_info_t *ioc;
    ibdm_timeout_cb_args_t *cb_args = arg;
    ibdm_dp_gidinfo_t *gid_info;
    int srv_ent;
    uint_t new_gl_state;

    IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p "
        "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
        cb_args->cb_req_type, cb_args->cb_ioc_num,
        cb_args->cb_srvents_start);

    gid_info = cb_args->cb_gid_info;
    mutex_enter(&gid_info->gl_mutex);

    if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) ||
        (cb_args->cb_req_type == 0)) {

        IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed"
            "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type,
            cb_args->cb_ioc_num, cb_args->cb_srvents_start);

        if (gid_info->gl_timeout_id)
            gid_info->gl_timeout_id = 0;
        mutex_exit(&gid_info->gl_mutex);
        return;
    }
    if (cb_args->cb_retry_count) {
        cb_args->cb_retry_count--;
        /*
         * A new timeout_id is set inside ibdm_retry_command().
         * When the function returns an error, the timeout_id
         * is reset (to zero) in the switch statement below.
         */
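        /*
         * Retry flow, roughly: cb_retry_count is seeded when the request
         * is first built (e.g. from ibdm_dft_retry_cnt in
         * ibdm_send_ioc_profile()).  Each timeout consumes one retry;
         * when ibdm_retry_command() succeeds it re-sends the MAD and arms
         * a fresh timeout, so this handler simply returns.  Only when the
         * retries are exhausted, or the retry itself fails, does the code
         * below mark the request as failed.
         */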
        if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) {
            mutex_exit(&gid_info->gl_mutex);
            return;
        }
        cb_args->cb_retry_count = 0;
    }

    IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p"
        " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
        cb_args->cb_req_type, cb_args->cb_ioc_num,
        cb_args->cb_srvents_start);

    switch (cb_args->cb_req_type) {

    case IBDM_REQ_TYPE_CLASSPORTINFO:
    case IBDM_REQ_TYPE_IOUINFO:
        new_gl_state = IBDM_GID_PROBING_FAILED;
        if (gid_info->gl_timeout_id)
            gid_info->gl_timeout_id = 0;
        break;

    case IBDM_REQ_TYPE_IOCINFO:
        new_gl_state = IBDM_GID_PROBING_COMPLETE;
        iou = gid_info->gl_iou;
        ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
        ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
        if (ioc->ioc_timeout_id)
            ioc->ioc_timeout_id = 0;
        break;

    case IBDM_REQ_TYPE_SRVENTS:
        new_gl_state = IBDM_GID_PROBING_COMPLETE;
        iou = gid_info->gl_iou;
        ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
        ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
        srv_ent = cb_args->cb_srvents_start;
        if (ioc->ioc_serv[srv_ent].se_timeout_id)
            ioc->ioc_serv[srv_ent].se_timeout_id = 0;
        break;

    case IBDM_REQ_TYPE_IOU_DIAGCODE:
        new_gl_state = IBDM_GID_PROBING_COMPLETE;
        iou = gid_info->gl_iou;
        iou->iou_dc_valid = B_FALSE;
        if (gid_info->gl_timeout_id)
            gid_info->gl_timeout_id = 0;
        break;

    case IBDM_REQ_TYPE_IOC_DIAGCODE:
        new_gl_state = IBDM_GID_PROBING_COMPLETE;
        iou = gid_info->gl_iou;
        ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
        ioc->ioc_dc_valid = B_FALSE;
        if (ioc->ioc_dc_timeout_id)
            ioc->ioc_dc_timeout_id = 0;
        break;

    default: /* ERROR State */
        new_gl_state = IBDM_GID_PROBING_FAILED;
        if (gid_info->gl_timeout_id)
            gid_info->gl_timeout_id = 0;
        IBTF_DPRINTF_L2("ibdm",
            "\tpkt_timeout_hdlr: wrong request type.");
        break;
    }

    --gid_info->gl_pending_cmds; /* decrease the counter */

    if (gid_info->gl_pending_cmds == 0) {
        gid_info->gl_state = new_gl_state;
        mutex_exit(&gid_info->gl_mutex);
        /*
         * Delete this gid_info if the gid probe fails.
         */
        if (new_gl_state == IBDM_GID_PROBING_FAILED) {
            ibdm_delete_glhca_list(gid_info);
        }
        ibdm_notify_newgid_iocs(gid_info);
        mutex_enter(&ibdm.ibdm_mutex);
        if (--ibdm.ibdm_ngid_probes_in_progress == 0) {
            IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup");
            ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
            cv_broadcast(&ibdm.ibdm_probe_cv);
        }
        mutex_exit(&ibdm.ibdm_mutex);
    } else {
        /*
         * Reset gl_pending_cmds if an extra timeout occurs, since
         * gl_pending_cmds would otherwise go negative.
         */
        if (gid_info->gl_pending_cmds < 0) {
            gid_info->gl_pending_cmds = 0;
            IBTF_DPRINTF_L2("ibdm",
                "\tpkt_timeout_hdlr: extra timeout request."
                " reset gl_pending_cmds");
        }
        mutex_exit(&gid_info->gl_mutex);
        /*
         * Delete this gid_info if the gid probe fails.
         */
        if (new_gl_state == IBDM_GID_PROBING_FAILED) {
            ibdm_delete_glhca_list(gid_info);
        }
    }
}


/*
 * ibdm_retry_command()
 *	Retries the failed command.
3823 * Returns IBDM_FAILURE/IBDM_SUCCESS 3824 */ 3825 static int 3826 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args) 3827 { 3828 int ret; 3829 ibmf_msg_t *msg; 3830 ib_mad_hdr_t *hdr; 3831 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info; 3832 timeout_id_t *timeout_id; 3833 ibdm_ioc_info_t *ioc; 3834 int ioc_no; 3835 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 3836 3837 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p " 3838 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3839 cb_args->cb_req_type, cb_args->cb_ioc_num, 3840 cb_args->cb_srvents_start); 3841 3842 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg); 3843 3844 3845 /* 3846 * Reset the gid if alloc_msg failed with BAD_HANDLE 3847 * ibdm_reset_gidinfo reinits the gid_info 3848 */ 3849 if (ret == IBMF_BAD_HANDLE) { 3850 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad", 3851 gid_info); 3852 3853 mutex_exit(&gid_info->gl_mutex); 3854 ibdm_reset_gidinfo(gid_info); 3855 mutex_enter(&gid_info->gl_mutex); 3856 3857 /* Retry alloc */ 3858 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, 3859 &msg); 3860 } 3861 3862 if (ret != IBDM_SUCCESS) { 3863 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p " 3864 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3865 cb_args->cb_req_type, cb_args->cb_ioc_num, 3866 cb_args->cb_srvents_start); 3867 return (IBDM_FAILURE); 3868 } 3869 3870 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 3871 ibdm_alloc_send_buffers(msg); 3872 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 3873 3874 ibdm_bump_transactionID(gid_info); 3875 3876 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3877 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3878 if (gid_info->gl_redirected == B_TRUE) { 3879 if (gid_info->gl_redirect_dlid != 0) { 3880 msg->im_local_addr.ia_remote_lid = 3881 gid_info->gl_redirect_dlid; 3882 } 3883 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3884 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3885 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3886 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3887 } else { 3888 msg->im_local_addr.ia_remote_qno = 1; 3889 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 3890 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3891 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 3892 } 3893 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3894 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 3895 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3896 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3897 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3898 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3899 hdr->Status = 0; 3900 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3901 3902 switch (cb_args->cb_req_type) { 3903 case IBDM_REQ_TYPE_CLASSPORTINFO: 3904 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 3905 hdr->AttributeModifier = 0; 3906 timeout_id = &gid_info->gl_timeout_id; 3907 break; 3908 case IBDM_REQ_TYPE_IOUINFO: 3909 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 3910 hdr->AttributeModifier = 0; 3911 timeout_id = &gid_info->gl_timeout_id; 3912 break; 3913 case IBDM_REQ_TYPE_IOCINFO: 3914 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 3915 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3916 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3917 timeout_id = &ioc->ioc_timeout_id; 3918 break; 3919 case IBDM_REQ_TYPE_SRVENTS: 3920 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 3921 ibdm_fill_srv_attr_mod(hdr, cb_args); 
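        /*
         * ibdm_fill_srv_attr_mod() packs the IOC slot and the
         * service-entry window into the AttributeModifier.  Going by the
         * decode in ibdm_handle_redirection() above, the upper 16 bits
         * carry the 1-based IOC slot number and the low byte carries the
         * first service-entry index, roughly:
         *	attr_mod = ((cb_ioc_num + 1) << 16) | srvents_start;
         * (sketch only; the helper builds the actual value.)
         */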
3922 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3923 timeout_id = 3924 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id; 3925 break; 3926 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3927 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3928 hdr->AttributeModifier = 0; 3929 timeout_id = &gid_info->gl_timeout_id; 3930 break; 3931 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3932 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3933 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3934 ioc_no = cb_args->cb_ioc_num; 3935 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no]; 3936 timeout_id = &ioc->ioc_dc_timeout_id; 3937 break; 3938 } 3939 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hdr)) 3940 3941 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3942 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3943 3944 mutex_exit(&gid_info->gl_mutex); 3945 3946 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:" 3947 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num, 3948 cb_args->cb_srvents_start, *timeout_id); 3949 3950 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, 3951 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb, 3952 cb_args, 0) != IBMF_SUCCESS) { 3953 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p " 3954 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3955 cb_args->cb_req_type, cb_args->cb_ioc_num, 3956 cb_args->cb_srvents_start); 3957 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3958 } 3959 mutex_enter(&gid_info->gl_mutex); 3960 return (IBDM_SUCCESS); 3961 } 3962 3963 3964 /* 3965 * ibdm_update_ioc_port_gidlist() 3966 */ 3967 static void 3968 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest, 3969 ibdm_dp_gidinfo_t *gid_info) 3970 { 3971 int ii, ngid_ents; 3972 ibdm_gid_t *tmp; 3973 ibdm_hca_list_t *gid_hca_head, *temp; 3974 ibdm_hca_list_t *ioc_head = NULL; 3975 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 3976 3977 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter"); 3978 3979 ngid_ents = gid_info->gl_ngids; 3980 dest->ioc_nportgids = ngid_ents; 3981 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) * 3982 ngid_ents, KM_SLEEP); 3983 tmp = gid_info->gl_gid; 3984 for (ii = 0; (ii < ngid_ents) && (tmp); ii++) { 3985 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi; 3986 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo; 3987 tmp = tmp->gid_next; 3988 } 3989 3990 gid_hca_head = gid_info->gl_hca_list; 3991 while (gid_hca_head) { 3992 temp = ibdm_dup_hca_attr(gid_hca_head); 3993 temp->hl_next = ioc_head; 3994 ioc_head = temp; 3995 gid_hca_head = gid_hca_head->hl_next; 3996 } 3997 dest->ioc_hca_list = ioc_head; 3998 } 3999 4000 4001 /* 4002 * ibdm_alloc_send_buffers() 4003 * Allocates memory for the IBMF send buffer to send and/or receive 4004 * the Device Management MAD packet. 
 */
static void
ibdm_alloc_send_buffers(ibmf_msg_t *msgp)
{
    msgp->im_msgbufs_send.im_bufs_mad_hdr =
        kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP);

    msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *)
        msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t);
    msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDM_DM_MAD_HDR_SZ;

    msgp->im_msgbufs_send.im_bufs_cl_data =
        ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr + IBDM_DM_MAD_HDR_SZ);
    msgp->im_msgbufs_send.im_bufs_cl_data_len =
        IBDM_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDM_DM_MAD_HDR_SZ;
}


/*
 * ibdm_free_send_buffers()
 *	De-allocates memory for the IBMF send buffer
 */
static void
ibdm_free_send_buffers(ibmf_msg_t *msgp)
{
    if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL)
        kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE);
}

/*
 * ibdm_probe_ioc()
 *	1. Gets the node records for the node GUID.  This detects all the
 *	   ports of the IOU.
 *	2. Selectively probes all the IOCs, given its node GUID.
 *	3. In case of a reprobe, only the IOC being reprobed is sent the
 *	   IOC Controller Profile request, asynchronously.
 */
/*ARGSUSED*/
static void
ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag)
{
    int ii, nrecords;
    size_t nr_len = 0, pi_len = 0;
    ib_gid_t sgid, dgid;
    ibdm_hca_list_t *hca_list = NULL;
    sa_node_record_t *nr, *tmp;
    ibdm_port_attr_t *port = NULL;
    ibdm_dp_gidinfo_t *reprobe_gid, *new_gid, *node_gid;
    ibdm_dp_gidinfo_t *temp_gidinfo;
    ibdm_gid_t *temp_gid;
    sa_portinfo_record_t *pi;

    IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%llx, %llx, %x): Begin",
        nodeguid, ioc_guid, reprobe_flag);

    /* Rescan the GID list for any removed GIDs for reprobe */
    if (reprobe_flag)
        ibdm_rescan_gidlist(&ioc_guid);

    mutex_enter(&ibdm.ibdm_hl_mutex);
    for (ibdm_get_next_port(&hca_list, &port, 1); port;
        ibdm_get_next_port(&hca_list, &port, 1)) {
        reprobe_gid = new_gid = node_gid = NULL;

        nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid);
        if (nr == NULL) {
            IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records");
            continue;
        }
        nrecords = (nr_len / sizeof (sa_node_record_t));
        for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) {
            if ((pi = ibdm_get_portinfo(
                port->pa_sa_hdl, &pi_len, tmp->LID)) == NULL) {
                IBTF_DPRINTF_L4("ibdm",
                    "\tibdm_get_portinfo: no portinfo recs");
                continue;
            }

            /*
             * If Device Management is not supported on
             * this port, skip the rest.
             */
            if (!(pi->PortInfo.CapabilityMask &
                SM_CAP_MASK_IS_DM_SUPPD)) {
                kmem_free(pi, pi_len);
                continue;
            }

            /*
             * For reprobes: Check if the GID is already in
             * the list.
If so, set the state to SKIPPED 4096 */ 4097 if (((temp_gidinfo = ibdm_find_gid(nodeguid, 4098 tmp->NodeInfo.PortGUID)) != NULL) && 4099 temp_gidinfo->gl_state == 4100 IBDM_GID_PROBING_COMPLETE) { 4101 ASSERT(reprobe_gid == NULL); 4102 ibdm_addto_glhcalist(temp_gidinfo, 4103 hca_list); 4104 reprobe_gid = temp_gidinfo; 4105 kmem_free(pi, pi_len); 4106 continue; 4107 } else if (temp_gidinfo != NULL) { 4108 kmem_free(pi, pi_len); 4109 ibdm_addto_glhcalist(temp_gidinfo, 4110 hca_list); 4111 continue; 4112 } 4113 4114 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : " 4115 "create_gid : prefix %llx, guid %llx\n", 4116 pi->PortInfo.GidPrefix, 4117 tmp->NodeInfo.PortGUID); 4118 4119 sgid.gid_prefix = port->pa_sn_prefix; 4120 sgid.gid_guid = port->pa_port_guid; 4121 dgid.gid_prefix = pi->PortInfo.GidPrefix; 4122 dgid.gid_guid = tmp->NodeInfo.PortGUID; 4123 new_gid = ibdm_create_gid_info(port, sgid, 4124 dgid); 4125 if (new_gid == NULL) { 4126 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4127 "create_gid_info failed\n"); 4128 kmem_free(pi, pi_len); 4129 continue; 4130 } 4131 if (node_gid == NULL) { 4132 node_gid = new_gid; 4133 ibdm_add_to_gl_gid(node_gid, node_gid); 4134 } else { 4135 IBTF_DPRINTF_L4("ibdm", 4136 "\tprobe_ioc: new gid"); 4137 temp_gid = kmem_zalloc( 4138 sizeof (ibdm_gid_t), KM_SLEEP); 4139 temp_gid->gid_dgid_hi = 4140 new_gid->gl_dgid_hi; 4141 temp_gid->gid_dgid_lo = 4142 new_gid->gl_dgid_lo; 4143 temp_gid->gid_next = node_gid->gl_gid; 4144 node_gid->gl_gid = temp_gid; 4145 node_gid->gl_ngids++; 4146 } 4147 new_gid->gl_nodeguid = nodeguid; 4148 new_gid->gl_portguid = dgid.gid_guid; 4149 ibdm_addto_glhcalist(new_gid, hca_list); 4150 4151 /* 4152 * Set the state to skipped as all these 4153 * gids point to the same node. 4154 * We (re)probe only one GID below and reset 4155 * state appropriately 4156 */ 4157 new_gid->gl_state = IBDM_GID_PROBING_SKIPPED; 4158 new_gid->gl_devid = (*tmp).NodeInfo.DeviceID; 4159 kmem_free(pi, pi_len); 4160 } 4161 kmem_free(nr, nr_len); 4162 4163 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d " 4164 "reprobe_gid %p new_gid %p node_gid %p", 4165 reprobe_flag, reprobe_gid, new_gid, node_gid); 4166 4167 if (reprobe_flag != 0 && reprobe_gid != NULL) { 4168 int niocs, jj; 4169 ibdm_ioc_info_t *tmp_ioc; 4170 int ioc_matched = 0; 4171 4172 mutex_exit(&ibdm.ibdm_hl_mutex); 4173 mutex_enter(&reprobe_gid->gl_mutex); 4174 reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS; 4175 niocs = 4176 reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots; 4177 reprobe_gid->gl_pending_cmds++; 4178 mutex_exit(&reprobe_gid->gl_mutex); 4179 4180 for (jj = 0; jj < niocs; jj++) { 4181 tmp_ioc = 4182 IBDM_GIDINFO2IOCINFO(reprobe_gid, jj); 4183 if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid) 4184 continue; 4185 4186 ioc_matched = 1; 4187 4188 /* 4189 * Explicitly set gl_reprobe_flag to 0 so that 4190 * IBnex is not notified on completion 4191 */ 4192 mutex_enter(&reprobe_gid->gl_mutex); 4193 reprobe_gid->gl_reprobe_flag = 0; 4194 mutex_exit(&reprobe_gid->gl_mutex); 4195 4196 mutex_enter(&ibdm.ibdm_mutex); 4197 ibdm.ibdm_ngid_probes_in_progress++; 4198 mutex_exit(&ibdm.ibdm_mutex); 4199 if (ibdm_send_ioc_profile(reprobe_gid, jj) != 4200 IBDM_SUCCESS) { 4201 IBTF_DPRINTF_L4("ibdm", 4202 "\tprobe_ioc: " 4203 "send_ioc_profile failed " 4204 "for ioc %d", jj); 4205 ibdm_gid_decr_pending(reprobe_gid); 4206 break; 4207 } 4208 mutex_enter(&ibdm.ibdm_mutex); 4209 ibdm_wait_probe_completion(); 4210 mutex_exit(&ibdm.ibdm_mutex); 4211 break; 4212 } 4213 if (ioc_matched == 0) 4214 
ibdm_gid_decr_pending(reprobe_gid); 4215 else { 4216 mutex_enter(&ibdm.ibdm_hl_mutex); 4217 break; 4218 } 4219 } else if (new_gid != NULL) { 4220 mutex_exit(&ibdm.ibdm_hl_mutex); 4221 node_gid = node_gid ? node_gid : new_gid; 4222 4223 /* 4224 * New or reinserted GID : Enable notification 4225 * to IBnex 4226 */ 4227 mutex_enter(&node_gid->gl_mutex); 4228 node_gid->gl_reprobe_flag = 1; 4229 mutex_exit(&node_gid->gl_mutex); 4230 4231 ibdm_probe_gid(node_gid); 4232 4233 mutex_enter(&ibdm.ibdm_hl_mutex); 4234 } 4235 } 4236 mutex_exit(&ibdm.ibdm_hl_mutex); 4237 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n"); 4238 } 4239 4240 4241 /* 4242 * ibdm_probe_gid() 4243 * Selectively probes the GID 4244 */ 4245 static void 4246 ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info) 4247 { 4248 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:"); 4249 4250 /* 4251 * A Cisco FC GW needs the special handling to get IOUnitInfo. 4252 */ 4253 mutex_enter(&gid_info->gl_mutex); 4254 if (ibdm_is_cisco_switch(gid_info)) { 4255 gid_info->gl_pending_cmds++; 4256 gid_info->gl_state = IBDM_SET_CLASSPORTINFO; 4257 mutex_exit(&gid_info->gl_mutex); 4258 4259 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) { 4260 4261 mutex_enter(&gid_info->gl_mutex); 4262 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4263 --gid_info->gl_pending_cmds; 4264 mutex_exit(&gid_info->gl_mutex); 4265 4266 /* free the hca_list on this gid_info */ 4267 ibdm_delete_glhca_list(gid_info); 4268 gid_info = gid_info->gl_next; 4269 return; 4270 } 4271 4272 mutex_enter(&gid_info->gl_mutex); 4273 ibdm_wait_cisco_probe_completion(gid_info); 4274 4275 IBTF_DPRINTF_L4("ibdm", 4276 "\tprobe_gid: CISCO Wakeup signal received"); 4277 } 4278 4279 /* move on to the 'GET_CLASSPORTINFO' stage */ 4280 gid_info->gl_pending_cmds++; 4281 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 4282 mutex_exit(&gid_info->gl_mutex); 4283 4284 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 4285 4286 mutex_enter(&gid_info->gl_mutex); 4287 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4288 --gid_info->gl_pending_cmds; 4289 mutex_exit(&gid_info->gl_mutex); 4290 4291 /* free the hca_list on this gid_info */ 4292 ibdm_delete_glhca_list(gid_info); 4293 gid_info = gid_info->gl_next; 4294 return; 4295 } 4296 4297 mutex_enter(&ibdm.ibdm_mutex); 4298 ibdm.ibdm_ngid_probes_in_progress++; 4299 gid_info = gid_info->gl_next; 4300 ibdm_wait_probe_completion(); 4301 mutex_exit(&ibdm.ibdm_mutex); 4302 4303 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received"); 4304 } 4305 4306 4307 /* 4308 * ibdm_create_gid_info() 4309 * Allocates a gid_info structure and initializes 4310 * Returns pointer to the structure on success 4311 * and NULL on failure 4312 */ 4313 static ibdm_dp_gidinfo_t * 4314 ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid) 4315 { 4316 uint8_t ii, npaths; 4317 sa_path_record_t *path; 4318 size_t len; 4319 ibdm_pkey_tbl_t *pkey_tbl; 4320 ibdm_dp_gidinfo_t *gid_info = NULL; 4321 int ret; 4322 4323 IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin"); 4324 npaths = 1; 4325 4326 /* query for reversible paths */ 4327 if (port->pa_sa_hdl) 4328 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, 4329 sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, 4330 &len, &path); 4331 else 4332 return (NULL); 4333 4334 if (ret == IBMF_SUCCESS && path) { 4335 ibdm_dump_path_info(path); 4336 4337 gid_info = kmem_zalloc( 4338 sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 4339 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 4340 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, 
NULL); 4341 gid_info->gl_dgid_hi = path->DGID.gid_prefix; 4342 gid_info->gl_dgid_lo = path->DGID.gid_guid; 4343 gid_info->gl_sgid_hi = path->SGID.gid_prefix; 4344 gid_info->gl_sgid_lo = path->SGID.gid_guid; 4345 gid_info->gl_p_key = path->P_Key; 4346 gid_info->gl_sa_hdl = port->pa_sa_hdl; 4347 gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl; 4348 gid_info->gl_slid = path->SLID; 4349 gid_info->gl_dlid = path->DLID; 4350 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 4351 << IBDM_GID_TRANSACTIONID_SHIFT; 4352 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 4353 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 4354 << IBDM_GID_TRANSACTIONID_SHIFT; 4355 gid_info->gl_SL = path->SL; 4356 4357 gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 4358 for (ii = 0; ii < port->pa_npkeys; ii++) { 4359 if (port->pa_pkey_tbl == NULL) 4360 break; 4361 4362 pkey_tbl = &port->pa_pkey_tbl[ii]; 4363 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 4364 (pkey_tbl->pt_qp_hdl != NULL)) { 4365 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 4366 break; 4367 } 4368 } 4369 kmem_free(path, len); 4370 4371 /* 4372 * QP handle for GID not initialized. No matching Pkey 4373 * was found!! ibdm should *not* hit this case. Flag an 4374 * error and drop the GID if ibdm does encounter this. 4375 */ 4376 if (gid_info->gl_qp_hdl == NULL) { 4377 IBTF_DPRINTF_L2(ibdm_string, 4378 "\tcreate_gid_info: No matching Pkey"); 4379 ibdm_delete_gidinfo(gid_info); 4380 return (NULL); 4381 } 4382 4383 ibdm.ibdm_ngids++; 4384 if (ibdm.ibdm_dp_gidlist_head == NULL) { 4385 ibdm.ibdm_dp_gidlist_head = gid_info; 4386 ibdm.ibdm_dp_gidlist_tail = gid_info; 4387 } else { 4388 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 4389 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 4390 ibdm.ibdm_dp_gidlist_tail = gid_info; 4391 } 4392 } 4393 4394 return (gid_info); 4395 } 4396 4397 4398 /* 4399 * ibdm_get_node_records 4400 * Sends a SA query to get the NODE record 4401 * Returns pointer to the sa_node_record_t on success 4402 * and NULL on failure 4403 */ 4404 static sa_node_record_t * 4405 ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid) 4406 { 4407 sa_node_record_t req, *resp = NULL; 4408 ibmf_saa_access_args_t args; 4409 int ret; 4410 4411 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin"); 4412 4413 bzero(&req, sizeof (sa_node_record_t)); 4414 req.NodeInfo.NodeGUID = guid; 4415 4416 args.sq_attr_id = SA_NODERECORD_ATTRID; 4417 args.sq_access_type = IBMF_SAA_RETRIEVE; 4418 args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID; 4419 args.sq_template = &req; 4420 args.sq_callback = NULL; 4421 args.sq_callback_arg = NULL; 4422 4423 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4424 if (ret != IBMF_SUCCESS) { 4425 IBTF_DPRINTF_L2("ibdm", "\tget_node_records:" 4426 " SA Retrieve Failed: %d", ret); 4427 return (NULL); 4428 } 4429 if ((resp == NULL) || (*length == 0)) { 4430 IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records"); 4431 return (NULL); 4432 } 4433 4434 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx " 4435 "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID); 4436 4437 return (resp); 4438 } 4439 4440 4441 /* 4442 * ibdm_get_portinfo() 4443 * Sends a SA query to get the PortInfo record 4444 * Returns pointer to the sa_portinfo_record_t on success 4445 * and NULL on failure 4446 */ 4447 static sa_portinfo_record_t * 4448 ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid) 4449 { 4450 sa_portinfo_record_t req, *resp = NULL; 
4451 ibmf_saa_access_args_t args; 4452 int ret; 4453 4454 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin"); 4455 4456 bzero(&req, sizeof (sa_portinfo_record_t)); 4457 req.EndportLID = lid; 4458 4459 args.sq_attr_id = SA_PORTINFORECORD_ATTRID; 4460 args.sq_access_type = IBMF_SAA_RETRIEVE; 4461 args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 4462 args.sq_template = &req; 4463 args.sq_callback = NULL; 4464 args.sq_callback_arg = NULL; 4465 4466 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4467 if (ret != IBMF_SUCCESS) { 4468 IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:" 4469 " SA Retrieve Failed: 0x%X", ret); 4470 return (NULL); 4471 } 4472 if ((*length == 0) || (resp == NULL)) 4473 return (NULL); 4474 4475 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x", 4476 resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask); 4477 return (resp); 4478 } 4479 4480 4481 /* 4482 * ibdm_ibnex_register_callback 4483 * IB nexus callback routine for HCA attach and detach notification 4484 */ 4485 void 4486 ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback) 4487 { 4488 IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks"); 4489 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4490 ibdm.ibdm_ibnex_callback = ibnex_dm_callback; 4491 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4492 } 4493 4494 4495 /* 4496 * ibdm_ibnex_unregister_callbacks 4497 */ 4498 void 4499 ibdm_ibnex_unregister_callback() 4500 { 4501 IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks"); 4502 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4503 ibdm.ibdm_ibnex_callback = NULL; 4504 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4505 } 4506 4507 4508 /* 4509 * ibdm_ibnex_get_waittime() 4510 * Calculates the wait time based on the last HCA attach time 4511 */ 4512 time_t 4513 ibdm_ibnex_get_waittime(ib_guid_t hca_guid, int *dft_wait) 4514 { 4515 int ii; 4516 time_t temp, wait_time = 0; 4517 ibdm_hca_list_t *hca; 4518 4519 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime hcaguid:%llx" 4520 "\tport settling time %d", hca_guid, *dft_wait); 4521 4522 mutex_enter(&ibdm.ibdm_hl_mutex); 4523 hca = ibdm.ibdm_hca_list_head; 4524 4525 if (hca_guid) { 4526 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4527 if ((hca_guid == hca->hl_hca_guid) && 4528 (hca->hl_nports != hca->hl_nports_active)) { 4529 wait_time = 4530 ddi_get_time() - hca->hl_attach_time; 4531 wait_time = ((wait_time >= *dft_wait) ? 4532 0 : (*dft_wait - wait_time)); 4533 break; 4534 } 4535 hca = hca->hl_next; 4536 } 4537 mutex_exit(&ibdm.ibdm_hl_mutex); 4538 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime %llx", wait_time); 4539 return (wait_time); 4540 } 4541 4542 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4543 if (hca->hl_nports != hca->hl_nports_active) { 4544 temp = ddi_get_time() - hca->hl_attach_time; 4545 temp = ((temp >= *dft_wait) ? 0 : (*dft_wait - temp)); 4546 wait_time = (temp > wait_time) ? 
temp : wait_time; 4547 } 4548 } 4549 mutex_exit(&ibdm.ibdm_hl_mutex); 4550 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime %llx", wait_time); 4551 return (wait_time); 4552 } 4553 4554 4555 /* 4556 * ibdm_ibnex_probe_hcaport 4557 * Probes the presence of HCA port (with HCA dip and port number) 4558 * Returns port attributes structure on SUCCESS 4559 */ 4560 ibdm_port_attr_t * 4561 ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num) 4562 { 4563 int ii, jj; 4564 ibdm_hca_list_t *hca_list; 4565 ibdm_port_attr_t *port_attr; 4566 4567 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:"); 4568 4569 mutex_enter(&ibdm.ibdm_hl_mutex); 4570 hca_list = ibdm.ibdm_hca_list_head; 4571 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4572 if (hca_list->hl_hca_guid == hca_guid) { 4573 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4574 if (hca_list->hl_port_attr[jj].pa_port_num == 4575 port_num) { 4576 break; 4577 } 4578 } 4579 if (jj != hca_list->hl_nports) 4580 break; 4581 } 4582 hca_list = hca_list->hl_next; 4583 } 4584 if (ii == ibdm.ibdm_hca_count) { 4585 IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found"); 4586 mutex_exit(&ibdm.ibdm_hl_mutex); 4587 return (NULL); 4588 } 4589 port_attr = (ibdm_port_attr_t *)kmem_zalloc( 4590 sizeof (ibdm_port_attr_t), KM_SLEEP); 4591 bcopy((char *)&hca_list->hl_port_attr[jj], 4592 port_attr, sizeof (ibdm_port_attr_t)); 4593 ibdm_update_port_attr(port_attr); 4594 4595 mutex_exit(&ibdm.ibdm_hl_mutex); 4596 return (port_attr); 4597 } 4598 4599 4600 /* 4601 * ibdm_ibnex_get_port_attrs 4602 * Scan all HCAs for a matching port_guid. 4603 * Returns "port attributes" structure on success. 4604 */ 4605 ibdm_port_attr_t * 4606 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid) 4607 { 4608 int ii, jj; 4609 ibdm_hca_list_t *hca_list; 4610 ibdm_port_attr_t *port_attr; 4611 4612 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:"); 4613 4614 mutex_enter(&ibdm.ibdm_hl_mutex); 4615 hca_list = ibdm.ibdm_hca_list_head; 4616 4617 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4618 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4619 if (hca_list->hl_port_attr[jj].pa_port_guid == 4620 port_guid) { 4621 break; 4622 } 4623 } 4624 if (jj != hca_list->hl_nports) 4625 break; 4626 hca_list = hca_list->hl_next; 4627 } 4628 4629 if (ii == ibdm.ibdm_hca_count) { 4630 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found"); 4631 mutex_exit(&ibdm.ibdm_hl_mutex); 4632 return (NULL); 4633 } 4634 4635 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t), 4636 KM_SLEEP); 4637 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr, 4638 sizeof (ibdm_port_attr_t)); 4639 ibdm_update_port_attr(port_attr); 4640 4641 mutex_exit(&ibdm.ibdm_hl_mutex); 4642 return (port_attr); 4643 } 4644 4645 4646 /* 4647 * ibdm_ibnex_free_port_attr() 4648 */ 4649 void 4650 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr) 4651 { 4652 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:"); 4653 if (port_attr) { 4654 if (port_attr->pa_pkey_tbl != NULL) { 4655 kmem_free(port_attr->pa_pkey_tbl, 4656 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t))); 4657 } 4658 kmem_free(port_attr, sizeof (ibdm_port_attr_t)); 4659 } 4660 } 4661 4662 4663 /* 4664 * ibdm_ibnex_get_hca_list() 4665 * Returns portinfo for all the port for all the HCA's 4666 */ 4667 void 4668 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count) 4669 { 4670 ibdm_hca_list_t *head = NULL, *temp, *temp1; 4671 int ii; 4672 4673 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:"); 4674 4675 mutex_enter(&ibdm.ibdm_hl_mutex); 4676 temp = 
ibdm.ibdm_hca_list_head;
    for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
        temp1 = ibdm_dup_hca_attr(temp);
        temp1->hl_next = head;
        head = temp1;
        temp = temp->hl_next;
    }
    *count = ibdm.ibdm_hca_count;
    *hca = head;
    mutex_exit(&ibdm.ibdm_hl_mutex);
}


/*
 * ibdm_ibnex_get_hca_info_by_guid()
 */
ibdm_hca_list_t *
ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid)
{
    ibdm_hca_list_t *head = NULL, *hca = NULL;

    IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip");

    mutex_enter(&ibdm.ibdm_hl_mutex);
    head = ibdm.ibdm_hca_list_head;
    while (head) {
        if (head->hl_hca_guid == hca_guid) {
            hca = ibdm_dup_hca_attr(head);
            hca->hl_next = NULL;
            break;
        }
        head = head->hl_next;
    }
    mutex_exit(&ibdm.ibdm_hl_mutex);
    IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip %p", hca);
    return (hca);
}


/*
 * ibdm_dup_hca_attr()
 *	Allocate a new HCA attribute structure, initialize it with the
 *	incoming HCA attributes, and return the allocated structure.
 */
static ibdm_hca_list_t *
ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca)
{
    int len;
    ibdm_hca_list_t *out_hca;

    len = sizeof (ibdm_hca_list_t) +
        (in_hca->hl_nports * sizeof (ibdm_port_attr_t));
    IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len);
    out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP);
    bcopy((char *)in_hca,
        (char *)out_hca, sizeof (ibdm_hca_list_t));
    if (in_hca->hl_nports) {
        out_hca->hl_port_attr = (ibdm_port_attr_t *)
            ((char *)out_hca + sizeof (ibdm_hca_list_t));
        bcopy((char *)in_hca->hl_port_attr,
            (char *)out_hca->hl_port_attr,
            (in_hca->hl_nports * sizeof (ibdm_port_attr_t)));
        for (len = 0; len < out_hca->hl_nports; len++)
            ibdm_update_port_attr(&out_hca->hl_port_attr[len]);
    }
    return (out_hca);
}


/*
 * ibdm_ibnex_free_hca_list()
 *	Free one or more HCA lists
 */
void
ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list)
{
    int ii;
    size_t len;
    ibdm_hca_list_t *temp;
    ibdm_port_attr_t *port;

    IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:");
    ASSERT(hca_list);
    while (hca_list) {
        temp = hca_list;
        hca_list = hca_list->hl_next;
        for (ii = 0; ii < temp->hl_nports; ii++) {
            port = &temp->hl_port_attr[ii];
            len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
            if (len != 0)
                kmem_free(port->pa_pkey_tbl, len);
        }
        len = sizeof (ibdm_hca_list_t) + (temp->hl_nports *
            sizeof (ibdm_port_attr_t));
        kmem_free(temp, len);
    }
}


/*
 * ibdm_ibnex_probe_iocguid()
 *	Probes the IOC on the fabric and returns the IOC information
 *	if present.
Otherwise, NULL is returned 4780 */ 4781 /* ARGSUSED */ 4782 ibdm_ioc_info_t * 4783 ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag) 4784 { 4785 int k; 4786 ibdm_ioc_info_t *ioc_info; 4787 ibdm_dp_gidinfo_t *gid_info; /* used as index and arg */ 4788 timeout_id_t *timeout_id; 4789 4790 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin", 4791 iou, ioc_guid, reprobe_flag); 4792 /* Check whether we know this already */ 4793 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4794 if (ioc_info == NULL) { 4795 mutex_enter(&ibdm.ibdm_mutex); 4796 while (ibdm.ibdm_busy & IBDM_BUSY) 4797 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4798 ibdm.ibdm_busy |= IBDM_BUSY; 4799 mutex_exit(&ibdm.ibdm_mutex); 4800 ibdm_probe_ioc(iou, ioc_guid, 0); 4801 mutex_enter(&ibdm.ibdm_mutex); 4802 ibdm.ibdm_busy &= ~IBDM_BUSY; 4803 cv_broadcast(&ibdm.ibdm_busy_cv); 4804 mutex_exit(&ibdm.ibdm_mutex); 4805 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4806 } else if (reprobe_flag) { /* Handle Reprobe for the IOC */ 4807 ASSERT(gid_info != NULL); 4808 /* Free the ioc_list before reprobe; and cancel any timers */ 4809 mutex_enter(&ibdm.ibdm_mutex); 4810 mutex_enter(&gid_info->gl_mutex); 4811 if (ioc_info->ioc_timeout_id) { 4812 timeout_id = ioc_info->ioc_timeout_id; 4813 ioc_info->ioc_timeout_id = 0; 4814 mutex_exit(&gid_info->gl_mutex); 4815 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4816 "ioc_timeout_id = 0x%x", timeout_id); 4817 if (untimeout(timeout_id) == -1) { 4818 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4819 "untimeout ioc_timeout_id failed"); 4820 } 4821 mutex_enter(&gid_info->gl_mutex); 4822 } 4823 if (ioc_info->ioc_dc_timeout_id) { 4824 timeout_id = ioc_info->ioc_dc_timeout_id; 4825 ioc_info->ioc_dc_timeout_id = 0; 4826 mutex_exit(&gid_info->gl_mutex); 4827 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4828 "ioc_dc_timeout_id = 0x%x", timeout_id); 4829 if (untimeout(timeout_id) == -1) { 4830 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4831 "untimeout ioc_dc_timeout_id failed"); 4832 } 4833 mutex_enter(&gid_info->gl_mutex); 4834 } 4835 for (k = 0; k < ioc_info->ioc_profile.ioc_service_entries; k++) 4836 if (ioc_info->ioc_serv[k].se_timeout_id) { 4837 timeout_id = ioc_info->ioc_serv[k]. 4838 se_timeout_id; 4839 ioc_info->ioc_serv[k].se_timeout_id = 0; 4840 mutex_exit(&gid_info->gl_mutex); 4841 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4842 "ioc_info->ioc_serv[k].se_timeout_id = %x", 4843 k, timeout_id); 4844 if (untimeout(timeout_id) == -1) { 4845 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4846 "untimeout se_timeout_id %d " 4847 "failed", k); 4848 } 4849 mutex_enter(&gid_info->gl_mutex); 4850 } 4851 mutex_exit(&gid_info->gl_mutex); 4852 mutex_exit(&ibdm.ibdm_mutex); 4853 ibdm_ibnex_free_ioc_list(ioc_info); 4854 4855 mutex_enter(&ibdm.ibdm_mutex); 4856 while (ibdm.ibdm_busy & IBDM_BUSY) 4857 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4858 ibdm.ibdm_busy |= IBDM_BUSY; 4859 mutex_exit(&ibdm.ibdm_mutex); 4860 4861 ibdm_probe_ioc(iou, ioc_guid, 1); 4862 4863 /* 4864 * Skip if gl_reprobe_flag is set, this will be 4865 * a re-inserted / new GID, for which notifications 4866 * have already been send. 
4867 */ 4868 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 4869 gid_info = gid_info->gl_next) { 4870 uint8_t ii, niocs; 4871 ibdm_ioc_info_t *ioc; 4872 4873 if (gid_info->gl_iou == NULL) 4874 continue; 4875 4876 if (gid_info->gl_reprobe_flag) { 4877 gid_info->gl_reprobe_flag = 0; 4878 continue; 4879 } 4880 4881 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 4882 for (ii = 0; ii < niocs; ii++) { 4883 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 4884 if (ioc->ioc_profile.ioc_guid == ioc_guid) { 4885 mutex_enter(&ibdm.ibdm_mutex); 4886 ibdm_reprobe_update_port_srv(ioc, 4887 gid_info); 4888 mutex_exit(&ibdm.ibdm_mutex); 4889 } 4890 } 4891 } 4892 mutex_enter(&ibdm.ibdm_mutex); 4893 ibdm.ibdm_busy &= ~IBDM_BUSY; 4894 cv_broadcast(&ibdm.ibdm_busy_cv); 4895 mutex_exit(&ibdm.ibdm_mutex); 4896 4897 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4898 } 4899 return (ioc_info); 4900 } 4901 4902 4903 /* 4904 * ibdm_get_ioc_info_with_gid() 4905 * Returns pointer to ibdm_ioc_info_t if it finds 4906 * matching record for the ioc_guid. Otherwise NULL is returned. 4907 * The pointer to gid_info is set to the second argument in case that 4908 * the non-NULL value returns (and the second argument is not NULL). 4909 * 4910 * Note. use the same strings as "ibnex_get_ioc_info" in 4911 * IBTF_DPRINTF() to keep compatibility. 4912 */ 4913 static ibdm_ioc_info_t * 4914 ibdm_get_ioc_info_with_gid(ib_guid_t ioc_guid, 4915 ibdm_dp_gidinfo_t **gid_info) 4916 { 4917 int ii; 4918 ibdm_ioc_info_t *ioc = NULL, *tmp = NULL; 4919 ibdm_dp_gidinfo_t *gid_list; 4920 ib_dm_io_unitinfo_t *iou; 4921 4922 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid); 4923 4924 mutex_enter(&ibdm.ibdm_mutex); 4925 while (ibdm.ibdm_busy & IBDM_BUSY) 4926 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4927 ibdm.ibdm_busy |= IBDM_BUSY; 4928 4929 if (gid_info) 4930 *gid_info = NULL; /* clear the value of gid_info */ 4931 4932 gid_list = ibdm.ibdm_dp_gidlist_head; 4933 while (gid_list) { 4934 mutex_enter(&gid_list->gl_mutex); 4935 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 4936 mutex_exit(&gid_list->gl_mutex); 4937 gid_list = gid_list->gl_next; 4938 continue; 4939 } 4940 if (gid_list->gl_iou == NULL) { 4941 IBTF_DPRINTF_L2("ibdm", 4942 "\tget_ioc_info: No IOU info"); 4943 mutex_exit(&gid_list->gl_mutex); 4944 gid_list = gid_list->gl_next; 4945 continue; 4946 } 4947 iou = &gid_list->gl_iou->iou_info; 4948 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 4949 tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii); 4950 if ((tmp->ioc_profile.ioc_guid == ioc_guid) && 4951 (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) { 4952 ioc = ibdm_dup_ioc_info(tmp, gid_list); 4953 if (gid_info) 4954 *gid_info = gid_list; /* set this ptr */ 4955 mutex_exit(&gid_list->gl_mutex); 4956 ibdm.ibdm_busy &= ~IBDM_BUSY; 4957 cv_broadcast(&ibdm.ibdm_busy_cv); 4958 mutex_exit(&ibdm.ibdm_mutex); 4959 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End"); 4960 return (ioc); 4961 } 4962 } 4963 if (ii == iou->iou_num_ctrl_slots) 4964 ioc = NULL; 4965 4966 mutex_exit(&gid_list->gl_mutex); 4967 gid_list = gid_list->gl_next; 4968 } 4969 4970 ibdm.ibdm_busy &= ~IBDM_BUSY; 4971 cv_broadcast(&ibdm.ibdm_busy_cv); 4972 mutex_exit(&ibdm.ibdm_mutex); 4973 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End"); 4974 return (ioc); 4975 } 4976 4977 /* 4978 * ibdm_ibnex_get_ioc_info() 4979 * Returns pointer to ibdm_ioc_info_t if it finds 4980 * matching record for the ioc_guid, otherwise NULL 4981 * is returned 4982 * 4983 * Note. 
this is a wrapper function to ibdm_get_ioc_info_with_gid() now. 4984 */ 4985 ibdm_ioc_info_t * 4986 ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid) 4987 { 4988 /* will not use the gid_info pointer, so the second arg is NULL */ 4989 return (ibdm_get_ioc_info_with_gid(ioc_guid, NULL)); 4990 } 4991 4992 /* 4993 * ibdm_ibnex_get_ioc_count() 4994 * Returns number of ibdm_ioc_info_t it finds 4995 */ 4996 int 4997 ibdm_ibnex_get_ioc_count(void) 4998 { 4999 int count = 0, k; 5000 ibdm_ioc_info_t *ioc; 5001 ibdm_dp_gidinfo_t *gid_list; 5002 5003 mutex_enter(&ibdm.ibdm_mutex); 5004 ibdm_sweep_fabric(0); 5005 5006 while (ibdm.ibdm_busy & IBDM_BUSY) 5007 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5008 ibdm.ibdm_busy |= IBDM_BUSY; 5009 5010 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 5011 gid_list = gid_list->gl_next) { 5012 mutex_enter(&gid_list->gl_mutex); 5013 if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) || 5014 (gid_list->gl_iou == NULL)) { 5015 mutex_exit(&gid_list->gl_mutex); 5016 continue; 5017 } 5018 for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots; 5019 k++) { 5020 ioc = IBDM_GIDINFO2IOCINFO(gid_list, k); 5021 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) 5022 ++count; 5023 } 5024 mutex_exit(&gid_list->gl_mutex); 5025 } 5026 ibdm.ibdm_busy &= ~IBDM_BUSY; 5027 cv_broadcast(&ibdm.ibdm_busy_cv); 5028 mutex_exit(&ibdm.ibdm_mutex); 5029 5030 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count); 5031 return (count); 5032 } 5033 5034 5035 /* 5036 * ibdm_ibnex_get_ioc_list() 5037 * Returns information about all the IOCs present on the fabric. 5038 * Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL. 5039 * Does not sweep fabric if DONOT_PROBE is set 5040 */ 5041 ibdm_ioc_info_t * 5042 ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag) 5043 { 5044 int ii; 5045 ibdm_ioc_info_t *ioc_list = NULL, *tmp, *ioc; 5046 ibdm_dp_gidinfo_t *gid_list; 5047 ib_dm_io_unitinfo_t *iou; 5048 5049 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter"); 5050 5051 mutex_enter(&ibdm.ibdm_mutex); 5052 if (list_flag != IBDM_IBNEX_DONOT_PROBE) 5053 ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL); 5054 5055 while (ibdm.ibdm_busy & IBDM_BUSY) 5056 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5057 ibdm.ibdm_busy |= IBDM_BUSY; 5058 5059 gid_list = ibdm.ibdm_dp_gidlist_head; 5060 while (gid_list) { 5061 mutex_enter(&gid_list->gl_mutex); 5062 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 5063 mutex_exit(&gid_list->gl_mutex); 5064 gid_list = gid_list->gl_next; 5065 continue; 5066 } 5067 if (gid_list->gl_iou == NULL) { 5068 IBTF_DPRINTF_L2("ibdm", 5069 "\tget_ioc_list: No IOU info"); 5070 mutex_exit(&gid_list->gl_mutex); 5071 gid_list = gid_list->gl_next; 5072 continue; 5073 } 5074 iou = &gid_list->gl_iou->iou_info; 5075 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 5076 ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii); 5077 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5078 tmp = ibdm_dup_ioc_info(ioc, gid_list); 5079 tmp->ioc_next = ioc_list; 5080 ioc_list = tmp; 5081 } 5082 } 5083 mutex_exit(&gid_list->gl_mutex); 5084 gid_list = gid_list->gl_next; 5085 } 5086 ibdm.ibdm_busy &= ~IBDM_BUSY; 5087 cv_broadcast(&ibdm.ibdm_busy_cv); 5088 mutex_exit(&ibdm.ibdm_mutex); 5089 5090 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End"); 5091 return (ioc_list); 5092 } 5093 5094 /* 5095 * ibdm_dup_ioc_info() 5096 * Duplicate the IOC information and return the IOC 5097 * information. 
 */
static ibdm_ioc_info_t *
ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list)
{
    ibdm_ioc_info_t *out_ioc;
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc));
    ASSERT(MUTEX_HELD(&gid_list->gl_mutex));

    out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP);
    bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t));
    ibdm_update_ioc_port_gidlist(out_ioc, gid_list);
    out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid;
    out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode;

    return (out_ioc);
}


/*
 * ibdm_ibnex_free_ioc_list()
 *	Deallocate memory for the IOC list structure
 */
void
ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc)
{
    ibdm_ioc_info_t *temp;

    IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:");
    while (ioc) {
        temp = ioc;
        ioc = ioc->ioc_next;
        kmem_free(temp->ioc_gid_list,
            (sizeof (ibdm_gid_t) * temp->ioc_nportgids));
        if (temp->ioc_hca_list)
            ibdm_ibnex_free_hca_list(temp->ioc_hca_list);
        kmem_free(temp, sizeof (ibdm_ioc_info_t));
    }
}


/*
 * ibdm_ibnex_update_pkey_tbls
 *	Updates the DM P_Key database.
 *	NOTE: Two cases are handled here: P_Key being added or removed.
 *
 *	Arguments	: NONE
 *	Return Values	: NONE
 */
void
ibdm_ibnex_update_pkey_tbls(void)
{
    int h, pp, pidx;
    uint_t nports;
    uint_t size;
    ib_pkey_t new_pkey;
    ib_pkey_t *orig_pkey;
    ibdm_hca_list_t *hca_list;
    ibdm_port_attr_t *port;
    ibt_hca_portinfo_t *pinfop;

    IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:");

    mutex_enter(&ibdm.ibdm_hl_mutex);
    hca_list = ibdm.ibdm_hca_list_head;

    for (h = 0; h < ibdm.ibdm_hca_count; h++) {

        /* This updates P_Key Tables for all ports of this HCA */
        (void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop,
            &nports, &size);

        /* number of ports shouldn't have changed */
        ASSERT(nports == hca_list->hl_nports);

        for (pp = 0; pp < hca_list->hl_nports; pp++) {
            port = &hca_list->hl_port_attr[pp];

            /*
             * First figure out the P_Keys from IBTL.
             * Three things could have happened:
             *	New P_Keys added
             *	Existing P_Keys removed
             *	Both of the above two
             *
             * Loop through the P_Key indices and check if a
             * given P_Key_Ix matches that of the one seen by
             * IBDM.  If they match, no action is needed.
             *
             * If they don't match:
             * 1. if orig_pkey is invalid and new_pkey is valid
             *	---> add new_pkey to DM database
             * 2. if orig_pkey is valid and new_pkey is invalid
             *	---> remove orig_pkey from DM database
             * 3. if orig_pkey and new_pkey are both valid:
             *	---> remove orig_pkey from DM database
             *	---> add new_pkey to DM database
             * 4. if orig_pkey and new_pkey are both invalid:
             *	---> just update the DM database entry
             */
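            /*
             * For example (case 3 above): if index 'pidx' used to hold
             * P_Key 0x8001 and IBTL now reports 0xFFFF there, the loop
             * below tears down the old IBMF state and sets up the new
             * one, roughly:
             *	(void) ibdm_port_attr_ibmf_fini(port, pidx);
             *	*orig_pkey = new_pkey;
             *	ibdm_port_attr_ibmf_init(port, new_pkey, pp);
             * (P_Key values are illustrative only.)
             */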
            for (pidx = 0; pidx < port->pa_npkeys; pidx++) {
                new_pkey = pinfop[pp].p_pkey_tbl[pidx];
                orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey;

                /* keys match - do nothing */
                if (*orig_pkey == new_pkey)
                    continue;

                if (IBDM_INVALID_PKEY(*orig_pkey) &&
                    !IBDM_INVALID_PKEY(new_pkey)) {
                    /* P_Key was added */
                    IBTF_DPRINTF_L5("ibdm",
                        "\tibnex_update_pkey_tbls: new "
                        "P_Key added = 0x%x", new_pkey);
                    *orig_pkey = new_pkey;
                    ibdm_port_attr_ibmf_init(port,
                        new_pkey, pp);
                } else if (!IBDM_INVALID_PKEY(*orig_pkey) &&
                    IBDM_INVALID_PKEY(new_pkey)) {
                    /* P_Key was removed */
                    IBTF_DPRINTF_L5("ibdm",
                        "\tibnex_update_pkey_tbls: P_Key "
                        "removed = 0x%x", *orig_pkey);
                    *orig_pkey = new_pkey;
                    (void) ibdm_port_attr_ibmf_fini(port,
                        pidx);
                } else if (!IBDM_INVALID_PKEY(*orig_pkey) &&
                    !IBDM_INVALID_PKEY(new_pkey)) {
                    /* P_Key was replaced */
                    IBTF_DPRINTF_L5("ibdm",
                        "\tibnex_update_pkey_tbls: P_Key "
                        "replaced 0x%x with 0x%x",
                        *orig_pkey, new_pkey);
                    (void) ibdm_port_attr_ibmf_fini(port,
                        pidx);
                    *orig_pkey = new_pkey;
                    ibdm_port_attr_ibmf_init(port,
                        new_pkey, pp);
                } else {
                    /*
                     * P_Keys are invalid
                     * set anyway to reflect if
                     * INVALID_FULL was changed to
                     * INVALID_LIMITED or vice-versa.
                     */
                    *orig_pkey = new_pkey;
                } /* end of else */

            } /* loop of p_key index */

        } /* loop of #ports of HCA */

        ibt_free_portinfo(pinfop, size);
        hca_list = hca_list->hl_next;

    } /* loop for all HCAs in the system */

    mutex_exit(&ibdm.ibdm_hl_mutex);
}


/*
 * ibdm_send_ioc_profile()
 *	Send the IOC Controller Profile request.  When the request completes,
 *	IBMF calls the ibdm_process_incoming_mad routine to inform about
 *	the completion.
 */
static int
ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no)
{
    ibmf_msg_t *msg;
    ib_mad_hdr_t *hdr;
    ibdm_ioc_info_t *ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]);
    ibdm_timeout_cb_args_t *cb_args;

    IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: "
        "gid info 0x%p, ioc_no = %d", gid_info, ioc_no);

    /*
     * Send command to get the IOC profile.
     * Allocate an IBMF packet and initialize the packet.
5279 */ 5280 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 5281 &msg) != IBMF_SUCCESS) { 5282 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: pkt alloc fail"); 5283 return (IBDM_FAILURE); 5284 } 5285 5286 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 5287 ibdm_alloc_send_buffers(msg); 5288 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 5289 5290 mutex_enter(&gid_info->gl_mutex); 5291 ibdm_bump_transactionID(gid_info); 5292 mutex_exit(&gid_info->gl_mutex); 5293 5294 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 5295 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 5296 if (gid_info->gl_redirected == B_TRUE) { 5297 if (gid_info->gl_redirect_dlid != 0) { 5298 msg->im_local_addr.ia_remote_lid = 5299 gid_info->gl_redirect_dlid; 5300 } 5301 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 5302 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 5303 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 5304 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 5305 } else { 5306 msg->im_local_addr.ia_remote_qno = 1; 5307 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 5308 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 5309 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 5310 } 5311 5312 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 5313 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 5314 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 5315 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 5316 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 5317 hdr->Status = 0; 5318 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 5319 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 5320 hdr->AttributeModifier = h2b32(ioc_no + 1); 5321 5322 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 5323 cb_args = &ioc_info->ioc_cb_args; 5324 cb_args->cb_gid_info = gid_info; 5325 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 5326 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 5327 cb_args->cb_ioc_num = ioc_no; 5328 5329 mutex_enter(&gid_info->gl_mutex); 5330 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 5331 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 5332 mutex_exit(&gid_info->gl_mutex); 5333 5334 IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:" 5335 "timeout %x", ioc_info->ioc_timeout_id); 5336 5337 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 5338 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 5339 IBTF_DPRINTF_L2("ibdm", 5340 "\tsend_ioc_profile: msg transport failed"); 5341 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 5342 } 5343 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 5344 return (IBDM_SUCCESS); 5345 } 5346 5347 5348 /* 5349 * ibdm_port_reachable 5350 * Returns B_TRUE if the port GID is reachable by sending 5351 * a SA query to get the NODE record for this port GUID. 5352 */ 5353 static boolean_t 5354 ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid) 5355 { 5356 sa_node_record_t *resp; 5357 size_t length; 5358 5359 /* 5360 * Verify if it's reachable by getting the node record. 5361 */ 5362 if (ibdm_get_node_record_by_port(sa_hdl, guid, &resp, &length) == 5363 IBDM_SUCCESS) { 5364 kmem_free(resp, length); 5365 return (B_TRUE); 5366 } 5367 return (B_FALSE); 5368 } 5369 5370 /* 5371 * ibdm_get_node_record_by_port 5372 * Sends a SA query to get the NODE record for port GUID 5373 * Returns IBDM_SUCCESS if the port GID is reachable. 5374 * 5375 * Note: the caller must be responsible for freeing the resource 5376 * by calling kmem_free(resp, length) later. 
5377 */ 5378 static int 5379 ibdm_get_node_record_by_port(ibmf_saa_handle_t sa_hdl, ib_guid_t guid, 5380 sa_node_record_t **resp, size_t *length) 5381 { 5382 sa_node_record_t req; 5383 ibmf_saa_access_args_t args; 5384 int ret; 5385 ASSERT(resp != NULL && length != NULL); 5386 5387 IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx", 5388 guid); 5389 5390 bzero(&req, sizeof (sa_node_record_t)); 5391 req.NodeInfo.PortGUID = guid; 5392 5393 args.sq_attr_id = SA_NODERECORD_ATTRID; 5394 args.sq_access_type = IBMF_SAA_RETRIEVE; 5395 args.sq_component_mask = SA_NODEINFO_COMPMASK_PORTGUID; 5396 args.sq_template = &req; 5397 args.sq_callback = NULL; 5398 args.sq_callback_arg = NULL; 5399 5400 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) resp); 5401 if (ret != IBMF_SUCCESS) { 5402 IBTF_DPRINTF_L2("ibdm", "\tport_reachable:" 5403 " SA Retrieve Failed: %d", ret); 5404 return (IBDM_FAILURE); 5405 } 5406 if (*resp == NULL || *length == 0) { 5407 IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records"); 5408 return (IBDM_FAILURE); 5409 } 5410 /* 5411 * There is one NodeRecord on each endport on a subnet. 5412 */ 5413 ASSERT(*length == sizeof (sa_node_record_t)); 5414 5415 return (IBDM_SUCCESS); 5416 } 5417 5418 5419 /* 5420 * Update the gidlist for all affected IOCs when GID becomes 5421 * available/unavailable. 5422 * 5423 * Parameters : 5424 * gidinfo - Incoming / Outgoing GID. 5425 * avail_flag - 1 for GID added, 0 for GID removed. 5426 * - (-1) : IOC gid list updated, ioc_list required. 5427 * 5428 * This function gets the gid_info for the node GUID corresponding to 5429 * the port GID and uses its IOU info to build the IOC list. 5430 */ 5431 static ibdm_ioc_info_t * 5432 ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag) 5433 { 5434 ibdm_dp_gidinfo_t *node_gid = NULL; 5435 uint8_t niocs, ii; 5436 ibdm_ioc_info_t *ioc, *ioc_list = NULL, *tmp; 5437 5438 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist"); 5439 5440 switch (avail_flag) { 5441 case 1 : 5442 node_gid = ibdm_check_dest_nodeguid(gid_info); 5443 break; 5444 case 0 : 5445 node_gid = ibdm_handle_gid_rm(gid_info); 5446 break; 5447 case -1 : 5448 node_gid = gid_info; 5449 break; 5450 default : 5451 break; 5452 } 5453 5454 if (node_gid == NULL) { 5455 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: " 5456 "No node GID found, port gid 0x%p, avail_flag %d", 5457 gid_info, avail_flag); 5458 return (NULL); 5459 } 5460 5461 mutex_enter(&node_gid->gl_mutex); 5462 if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE && 5463 node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) || 5464 node_gid->gl_iou == NULL) { 5465 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist " 5466 "gl_state %x, gl_iou %p", node_gid->gl_state, 5467 node_gid->gl_iou); 5468 mutex_exit(&node_gid->gl_mutex); 5469 return (NULL); 5470 } 5471 5472 niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots; 5473 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x", 5474 niocs); 5475 for (ii = 0; ii < niocs; ii++) { 5476 ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii); 5477 /* 5478 * Skip IOCs for which probe is not complete or 5479 * reprobe is in progress 5480 */ 5481 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5482 tmp = ibdm_dup_ioc_info(ioc, node_gid); 5483 tmp->ioc_info_updated.ib_gid_prop_updated = 1; 5484 tmp->ioc_next = ioc_list; 5485 ioc_list = tmp; 5486 } 5487 } 5488 mutex_exit(&node_gid->gl_mutex); 5489 5490 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p", 5491 ioc_list); 5492 return (ioc_list); 5493 } 5494 5495 /* 5496 * ibdm_saa_event_cb : 5497 * Event handling which does 
*not* require ibdm_hl_mutex to be 5498 * held is executed in the same thread. This is to prevent 5499 * deadlocks with HCA port down notifications which hold the 5500 * ibdm_hl_mutex. 5501 * 5502 * GID_AVAILABLE event is handled here. A taskq is spawned to 5503 * handle GID_UNAVAILABLE. 5504 * 5505 * A new mutex ibdm_ibnex_mutex has been introduced to protect 5506 * ibnex_callback. This has been done to prevent any possible 5507 * deadlock (described above) while handling GID_AVAILABLE. 5508 * 5509 * IBMF calls the event callback for a HCA port. The SA handle 5510 * for this port remains valid until the callback returns, so 5511 * IBDM calling IBMF using the above SA handle is valid. 5512 * 5513 * IBDM will additionally check (SA handle != NULL), before 5514 * calling IBMF. 5515 */ 5516 /*ARGSUSED*/ 5517 static void 5518 ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle, 5519 ibmf_saa_subnet_event_t ibmf_saa_event, 5520 ibmf_saa_event_details_t *event_details, void *callback_arg) 5521 { 5522 ibdm_saa_event_arg_t *event_arg; 5523 ib_gid_t sgid, dgid; 5524 ibdm_port_attr_t *hca_port; 5525 ibdm_dp_gidinfo_t *gid_info, *node_gid_info = NULL; 5526 sa_node_record_t *nrec; 5527 size_t length; 5528 5529 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5530 5531 hca_port = (ibdm_port_attr_t *)callback_arg; 5532 5533 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n", 5534 ibmf_saa_handle, ibmf_saa_event, event_details, 5535 callback_arg); 5536 #ifdef DEBUG 5537 if (ibdm_ignore_saa_event) 5538 return; 5539 #endif 5540 5541 if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) { 5542 /* 5543 * Ensure no other probe / sweep fabric is in 5544 * progress. 5545 */ 5546 mutex_enter(&ibdm.ibdm_mutex); 5547 while (ibdm.ibdm_busy & IBDM_BUSY) 5548 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5549 ibdm.ibdm_busy |= IBDM_BUSY; 5550 mutex_exit(&ibdm.ibdm_mutex); 5551 5552 /* 5553 * If we already know about this GID, return. 5554 * GID_AVAILABLE may be reported for multiple HCA 5555 * ports. 
5556 */ 5557 if ((ibdm_check_dgid(event_details->ie_gid.gid_guid, 5558 event_details->ie_gid.gid_prefix)) != NULL) { 5559 mutex_enter(&ibdm.ibdm_mutex); 5560 ibdm.ibdm_busy &= ~IBDM_BUSY; 5561 cv_broadcast(&ibdm.ibdm_busy_cv); 5562 mutex_exit(&ibdm.ibdm_mutex); 5563 return; 5564 } 5565 5566 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5567 "Insertion notified", 5568 event_details->ie_gid.gid_prefix, 5569 event_details->ie_gid.gid_guid); 5570 5571 /* This is a new gid, insert it to GID list */ 5572 sgid.gid_prefix = hca_port->pa_sn_prefix; 5573 sgid.gid_guid = hca_port->pa_port_guid; 5574 dgid.gid_prefix = event_details->ie_gid.gid_prefix; 5575 dgid.gid_guid = event_details->ie_gid.gid_guid; 5576 gid_info = ibdm_create_gid_info(hca_port, sgid, dgid); 5577 if (gid_info == NULL) { 5578 IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: " 5579 "create_gid_info returned NULL"); 5580 mutex_enter(&ibdm.ibdm_mutex); 5581 ibdm.ibdm_busy &= ~IBDM_BUSY; 5582 cv_broadcast(&ibdm.ibdm_busy_cv); 5583 mutex_exit(&ibdm.ibdm_mutex); 5584 return; 5585 } 5586 mutex_enter(&gid_info->gl_mutex); 5587 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 5588 mutex_exit(&gid_info->gl_mutex); 5589 5590 /* Get the node GUID */ 5591 if (ibdm_get_node_record_by_port(ibmf_saa_handle, dgid.gid_guid, 5592 &nrec, &length) != IBDM_SUCCESS) { 5593 /* 5594 * Set the state to PROBE_NOT_DONE for the 5595 * next sweep to probe it 5596 */ 5597 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: " 5598 "Skipping GID : port GUID not found"); 5599 mutex_enter(&gid_info->gl_mutex); 5600 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5601 mutex_exit(&gid_info->gl_mutex); 5602 mutex_enter(&ibdm.ibdm_mutex); 5603 ibdm.ibdm_busy &= ~IBDM_BUSY; 5604 cv_broadcast(&ibdm.ibdm_busy_cv); 5605 mutex_exit(&ibdm.ibdm_mutex); 5606 return; 5607 } 5608 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 5609 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 5610 kmem_free(nrec, length); 5611 gid_info->gl_portguid = dgid.gid_guid; 5612 5613 /* 5614 * Get the gid info with the same node GUID. 5615 */ 5616 mutex_enter(&ibdm.ibdm_mutex); 5617 node_gid_info = ibdm.ibdm_dp_gidlist_head; 5618 while (node_gid_info) { 5619 if (node_gid_info->gl_nodeguid == 5620 gid_info->gl_nodeguid && 5621 node_gid_info->gl_iou != NULL) { 5622 break; 5623 } 5624 node_gid_info = node_gid_info->gl_next; 5625 } 5626 mutex_exit(&ibdm.ibdm_mutex); 5627 5628 /* 5629 * Handling a new GID requires filling of gl_hca_list. 5630 * This require ibdm hca_list to be parsed and hence 5631 * holding the ibdm_hl_mutex. Spawning a new thread to 5632 * handle this. 
5633 */ 5634 if (node_gid_info == NULL) { 5635 if (taskq_dispatch(system_taskq, 5636 ibdm_saa_handle_new_gid, (void *)gid_info, 5637 TQ_NOSLEEP) == NULL) { 5638 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5639 "new_gid taskq_dispatch failed"); 5640 return; 5641 } 5642 } 5643 5644 mutex_enter(&ibdm.ibdm_mutex); 5645 ibdm.ibdm_busy &= ~IBDM_BUSY; 5646 cv_broadcast(&ibdm.ibdm_busy_cv); 5647 mutex_exit(&ibdm.ibdm_mutex); 5648 return; 5649 } 5650 5651 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE) 5652 return; 5653 5654 event_arg = (ibdm_saa_event_arg_t *)kmem_alloc( 5655 sizeof (ibdm_saa_event_arg_t), KM_SLEEP); 5656 event_arg->ibmf_saa_handle = ibmf_saa_handle; 5657 event_arg->ibmf_saa_event = ibmf_saa_event; 5658 bcopy(event_details, &event_arg->event_details, 5659 sizeof (ibmf_saa_event_details_t)); 5660 event_arg->callback_arg = callback_arg; 5661 5662 if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq, 5663 (void *)event_arg, TQ_NOSLEEP) == NULL) { 5664 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5665 "taskq_dispatch failed"); 5666 ibdm_free_saa_event_arg(event_arg); 5667 return; 5668 } 5669 } 5670 5671 /* 5672 * Handle a new GID discovered by GID_AVAILABLE saa event. 5673 */ 5674 void 5675 ibdm_saa_handle_new_gid(void *arg) 5676 { 5677 ibdm_dp_gidinfo_t *gid_info; 5678 ibdm_hca_list_t *hca_list = NULL; 5679 ibdm_port_attr_t *port = NULL; 5680 ibdm_ioc_info_t *ioc_list = NULL; 5681 5682 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg); 5683 5684 gid_info = (ibdm_dp_gidinfo_t *)arg; 5685 5686 /* 5687 * Ensure that no other sweep / probe has completed 5688 * probing this gid. 5689 */ 5690 mutex_enter(&gid_info->gl_mutex); 5691 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 5692 mutex_exit(&gid_info->gl_mutex); 5693 return; 5694 } 5695 mutex_exit(&gid_info->gl_mutex); 5696 5697 /* 5698 * Parse HCAs to fill gl_hca_list 5699 */ 5700 mutex_enter(&ibdm.ibdm_hl_mutex); 5701 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5702 ibdm_get_next_port(&hca_list, &port, 1)) { 5703 if (ibdm_port_reachable(port->pa_sa_hdl, 5704 gid_info->gl_portguid) == B_TRUE) { 5705 ibdm_addto_glhcalist(gid_info, hca_list); 5706 } 5707 } 5708 mutex_exit(&ibdm.ibdm_hl_mutex); 5709 5710 /* 5711 * Ensure no other probe / sweep fabric is in 5712 * progress. 
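 * Serialization uses the IBDM_BUSY bit in ibdm.ibdm_busy, guarded by
 * ibdm.ibdm_mutex: wait on ibdm_busy_cv while the bit is set, then
 * set it for the duration of this probe.  The bit is cleared and
 * ibdm_busy_cv broadcast on every return path below.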
5713 */ 5714 mutex_enter(&ibdm.ibdm_mutex); 5715 while (ibdm.ibdm_busy & IBDM_BUSY) 5716 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5717 ibdm.ibdm_busy |= IBDM_BUSY; 5718 mutex_exit(&ibdm.ibdm_mutex); 5719 5720 /* 5721 * New IOU probe it, to check if new IOCs 5722 */ 5723 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: " 5724 "new GID : probing"); 5725 mutex_enter(&ibdm.ibdm_mutex); 5726 ibdm.ibdm_ngid_probes_in_progress++; 5727 mutex_exit(&ibdm.ibdm_mutex); 5728 mutex_enter(&gid_info->gl_mutex); 5729 gid_info->gl_reprobe_flag = 0; 5730 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5731 mutex_exit(&gid_info->gl_mutex); 5732 ibdm_probe_gid_thread((void *)gid_info); 5733 5734 mutex_enter(&ibdm.ibdm_mutex); 5735 ibdm_wait_probe_completion(); 5736 mutex_exit(&ibdm.ibdm_mutex); 5737 5738 if (gid_info->gl_iou == NULL) { 5739 mutex_enter(&ibdm.ibdm_mutex); 5740 ibdm.ibdm_busy &= ~IBDM_BUSY; 5741 cv_broadcast(&ibdm.ibdm_busy_cv); 5742 mutex_exit(&ibdm.ibdm_mutex); 5743 return; 5744 } 5745 5746 /* 5747 * Update GID list in all IOCs affected by this 5748 */ 5749 ioc_list = ibdm_update_ioc_gidlist(gid_info, 1); 5750 5751 /* 5752 * Pass on the IOCs with updated GIDs to IBnexus 5753 */ 5754 if (ioc_list) { 5755 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5756 if (ibdm.ibdm_ibnex_callback != NULL) { 5757 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 5758 IBDM_EVENT_IOC_PROP_UPDATE); 5759 } 5760 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5761 } 5762 5763 mutex_enter(&ibdm.ibdm_mutex); 5764 ibdm.ibdm_busy &= ~IBDM_BUSY; 5765 cv_broadcast(&ibdm.ibdm_busy_cv); 5766 mutex_exit(&ibdm.ibdm_mutex); 5767 } 5768 5769 /* 5770 * ibdm_saa_event_taskq : 5771 * GID_UNAVAILABLE Event handling requires ibdm_hl_mutex to be 5772 * held. The GID_UNAVAILABLE handling is done in a taskq to 5773 * prevent deadlocks with HCA port down notifications which hold 5774 * ibdm_hl_mutex. 
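 *
 *	The argument is an ibdm_saa_event_arg_t allocated (KM_SLEEP)
 *	and filled in by ibdm_saa_event_cb() before dispatching this
 *	function on system_taskq; it is released here through
 *	ibdm_free_saa_event_arg() on every exit path.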
5775 */ 5776 void 5777 ibdm_saa_event_taskq(void *arg) 5778 { 5779 ibdm_saa_event_arg_t *event_arg; 5780 ibmf_saa_handle_t ibmf_saa_handle; 5781 ibmf_saa_subnet_event_t ibmf_saa_event; 5782 ibmf_saa_event_details_t *event_details; 5783 void *callback_arg; 5784 5785 ibdm_dp_gidinfo_t *gid_info; 5786 ibdm_port_attr_t *hca_port, *port = NULL; 5787 ibdm_hca_list_t *hca_list = NULL; 5788 int sa_handle_valid = 0; 5789 ibdm_ioc_info_t *ioc_list = NULL; 5790 5791 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5792 5793 event_arg = (ibdm_saa_event_arg_t *)arg; 5794 ibmf_saa_handle = event_arg->ibmf_saa_handle; 5795 ibmf_saa_event = event_arg->ibmf_saa_event; 5796 event_details = &event_arg->event_details; 5797 callback_arg = event_arg->callback_arg; 5798 5799 ASSERT(callback_arg != NULL); 5800 ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE); 5801 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)", 5802 ibmf_saa_handle, ibmf_saa_event, event_details, 5803 callback_arg); 5804 5805 hca_port = (ibdm_port_attr_t *)callback_arg; 5806 5807 /* Check if the port_attr is still valid */ 5808 mutex_enter(&ibdm.ibdm_hl_mutex); 5809 for (ibdm_get_next_port(&hca_list, &port, 0); port; 5810 ibdm_get_next_port(&hca_list, &port, 0)) { 5811 if (port == hca_port && port->pa_port_guid == 5812 hca_port->pa_port_guid) { 5813 if (ibmf_saa_handle == hca_port->pa_sa_hdl) 5814 sa_handle_valid = 1; 5815 break; 5816 } 5817 } 5818 mutex_exit(&ibdm.ibdm_hl_mutex); 5819 if (sa_handle_valid == 0) { 5820 ibdm_free_saa_event_arg(event_arg); 5821 return; 5822 } 5823 5824 if (hca_port && (hca_port->pa_sa_hdl == NULL || 5825 ibmf_saa_handle != hca_port->pa_sa_hdl)) { 5826 ibdm_free_saa_event_arg(event_arg); 5827 return; 5828 } 5829 hca_list = NULL; 5830 port = NULL; 5831 5832 /* 5833 * Check if the GID is visible to other HCA ports. 5834 * Return if so. 5835 */ 5836 mutex_enter(&ibdm.ibdm_hl_mutex); 5837 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5838 ibdm_get_next_port(&hca_list, &port, 1)) { 5839 if (ibdm_port_reachable(port->pa_sa_hdl, 5840 event_details->ie_gid.gid_guid) == B_TRUE) { 5841 mutex_exit(&ibdm.ibdm_hl_mutex); 5842 ibdm_free_saa_event_arg(event_arg); 5843 return; 5844 } 5845 } 5846 mutex_exit(&ibdm.ibdm_hl_mutex); 5847 5848 /* 5849 * Ensure no other probe / sweep fabric is in 5850 * progress. 5851 */ 5852 mutex_enter(&ibdm.ibdm_mutex); 5853 while (ibdm.ibdm_busy & IBDM_BUSY) 5854 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5855 ibdm.ibdm_busy |= IBDM_BUSY; 5856 mutex_exit(&ibdm.ibdm_mutex); 5857 5858 /* 5859 * If this GID is no longer in GID list, return 5860 * GID_UNAVAILABLE may be reported for multiple HCA 5861 * ports. 
5862 */ 5863 mutex_enter(&ibdm.ibdm_mutex); 5864 gid_info = ibdm.ibdm_dp_gidlist_head; 5865 while (gid_info) { 5866 if (gid_info->gl_portguid == 5867 event_details->ie_gid.gid_guid) { 5868 break; 5869 } 5870 gid_info = gid_info->gl_next; 5871 } 5872 mutex_exit(&ibdm.ibdm_mutex); 5873 if (gid_info == NULL) { 5874 mutex_enter(&ibdm.ibdm_mutex); 5875 ibdm.ibdm_busy &= ~IBDM_BUSY; 5876 cv_broadcast(&ibdm.ibdm_busy_cv); 5877 mutex_exit(&ibdm.ibdm_mutex); 5878 ibdm_free_saa_event_arg(event_arg); 5879 return; 5880 } 5881 5882 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5883 "Unavailable notification", 5884 event_details->ie_gid.gid_prefix, 5885 event_details->ie_gid.gid_guid); 5886 5887 /* 5888 * Update GID list in all IOCs affected by this 5889 */ 5890 if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED || 5891 gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) 5892 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 5893 5894 /* 5895 * Remove GID from the global GID list 5896 * Handle the case where all port GIDs for an 5897 * IOU have been hot-removed. Check both gid_info 5898 * & ioc_info for checking ngids. 5899 */ 5900 mutex_enter(&ibdm.ibdm_mutex); 5901 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 5902 mutex_enter(&gid_info->gl_mutex); 5903 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 5904 mutex_exit(&gid_info->gl_mutex); 5905 } 5906 if (gid_info->gl_prev != NULL) 5907 gid_info->gl_prev->gl_next = gid_info->gl_next; 5908 if (gid_info->gl_next != NULL) 5909 gid_info->gl_next->gl_prev = gid_info->gl_prev; 5910 5911 if (gid_info == ibdm.ibdm_dp_gidlist_head) 5912 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 5913 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 5914 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 5915 ibdm.ibdm_ngids--; 5916 5917 ibdm.ibdm_busy &= ~IBDM_BUSY; 5918 cv_broadcast(&ibdm.ibdm_busy_cv); 5919 mutex_exit(&ibdm.ibdm_mutex); 5920 5921 /* free the hca_list on this gid_info */ 5922 ibdm_delete_glhca_list(gid_info); 5923 5924 mutex_destroy(&gid_info->gl_mutex); 5925 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 5926 5927 /* 5928 * Pass on the IOCs with updated GIDs to IBnexus 5929 */ 5930 if (ioc_list) { 5931 IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE " 5932 "IOC_PROP_UPDATE for %p\n", ioc_list); 5933 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5934 if (ibdm.ibdm_ibnex_callback != NULL) { 5935 (*ibdm.ibdm_ibnex_callback)((void *) 5936 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 5937 } 5938 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5939 } 5940 5941 ibdm_free_saa_event_arg(event_arg); 5942 } 5943 5944 5945 static int 5946 ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev) 5947 { 5948 ibdm_gid_t *scan_new, *scan_prev; 5949 int cmp_failed = 0; 5950 5951 ASSERT(new != NULL); 5952 ASSERT(prev != NULL); 5953 5954 /* 5955 * Search for each new gid anywhere in the prev GID list. 5956 * Note that the gid list could have been re-ordered. 
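 * The comparison is therefore order-insensitive: it only checks that
 * every gid in 'new' appears somewhere in 'prev', returning 1 as soon
 * as one is missing and 0 otherwise.  Entries present in 'prev' but
 * not in 'new' are caught by the callers' separate gid count
 * comparison (see ibdm_reprobe_update_port_srv() below).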
5957 */ 5958 for (scan_new = new; scan_new; scan_new = scan_new->gid_next) { 5959 for (scan_prev = prev, cmp_failed = 1; scan_prev; 5960 scan_prev = scan_prev->gid_next) { 5961 if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi && 5962 scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) { 5963 cmp_failed = 0; 5964 break; 5965 } 5966 } 5967 5968 if (cmp_failed) 5969 return (1); 5970 } 5971 return (0); 5972 } 5973 5974 /* 5975 * This is always called in a single thread. 5976 * This function updates the gid_list and serv_list of the IOC. 5977 * The current gid_list is in ioc_info_t (contains only port 5978 * guids for which probe is done) & gidinfo_t (other port gids). 5979 * The gids in both locations are used for comparison. 5980 */ 5981 static void 5982 ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo) 5983 { 5984 ibdm_gid_t *cur_gid_list; 5985 uint_t cur_nportgids; 5986 5987 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 5988 5989 ioc->ioc_info_updated.ib_prop_updated = 0; 5990 5991 5992 /* Current GID list in gid_info only */ 5993 cur_gid_list = gidinfo->gl_gid; 5994 cur_nportgids = gidinfo->gl_ngids; 5995 5996 if (ioc->ioc_prev_serv_cnt != 5997 ioc->ioc_profile.ioc_service_entries || 5998 ibdm_serv_cmp(&ioc->ioc_serv[0], &ioc->ioc_prev_serv[0], 5999 ioc->ioc_prev_serv_cnt)) 6000 ioc->ioc_info_updated.ib_srv_prop_updated = 1; 6001 6002 if (ioc->ioc_prev_nportgids != cur_nportgids || 6003 ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) { 6004 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 6005 } else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) { 6006 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 6007 } 6008 6009 /* Zero out previous entries */ 6010 ibdm_free_gid_list(ioc->ioc_prev_gid_list); 6011 if (ioc->ioc_prev_serv) 6012 kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt * 6013 sizeof (ibdm_srvents_info_t)); 6014 ioc->ioc_prev_serv_cnt = 0; 6015 ioc->ioc_prev_nportgids = 0; 6016 ioc->ioc_prev_serv = NULL; 6017 ioc->ioc_prev_gid_list = NULL; 6018 } 6019 6020 /* 6021 * Handle GID removal. This returns the gid_info of a GID with the same 6022 * node GUID, if found. For a GID with IOU information, the same 6023 * gid_info is returned if no gid_info with the same node_guid is found. 6024 */ 6025 static ibdm_dp_gidinfo_t * 6026 ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid) 6027 { 6028 ibdm_dp_gidinfo_t *gid_list; 6029 6030 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid); 6031 6032 if (rm_gid->gl_iou == NULL) { 6033 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou"); 6034 /* 6035 * Search for a GID with same node_guid and 6036 * gl_iou != NULL 6037 */ 6038 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 6039 gid_list = gid_list->gl_next) { 6040 if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid 6041 == rm_gid->gl_nodeguid)) 6042 break; 6043 } 6044 6045 if (gid_list) 6046 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 6047 6048 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 6049 return (gid_list); 6050 } else { 6051 /* 6052 * Search for a GID with same node_guid and 6053 * gl_iou == NULL 6054 */ 6055 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou"); 6056 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 6057 gid_list = gid_list->gl_next) { 6058 if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid 6059 == rm_gid->gl_nodeguid)) 6060 break; 6061 } 6062 6063 if (gid_list) { 6064 /* 6065 * Copy the following fields from rm_gid : 6066 * 1. gl_state 6067 * 2. gl_iou 6068 * 3. 
gl_gid & gl_ngids 6069 * 6070 * Note : Function is synchronized by 6071 * ibdm_busy flag. 6072 * 6073 * Note : Redirect info is initialized if 6074 * any MADs for the GID fail 6075 */ 6076 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm " 6077 "copying info to GID with gl_iou != NULL"); 6078 gid_list->gl_state = rm_gid->gl_state; 6079 gid_list->gl_iou = rm_gid->gl_iou; 6080 gid_list->gl_gid = rm_gid->gl_gid; 6081 gid_list->gl_ngids = rm_gid->gl_ngids; 6082 6083 /* Remove the GID from gl_gid list */ 6084 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 6085 } else { 6086 /* 6087 * Handle a case where all GIDs to the IOU have 6088 * been removed. 6089 */ 6090 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID " 6091 "to IOU"); 6092 6093 ibdm_rmfrom_glgid_list(rm_gid, rm_gid); 6094 return (rm_gid); 6095 } 6096 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 6097 return (gid_list); 6098 } 6099 } 6100 6101 static void 6102 ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info, 6103 ibdm_dp_gidinfo_t *rm_gid) 6104 { 6105 ibdm_gid_t *tmp, *prev; 6106 6107 IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)", 6108 gid_info, rm_gid); 6109 6110 for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) { 6111 if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi && 6112 tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) { 6113 if (prev == NULL) 6114 gid_info->gl_gid = tmp->gid_next; 6115 else 6116 prev->gid_next = tmp->gid_next; 6117 6118 kmem_free(tmp, sizeof (ibdm_gid_t)); 6119 gid_info->gl_ngids--; 6120 break; 6121 } else { 6122 prev = tmp; 6123 tmp = tmp->gid_next; 6124 } 6125 } 6126 } 6127 6128 static void 6129 ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest) 6130 { 6131 ibdm_gid_t *head = NULL, *new, *tail; 6132 6133 /* First copy the destination */ 6134 for (; dest; dest = dest->gid_next) { 6135 new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 6136 new->gid_dgid_hi = dest->gid_dgid_hi; 6137 new->gid_dgid_lo = dest->gid_dgid_lo; 6138 new->gid_next = head; 6139 head = new; 6140 } 6141 6142 /* Insert this to the source */ 6143 if (*src_ptr == NULL) 6144 *src_ptr = head; 6145 else { 6146 for (tail = *src_ptr; tail->gid_next != NULL; 6147 tail = tail->gid_next) 6148 ; 6149 6150 tail->gid_next = head; 6151 } 6152 } 6153 6154 static void 6155 ibdm_free_gid_list(ibdm_gid_t *head) 6156 { 6157 ibdm_gid_t *delete; 6158 6159 for (delete = head; delete; ) { 6160 head = delete->gid_next; 6161 kmem_free(delete, sizeof (ibdm_gid_t)); 6162 delete = head; 6163 } 6164 } 6165 6166 /* 6167 * This function rescans the DM capable GIDs (gl_state is 6168 * IBDM_GID_PROBING_COMPLETE or IBDM_GID_PROBING_SKIPPED). This 6169 * basically checks if the DM capable GID is reachable. If 6170 * not, this is handled the same way as GID_UNAVAILABLE, 6171 * except that notifications are not sent to IBnexus. 6172 * 6173 * This function also initializes the ioc_prev_list for 6174 * a particular IOC (when called from probe_ioc, with 6175 * ioc_guidp != NULL) or all IOCs for the gid (called from 6176 * sweep_fabric, ioc_guidp == NULL). 
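 *
 * The ioc_prev_gid_list / ioc_prev_nportgids snapshot taken here is
 * later consumed by ibdm_reprobe_update_port_srv(), which compares it
 * against the gid list current at that time to decide whether
 * ib_gid_prop_updated needs to be set for the IOC.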
6177 */ 6178 static void 6179 ibdm_rescan_gidlist(ib_guid_t *ioc_guidp) 6180 { 6181 ibdm_dp_gidinfo_t *gid_info, *tmp; 6182 int ii, niocs, found; 6183 ibdm_hca_list_t *hca_list = NULL; 6184 ibdm_port_attr_t *port = NULL; 6185 ibdm_ioc_info_t *ioc_list; 6186 6187 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6188 found = 0; 6189 if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED && 6190 gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) { 6191 gid_info = gid_info->gl_next; 6192 continue; 6193 } 6194 6195 /* 6196 * Check if the GID is visible to any HCA ports. 6197 * Return if so. 6198 */ 6199 mutex_enter(&ibdm.ibdm_hl_mutex); 6200 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6201 ibdm_get_next_port(&hca_list, &port, 1)) { 6202 if (ibdm_port_reachable(port->pa_sa_hdl, 6203 gid_info->gl_dgid_lo) == B_TRUE) { 6204 found = 1; 6205 break; 6206 } 6207 } 6208 mutex_exit(&ibdm.ibdm_hl_mutex); 6209 6210 if (found) { 6211 if (gid_info->gl_iou == NULL) { 6212 gid_info = gid_info->gl_next; 6213 continue; 6214 } 6215 6216 /* Intialize the ioc_prev_gid_list */ 6217 niocs = 6218 gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 6219 for (ii = 0; ii < niocs; ii++) { 6220 ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii); 6221 6222 if (ioc_guidp == NULL || (*ioc_guidp == 6223 ioc_list->ioc_profile.ioc_guid)) { 6224 /* Add info of GIDs in gid_info also */ 6225 ibdm_addto_gidlist( 6226 &ioc_list->ioc_prev_gid_list, 6227 gid_info->gl_gid); 6228 ioc_list->ioc_prev_nportgids = 6229 gid_info->gl_ngids; 6230 } 6231 } 6232 gid_info = gid_info->gl_next; 6233 continue; 6234 } 6235 6236 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 6237 "deleted port GUID %llx", 6238 gid_info->gl_dgid_lo); 6239 6240 /* 6241 * Update GID list in all IOCs affected by this 6242 */ 6243 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 6244 6245 /* 6246 * Remove GID from the global GID list 6247 * Handle the case where all port GIDs for an 6248 * IOU have been hot-removed. 6249 */ 6250 mutex_enter(&ibdm.ibdm_mutex); 6251 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 6252 mutex_enter(&gid_info->gl_mutex); 6253 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 6254 mutex_exit(&gid_info->gl_mutex); 6255 } 6256 6257 tmp = gid_info->gl_next; 6258 if (gid_info->gl_prev != NULL) 6259 gid_info->gl_prev->gl_next = gid_info->gl_next; 6260 if (gid_info->gl_next != NULL) 6261 gid_info->gl_next->gl_prev = gid_info->gl_prev; 6262 6263 if (gid_info == ibdm.ibdm_dp_gidlist_head) 6264 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 6265 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 6266 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 6267 ibdm.ibdm_ngids--; 6268 mutex_exit(&ibdm.ibdm_mutex); 6269 6270 /* free the hca_list on this gid_info */ 6271 ibdm_delete_glhca_list(gid_info); 6272 6273 mutex_destroy(&gid_info->gl_mutex); 6274 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 6275 6276 gid_info = tmp; 6277 6278 /* 6279 * Pass on the IOCs with updated GIDs to IBnexus 6280 */ 6281 if (ioc_list) { 6282 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 6283 "IOC_PROP_UPDATE for %p\n", ioc_list); 6284 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6285 if (ibdm.ibdm_ibnex_callback != NULL) { 6286 (*ibdm.ibdm_ibnex_callback)((void *) 6287 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6288 } 6289 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6290 } 6291 } 6292 } 6293 6294 /* 6295 * This function notifies IBnex of IOCs on this GID. 6296 * Notification is for GIDs with gl_reprobe_flag set. 
6297 * The flag is set when IOC probe / fabric sweep 6298 * probes a GID starting from CLASS port info. 6299 * 6300 * IBnexus will have information of a reconnected IOC 6301 * if it had probed it before. If this is a new IOC, 6302 * IBnexus ignores the notification. 6303 * 6304 * This function should be called with no locks held. 6305 */ 6306 static void 6307 ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info) 6308 { 6309 ibdm_ioc_info_t *ioc_list; 6310 6311 if (gid_info->gl_reprobe_flag == 0 || 6312 gid_info->gl_iou == NULL) 6313 return; 6314 6315 ioc_list = ibdm_update_ioc_gidlist(gid_info, -1); 6316 6317 /* 6318 * Pass on the IOCs with updated GIDs to IBnexus 6319 */ 6320 if (ioc_list) { 6321 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6322 if (ibdm.ibdm_ibnex_callback != NULL) { 6323 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 6324 IBDM_EVENT_IOC_PROP_UPDATE); 6325 } 6326 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6327 } 6328 } 6329 6330 6331 static void 6332 ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg) 6333 { 6334 if (arg != NULL) 6335 kmem_free(arg, sizeof (ibdm_saa_event_arg_t)); 6336 } 6337 6338 /* 6339 * This function parses the list of HCAs and HCA ports 6340 * to return the port_attr of the next HCA port. A port 6341 * connected to IB fabric (port_state active) is returned, 6342 * if connected_flag is set. 6343 */ 6344 static void 6345 ibdm_get_next_port(ibdm_hca_list_t **inp_hcap, 6346 ibdm_port_attr_t **inp_portp, int connect_flag) 6347 { 6348 int ii; 6349 ibdm_port_attr_t *port, *next_port = NULL; 6350 ibdm_port_attr_t *inp_port; 6351 ibdm_hca_list_t *hca_list; 6352 int found = 0; 6353 6354 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6355 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)", 6356 inp_hcap, inp_portp, connect_flag); 6357 6358 hca_list = *inp_hcap; 6359 inp_port = *inp_portp; 6360 6361 if (hca_list == NULL) 6362 hca_list = ibdm.ibdm_hca_list_head; 6363 6364 for (; hca_list; hca_list = hca_list->hl_next) { 6365 for (ii = 0; ii < hca_list->hl_nports; ii++) { 6366 port = &hca_list->hl_port_attr[ii]; 6367 6368 /* 6369 * inp_port != NULL; 6370 * Skip till we find the matching port 6371 */ 6372 if (inp_port && !found) { 6373 if (inp_port == port) 6374 found = 1; 6375 continue; 6376 } 6377 6378 if (!connect_flag) { 6379 next_port = port; 6380 break; 6381 } 6382 6383 if (port->pa_sa_hdl == NULL) 6384 ibdm_initialize_port(port); 6385 if (port->pa_sa_hdl == NULL) 6386 (void) ibdm_fini_port(port); 6387 else if (next_port == NULL && 6388 port->pa_sa_hdl != NULL && 6389 port->pa_state == IBT_PORT_ACTIVE) { 6390 next_port = port; 6391 break; 6392 } 6393 } 6394 6395 if (next_port) 6396 break; 6397 } 6398 6399 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : " 6400 "returns hca_list %p port %p", hca_list, next_port); 6401 *inp_hcap = hca_list; 6402 *inp_portp = next_port; 6403 } 6404 6405 static void 6406 ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid) 6407 { 6408 ibdm_gid_t *tmp; 6409 6410 tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 6411 tmp->gid_dgid_hi = addgid->gl_dgid_hi; 6412 tmp->gid_dgid_lo = addgid->gl_dgid_lo; 6413 6414 mutex_enter(&nodegid->gl_mutex); 6415 tmp->gid_next = nodegid->gl_gid; 6416 nodegid->gl_gid = tmp; 6417 nodegid->gl_ngids++; 6418 mutex_exit(&nodegid->gl_mutex); 6419 } 6420 6421 static void 6422 ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info, 6423 ibdm_hca_list_t *hca) 6424 { 6425 ibdm_hca_list_t *head, *prev = NULL, *temp; 6426 6427 IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) " 6428 ": gl_hca_list %p", 
gid_info, hca, gid_info->gl_hca_list); 6429 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6430 6431 mutex_enter(&gid_info->gl_mutex); 6432 head = gid_info->gl_hca_list; 6433 if (head == NULL) { 6434 head = ibdm_dup_hca_attr(hca); 6435 head->hl_next = NULL; 6436 gid_info->gl_hca_list = head; 6437 mutex_exit(&gid_info->gl_mutex); 6438 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6439 "gid %p, gl_hca_list %p", gid_info, 6440 gid_info->gl_hca_list); 6441 return; 6442 } 6443 6444 /* Check if already in the list */ 6445 while (head) { 6446 if (head->hl_hca_guid == hca->hl_hca_guid) { 6447 mutex_exit(&gid_info->gl_mutex); 6448 IBTF_DPRINTF_L4(ibdm_string, 6449 "\taddto_glhcalist : gid %p hca %p dup", 6450 gid_info, hca); 6451 return; 6452 } 6453 prev = head; 6454 head = head->hl_next; 6455 } 6456 6457 /* Add this HCA to gl_hca_list */ 6458 temp = ibdm_dup_hca_attr(hca); 6459 temp->hl_next = NULL; 6460 prev->hl_next = temp; 6461 mutex_exit(&gid_info->gl_mutex); 6462 6463 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6464 "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list); 6465 } 6466 6467 static void 6468 ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info) 6469 { 6470 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6471 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6472 6473 mutex_enter(&gid_info->gl_mutex); 6474 if (gid_info->gl_hca_list) 6475 ibdm_ibnex_free_hca_list(gid_info->gl_hca_list); 6476 gid_info->gl_hca_list = NULL; 6477 mutex_exit(&gid_info->gl_mutex); 6478 } 6479 6480 6481 static void 6482 ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl) 6483 { 6484 IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)", 6485 port_sa_hdl); 6486 6487 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6488 ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6489 6490 /* Check : Not busy in another probe / sweep */ 6491 mutex_enter(&ibdm.ibdm_mutex); 6492 if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) { 6493 ibdm_dp_gidinfo_t *gid_info; 6494 6495 ibdm.ibdm_busy |= IBDM_BUSY; 6496 mutex_exit(&ibdm.ibdm_mutex); 6497 6498 /* 6499 * Check if any GID is using the SA & IBMF handle 6500 * of HCA port going down. Reset ibdm_dp_gidinfo_t 6501 * using another HCA port which can reach the GID. 6502 * This is for DM capable GIDs only, no need to do 6503 * this for others 6504 * 6505 * Delete the GID if no alternate HCA port to reach 6506 * it is found. 6507 */ 6508 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6509 ibdm_dp_gidinfo_t *tmp; 6510 6511 IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr " 6512 "checking gidinfo %p", gid_info); 6513 6514 if (gid_info->gl_sa_hdl == port_sa_hdl) { 6515 IBTF_DPRINTF_L3(ibdm_string, 6516 "\tevent_hdlr: down HCA port hdl " 6517 "matches gid %p", gid_info); 6518 6519 /* 6520 * The non-DM GIDs can come back 6521 * with a new subnet prefix, when 6522 * the HCA port commes up again. To 6523 * avoid issues, delete non-DM 6524 * capable GIDs, if the gid was 6525 * discovered using the HCA port 6526 * going down. This is ensured by 6527 * setting gl_disconnected to 1. 
6528 */ 6529 if (gid_info->gl_nodeguid == 0) 6530 gid_info->gl_disconnected = 1; 6531 else 6532 ibdm_reset_gidinfo(gid_info); 6533 6534 if (gid_info->gl_disconnected) { 6535 IBTF_DPRINTF_L3(ibdm_string, 6536 "\tevent_hdlr: deleting" 6537 " gid %p", gid_info); 6538 tmp = gid_info; 6539 gid_info = gid_info->gl_next; 6540 ibdm_delete_gidinfo(tmp); 6541 } else 6542 gid_info = gid_info->gl_next; 6543 } else 6544 gid_info = gid_info->gl_next; 6545 } 6546 6547 mutex_enter(&ibdm.ibdm_mutex); 6548 ibdm.ibdm_busy &= ~IBDM_BUSY; 6549 cv_signal(&ibdm.ibdm_busy_cv); 6550 } 6551 mutex_exit(&ibdm.ibdm_mutex); 6552 } 6553 6554 static void 6555 ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6556 { 6557 ibdm_hca_list_t *hca_list = NULL; 6558 ibdm_port_attr_t *port = NULL; 6559 int gid_reinited = 0; 6560 sa_node_record_t *nr, *tmp; 6561 sa_portinfo_record_t *pi; 6562 size_t nr_len = 0, pi_len = 0; 6563 size_t path_len; 6564 ib_gid_t sgid, dgid; 6565 int ret, ii, nrecords; 6566 sa_path_record_t *path; 6567 uint8_t npaths = 1; 6568 ibdm_pkey_tbl_t *pkey_tbl; 6569 6570 IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo); 6571 6572 /* 6573 * Get list of all the ports reachable from the local known HCA 6574 * ports which are active 6575 */ 6576 mutex_enter(&ibdm.ibdm_hl_mutex); 6577 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6578 ibdm_get_next_port(&hca_list, &port, 1)) { 6579 6580 6581 /* 6582 * Get the path and re-populate the gidinfo. 6583 * Getting the path is the same probe_ioc 6584 * Init the gid info as in ibdm_create_gidinfo() 6585 */ 6586 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, 6587 gidinfo->gl_nodeguid); 6588 if (nr == NULL) { 6589 IBTF_DPRINTF_L4(ibdm_string, 6590 "\treset_gidinfo : no records"); 6591 continue; 6592 } 6593 6594 nrecords = (nr_len / sizeof (sa_node_record_t)); 6595 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 6596 if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid) 6597 break; 6598 } 6599 6600 if (ii == nrecords) { 6601 IBTF_DPRINTF_L4(ibdm_string, 6602 "\treset_gidinfo : no record for portguid"); 6603 kmem_free(nr, nr_len); 6604 continue; 6605 } 6606 6607 pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID); 6608 if (pi == NULL) { 6609 IBTF_DPRINTF_L4(ibdm_string, 6610 "\treset_gidinfo : no portinfo"); 6611 kmem_free(nr, nr_len); 6612 continue; 6613 } 6614 6615 sgid.gid_prefix = port->pa_sn_prefix; 6616 sgid.gid_guid = port->pa_port_guid; 6617 dgid.gid_prefix = pi->PortInfo.GidPrefix; 6618 dgid.gid_guid = tmp->NodeInfo.PortGUID; 6619 6620 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid, 6621 IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path); 6622 6623 if ((ret != IBMF_SUCCESS) || path == NULL) { 6624 IBTF_DPRINTF_L4(ibdm_string, 6625 "\treset_gidinfo : no paths"); 6626 kmem_free(pi, pi_len); 6627 kmem_free(nr, nr_len); 6628 continue; 6629 } 6630 6631 gidinfo->gl_dgid_hi = path->DGID.gid_prefix; 6632 gidinfo->gl_dgid_lo = path->DGID.gid_guid; 6633 gidinfo->gl_sgid_hi = path->SGID.gid_prefix; 6634 gidinfo->gl_sgid_lo = path->SGID.gid_guid; 6635 gidinfo->gl_p_key = path->P_Key; 6636 gidinfo->gl_sa_hdl = port->pa_sa_hdl; 6637 gidinfo->gl_ibmf_hdl = port->pa_ibmf_hdl; 6638 gidinfo->gl_slid = path->SLID; 6639 gidinfo->gl_dlid = path->DLID; 6640 /* Reset redirect info, next MAD will set if redirected */ 6641 gidinfo->gl_redirected = 0; 6642 gidinfo->gl_devid = (*tmp).NodeInfo.DeviceID; 6643 gidinfo->gl_SL = path->SL; 6644 6645 gidinfo->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 6646 for (ii = 0; ii < port->pa_npkeys; ii++) { 6647 
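			/*
			 * Scan this port's P_Key table for the entry
			 * matching the path's P_Key that already has an
			 * IBMF QP handle allocated, and reuse that QP
			 * handle for further DM MADs to this GID.
			 */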
if (port->pa_pkey_tbl == NULL) 6648 break; 6649 6650 pkey_tbl = &port->pa_pkey_tbl[ii]; 6651 if ((gidinfo->gl_p_key == pkey_tbl->pt_pkey) && 6652 (pkey_tbl->pt_qp_hdl != NULL)) { 6653 gidinfo->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 6654 break; 6655 } 6656 } 6657 6658 if (gidinfo->gl_qp_hdl == NULL) 6659 IBTF_DPRINTF_L2(ibdm_string, 6660 "\treset_gid_info: No matching Pkey"); 6661 else 6662 gid_reinited = 1; 6663 6664 kmem_free(path, path_len); 6665 kmem_free(pi, pi_len); 6666 kmem_free(nr, nr_len); 6667 break; 6668 } 6669 mutex_exit(&ibdm.ibdm_hl_mutex); 6670 6671 if (!gid_reinited) 6672 gidinfo->gl_disconnected = 1; 6673 } 6674 6675 static void 6676 ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6677 { 6678 ibdm_ioc_info_t *ioc_list; 6679 int in_gidlist = 0; 6680 6681 /* 6682 * Check if gidinfo has been inserted into the 6683 * ibdm_dp_gidlist_head list. gl_next or gl_prev 6684 * != NULL, if gidinfo is the list. 6685 */ 6686 if (gidinfo->gl_prev != NULL || 6687 gidinfo->gl_next != NULL || 6688 ibdm.ibdm_dp_gidlist_head == gidinfo) 6689 in_gidlist = 1; 6690 6691 ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0); 6692 6693 /* 6694 * Remove GID from the global GID list 6695 * Handle the case where all port GIDs for an 6696 * IOU have been hot-removed. 6697 */ 6698 mutex_enter(&ibdm.ibdm_mutex); 6699 if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) { 6700 mutex_enter(&gidinfo->gl_mutex); 6701 (void) ibdm_free_iou_info(gidinfo, &gidinfo->gl_iou); 6702 mutex_exit(&gidinfo->gl_mutex); 6703 } 6704 6705 /* Delete gl_hca_list */ 6706 mutex_exit(&ibdm.ibdm_mutex); 6707 ibdm_delete_glhca_list(gidinfo); 6708 mutex_enter(&ibdm.ibdm_mutex); 6709 6710 if (in_gidlist) { 6711 if (gidinfo->gl_prev != NULL) 6712 gidinfo->gl_prev->gl_next = gidinfo->gl_next; 6713 if (gidinfo->gl_next != NULL) 6714 gidinfo->gl_next->gl_prev = gidinfo->gl_prev; 6715 6716 if (gidinfo == ibdm.ibdm_dp_gidlist_head) 6717 ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next; 6718 if (gidinfo == ibdm.ibdm_dp_gidlist_tail) 6719 ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev; 6720 ibdm.ibdm_ngids--; 6721 } 6722 mutex_exit(&ibdm.ibdm_mutex); 6723 6724 mutex_destroy(&gidinfo->gl_mutex); 6725 cv_destroy(&gidinfo->gl_probe_cv); 6726 kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t)); 6727 6728 /* 6729 * Pass on the IOCs with updated GIDs to IBnexus 6730 */ 6731 if (ioc_list) { 6732 IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo " 6733 "IOC_PROP_UPDATE for %p\n", ioc_list); 6734 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6735 if (ibdm.ibdm_ibnex_callback != NULL) { 6736 (*ibdm.ibdm_ibnex_callback)((void *) 6737 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6738 } 6739 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6740 } 6741 } 6742 6743 6744 static void 6745 ibdm_fill_srv_attr_mod(ib_mad_hdr_t *hdr, ibdm_timeout_cb_args_t *cb_args) 6746 { 6747 uint32_t attr_mod; 6748 6749 attr_mod = (cb_args->cb_ioc_num + 1) << 16; 6750 attr_mod |= cb_args->cb_srvents_start; 6751 attr_mod |= (cb_args->cb_srvents_end) << 8; 6752 hdr->AttributeModifier = h2b32(attr_mod); 6753 } 6754 6755 static void 6756 ibdm_bump_transactionID(ibdm_dp_gidinfo_t *gid_info) 6757 { 6758 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 6759 gid_info->gl_transactionID++; 6760 if (gid_info->gl_transactionID == gid_info->gl_max_transactionID) { 6761 IBTF_DPRINTF_L4(ibdm_string, 6762 "\tbump_transactionID(%p), wrapup", gid_info); 6763 gid_info->gl_transactionID = gid_info->gl_min_transactionID; 6764 } 6765 } 6766 6767 /* 6768 * gl_prev_iou is set for *non-reprobe* sweeep requests, which 6769 * detected that ChangeID in IOU info has 
changed. The service 6770 * entry also may have changed. Check if service entry in IOC 6771 * has changed wrt the prev iou, if so notify to IB Nexus. 6772 */ 6773 static ibdm_ioc_info_t * 6774 ibdm_handle_prev_iou() 6775 { 6776 ibdm_dp_gidinfo_t *gid_info; 6777 ibdm_ioc_info_t *ioc_list_head = NULL, *ioc_list; 6778 ibdm_ioc_info_t *prev_ioc, *ioc; 6779 int ii, jj, niocs, prev_niocs; 6780 6781 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 6782 6783 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou enter"); 6784 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 6785 gid_info = gid_info->gl_next) { 6786 if (gid_info->gl_prev_iou == NULL) 6787 continue; 6788 6789 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou gid %p", 6790 gid_info); 6791 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 6792 prev_niocs = 6793 gid_info->gl_prev_iou->iou_info.iou_num_ctrl_slots; 6794 for (ii = 0; ii < niocs; ii++) { 6795 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 6796 6797 /* Find matching IOC */ 6798 for (jj = 0; jj < prev_niocs; jj++) { 6799 prev_ioc = (ibdm_ioc_info_t *) 6800 &gid_info->gl_prev_iou->iou_ioc_info[jj]; 6801 if (prev_ioc->ioc_profile.ioc_guid == 6802 ioc->ioc_profile.ioc_guid) 6803 break; 6804 } 6805 if (jj == prev_niocs) 6806 prev_ioc = NULL; 6807 if (ioc == NULL || prev_ioc == NULL) 6808 continue; 6809 if ((ioc->ioc_profile.ioc_service_entries != 6810 prev_ioc->ioc_profile.ioc_service_entries) || 6811 ibdm_serv_cmp(&ioc->ioc_serv[0], 6812 &prev_ioc->ioc_serv[0], 6813 ioc->ioc_profile.ioc_service_entries) != 0) { 6814 IBTF_DPRINTF_L4(ibdm_string, 6815 "/thandle_prev_iou modified IOC: " 6816 "current ioc %p, old ioc %p", 6817 ioc, prev_ioc); 6818 mutex_enter(&gid_info->gl_mutex); 6819 ioc_list = ibdm_dup_ioc_info(ioc, gid_info); 6820 mutex_exit(&gid_info->gl_mutex); 6821 ioc_list->ioc_info_updated.ib_prop_updated 6822 = 0; 6823 ioc_list->ioc_info_updated.ib_srv_prop_updated 6824 = 1; 6825 6826 if (ioc_list_head == NULL) 6827 ioc_list_head = ioc_list; 6828 else { 6829 ioc_list_head->ioc_next = ioc_list; 6830 ioc_list_head = ioc_list; 6831 } 6832 } 6833 } 6834 6835 mutex_enter(&gid_info->gl_mutex); 6836 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_prev_iou); 6837 mutex_exit(&gid_info->gl_mutex); 6838 } 6839 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iouret %p", 6840 ioc_list_head); 6841 return (ioc_list_head); 6842 } 6843 6844 /* 6845 * Compares two service entries lists, returns 0 if same, returns 1 6846 * if no match. 
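 *
 * Entries are compared positionally on srv_id and srv_name, so,
 * unlike the gid list comparison above, a re-ordered service entry
 * list is reported as a mismatch.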
6847 */ 6848 static int 6849 ibdm_serv_cmp(ibdm_srvents_info_t *serv1, ibdm_srvents_info_t *serv2, 6850 int nserv) 6851 { 6852 int ii; 6853 6854 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: enter"); 6855 for (ii = 0; ii < nserv; ii++, serv1++, serv2++) { 6856 if (serv1->se_attr.srv_id != serv2->se_attr.srv_id || 6857 bcmp(serv1->se_attr.srv_name, 6858 serv2->se_attr.srv_name, 6859 IB_DM_MAX_SVC_NAME_LEN) != 0) { 6860 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 1"); 6861 return (1); 6862 } 6863 } 6864 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 0"); 6865 return (0); 6866 } 6867 6868 /* For debugging purpose only */ 6869 #ifdef DEBUG 6870 void 6871 ibdm_dump_mad_hdr(ib_mad_hdr_t *mad_hdr) 6872 { 6873 IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info"); 6874 IBTF_DPRINTF_L4("ibdm", "\t\t ---------------"); 6875 6876 IBTF_DPRINTF_L4("ibdm", "\tBase version : 0x%x" 6877 "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass); 6878 IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x" 6879 "\tR Method : 0x%x", 6880 mad_hdr->ClassVersion, mad_hdr->R_Method); 6881 IBTF_DPRINTF_L4("ibdm", "\tMAD Status : 0x%x" 6882 "\tTransaction ID : 0x%llx", 6883 b2h16(mad_hdr->Status), b2h64(mad_hdr->TransactionID)); 6884 IBTF_DPRINTF_L4("ibdm", "\t Attribute ID : 0x%x" 6885 "\tAttribute Modified : 0x%lx", 6886 b2h16(mad_hdr->AttributeID), b2h32(mad_hdr->AttributeModifier)); 6887 } 6888 6889 6890 void 6891 ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag) 6892 { 6893 ib_mad_hdr_t *mad_hdr; 6894 6895 IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info"); 6896 IBTF_DPRINTF_L4("ibdm", "\t\t ------------------"); 6897 6898 IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x" 6899 " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid, 6900 ibmf_msg->im_local_addr.ia_remote_lid, 6901 ibmf_msg->im_local_addr.ia_remote_qno); 6902 IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x" 6903 " SL : 0x%x", ibmf_msg->im_local_addr.ia_p_key, 6904 ibmf_msg->im_local_addr.ia_q_key, 6905 ibmf_msg->im_local_addr.ia_service_level); 6906 6907 if (flag) 6908 mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg); 6909 else 6910 mad_hdr = IBDM_IN_IBMFMSG_MADHDR(ibmf_msg); 6911 6912 ibdm_dump_mad_hdr(mad_hdr); 6913 } 6914 6915 6916 void 6917 ibdm_dump_path_info(sa_path_record_t *path) 6918 { 6919 IBTF_DPRINTF_L4("ibdm", "\t\t Path information"); 6920 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------"); 6921 6922 IBTF_DPRINTF_L4("ibdm", "\t DGID hi : %llx\tDGID lo : %llx", 6923 path->DGID.gid_prefix, path->DGID.gid_guid); 6924 IBTF_DPRINTF_L4("ibdm", "\t SGID hi : %llx\tSGID lo : %llx", 6925 path->SGID.gid_prefix, path->SGID.gid_guid); 6926 IBTF_DPRINTF_L4("ibdm", "\t SLID : %x\t\tDlID : %x", 6927 path->SLID, path->DLID); 6928 IBTF_DPRINTF_L4("ibdm", "\t P Key : %x\t\tSL : %x", 6929 path->P_Key, path->SL); 6930 } 6931 6932 6933 void 6934 ibdm_dump_classportinfo(ib_mad_classportinfo_t *classportinfo) 6935 { 6936 IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO"); 6937 IBTF_DPRINTF_L4("ibdm", "\t\t --------------"); 6938 6939 IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x", 6940 ((b2h32(classportinfo->RespTimeValue)) & 0x1F)); 6941 6942 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi : 0x%llx", 6943 b2h64(classportinfo->RedirectGID_hi)); 6944 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo : 0x%llx", 6945 b2h64(classportinfo->RedirectGID_lo)); 6946 IBTF_DPRINTF_L4("ibdm", "\t Redirected TC : 0x%x", 6947 classportinfo->RedirectTC); 6948 IBTF_DPRINTF_L4("ibdm", "\t Redirected SL : 0x%x", 6949 
classportinfo->RedirectSL); 6950 IBTF_DPRINTF_L4("ibdm", "\t Redirected FL : 0x%x", 6951 classportinfo->RedirectFL); 6952 IBTF_DPRINTF_L4("ibdm", "\t Redirected LID : 0x%x", 6953 b2h16(classportinfo->RedirectLID)); 6954 IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY : 0x%x", 6955 b2h16(classportinfo->RedirectP_Key)); 6956 IBTF_DPRINTF_L4("ibdm", "\t Redirected QP : 0x%x", 6957 classportinfo->RedirectQP); 6958 IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY : 0x%x", 6959 b2h32(classportinfo->RedirectQ_Key)); 6960 IBTF_DPRINTF_L4("ibdm", "\t Trap GID hi : 0x%llx", 6961 b2h64(classportinfo->TrapGID_hi)); 6962 IBTF_DPRINTF_L4("ibdm", "\t Trap GID lo : 0x%llx", 6963 b2h64(classportinfo->TrapGID_lo)); 6964 IBTF_DPRINTF_L4("ibdm", "\t Trap TC : 0x%x", 6965 classportinfo->TrapTC); 6966 IBTF_DPRINTF_L4("ibdm", "\t Trap SL : 0x%x", 6967 classportinfo->TrapSL); 6968 IBTF_DPRINTF_L4("ibdm", "\t Trap FL : 0x%x", 6969 classportinfo->TrapFL); 6970 IBTF_DPRINTF_L4("ibdm", "\t Trap LID : 0x%x", 6971 b2h16(classportinfo->TrapLID)); 6972 IBTF_DPRINTF_L4("ibdm", "\t Trap P_Key : 0x%x", 6973 b2h16(classportinfo->TrapP_Key)); 6974 IBTF_DPRINTF_L4("ibdm", "\t Trap HL : 0x%x", 6975 classportinfo->TrapHL); 6976 IBTF_DPRINTF_L4("ibdm", "\t Trap QP : 0x%x", 6977 classportinfo->TrapQP); 6978 IBTF_DPRINTF_L4("ibdm", "\t Trap Q_Key : 0x%x", 6979 b2h32(classportinfo->TrapQ_Key)); 6980 } 6981 6982 6983 void 6984 ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info) 6985 { 6986 IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo"); 6987 IBTF_DPRINTF_L4("ibdm", "\t\t ------------"); 6988 6989 IBTF_DPRINTF_L4("ibdm", "\tChange ID : 0x%x", 6990 b2h16(iou_info->iou_changeid)); 6991 IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots : %d", 6992 iou_info->iou_num_ctrl_slots); 6993 IBTF_DPRINTF_L4("ibdm", "\tIOU flag : 0x%x", 6994 iou_info->iou_flag); 6995 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 0 : 0x%x", 6996 iou_info->iou_ctrl_list[0]); 6997 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 1 : 0x%x", 6998 iou_info->iou_ctrl_list[1]); 6999 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 2 : 0x%x", 7000 iou_info->iou_ctrl_list[2]); 7001 } 7002 7003 7004 void 7005 ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc) 7006 { 7007 IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile"); 7008 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------------"); 7009 7010 IBTF_DPRINTF_L4("ibdm", "\tIOC Guid : %llx", ioc->ioc_guid); 7011 IBTF_DPRINTF_L4("ibdm", "\tVendorID : 0x%x", ioc->ioc_vendorid); 7012 IBTF_DPRINTF_L4("ibdm", "\tDevice Id : 0x%x", ioc->ioc_deviceid); 7013 IBTF_DPRINTF_L4("ibdm", "\tDevice Ver : 0x%x", ioc->ioc_device_ver); 7014 IBTF_DPRINTF_L4("ibdm", "\tSubsys ID : 0x%x", ioc->ioc_subsys_id); 7015 IBTF_DPRINTF_L4("ibdm", "\tIO class : 0x%x", ioc->ioc_io_class); 7016 IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass); 7017 IBTF_DPRINTF_L4("ibdm", "\tProtocol : 0x%x", ioc->ioc_protocol); 7018 IBTF_DPRINTF_L4("ibdm", "\tProtocolV : 0x%x", ioc->ioc_protocol_ver); 7019 IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth : %d", ioc->ioc_send_msg_qdepth); 7020 IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d", 7021 ioc->ioc_rdma_read_qdepth); 7022 IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz : %d", ioc->ioc_send_msg_sz); 7023 IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz); 7024 IBTF_DPRINTF_L4("ibdm", "\topcal mask : 0x%x", 7025 ioc->ioc_ctrl_opcap_mask); 7026 IBTF_DPRINTF_L4("ibdm", "\tsrventries : %x", ioc->ioc_service_entries); 7027 } 7028 7029 7030 void 7031 ibdm_dump_service_entries(ib_dm_srv_t *srv_ents) 7032 { 7033 IBTF_DPRINTF_L4("ibdm", 7034 
"\thandle_srventry_mad: service id : %llx", srv_ents->srv_id); 7035 7036 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad: " 7037 "Service Name : %s", srv_ents->srv_name); 7038 } 7039 7040 int ibdm_allow_sweep_fabric_timestamp = 1; 7041 7042 void 7043 ibdm_dump_sweep_fabric_timestamp(int flag) 7044 { 7045 static hrtime_t x; 7046 if (flag) { 7047 if (ibdm_allow_sweep_fabric_timestamp) { 7048 IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete " 7049 "sweep %lld ms", ((gethrtime() - x)/ 1000000)); 7050 } 7051 x = 0; 7052 } else 7053 x = gethrtime(); 7054 } 7055 #endif 7056