/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * ibdm.c
 *
 * This file contains the InfiniBand Device Manager (IBDM) support functions.
 * The IB nexus driver is the only client of the IBDM module.
 *
 * IBDM registers with IBTF for HCA arrival/removal notification.
 * IBDM registers with SA access to send DM MADs to discover the IOC's behind
 * the IOU's.
 *
 * The IB nexus driver registers with IBDM to find information about the
 * HCA's and IOC's (behind the IOU) present on the IB fabric.
 */

#include <sys/systm.h>
#include <sys/taskq.h>
#include <sys/ib/mgt/ibdm/ibdm_impl.h>
#include <sys/ib/mgt/ibmf/ibmf_impl.h>
#include <sys/modctl.h>

/* Function Prototype declarations */
static int	ibdm_free_iou_info(ibdm_dp_gidinfo_t *, ibdm_iou_info_t **);
static int	ibdm_fini(void);
static int	ibdm_init(void);
static int	ibdm_get_reachable_ports(ibdm_port_attr_t *,
		    ibdm_hca_list_t *);
static ibdm_dp_gidinfo_t *ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t);
static ibdm_dp_gidinfo_t *ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *);
static boolean_t ibdm_is_cisco(ib_guid_t);
static boolean_t ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *);
static void	ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *);
static int	ibdm_set_classportinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_send_classportinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *);
static int	ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t,
		    ib_guid_t *, ib_guid_t *);
static int	ibdm_retry_command(ibdm_timeout_cb_args_t *);
static int	ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int);
static int	ibdm_verify_mad_status(ib_mad_hdr_t *);
static int	ibdm_handle_redirection(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_wait_probe_completion(void);
static void	ibdm_sweep_fabric(int);
static void	ibdm_probe_gid_thread(void *);
static void	ibdm_wakeup_probe_gid_cv(void);
static void	ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int);
static int	ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int);
static void	ibdm_update_port_attr(ibdm_port_attr_t *);
static void	ibdm_handle_hca_attach(ib_guid_t);
static void	ibdm_handle_srventry_mad(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_recv_incoming_mad(void *);
static void	ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *,
void *); 83 static void ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *); 84 static void ibdm_pkt_timeout_hdlr(void *arg); 85 static void ibdm_initialize_port(ibdm_port_attr_t *); 86 static void ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 87 static void ibdm_probe_gid(ibdm_dp_gidinfo_t *); 88 static void ibdm_alloc_send_buffers(ibmf_msg_t *); 89 static void ibdm_free_send_buffers(ibmf_msg_t *); 90 static void ibdm_handle_hca_detach(ib_guid_t); 91 static int ibdm_fini_port(ibdm_port_attr_t *); 92 static int ibdm_uninit_hca(ibdm_hca_list_t *); 93 static void ibdm_handle_setclassportinfo(ibmf_handle_t, ibmf_msg_t *, 94 ibdm_dp_gidinfo_t *, int *); 95 static void ibdm_handle_iounitinfo(ibmf_handle_t, 96 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 97 static void ibdm_handle_ioc_profile(ibmf_handle_t, 98 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 99 static void ibdm_event_hdlr(void *, ibt_hca_hdl_t, 100 ibt_async_code_t, ibt_async_event_t *); 101 static void ibdm_handle_classportinfo(ibmf_handle_t, 102 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 103 static void ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *, 104 ibdm_dp_gidinfo_t *); 105 106 static ibdm_hca_list_t *ibdm_dup_hca_attr(ibdm_hca_list_t *); 107 static ibdm_ioc_info_t *ibdm_dup_ioc_info(ibdm_ioc_info_t *, 108 ibdm_dp_gidinfo_t *gid_list); 109 static void ibdm_probe_ioc(ib_guid_t, ib_guid_t, int); 110 static ibdm_ioc_info_t *ibdm_is_ioc_present(ib_guid_t, 111 ibdm_dp_gidinfo_t *, int *); 112 static ibdm_port_attr_t *ibdm_get_port_attr(ibt_async_event_t *, 113 ibdm_hca_list_t **); 114 static sa_node_record_t *ibdm_get_node_records(ibmf_saa_handle_t, 115 size_t *, ib_guid_t); 116 static int ibdm_get_node_record_by_port(ibmf_saa_handle_t, 117 ib_guid_t, sa_node_record_t **, size_t *); 118 static sa_portinfo_record_t *ibdm_get_portinfo(ibmf_saa_handle_t, size_t *, 119 ib_lid_t); 120 static ibdm_dp_gidinfo_t *ibdm_create_gid_info(ibdm_port_attr_t *, 121 ib_gid_t, ib_gid_t); 122 static ibdm_dp_gidinfo_t *ibdm_find_gid(ib_guid_t, ib_guid_t); 123 static int ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t); 124 static ibdm_ioc_info_t *ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int); 125 static void ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t, 126 ibmf_saa_event_details_t *, void *); 127 static void ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *, 128 ibdm_dp_gidinfo_t *); 129 static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *); 130 static void ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *, 131 ibdm_dp_gidinfo_t *); 132 static void ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *); 133 static void ibdm_free_gid_list(ibdm_gid_t *); 134 static void ibdm_rescan_gidlist(ib_guid_t *ioc_guid); 135 static void ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *); 136 static void ibdm_saa_event_taskq(void *); 137 static void ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *); 138 static void ibdm_get_next_port(ibdm_hca_list_t **, 139 ibdm_port_attr_t **, int); 140 static void ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *, 141 ibdm_dp_gidinfo_t *); 142 static void ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *, 143 ibdm_hca_list_t *); 144 static void ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *); 145 static void ibdm_saa_handle_new_gid(void *); 146 static void ibdm_reset_all_dgids(ibmf_saa_handle_t); 147 static void ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *); 148 static void ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *); 149 static void ibdm_fill_srv_attr_mod(ib_mad_hdr_t *, ibdm_timeout_cb_args_t *); 150 static void 
ibdm_bump_transactionID(ibdm_dp_gidinfo_t *); 151 static ibdm_ioc_info_t *ibdm_handle_prev_iou(); 152 static int ibdm_serv_cmp(ibdm_srvents_info_t *, ibdm_srvents_info_t *, 153 int); 154 155 int ibdm_dft_timeout = IBDM_DFT_TIMEOUT; 156 int ibdm_dft_retry_cnt = IBDM_DFT_NRETRIES; 157 #ifdef DEBUG 158 int ibdm_ignore_saa_event = 0; 159 #endif 160 161 /* Modload support */ 162 static struct modlmisc ibdm_modlmisc = { 163 &mod_miscops, 164 "InfiniBand Device Manager %I%", 165 }; 166 167 struct modlinkage ibdm_modlinkage = { 168 MODREV_1, 169 (void *)&ibdm_modlmisc, 170 NULL 171 }; 172 173 static ibt_clnt_modinfo_t ibdm_ibt_modinfo = { 174 IBTI_V2, 175 IBT_DM, 176 ibdm_event_hdlr, 177 NULL, 178 "ibdm" 179 }; 180 181 /* Global variables */ 182 ibdm_t ibdm; 183 int ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING; 184 char *ibdm_string = "ibdm"; 185 186 _NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv", 187 ibdm.ibdm_dp_gidlist_head)) 188 189 /* 190 * _init 191 * Loadable module init, called before any other module. 192 * Initialize mutex 193 * Register with IBTF 194 */ 195 int 196 _init(void) 197 { 198 int err; 199 200 IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm); 201 202 if ((err = ibdm_init()) != IBDM_SUCCESS) { 203 IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err); 204 (void) ibdm_fini(); 205 return (DDI_FAILURE); 206 } 207 208 if ((err = mod_install(&ibdm_modlinkage)) != 0) { 209 IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err); 210 (void) ibdm_fini(); 211 } 212 return (err); 213 } 214 215 216 int 217 _fini(void) 218 { 219 int err; 220 221 if ((err = ibdm_fini()) != IBDM_SUCCESS) { 222 IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err); 223 (void) ibdm_init(); 224 return (EBUSY); 225 } 226 227 if ((err = mod_remove(&ibdm_modlinkage)) != 0) { 228 IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err); 229 (void) ibdm_init(); 230 } 231 return (err); 232 } 233 234 235 int 236 _info(struct modinfo *modinfop) 237 { 238 return (mod_info(&ibdm_modlinkage, modinfop)); 239 } 240 241 242 /* 243 * ibdm_init(): 244 * Register with IBTF 245 * Allocate memory for the HCAs 246 * Allocate minor-nodes for the HCAs 247 */ 248 static int 249 ibdm_init(void) 250 { 251 int i, hca_count; 252 ib_guid_t *hca_guids; 253 ibt_status_t status; 254 255 IBTF_DPRINTF_L4("ibdm", "\tibdm_init:"); 256 if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) { 257 mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL); 258 mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL); 259 mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL); 260 mutex_enter(&ibdm.ibdm_mutex); 261 ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED; 262 } 263 264 if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) { 265 if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL, 266 (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) { 267 IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach " 268 "failed %x", status); 269 mutex_exit(&ibdm.ibdm_mutex); 270 return (IBDM_FAILURE); 271 } 272 273 ibdm.ibdm_state |= IBDM_IBT_ATTACHED; 274 mutex_exit(&ibdm.ibdm_mutex); 275 } 276 277 278 if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) { 279 hca_count = ibt_get_hca_list(&hca_guids); 280 IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count); 281 for (i = 0; i < hca_count; i++) 282 (void) ibdm_handle_hca_attach(hca_guids[i]); 283 if (hca_count) 284 ibt_free_hca_list(hca_guids, hca_count); 285 286 mutex_enter(&ibdm.ibdm_mutex); 287 ibdm.ibdm_state |= IBDM_HCA_ATTACHED; 288 mutex_exit(&ibdm.ibdm_mutex); 289 } 290 291 if 
(!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) { 292 cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL); 293 cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL); 294 mutex_enter(&ibdm.ibdm_mutex); 295 ibdm.ibdm_state |= IBDM_CVS_ALLOCED; 296 mutex_exit(&ibdm.ibdm_mutex); 297 } 298 return (IBDM_SUCCESS); 299 } 300 301 302 static int 303 ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info, ibdm_iou_info_t **ioup) 304 { 305 int ii, k, niocs; 306 size_t size; 307 ibdm_gid_t *delete, *head; 308 timeout_id_t timeout_id; 309 ibdm_ioc_info_t *ioc; 310 ibdm_iou_info_t *gl_iou = *ioup; 311 312 ASSERT(mutex_owned(&gid_info->gl_mutex)); 313 if (gl_iou == NULL) { 314 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU"); 315 return (0); 316 } 317 318 niocs = gl_iou->iou_info.iou_num_ctrl_slots; 319 IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d", 320 gid_info, niocs); 321 322 for (ii = 0; ii < niocs; ii++) { 323 ioc = (ibdm_ioc_info_t *)&gl_iou->iou_ioc_info[ii]; 324 325 /* handle the case where an ioc_timeout_id is scheduled */ 326 if (ioc->ioc_timeout_id) { 327 timeout_id = ioc->ioc_timeout_id; 328 mutex_exit(&gid_info->gl_mutex); 329 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 330 "ioc_timeout_id = 0x%x", timeout_id); 331 if (untimeout(timeout_id) == -1) { 332 IBTF_DPRINTF_L2("ibdm", "free_iou_info: " 333 "untimeout ioc_timeout_id failed"); 334 mutex_enter(&gid_info->gl_mutex); 335 return (-1); 336 } 337 mutex_enter(&gid_info->gl_mutex); 338 ioc->ioc_timeout_id = 0; 339 } 340 341 /* handle the case where an ioc_dc_timeout_id is scheduled */ 342 if (ioc->ioc_dc_timeout_id) { 343 timeout_id = ioc->ioc_dc_timeout_id; 344 mutex_exit(&gid_info->gl_mutex); 345 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 346 "ioc_dc_timeout_id = 0x%x", timeout_id); 347 if (untimeout(timeout_id) == -1) { 348 IBTF_DPRINTF_L2("ibdm", "free_iou_info: " 349 "untimeout ioc_dc_timeout_id failed"); 350 mutex_enter(&gid_info->gl_mutex); 351 return (-1); 352 } 353 mutex_enter(&gid_info->gl_mutex); 354 ioc->ioc_dc_timeout_id = 0; 355 } 356 357 /* handle the case where serv[k].se_timeout_id is scheduled */ 358 for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) { 359 if (ioc->ioc_serv[k].se_timeout_id) { 360 timeout_id = ioc->ioc_serv[k].se_timeout_id; 361 mutex_exit(&gid_info->gl_mutex); 362 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 363 "ioc->ioc_serv[%d].se_timeout_id = 0x%x", 364 k, timeout_id); 365 if (untimeout(timeout_id) == -1) { 366 IBTF_DPRINTF_L2("ibdm", "free_iou_info:" 367 " untimeout se_timeout_id failed"); 368 mutex_enter(&gid_info->gl_mutex); 369 return (-1); 370 } 371 mutex_enter(&gid_info->gl_mutex); 372 ioc->ioc_serv[k].se_timeout_id = 0; 373 } 374 } 375 376 /* delete GID list in IOC */ 377 head = ioc->ioc_gid_list; 378 while (head) { 379 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: " 380 "Deleting gid_list struct %p", head); 381 delete = head; 382 head = head->gid_next; 383 kmem_free(delete, sizeof (ibdm_gid_t)); 384 } 385 ioc->ioc_gid_list = NULL; 386 387 /* delete ioc_serv */ 388 size = ioc->ioc_profile.ioc_service_entries * 389 sizeof (ibdm_srvents_info_t); 390 if (ioc->ioc_serv && size) { 391 kmem_free(ioc->ioc_serv, size); 392 ioc->ioc_serv = NULL; 393 } 394 } 395 /* 396 * Clear the IBDM_CISCO_PROBE_DONE flag to get the IO Unit information 397 * via the switch during the probe process. 
	 */
	gid_info->gl_flag &= ~IBDM_CISCO_PROBE_DONE;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC");
	size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t);
	kmem_free(gl_iou, size);
	*ioup = NULL;
	return (0);
}


/*
 * ibdm_fini():
 *	Un-register with IBTF
 *	Deallocate memory for the GID info
 */
static int
ibdm_fini()
{
	int			ii;
	ibdm_hca_list_t		*hca_list, *temp;
	ibdm_dp_gidinfo_t	*gid_info, *tmp;
	ibdm_gid_t		*head, *delete;

	IBTF_DPRINTF_L4("ibdm", "\tibdm_fini");

	mutex_enter(&ibdm.ibdm_hl_mutex);
	if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) {
		if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed");
			mutex_exit(&ibdm.ibdm_hl_mutex);
			return (IBDM_FAILURE);
		}
		ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED;
		ibdm.ibdm_ibt_clnt_hdl = NULL;
	}

	hca_list = ibdm.ibdm_hca_list_head;
	IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count);
	for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
		temp = hca_list;
		hca_list = hca_list->hl_next;
		IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp);
		if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: "
			    "uninit_hca %p failed", temp);
			mutex_exit(&ibdm.ibdm_hl_mutex);
			return (IBDM_FAILURE);
		}
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

	mutex_enter(&ibdm.ibdm_mutex);
	if (ibdm.ibdm_state & IBDM_HCA_ATTACHED)
		ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED;

	gid_info = ibdm.ibdm_dp_gidlist_head;
	while (gid_info) {
		mutex_enter(&gid_info->gl_mutex);
		(void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);

		tmp = gid_info;
		gid_info = gid_info->gl_next;
		mutex_destroy(&tmp->gl_mutex);
		head = tmp->gl_gid;
		while (head) {
			IBTF_DPRINTF_L4("ibdm",
			    "\tibdm_fini: Deleting gid structs");
			delete = head;
			head = head->gid_next;
			kmem_free(delete, sizeof (ibdm_gid_t));
		}
		kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t));
	}
	mutex_exit(&ibdm.ibdm_mutex);

	if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) {
		ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED;
		mutex_destroy(&ibdm.ibdm_mutex);
		mutex_destroy(&ibdm.ibdm_hl_mutex);
		mutex_destroy(&ibdm.ibdm_ibnex_mutex);
	}
	if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) {
		ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED;
		cv_destroy(&ibdm.ibdm_probe_cv);
		cv_destroy(&ibdm.ibdm_busy_cv);
	}
	return (IBDM_SUCCESS);
}


/*
 * ibdm_event_hdlr()
 *
 *	IBDM registers this asynchronous event handler at the time of
 *	ibt_attach. IBDM supports the following async events; all other
 *	events are simply ignored.
 *	IBT_HCA_ATTACH_EVENT:
 *		Retrieves the information about all the ports that are
 *		present on this HCA, allocates the port attributes
 *		structure and calls the IB nexus callback routine with
 *		the port attributes structure as an input argument.
502 * IBT_HCA_DETACH_EVENT: 503 * Retrieves the information about all the ports that are 504 * present on this HCA and calls IB nexus callback with 505 * port guid as an argument 506 * IBT_EVENT_PORT_UP: 507 * Register with IBMF and SA access 508 * Setup IBMF receive callback routine 509 * IBT_EVENT_PORT_DOWN: 510 * Un-Register with IBMF and SA access 511 * Teardown IBMF receive callback routine 512 */ 513 /*ARGSUSED*/ 514 static void 515 ibdm_event_hdlr(void *clnt_hdl, 516 ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event) 517 { 518 ibdm_hca_list_t *hca_list; 519 ibdm_port_attr_t *port; 520 ibmf_saa_handle_t port_sa_hdl; 521 522 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code); 523 524 switch (code) { 525 case IBT_HCA_ATTACH_EVENT: /* New HCA registered with IBTF */ 526 ibdm_handle_hca_attach(event->ev_hca_guid); 527 break; 528 529 case IBT_HCA_DETACH_EVENT: /* HCA unregistered with IBTF */ 530 ibdm_handle_hca_detach(event->ev_hca_guid); 531 mutex_enter(&ibdm.ibdm_ibnex_mutex); 532 if (ibdm.ibdm_ibnex_callback != NULL) { 533 (*ibdm.ibdm_ibnex_callback)((void *) 534 &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED); 535 } 536 mutex_exit(&ibdm.ibdm_ibnex_mutex); 537 break; 538 539 case IBT_EVENT_PORT_UP: 540 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP"); 541 mutex_enter(&ibdm.ibdm_hl_mutex); 542 port = ibdm_get_port_attr(event, &hca_list); 543 if (port == NULL) { 544 IBTF_DPRINTF_L2("ibdm", 545 "\tevent_hdlr: HCA not present"); 546 mutex_exit(&ibdm.ibdm_hl_mutex); 547 break; 548 } 549 ibdm_initialize_port(port); 550 hca_list->hl_nports_active++; 551 mutex_exit(&ibdm.ibdm_hl_mutex); 552 break; 553 554 case IBT_ERROR_PORT_DOWN: 555 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN"); 556 mutex_enter(&ibdm.ibdm_hl_mutex); 557 port = ibdm_get_port_attr(event, &hca_list); 558 if (port == NULL) { 559 IBTF_DPRINTF_L2("ibdm", 560 "\tevent_hdlr: HCA not present"); 561 mutex_exit(&ibdm.ibdm_hl_mutex); 562 break; 563 } 564 hca_list->hl_nports_active--; 565 port_sa_hdl = port->pa_sa_hdl; 566 (void) ibdm_fini_port(port); 567 port->pa_state = IBT_PORT_DOWN; 568 mutex_exit(&ibdm.ibdm_hl_mutex); 569 ibdm_reset_all_dgids(port_sa_hdl); 570 break; 571 572 default: /* Ignore all other events/errors */ 573 break; 574 } 575 } 576 577 578 /* 579 * ibdm_initialize_port() 580 * Register with IBMF 581 * Register with SA access 582 * Register a receive callback routine with IBMF. IBMF invokes 583 * this routine whenever a MAD arrives at this port. 
584 * Update the port attributes 585 */ 586 static void 587 ibdm_initialize_port(ibdm_port_attr_t *port) 588 { 589 int ii; 590 uint_t nports, size; 591 uint_t pkey_idx; 592 ib_pkey_t pkey; 593 ibt_hca_portinfo_t *pinfop; 594 ibmf_register_info_t ibmf_reg; 595 ibmf_saa_subnet_event_args_t event_args; 596 597 IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:"); 598 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 599 600 /* Check whether the port is active */ 601 if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL, 602 NULL) != IBT_SUCCESS) 603 return; 604 605 if (port->pa_sa_hdl != NULL) 606 return; 607 608 if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num, 609 &pinfop, &nports, &size) != IBT_SUCCESS) { 610 /* This should not occur */ 611 port->pa_npkeys = 0; 612 port->pa_pkey_tbl = NULL; 613 return; 614 } 615 port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix; 616 617 port->pa_state = pinfop->p_linkstate; 618 port->pa_npkeys = pinfop->p_pkey_tbl_sz; 619 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc( 620 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 621 622 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) 623 port->pa_pkey_tbl[pkey_idx].pt_pkey = 624 pinfop->p_pkey_tbl[pkey_idx]; 625 626 ibt_free_portinfo(pinfop, size); 627 628 event_args.is_event_callback = ibdm_saa_event_cb; 629 event_args.is_event_callback_arg = port; 630 if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args, 631 IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) { 632 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 633 "sa access registration failed"); 634 return; 635 } 636 ibmf_reg.ir_ci_guid = port->pa_hca_guid; 637 ibmf_reg.ir_port_num = port->pa_port_num; 638 ibmf_reg.ir_client_class = DEV_MGT_MANAGER; 639 640 if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL, 641 &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) { 642 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 643 "IBMF registration failed"); 644 (void) ibdm_fini_port(port); 645 return; 646 } 647 if (ibmf_setup_async_cb(port->pa_ibmf_hdl, IBMF_QP_HANDLE_DEFAULT, 648 ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) { 649 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 650 "IBMF setup recv cb failed"); 651 (void) ibdm_fini_port(port); 652 return; 653 } 654 655 for (ii = 0; ii < port->pa_npkeys; ii++) { 656 pkey = port->pa_pkey_tbl[ii].pt_pkey; 657 if (IBDM_INVALID_PKEY(pkey)) { 658 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 659 continue; 660 } 661 ibdm_port_attr_ibmf_init(port, pkey, ii); 662 } 663 } 664 665 666 /* 667 * ibdm_port_attr_ibmf_init: 668 * With IBMF - Alloc QP Handle and Setup Async callback 669 */ 670 static void 671 ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii) 672 { 673 int ret; 674 675 if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY, 676 IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) != 677 IBMF_SUCCESS) { 678 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: " 679 "IBMF failed to alloc qp %d", ret); 680 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 681 return; 682 } 683 684 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p", 685 port->pa_ibmf_hdl); 686 687 if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl, 688 port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) != 689 IBMF_SUCCESS) { 690 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: " 691 "IBMF setup recv cb failed %d", ret); 692 (void) ibmf_free_qp(port->pa_ibmf_hdl, 693 &port->pa_pkey_tbl[ii].pt_qp_hdl, 0); 694 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 695 } 696 } 697 698 699 /* 700 * 
ibdm_get_port_attr() 701 * Get port attributes from HCA guid and port number 702 * Return pointer to ibdm_port_attr_t on Success 703 * and NULL on failure 704 */ 705 static ibdm_port_attr_t * 706 ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval) 707 { 708 ibdm_hca_list_t *hca_list; 709 ibdm_port_attr_t *port_attr; 710 int ii; 711 712 IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port); 713 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 714 hca_list = ibdm.ibdm_hca_list_head; 715 while (hca_list) { 716 if (hca_list->hl_hca_guid == event->ev_hca_guid) { 717 for (ii = 0; ii < hca_list->hl_nports; ii++) { 718 port_attr = &hca_list->hl_port_attr[ii]; 719 if (port_attr->pa_port_num == event->ev_port) { 720 *retval = hca_list; 721 return (port_attr); 722 } 723 } 724 } 725 hca_list = hca_list->hl_next; 726 } 727 return (NULL); 728 } 729 730 731 /* 732 * ibdm_update_port_attr() 733 * Update the port attributes 734 */ 735 static void 736 ibdm_update_port_attr(ibdm_port_attr_t *port) 737 { 738 uint_t nports, size; 739 uint_t pkey_idx; 740 ibt_hca_portinfo_t *portinfop; 741 742 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin"); 743 if (ibt_query_hca_ports(port->pa_hca_hdl, 744 port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) { 745 /* This should not occur */ 746 port->pa_npkeys = 0; 747 port->pa_pkey_tbl = NULL; 748 return; 749 } 750 port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix; 751 752 port->pa_state = portinfop->p_linkstate; 753 754 /* 755 * PKey information in portinfo valid only if port is 756 * ACTIVE. Bail out if not. 757 */ 758 if (port->pa_state != IBT_PORT_ACTIVE) { 759 port->pa_npkeys = 0; 760 port->pa_pkey_tbl = NULL; 761 ibt_free_portinfo(portinfop, size); 762 return; 763 } 764 765 port->pa_npkeys = portinfop->p_pkey_tbl_sz; 766 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc( 767 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 768 769 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) { 770 port->pa_pkey_tbl[pkey_idx].pt_pkey = 771 portinfop->p_pkey_tbl[pkey_idx]; 772 } 773 ibt_free_portinfo(portinfop, size); 774 } 775 776 777 /* 778 * ibdm_handle_hca_attach() 779 */ 780 static void 781 ibdm_handle_hca_attach(ib_guid_t hca_guid) 782 { 783 uint_t size; 784 uint_t ii, nports; 785 ibt_status_t status; 786 ibt_hca_hdl_t hca_hdl; 787 ibt_hca_attr_t *hca_attr; 788 ibdm_hca_list_t *hca_list, *temp; 789 ibdm_port_attr_t *port_attr; 790 ibt_hca_portinfo_t *portinfop; 791 792 IBTF_DPRINTF_L4("ibdm", 793 "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid); 794 795 /* open the HCA first */ 796 if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid, 797 &hca_hdl)) != IBT_SUCCESS) { 798 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: " 799 "open_hca failed, status 0x%x", status); 800 return; 801 } 802 803 hca_attr = (ibt_hca_attr_t *) 804 kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP); 805 /* ibt_query_hca always returns IBT_SUCCESS */ 806 (void) ibt_query_hca(hca_hdl, hca_attr); 807 808 IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x," 809 " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id, 810 hca_attr->hca_version_id, hca_attr->hca_nports); 811 812 if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports, 813 &size)) != IBT_SUCCESS) { 814 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: " 815 "ibt_query_hca_ports failed, status 0x%x", status); 816 kmem_free(hca_attr, sizeof (ibt_hca_attr_t)); 817 (void) ibt_close_hca(hca_hdl); 818 return; 819 } 820 hca_list = (ibdm_hca_list_t *) 821 
kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP); 822 hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc( 823 (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP); 824 hca_list->hl_hca_guid = hca_attr->hca_node_guid; 825 hca_list->hl_nports = hca_attr->hca_nports; 826 hca_list->hl_attach_time = ddi_get_time(); 827 hca_list->hl_hca_hdl = hca_hdl; 828 829 /* 830 * Init a dummy port attribute for the HCA node 831 * This is for Per-HCA Node. Initialize port_attr : 832 * hca_guid & port_guid -> hca_guid 833 * npkeys, pkey_tbl is NULL 834 * port_num, sn_prefix is 0 835 * vendorid, product_id, dev_version from HCA 836 * pa_state is IBT_PORT_ACTIVE 837 */ 838 hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc( 839 sizeof (ibdm_port_attr_t), KM_SLEEP); 840 port_attr = hca_list->hl_hca_port_attr; 841 port_attr->pa_vendorid = hca_attr->hca_vendor_id; 842 port_attr->pa_productid = hca_attr->hca_device_id; 843 port_attr->pa_dev_version = hca_attr->hca_version_id; 844 port_attr->pa_hca_guid = hca_attr->hca_node_guid; 845 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl; 846 port_attr->pa_port_guid = hca_attr->hca_node_guid; 847 port_attr->pa_state = IBT_PORT_ACTIVE; 848 849 850 for (ii = 0; ii < nports; ii++) { 851 port_attr = &hca_list->hl_port_attr[ii]; 852 port_attr->pa_vendorid = hca_attr->hca_vendor_id; 853 port_attr->pa_productid = hca_attr->hca_device_id; 854 port_attr->pa_dev_version = hca_attr->hca_version_id; 855 port_attr->pa_hca_guid = hca_attr->hca_node_guid; 856 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl; 857 port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid; 858 port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix; 859 port_attr->pa_port_num = portinfop[ii].p_port_num; 860 port_attr->pa_state = portinfop[ii].p_linkstate; 861 862 /* 863 * Register with IBMF, SA access when the port is in 864 * ACTIVE state. Also register a callback routine 865 * with IBMF to receive incoming DM MAD's. 866 * The IBDM event handler takes care of registration of 867 * port which are not active. 
868 */ 869 IBTF_DPRINTF_L4("ibdm", 870 "\thandle_hca_attach: port guid %llx Port state 0x%x", 871 port_attr->pa_port_guid, portinfop[ii].p_linkstate); 872 873 if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) { 874 mutex_enter(&ibdm.ibdm_hl_mutex); 875 hca_list->hl_nports_active++; 876 ibdm_initialize_port(port_attr); 877 mutex_exit(&ibdm.ibdm_hl_mutex); 878 } 879 } 880 mutex_enter(&ibdm.ibdm_hl_mutex); 881 for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) { 882 if (temp->hl_hca_guid == hca_guid) { 883 IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX " 884 "already seen by IBDM", hca_guid); 885 mutex_exit(&ibdm.ibdm_hl_mutex); 886 (void) ibdm_uninit_hca(hca_list); 887 return; 888 } 889 } 890 ibdm.ibdm_hca_count++; 891 if (ibdm.ibdm_hca_list_head == NULL) { 892 ibdm.ibdm_hca_list_head = hca_list; 893 ibdm.ibdm_hca_list_tail = hca_list; 894 } else { 895 ibdm.ibdm_hca_list_tail->hl_next = hca_list; 896 ibdm.ibdm_hca_list_tail = hca_list; 897 } 898 mutex_exit(&ibdm.ibdm_hl_mutex); 899 mutex_enter(&ibdm.ibdm_ibnex_mutex); 900 if (ibdm.ibdm_ibnex_callback != NULL) { 901 (*ibdm.ibdm_ibnex_callback)((void *) 902 &hca_guid, IBDM_EVENT_HCA_ADDED); 903 } 904 mutex_exit(&ibdm.ibdm_ibnex_mutex); 905 906 kmem_free(hca_attr, sizeof (ibt_hca_attr_t)); 907 ibt_free_portinfo(portinfop, size); 908 } 909 910 911 /* 912 * ibdm_handle_hca_detach() 913 */ 914 static void 915 ibdm_handle_hca_detach(ib_guid_t hca_guid) 916 { 917 ibdm_hca_list_t *head, *prev = NULL; 918 size_t len; 919 ibdm_dp_gidinfo_t *gidinfo; 920 921 IBTF_DPRINTF_L4("ibdm", 922 "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid); 923 924 /* Make sure no probes are running */ 925 mutex_enter(&ibdm.ibdm_mutex); 926 while (ibdm.ibdm_busy & IBDM_BUSY) 927 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 928 ibdm.ibdm_busy |= IBDM_BUSY; 929 mutex_exit(&ibdm.ibdm_mutex); 930 931 mutex_enter(&ibdm.ibdm_hl_mutex); 932 head = ibdm.ibdm_hca_list_head; 933 while (head) { 934 if (head->hl_hca_guid == hca_guid) { 935 if (prev == NULL) 936 ibdm.ibdm_hca_list_head = head->hl_next; 937 else 938 prev->hl_next = head->hl_next; 939 ibdm.ibdm_hca_count--; 940 break; 941 } 942 prev = head; 943 head = head->hl_next; 944 } 945 mutex_exit(&ibdm.ibdm_hl_mutex); 946 if (ibdm_uninit_hca(head) != IBDM_SUCCESS) 947 (void) ibdm_handle_hca_attach(hca_guid); 948 949 /* 950 * Now clean up the HCA lists in the gidlist. 
951 */ 952 for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo = 953 gidinfo->gl_next) { 954 prev = NULL; 955 head = gidinfo->gl_hca_list; 956 while (head) { 957 if (head->hl_hca_guid == hca_guid) { 958 if (prev == NULL) 959 gidinfo->gl_hca_list = 960 head->hl_next; 961 else 962 prev->hl_next = head->hl_next; 963 964 len = sizeof (ibdm_hca_list_t) + 965 (head->hl_nports * 966 sizeof (ibdm_port_attr_t)); 967 kmem_free(head, len); 968 969 break; 970 } 971 prev = head; 972 head = head->hl_next; 973 } 974 } 975 976 mutex_enter(&ibdm.ibdm_mutex); 977 ibdm.ibdm_busy &= ~IBDM_BUSY; 978 cv_broadcast(&ibdm.ibdm_busy_cv); 979 mutex_exit(&ibdm.ibdm_mutex); 980 } 981 982 983 static int 984 ibdm_uninit_hca(ibdm_hca_list_t *head) 985 { 986 int ii; 987 ibdm_port_attr_t *port_attr; 988 989 for (ii = 0; ii < head->hl_nports; ii++) { 990 port_attr = &head->hl_port_attr[ii]; 991 if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) { 992 IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x " 993 "ibdm_fini_port() failed", head, ii); 994 return (IBDM_FAILURE); 995 } 996 } 997 if (head->hl_hca_hdl) 998 if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS) 999 return (IBDM_FAILURE); 1000 kmem_free(head->hl_port_attr, 1001 head->hl_nports * sizeof (ibdm_port_attr_t)); 1002 kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t)); 1003 kmem_free(head, sizeof (ibdm_hca_list_t)); 1004 return (IBDM_SUCCESS); 1005 } 1006 1007 1008 /* 1009 * For each port on the HCA, 1010 * 1) Teardown IBMF receive callback function 1011 * 2) Unregister with IBMF 1012 * 3) Unregister with SA access 1013 */ 1014 static int 1015 ibdm_fini_port(ibdm_port_attr_t *port_attr) 1016 { 1017 int ii, ibmf_status; 1018 1019 for (ii = 0; ii < port_attr->pa_npkeys; ii++) { 1020 if (port_attr->pa_pkey_tbl == NULL) 1021 break; 1022 if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl) 1023 continue; 1024 if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) { 1025 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1026 "ibdm_port_attr_ibmf_fini failed for " 1027 "port pkey 0x%x", ii); 1028 return (IBDM_FAILURE); 1029 } 1030 } 1031 1032 if (port_attr->pa_ibmf_hdl) { 1033 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1034 IBMF_QP_HANDLE_DEFAULT, 0); 1035 if (ibmf_status != IBMF_SUCCESS) { 1036 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1037 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1038 return (IBDM_FAILURE); 1039 } 1040 1041 ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0); 1042 if (ibmf_status != IBMF_SUCCESS) { 1043 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1044 "ibmf_unregister failed %d", ibmf_status); 1045 return (IBDM_FAILURE); 1046 } 1047 1048 port_attr->pa_ibmf_hdl = NULL; 1049 } 1050 1051 if (port_attr->pa_sa_hdl) { 1052 ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0); 1053 if (ibmf_status != IBMF_SUCCESS) { 1054 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1055 "ibmf_sa_session_close failed %d", ibmf_status); 1056 return (IBDM_FAILURE); 1057 } 1058 port_attr->pa_sa_hdl = NULL; 1059 } 1060 1061 if (port_attr->pa_pkey_tbl != NULL) { 1062 kmem_free(port_attr->pa_pkey_tbl, 1063 port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 1064 port_attr->pa_pkey_tbl = NULL; 1065 port_attr->pa_npkeys = 0; 1066 } 1067 1068 return (IBDM_SUCCESS); 1069 } 1070 1071 1072 /* 1073 * ibdm_port_attr_ibmf_fini: 1074 * With IBMF - Tear down Async callback and free QP Handle 1075 */ 1076 static int 1077 ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii) 1078 { 1079 int ibmf_status; 1080 1081 IBTF_DPRINTF_L5("ibdm", 
"\tport_attr_ibmf_fini:"); 1082 1083 if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) { 1084 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1085 port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1086 if (ibmf_status != IBMF_SUCCESS) { 1087 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1088 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1089 return (IBDM_FAILURE); 1090 } 1091 ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl, 1092 &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1093 if (ibmf_status != IBMF_SUCCESS) { 1094 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1095 "ibmf_free_qp failed %d", ibmf_status); 1096 return (IBDM_FAILURE); 1097 } 1098 port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 1099 } 1100 return (IBDM_SUCCESS); 1101 } 1102 1103 1104 /* 1105 * ibdm_gid_decr_pending: 1106 * decrement gl_pending_cmds. If zero wakeup sleeping threads 1107 */ 1108 static void 1109 ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo) 1110 { 1111 mutex_enter(&ibdm.ibdm_mutex); 1112 mutex_enter(&gidinfo->gl_mutex); 1113 if (--gidinfo->gl_pending_cmds == 0) { 1114 /* 1115 * Handle DGID getting removed. 1116 */ 1117 if (gidinfo->gl_disconnected) { 1118 mutex_exit(&gidinfo->gl_mutex); 1119 mutex_exit(&ibdm.ibdm_mutex); 1120 1121 IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: " 1122 "gidinfo %p hot removal", gidinfo); 1123 ibdm_delete_gidinfo(gidinfo); 1124 1125 mutex_enter(&ibdm.ibdm_mutex); 1126 ibdm.ibdm_ngid_probes_in_progress--; 1127 ibdm_wait_probe_completion(); 1128 mutex_exit(&ibdm.ibdm_mutex); 1129 return; 1130 } 1131 mutex_exit(&gidinfo->gl_mutex); 1132 mutex_exit(&ibdm.ibdm_mutex); 1133 ibdm_notify_newgid_iocs(gidinfo); 1134 mutex_enter(&ibdm.ibdm_mutex); 1135 mutex_enter(&gidinfo->gl_mutex); 1136 1137 ibdm.ibdm_ngid_probes_in_progress--; 1138 ibdm_wait_probe_completion(); 1139 } 1140 mutex_exit(&gidinfo->gl_mutex); 1141 mutex_exit(&ibdm.ibdm_mutex); 1142 } 1143 1144 1145 /* 1146 * ibdm_wait_probe_completion: 1147 * wait for probing to complete 1148 */ 1149 static void 1150 ibdm_wait_probe_completion(void) 1151 { 1152 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1153 if (ibdm.ibdm_ngid_probes_in_progress) { 1154 IBTF_DPRINTF_L4("ibdm", "\twait for probe complete"); 1155 ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS; 1156 while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS) 1157 cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex); 1158 } 1159 } 1160 1161 1162 /* 1163 * ibdm_wait_cisco_probe_completion: 1164 * wait for the reply from the Cisco FC GW switch after a setclassportinfo 1165 * request is sent. This wait can be achieved on each gid. 1166 */ 1167 static void 1168 ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *gidinfo) 1169 { 1170 ASSERT(MUTEX_HELD(&gidinfo->gl_mutex)); 1171 IBTF_DPRINTF_L4("ibdm", "\twait for cisco probe complete"); 1172 gidinfo->gl_flag |= IBDM_CISCO_PROBE; 1173 while (gidinfo->gl_flag & IBDM_CISCO_PROBE) 1174 cv_wait(&gidinfo->gl_probe_cv, &gidinfo->gl_mutex); 1175 } 1176 1177 1178 /* 1179 * ibdm_wakeup_probe_gid_cv: 1180 * wakeup waiting threads (based on ibdm_ngid_probes_in_progress) 1181 */ 1182 static void 1183 ibdm_wakeup_probe_gid_cv(void) 1184 { 1185 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1186 if (!ibdm.ibdm_ngid_probes_in_progress) { 1187 IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup"); 1188 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 1189 cv_broadcast(&ibdm.ibdm_probe_cv); 1190 } 1191 1192 } 1193 1194 1195 /* 1196 * ibdm_sweep_fabric(reprobe_flag) 1197 * Find all possible Managed IOU's and their IOC's that are visible 1198 * to the host. 
The algorithm used is as follows:
 *
 *	Send a "bus walk" request for each port on the host HCA to SA access
 *		SA returns complete set of GID's that are reachable from
 *		source port. This is done in parallel.
 *
 *	Initialize GID state to IBDM_GID_PROBE_NOT_DONE
 *
 *	Sort the GID list and eliminate duplicate GID's
 *		1) Use DGID for sorting
 *		2) use PortGuid for sorting
 *			Send SA query to retrieve NodeRecord and
 *			extract PortGuid from that.
 *
 *	Set GID state to IBDM_GID_PROBE_FAILED for all the ports that don't
 *	support DM MAD's
 *		Send a "Portinfo" query to get the port capabilities and
 *		then check for DM MAD's support
 *
 *	Send "ClassPortInfo" request for all the GID's in parallel,
 *	set the GID state to IBDM_GET_CLASSPORTINFO and wait on the
 *	cv_signal to complete.
 *
 *	When the DM agent on the remote GID sends back the response, IBMF
 *	invokes the DM callback routine.
 *
 *	If the response is proper, send "IOUnitInfo" request and set
 *	GID state to IBDM_GET_IOUNITINFO.
 *
 *	If the response is proper, send "IocProfileInfo" request to
 *	all the IOCs simultaneously and set GID state to
 *	IBDM_GET_IOC_DETAILS.
 *
 *	Send request to get Service entries simultaneously
 *
 *	Signal the waiting thread when responses for all the commands have
 *	been received.
 *
 *	Set the GID state to IBDM_GID_PROBE_FAILED when an error response
 *	is received during the probing period.
 *
 *	Note:
 *	ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds
 *	keep track of the number of commands in progress at any point of time.
 *	MAD transaction ID is used to identify a particular GID
 *	TBD: Consider registering the IBMF receive callback on demand
 *
 *	Note: This routine must be called with ibdm.ibdm_mutex held
 *	TBD: Re-probe failed GIDs (for certain failures) when the fabric
 *	sweep is next requested
 *
 *	Parameters : If reprobe_flag is set, all IOCs will be reprobed.
 */
static void
ibdm_sweep_fabric(int reprobe_flag)
{
	int			ii;
	int			new_paths = 0;
	uint8_t			niocs;
	taskqid_t		tid;
	ibdm_ioc_info_t		*ioc;
	ibdm_hca_list_t		*hca_list = NULL;
	ibdm_port_attr_t	*port = NULL;
	ibdm_dp_gidinfo_t	*gid_info;

	IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter");
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));

	/*
	 * Check whether a sweep already in progress. If so, just
	 * wait for the fabric sweep to complete
	 */
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;
	mutex_exit(&ibdm.ibdm_mutex);

	ibdm_dump_sweep_fabric_timestamp(0);

	/* Rescan the GID list for any removed GIDs for reprobe */
	if (reprobe_flag)
		ibdm_rescan_gidlist(NULL);

	/*
	 * Get list of all the ports reachable from the local known HCA
	 * ports which are active
	 */
	mutex_enter(&ibdm.ibdm_hl_mutex);
	for (ibdm_get_next_port(&hca_list, &port, 1); port;
	    ibdm_get_next_port(&hca_list, &port, 1)) {
		/*
		 * Get PATHS to all the reachable ports from
		 * SGID and update the global ibdm structure.
1289 */ 1290 new_paths = ibdm_get_reachable_ports(port, hca_list); 1291 ibdm.ibdm_ngids += new_paths; 1292 } 1293 mutex_exit(&ibdm.ibdm_hl_mutex); 1294 1295 mutex_enter(&ibdm.ibdm_mutex); 1296 ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids; 1297 mutex_exit(&ibdm.ibdm_mutex); 1298 1299 /* Send a request to probe GIDs asynchronously. */ 1300 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1301 gid_info = gid_info->gl_next) { 1302 mutex_enter(&gid_info->gl_mutex); 1303 gid_info->gl_reprobe_flag = reprobe_flag; 1304 mutex_exit(&gid_info->gl_mutex); 1305 1306 /* process newly encountered GIDs */ 1307 tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread, 1308 (void *)gid_info, TQ_NOSLEEP); 1309 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p" 1310 " taskq_id = %x", gid_info, tid); 1311 /* taskq failed to dispatch call it directly */ 1312 if (tid == NULL) 1313 ibdm_probe_gid_thread((void *)gid_info); 1314 } 1315 1316 mutex_enter(&ibdm.ibdm_mutex); 1317 ibdm_wait_probe_completion(); 1318 1319 /* 1320 * Update the properties, if reprobe_flag is set 1321 * Skip if gl_reprobe_flag is set, this will be 1322 * a re-inserted / new GID, for which notifications 1323 * have already been send. 1324 */ 1325 if (reprobe_flag) { 1326 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1327 gid_info = gid_info->gl_next) { 1328 if (gid_info->gl_iou == NULL) 1329 continue; 1330 if (gid_info->gl_reprobe_flag) { 1331 gid_info->gl_reprobe_flag = 0; 1332 continue; 1333 } 1334 1335 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1336 for (ii = 0; ii < niocs; ii++) { 1337 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1338 if (ioc) 1339 ibdm_reprobe_update_port_srv(ioc, 1340 gid_info); 1341 } 1342 } 1343 } else if (ibdm.ibdm_prev_iou) { 1344 ibdm_ioc_info_t *ioc_list; 1345 1346 /* 1347 * Get the list of IOCs which have changed. 1348 * If any IOCs have changed, Notify IBNexus 1349 */ 1350 ibdm.ibdm_prev_iou = 0; 1351 ioc_list = ibdm_handle_prev_iou(); 1352 if (ioc_list) { 1353 if (ibdm.ibdm_ibnex_callback != NULL) { 1354 (*ibdm.ibdm_ibnex_callback)( 1355 (void *)ioc_list, 1356 IBDM_EVENT_IOC_PROP_UPDATE); 1357 } 1358 } 1359 } 1360 1361 ibdm_dump_sweep_fabric_timestamp(1); 1362 1363 ibdm.ibdm_busy &= ~IBDM_BUSY; 1364 cv_broadcast(&ibdm.ibdm_busy_cv); 1365 IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT"); 1366 } 1367 1368 1369 /* 1370 * ibdm_is_cisco: 1371 * Check if this is a Cisco device or not. 1372 */ 1373 static boolean_t 1374 ibdm_is_cisco(ib_guid_t guid) 1375 { 1376 if ((guid >> IBDM_OUI_GUID_SHIFT) == IBDM_CISCO_COMPANY_ID) 1377 return (B_TRUE); 1378 return (B_FALSE); 1379 } 1380 1381 1382 /* 1383 * ibdm_is_cisco_switch: 1384 * Check if this switch is a CISCO switch or not. 1385 * Note that if this switch is already activated, ibdm_is_cisco_switch() 1386 * returns B_FALSE not to re-activate it again. 1387 */ 1388 static boolean_t 1389 ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *gid_info) 1390 { 1391 int company_id, device_id; 1392 ASSERT(gid_info != 0); 1393 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 1394 1395 /* 1396 * If this switch is already activated, don't re-activate it. 1397 */ 1398 if (gid_info->gl_flag & IBDM_CISCO_PROBE_DONE) 1399 return (B_FALSE); 1400 1401 /* 1402 * Check if this switch is a Cisco FC GW or not. 1403 * Use the node guid (the OUI part) instead of the vendor id 1404 * since the vendor id is zero in practice. 
1405 */ 1406 company_id = gid_info->gl_nodeguid >> IBDM_OUI_GUID_SHIFT; 1407 device_id = gid_info->gl_devid; 1408 1409 if (company_id == IBDM_CISCO_COMPANY_ID && 1410 device_id == IBDM_CISCO_DEVICE_ID) 1411 return (B_TRUE); 1412 return (B_FALSE); 1413 } 1414 1415 1416 /* 1417 * ibdm_probe_gid_thread: 1418 * thread that does the actual work for sweeping the fabric 1419 * for a given GID 1420 */ 1421 static void 1422 ibdm_probe_gid_thread(void *args) 1423 { 1424 int reprobe_flag; 1425 ib_guid_t node_guid; 1426 ib_guid_t port_guid; 1427 ibdm_dp_gidinfo_t *gid_info; 1428 1429 gid_info = (ibdm_dp_gidinfo_t *)args; 1430 reprobe_flag = gid_info->gl_reprobe_flag; 1431 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d", 1432 gid_info, reprobe_flag); 1433 ASSERT(gid_info != NULL); 1434 ASSERT(gid_info->gl_pending_cmds == 0); 1435 1436 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE && 1437 reprobe_flag == 0) { 1438 /* 1439 * This GID may have been already probed. Send 1440 * in a CLP to check if IOUnitInfo changed? 1441 * Explicitly set gl_reprobe_flag to 0 so that 1442 * IBnex is not notified on completion 1443 */ 1444 if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) { 1445 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: " 1446 "get new IOCs information"); 1447 mutex_enter(&gid_info->gl_mutex); 1448 gid_info->gl_pending_cmds++; 1449 gid_info->gl_state = IBDM_GET_IOUNITINFO; 1450 gid_info->gl_reprobe_flag = 0; 1451 mutex_exit(&gid_info->gl_mutex); 1452 if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) { 1453 mutex_enter(&gid_info->gl_mutex); 1454 --gid_info->gl_pending_cmds; 1455 mutex_exit(&gid_info->gl_mutex); 1456 mutex_enter(&ibdm.ibdm_mutex); 1457 --ibdm.ibdm_ngid_probes_in_progress; 1458 ibdm_wakeup_probe_gid_cv(); 1459 mutex_exit(&ibdm.ibdm_mutex); 1460 } 1461 } else { 1462 mutex_enter(&ibdm.ibdm_mutex); 1463 --ibdm.ibdm_ngid_probes_in_progress; 1464 ibdm_wakeup_probe_gid_cv(); 1465 mutex_exit(&ibdm.ibdm_mutex); 1466 } 1467 return; 1468 } else if (reprobe_flag && gid_info->gl_state == 1469 IBDM_GID_PROBING_COMPLETE) { 1470 /* 1471 * Reprobe all IOCs for the GID which has completed 1472 * probe. Skip other port GIDs to same IOU. 1473 * Explicitly set gl_reprobe_flag to 0 so that 1474 * IBnex is not notified on completion 1475 */ 1476 ibdm_ioc_info_t *ioc_info; 1477 uint8_t niocs, ii; 1478 1479 ASSERT(gid_info->gl_iou); 1480 mutex_enter(&gid_info->gl_mutex); 1481 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1482 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 1483 gid_info->gl_pending_cmds += niocs; 1484 gid_info->gl_reprobe_flag = 0; 1485 mutex_exit(&gid_info->gl_mutex); 1486 for (ii = 0; ii < niocs; ii++) { 1487 uchar_t slot_info; 1488 ib_dm_io_unitinfo_t *giou_info; 1489 1490 /* 1491 * Check whether IOC is present in the slot 1492 * Series of nibbles (in the field 1493 * iou_ctrl_list) represents a slot in the 1494 * IOU. 1495 * Byte format: 76543210 1496 * Bits 0-3 of first byte represent Slot 2 1497 * bits 4-7 of first byte represent slot 1, 1498 * bits 0-3 of second byte represent slot 4 1499 * and so on 1500 * Each 4-bit nibble has the following meaning 1501 * 0x0 : IOC not installed 1502 * 0x1 : IOC is present 1503 * 0xf : Slot does not exist 1504 * and all other values are reserved. 
1505 */ 1506 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1507 giou_info = &gid_info->gl_iou->iou_info; 1508 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 1509 if ((ii % 2) == 0) 1510 slot_info = (slot_info >> 4); 1511 1512 if ((slot_info & 0xf) != 1) { 1513 ioc_info->ioc_state = 1514 IBDM_IOC_STATE_PROBE_FAILED; 1515 ibdm_gid_decr_pending(gid_info); 1516 continue; 1517 } 1518 1519 if (ibdm_send_ioc_profile(gid_info, ii) != 1520 IBDM_SUCCESS) { 1521 ibdm_gid_decr_pending(gid_info); 1522 } 1523 } 1524 1525 return; 1526 } else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 1527 mutex_enter(&ibdm.ibdm_mutex); 1528 --ibdm.ibdm_ngid_probes_in_progress; 1529 ibdm_wakeup_probe_gid_cv(); 1530 mutex_exit(&ibdm.ibdm_mutex); 1531 return; 1532 } 1533 1534 /* 1535 * Check whether the destination GID supports DM agents. If 1536 * not, stop probing the GID and continue with the next GID 1537 * in the list. 1538 */ 1539 if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) { 1540 mutex_enter(&gid_info->gl_mutex); 1541 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1542 mutex_exit(&gid_info->gl_mutex); 1543 ibdm_delete_glhca_list(gid_info); 1544 mutex_enter(&ibdm.ibdm_mutex); 1545 --ibdm.ibdm_ngid_probes_in_progress; 1546 ibdm_wakeup_probe_gid_cv(); 1547 mutex_exit(&ibdm.ibdm_mutex); 1548 return; 1549 } 1550 1551 /* Get the nodeguid and portguid of the port */ 1552 if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid, 1553 &node_guid, &port_guid) != IBDM_SUCCESS) { 1554 mutex_enter(&gid_info->gl_mutex); 1555 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1556 mutex_exit(&gid_info->gl_mutex); 1557 ibdm_delete_glhca_list(gid_info); 1558 mutex_enter(&ibdm.ibdm_mutex); 1559 --ibdm.ibdm_ngid_probes_in_progress; 1560 ibdm_wakeup_probe_gid_cv(); 1561 mutex_exit(&ibdm.ibdm_mutex); 1562 return; 1563 } 1564 1565 /* 1566 * Check whether we already knew about this NodeGuid 1567 * If so, do not probe the GID and continue with the 1568 * next GID in the gid list. Set the GID state to 1569 * probing done. 1570 */ 1571 mutex_enter(&ibdm.ibdm_mutex); 1572 gid_info->gl_nodeguid = node_guid; 1573 gid_info->gl_portguid = port_guid; 1574 if (ibdm_check_dest_nodeguid(gid_info) != NULL) { 1575 mutex_exit(&ibdm.ibdm_mutex); 1576 mutex_enter(&gid_info->gl_mutex); 1577 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 1578 mutex_exit(&gid_info->gl_mutex); 1579 ibdm_delete_glhca_list(gid_info); 1580 mutex_enter(&ibdm.ibdm_mutex); 1581 --ibdm.ibdm_ngid_probes_in_progress; 1582 ibdm_wakeup_probe_gid_cv(); 1583 mutex_exit(&ibdm.ibdm_mutex); 1584 return; 1585 } 1586 ibdm_add_to_gl_gid(gid_info, gid_info); 1587 mutex_exit(&ibdm.ibdm_mutex); 1588 1589 /* 1590 * New or reinserted GID : Enable notification to IBnex 1591 */ 1592 mutex_enter(&gid_info->gl_mutex); 1593 gid_info->gl_reprobe_flag = 1; 1594 1595 /* 1596 * A Cisco FC GW needs the special handling to get IOUnitInfo. 
	 */
	if (ibdm_is_cisco_switch(gid_info)) {
		gid_info->gl_pending_cmds++;
		gid_info->gl_state = IBDM_SET_CLASSPORTINFO;
		mutex_exit(&gid_info->gl_mutex);

		if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) {
			mutex_enter(&gid_info->gl_mutex);
			gid_info->gl_state = IBDM_GID_PROBING_FAILED;
			--gid_info->gl_pending_cmds;
			mutex_exit(&gid_info->gl_mutex);

			/* free the hca_list on this gid_info */
			ibdm_delete_glhca_list(gid_info);

			mutex_enter(&ibdm.ibdm_mutex);
			--ibdm.ibdm_ngid_probes_in_progress;
			ibdm_wakeup_probe_gid_cv();
			mutex_exit(&ibdm.ibdm_mutex);

			return;
		}

		mutex_enter(&gid_info->gl_mutex);
		ibdm_wait_cisco_probe_completion(gid_info);

		IBTF_DPRINTF_L4("ibdm", "\tibdm_probe_gid_thread: "
		    "CISCO Wakeup signal received");
	}

	/* move on to the 'GET_CLASSPORTINFO' stage */
	gid_info->gl_pending_cmds++;
	gid_info->gl_state = IBDM_GET_CLASSPORTINFO;
	mutex_exit(&gid_info->gl_mutex);

	IBTF_DPRINTF_L3(ibdm_string, "\tibdm_probe_gid_thread: "
	    "%d: gid_info %p gl_state %d pending_cmds %d",
	    __LINE__, gid_info, gid_info->gl_state,
	    gid_info->gl_pending_cmds);

	/*
	 * Send ClassPortInfo request to the GID asynchronously.
	 */
	if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) {

		mutex_enter(&gid_info->gl_mutex);
		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
		--gid_info->gl_pending_cmds;
		mutex_exit(&gid_info->gl_mutex);

		/* free the hca_list on this gid_info */
		ibdm_delete_glhca_list(gid_info);

		mutex_enter(&ibdm.ibdm_mutex);
		--ibdm.ibdm_ngid_probes_in_progress;
		ibdm_wakeup_probe_gid_cv();
		mutex_exit(&ibdm.ibdm_mutex);

		return;
	}
}


/*
 * ibdm_check_dest_nodeguid
 *	Searches for the NodeGuid in the GID list
 *	Returns the matching gid_info if found, otherwise NULL
 *
 *	This function is called to handle new GIDs discovered
 *	during device sweep / probe or for GID_AVAILABLE event.
 *
 *	Parameter :
 *		gid_info	GID to check
 */
static ibdm_dp_gidinfo_t *
ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info)
{
	ibdm_dp_gidinfo_t	*gid_list;
	ibdm_gid_t		*tmp;

	IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid");

	gid_list = ibdm.ibdm_dp_gidlist_head;
	while (gid_list) {
		if ((gid_list != gid_info) &&
		    (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) {
			IBTF_DPRINTF_L4("ibdm",
			    "\tcheck_dest_nodeguid: NodeGuid is present");

			/* Add to gid_list */
			tmp = kmem_zalloc(sizeof (ibdm_gid_t),
			    KM_SLEEP);
			tmp->gid_dgid_hi = gid_info->gl_dgid_hi;
			tmp->gid_dgid_lo = gid_info->gl_dgid_lo;
			tmp->gid_next = gid_list->gl_gid;
			gid_list->gl_gid = tmp;
			gid_list->gl_ngids++;
			return (gid_list);
		}

		gid_list = gid_list->gl_next;
	}

	return (NULL);
}


/*
 * ibdm_is_dev_mgt_supported
 *	Get the PortInfo attribute (SA Query)
 *	Check the "CapabilityMask" field in the PortInfo.
 *	Return IBDM_SUCCESS if DM MAD's are supported by the port
 *	(bit 19 of the CapabilityMask is set), otherwise IBDM_FAILURE
 */
static int
ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info)
{
	int			ret;
	size_t			length = 0;
	sa_portinfo_record_t	req, *resp = NULL;
	ibmf_saa_access_args_t	qargs;

	bzero(&req, sizeof (sa_portinfo_record_t));
	req.EndportLID = gid_info->gl_dlid;

	qargs.sq_attr_id = SA_PORTINFORECORD_ATTRID;
	qargs.sq_access_type = IBMF_SAA_RETRIEVE;
	qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID;
	qargs.sq_template = &req;
	qargs.sq_callback = NULL;
	qargs.sq_callback_arg = NULL;

	ret = ibmf_sa_access(gid_info->gl_sa_hdl,
	    &qargs, 0, &length, (void **)&resp);

	if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
		IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:"
		    "failed to get PORTINFO attribute %d", ret);
		return (IBDM_FAILURE);
	}

	if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) {
		IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!");
		ret = IBDM_SUCCESS;
	} else {
		IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: "
		    "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask);
		ret = IBDM_FAILURE;
	}
	kmem_free(resp, length);
	return (ret);
}


/*
 * ibdm_get_node_port_guids()
 *	Get the NodeInfoRecord of the port
 *	Save NodeGuid and PortGUID values in the GID list structure.
 *	Return IBDM_SUCCESS/IBDM_FAILURE
 */
static int
ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid,
    ib_guid_t *node_guid, ib_guid_t *port_guid)
{
	int			ret;
	size_t			length = 0;
	sa_node_record_t	req, *resp = NULL;
	ibmf_saa_access_args_t	qargs;

	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids");

	bzero(&req, sizeof (sa_node_record_t));
	req.LID = dlid;

	qargs.sq_attr_id = SA_NODERECORD_ATTRID;
	qargs.sq_access_type = IBMF_SAA_RETRIEVE;
	qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID;
	qargs.sq_template = &req;
	qargs.sq_callback = NULL;
	qargs.sq_callback_arg = NULL;

	ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp);
	if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
		IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:"
		    " SA Retrieve Failed: %d", ret);
		return (IBDM_FAILURE);
	}
	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port"
	    "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID);

	*node_guid = resp->NodeInfo.NodeGUID;
	*port_guid = resp->NodeInfo.PortGUID;
	kmem_free(resp, length);
	return (IBDM_SUCCESS);
}


/*
 * ibdm_get_reachable_ports()
 *	Get list of the destination GID (and its path records) by
 *	querying the SA access.
1798 * 1799 * Returns Number paths 1800 */ 1801 static int 1802 ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca) 1803 { 1804 uint_t ii, jj, nrecs; 1805 uint_t npaths = 0; 1806 size_t length; 1807 ib_gid_t sgid; 1808 ibdm_pkey_tbl_t *pkey_tbl; 1809 sa_path_record_t *result; 1810 sa_path_record_t *precp; 1811 ibdm_dp_gidinfo_t *gid_info; 1812 1813 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 1814 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo); 1815 1816 sgid.gid_prefix = portinfo->pa_sn_prefix; 1817 sgid.gid_guid = portinfo->pa_port_guid; 1818 1819 /* get reversible paths */ 1820 if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl, 1821 sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result) 1822 != IBMF_SUCCESS) { 1823 IBTF_DPRINTF_L2("ibdm", 1824 "\tget_reachable_ports: Getting path records failed"); 1825 return (0); 1826 } 1827 1828 for (ii = 0; ii < nrecs; ii++) { 1829 sa_node_record_t *nrec; 1830 size_t length; 1831 1832 precp = &result[ii]; 1833 if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid, 1834 precp->DGID.gid_prefix)) != NULL) { 1835 IBTF_DPRINTF_L5("ibdm", "\tget_reachable_ports: " 1836 "Already exists nrecs %d, ii %d", nrecs, ii); 1837 ibdm_addto_glhcalist(gid_info, hca); 1838 continue; 1839 } 1840 /* 1841 * This is a new GID. Allocate a GID structure and 1842 * initialize the structure 1843 * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0) 1844 * by kmem_zalloc call 1845 */ 1846 gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 1847 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 1848 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL); 1849 gid_info->gl_dgid_hi = precp->DGID.gid_prefix; 1850 gid_info->gl_dgid_lo = precp->DGID.gid_guid; 1851 gid_info->gl_sgid_hi = precp->SGID.gid_prefix; 1852 gid_info->gl_sgid_lo = precp->SGID.gid_guid; 1853 gid_info->gl_p_key = precp->P_Key; 1854 gid_info->gl_sa_hdl = portinfo->pa_sa_hdl; 1855 gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl; 1856 gid_info->gl_slid = precp->SLID; 1857 gid_info->gl_dlid = precp->DLID; 1858 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 1859 << IBDM_GID_TRANSACTIONID_SHIFT; 1860 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 1861 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 1862 << IBDM_GID_TRANSACTIONID_SHIFT; 1863 1864 /* 1865 * get the node record with this guid if the destination 1866 * device is a Cisco one. 1867 */ 1868 if (ibdm_is_cisco(precp->DGID.gid_guid) && 1869 (gid_info->gl_nodeguid == 0 || gid_info->gl_devid == 0) && 1870 ibdm_get_node_record_by_port(portinfo->pa_sa_hdl, 1871 precp->DGID.gid_guid, &nrec, &length) == IBDM_SUCCESS) { 1872 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 1873 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 1874 kmem_free(nrec, length); 1875 } 1876 1877 ibdm_addto_glhcalist(gid_info, hca); 1878 1879 ibdm_dump_path_info(precp); 1880 1881 gid_info->gl_qp_hdl = NULL; 1882 ASSERT(portinfo->pa_pkey_tbl != NULL && 1883 portinfo->pa_npkeys != 0); 1884 1885 for (jj = 0; jj < portinfo->pa_npkeys; jj++) { 1886 pkey_tbl = &portinfo->pa_pkey_tbl[jj]; 1887 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 1888 (pkey_tbl->pt_qp_hdl != NULL)) { 1889 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 1890 break; 1891 } 1892 } 1893 1894 /* 1895 * QP handle for GID not initialized. No matching Pkey 1896 * was found!! ibdm should *not* hit this case. Flag an 1897 * error and drop the GID if ibdm does encounter this. 
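 *
 *    Illustrative values only (not taken from a real fabric): if the
 *    path record carried gl_p_key == 0xFFFF (the full membership
 *    default P_Key) and some pa_pkey_tbl[jj] entry holds pt_pkey ==
 *    0xFFFF with a non-NULL pt_qp_hdl, the loop above copies that
 *    pt_qp_hdl into gl_qp_hdl and this fall-back is never reached.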
1898 */ 1899 if (gid_info->gl_qp_hdl == NULL) { 1900 IBTF_DPRINTF_L2(ibdm_string, 1901 "\tget_reachable_ports: No matching Pkey"); 1902 ibdm_delete_gidinfo(gid_info); 1903 continue; 1904 } 1905 if (ibdm.ibdm_dp_gidlist_head == NULL) { 1906 ibdm.ibdm_dp_gidlist_head = gid_info; 1907 ibdm.ibdm_dp_gidlist_tail = gid_info; 1908 } else { 1909 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 1910 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 1911 ibdm.ibdm_dp_gidlist_tail = gid_info; 1912 } 1913 npaths++; 1914 } 1915 kmem_free(result, length); 1916 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths); 1917 return (npaths); 1918 } 1919 1920 1921 /* 1922 * ibdm_check_dgid() 1923 * Look in the global list to check whether we know this DGID already. 1924 * Returns the matching gidinfo pointer if found, otherwise NULL. 1925 */ 1926 static ibdm_dp_gidinfo_t * 1927 ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix) 1928 { 1929 ibdm_dp_gidinfo_t *gid_list; 1930 1931 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 1932 gid_list = gid_list->gl_next) { 1933 if ((guid == gid_list->gl_dgid_lo) && 1934 (prefix == gid_list->gl_dgid_hi)) { 1935 break; 1936 } 1937 } 1938 return (gid_list); 1939 } 1940 1941 1942 /* 1943 * ibdm_find_gid() 1944 * Look in the global list to find a GID entry with matching 1945 * port & node GUID. 1946 * Return pointer to gidinfo if found, else return NULL 1947 */ 1948 static ibdm_dp_gidinfo_t * 1949 ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid) 1950 { 1951 ibdm_dp_gidinfo_t *gid_list; 1952 1953 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n", 1954 nodeguid, portguid); 1955 1956 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 1957 gid_list = gid_list->gl_next) { 1958 if ((portguid == gid_list->gl_portguid) && 1959 (nodeguid == gid_list->gl_nodeguid)) { 1960 break; 1961 } 1962 } 1963 1964 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n", 1965 gid_list); 1966 return (gid_list); 1967 } 1968 1969 1970 /* 1971 * ibdm_set_classportinfo() 1972 * Activates a Cisco FC gateway by sending a SetClassPortInfo request 1973 * that carries the trap LID, trap GID and related fields, since the 1974 * gateway does not provide the IO Unit Information otherwise. This 1975 * behavior is Cisco specific, so this function is called only for a 1976 * Cisco FC GW. 1977 * Returns IBDM_SUCCESS/IBDM_FAILURE 1978 */ 1979 static int 1980 ibdm_set_classportinfo(ibdm_dp_gidinfo_t *gid_info) 1981 { 1982 ibmf_msg_t *msg; 1983 ib_mad_hdr_t *hdr; 1984 ibdm_timeout_cb_args_t *cb_args; 1985 void *data; 1986 ib_mad_classportinfo_t *cpi; 1987 1988 IBTF_DPRINTF_L4("ibdm", 1989 "\tset_classportinfo: gid info 0x%p", gid_info); 1990 1991 /* 1992 * Send command to set classportinfo attribute. Allocate an IBMF 1993 * packet and initialize the packet.
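 *
 *    Sketch of the activation payload built below (the field names and
 *    value sources are the ones used in this routine):
 *
 *        cpi->TrapGID_hi = h2b64(gid_info->gl_sgid_hi);
 *        cpi->TrapGID_lo = h2b64(gid_info->gl_sgid_lo);
 *        cpi->TrapLID    = h2b16(gid_info->gl_slid);
 *        cpi->TrapP_Key  = h2b16(gid_info->gl_p_key);
 *        cpi->TrapQP and cpi->TrapQ_Key come from the alternate QP
 *        handle (gl_qp_hdl).
 *
 *    In effect the gateway is told where to send its traps, which is
 *    what persuades it to start exporting IO Unit information.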
1994 */ 1995 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 1996 &msg) != IBMF_SUCCESS) { 1997 IBTF_DPRINTF_L4("ibdm", "\tset_classportinfo: pkt alloc fail"); 1998 return (IBDM_FAILURE); 1999 } 2000 2001 ibdm_alloc_send_buffers(msg); 2002 2003 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2004 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2005 msg->im_local_addr.ia_remote_qno = 1; 2006 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2007 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2008 2009 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2010 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2011 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2012 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2013 hdr->R_Method = IB_DM_DEVMGT_METHOD_SET; 2014 hdr->Status = 0; 2015 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2016 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2017 hdr->AttributeModifier = 0; 2018 2019 data = msg->im_msgbufs_send.im_bufs_cl_data; 2020 cpi = (ib_mad_classportinfo_t *)data; 2021 2022 /* 2023 * Set the classportinfo values to activate this Cisco FC GW. 2024 */ 2025 cpi->TrapGID_hi = h2b64(gid_info->gl_sgid_hi); 2026 cpi->TrapGID_lo = h2b64(gid_info->gl_sgid_lo); 2027 cpi->TrapLID = h2b16(gid_info->gl_slid); 2028 cpi->TrapSL = 0; 2029 cpi->TrapP_Key = h2b16(gid_info->gl_p_key); 2030 cpi->TrapQP = h2b32((((ibmf_alt_qp_t *)gid_info->gl_qp_hdl)->isq_qpn)); 2031 cpi->TrapQ_Key = h2b32((((ibmf_alt_qp_t *) 2032 gid_info->gl_qp_hdl)->isq_qkey)); 2033 2034 cb_args = &gid_info->gl_cpi_cb_args; 2035 cb_args->cb_gid_info = gid_info; 2036 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2037 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2038 2039 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2040 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2041 2042 IBTF_DPRINTF_L5("ibdm", "\tset_classportinfo: " 2043 "timeout id %x", gid_info->gl_timeout_id); 2044 2045 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2046 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2047 IBTF_DPRINTF_L2("ibdm", 2048 "\tset_classportinfo: ibmf send failed"); 2049 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2050 } 2051 2052 return (IBDM_SUCCESS); 2053 } 2054 2055 2056 /* 2057 * ibdm_send_classportinfo() 2058 * Send classportinfo request. When the request is completed 2059 * IBMF calls ibdm_classportinfo_cb routine to inform about 2060 * the completion. 2061 * Returns IBDM_SUCCESS/IBDM_FAILURE 2062 */ 2063 static int 2064 ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info) 2065 { 2066 ibmf_msg_t *msg; 2067 ib_mad_hdr_t *hdr; 2068 ibdm_timeout_cb_args_t *cb_args; 2069 2070 IBTF_DPRINTF_L4("ibdm", 2071 "\tsend_classportinfo: gid info 0x%p", gid_info); 2072 2073 /* 2074 * Send command to get classportinfo attribute. Allocate a IBMF 2075 * packet and initialize the packet. 
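 *
 *    Before the MAD is handed to ibmf_msg_transport() a retry context
 *    and a timeout are armed, roughly as below (these are the names
 *    actually used in this routine):
 *
 *        cb_args->cb_gid_info    = gid_info;
 *        cb_args->cb_retry_count = ibdm_dft_retry_cnt;
 *        cb_args->cb_req_type    = IBDM_REQ_TYPE_CLASSPORTINFO;
 *        gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
 *            cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
 *
 *    ibdm_pkt_timeout_hdlr() retries the command or fails the probe if
 *    no response arrives in time.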
2076 */ 2077 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2078 &msg) != IBMF_SUCCESS) { 2079 IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail"); 2080 return (IBDM_FAILURE); 2081 } 2082 2083 ibdm_alloc_send_buffers(msg); 2084 2085 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2086 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2087 msg->im_local_addr.ia_remote_qno = 1; 2088 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2089 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2090 2091 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2092 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2093 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2094 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2095 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2096 hdr->Status = 0; 2097 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2098 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2099 hdr->AttributeModifier = 0; 2100 2101 cb_args = &gid_info->gl_cpi_cb_args; 2102 cb_args->cb_gid_info = gid_info; 2103 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2104 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2105 2106 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2107 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2108 2109 IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: " 2110 "timeout id %x", gid_info->gl_timeout_id); 2111 2112 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2113 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2114 IBTF_DPRINTF_L2("ibdm", 2115 "\tsend_classportinfo: ibmf send failed"); 2116 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2117 } 2118 2119 return (IBDM_SUCCESS); 2120 } 2121 2122 2123 /* 2124 * ibdm_handle_setclassportinfo() 2125 * Invoked by the IBMF when setClassPortInfo request is completed. 2126 */ 2127 static void 2128 ibdm_handle_setclassportinfo(ibmf_handle_t ibmf_hdl, 2129 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2130 { 2131 void *data; 2132 timeout_id_t timeout_id; 2133 ib_mad_classportinfo_t *cpi; 2134 2135 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo:ibmf hdl " 2136 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2137 2138 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2139 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo: " 2140 "Not a ClassPortInfo resp"); 2141 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2142 return; 2143 } 2144 2145 /* 2146 * Verify whether timeout handler is created/active. 
2147 * If created/ active, cancel the timeout handler 2148 */ 2149 mutex_enter(&gid_info->gl_mutex); 2150 if (gid_info->gl_state != IBDM_SET_CLASSPORTINFO) { 2151 IBTF_DPRINTF_L2("ibdm", "\thandle_setclassportinfo:DUP resp"); 2152 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2153 mutex_exit(&gid_info->gl_mutex); 2154 return; 2155 } 2156 ibdm_bump_transactionID(gid_info); 2157 2158 gid_info->gl_iou_cb_args.cb_req_type = 0; 2159 if (gid_info->gl_timeout_id) { 2160 timeout_id = gid_info->gl_timeout_id; 2161 mutex_exit(&gid_info->gl_mutex); 2162 IBTF_DPRINTF_L5("ibdm", "handle_setlassportinfo: " 2163 "gl_timeout_id = 0x%x", timeout_id); 2164 if (untimeout(timeout_id) == -1) { 2165 IBTF_DPRINTF_L2("ibdm", "handle_setclassportinfo: " 2166 "untimeout gl_timeout_id failed"); 2167 } 2168 mutex_enter(&gid_info->gl_mutex); 2169 gid_info->gl_timeout_id = 0; 2170 } 2171 mutex_exit(&gid_info->gl_mutex); 2172 2173 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2174 cpi = (ib_mad_classportinfo_t *)data; 2175 2176 ibdm_dump_classportinfo(cpi); 2177 } 2178 2179 2180 /* 2181 * ibdm_handle_classportinfo() 2182 * Invoked by the IBMF when the classportinfo request is completed. 2183 */ 2184 static void 2185 ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl, 2186 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2187 { 2188 void *data; 2189 timeout_id_t timeout_id; 2190 ib_mad_hdr_t *hdr; 2191 ib_mad_classportinfo_t *cpi; 2192 2193 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl " 2194 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2195 2196 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2197 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: " 2198 "Not a ClassPortInfo resp"); 2199 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2200 return; 2201 } 2202 2203 /* 2204 * Verify whether timeout handler is created/active. 2205 * If created/ active, cancel the timeout handler 2206 */ 2207 mutex_enter(&gid_info->gl_mutex); 2208 ibdm_bump_transactionID(gid_info); 2209 if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) { 2210 IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp"); 2211 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2212 mutex_exit(&gid_info->gl_mutex); 2213 return; 2214 } 2215 gid_info->gl_iou_cb_args.cb_req_type = 0; 2216 if (gid_info->gl_timeout_id) { 2217 timeout_id = gid_info->gl_timeout_id; 2218 mutex_exit(&gid_info->gl_mutex); 2219 IBTF_DPRINTF_L5("ibdm", "handle_ioclassportinfo: " 2220 "gl_timeout_id = 0x%x", timeout_id); 2221 if (untimeout(timeout_id) == -1) { 2222 IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: " 2223 "untimeout gl_timeout_id failed"); 2224 } 2225 mutex_enter(&gid_info->gl_mutex); 2226 gid_info->gl_timeout_id = 0; 2227 } 2228 gid_info->gl_state = IBDM_GET_IOUNITINFO; 2229 gid_info->gl_pending_cmds++; 2230 mutex_exit(&gid_info->gl_mutex); 2231 2232 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2233 cpi = (ib_mad_classportinfo_t *)data; 2234 2235 /* 2236 * Cache the "RespTimeValue" and redirection information in the 2237 * global gid list data structure. This cached information will 2238 * be used to send any further requests to the GID. 2239 */ 2240 gid_info->gl_resp_timeout = 2241 (b2h32(cpi->RespTimeValue) & 0x1F); 2242 2243 gid_info->gl_redirected = ((IBDM_IN_IBMFMSG_STATUS(msg) & 2244 MAD_STATUS_REDIRECT_REQUIRED) ? 
B_TRUE: B_FALSE); 2245 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 2246 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 2247 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 2248 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 2249 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 2250 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 2251 2252 ibdm_dump_classportinfo(cpi); 2253 2254 /* 2255 * Send IOUnitInfo request 2256 * Reuse previously allocated IBMF packet for sending ClassPortInfo 2257 * Check whether DM agent on the remote node requested redirection 2258 * If so, send the request to the redirect DGID/DLID/PKEY/QP. 2259 */ 2260 ibdm_alloc_send_buffers(msg); 2261 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2262 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2263 2264 if (gid_info->gl_redirected == B_TRUE) { 2265 if (gid_info->gl_redirect_dlid != 0) { 2266 msg->im_local_addr.ia_remote_lid = 2267 gid_info->gl_redirect_dlid; 2268 } 2269 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 2270 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 2271 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 2272 } else { 2273 msg->im_local_addr.ia_remote_qno = 1; 2274 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2275 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2276 } 2277 2278 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2279 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2280 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2281 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2282 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2283 hdr->Status = 0; 2284 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2285 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2286 hdr->AttributeModifier = 0; 2287 2288 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2289 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2290 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2291 2292 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2293 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2294 2295 IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:" 2296 "timeout %x", gid_info->gl_timeout_id); 2297 2298 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL, 2299 ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) { 2300 IBTF_DPRINTF_L2("ibdm", 2301 "\thandle_classportinfo: msg transport failed"); 2302 ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args); 2303 } 2304 (*flag) |= IBDM_IBMF_PKT_REUSED; 2305 } 2306 2307 2308 /* 2309 * ibdm_send_iounitinfo: 2310 * Sends a DM request to get IOU unitinfo. 2311 */ 2312 static int 2313 ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info) 2314 { 2315 ibmf_msg_t *msg; 2316 ib_mad_hdr_t *hdr; 2317 2318 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info); 2319 2320 /* 2321 * Send command to get iounitinfo attribute. Allocate a IBMF 2322 * packet and initialize the packet. 
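 *
 *    The request built below is a plain DevMgt Get, in sketch form:
 *
 *        hdr->R_Method          = IB_DM_DEVMGT_METHOD_GET;
 *        hdr->AttributeID       = h2b16(IB_DM_ATTR_IO_UNITINFO);
 *        hdr->AttributeModifier = 0;
 *
 *    Unlike the IOC profile and service entry requests, IOUnitInfo
 *    does not encode a slot or range in the attribute modifier.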
2323 */ 2324 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) != 2325 IBMF_SUCCESS) { 2326 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail"); 2327 return (IBDM_FAILURE); 2328 } 2329 2330 mutex_enter(&gid_info->gl_mutex); 2331 ibdm_bump_transactionID(gid_info); 2332 mutex_exit(&gid_info->gl_mutex); 2333 2334 2335 ibdm_alloc_send_buffers(msg); 2336 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2337 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2338 msg->im_local_addr.ia_remote_qno = 1; 2339 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2340 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2341 2342 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2343 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2344 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2345 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2346 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2347 hdr->Status = 0; 2348 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2349 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2350 hdr->AttributeModifier = 0; 2351 2352 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2353 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2354 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2355 2356 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2357 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2358 2359 IBTF_DPRINTF_L5("ibdm", "send_iouunitinfo:" 2360 "timeout %x", gid_info->gl_timeout_id); 2361 2362 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 2363 NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != 2364 IBMF_SUCCESS) { 2365 IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed"); 2366 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, 2367 msg, &gid_info->gl_iou_cb_args); 2368 } 2369 return (IBDM_SUCCESS); 2370 } 2371 2372 /* 2373 * ibdm_handle_iounitinfo() 2374 * Invoked by the IBMF when IO Unitinfo request is completed. 
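 *
 *    A minimal sketch of what is extracted from the response further
 *    down (all of these fields exist in ib_dm_io_unitinfo_t):
 *
 *        iou_info = IBDM_IN_IBMFMSG2IOU(msg);
 *        num_iocs = iou_info->iou_num_ctrl_slots;
 *        b2h16(iou_info->iou_changeid) is compared with the cached
 *        giou_info->iou_changeid, and the 128 byte iou_ctrl_list holds
 *        one nibble of install state per IOC slot.
 *
 *    The change id decides whether the cached IOU data can be reused;
 *    the control list decides which slots get an IOC profile request.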
2375 */ 2376 static void 2377 ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl, 2378 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2379 { 2380 int ii, first = B_TRUE; 2381 int num_iocs; 2382 size_t size; 2383 uchar_t slot_info; 2384 timeout_id_t timeout_id; 2385 ib_mad_hdr_t *hdr; 2386 ibdm_ioc_info_t *ioc_info; 2387 ib_dm_io_unitinfo_t *iou_info; 2388 ib_dm_io_unitinfo_t *giou_info; 2389 ibdm_timeout_cb_args_t *cb_args; 2390 2391 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo:" 2392 " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info); 2393 2394 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) { 2395 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: " 2396 "Unexpected response"); 2397 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2398 return; 2399 } 2400 2401 mutex_enter(&gid_info->gl_mutex); 2402 if (gid_info->gl_state != IBDM_GET_IOUNITINFO) { 2403 IBTF_DPRINTF_L4("ibdm", 2404 "\thandle_iounitinfo: DUP resp"); 2405 mutex_exit(&gid_info->gl_mutex); 2406 (*flag) = IBDM_IBMF_PKT_DUP_RESP; 2407 return; 2408 } 2409 gid_info->gl_iou_cb_args.cb_req_type = 0; 2410 if (gid_info->gl_timeout_id) { 2411 timeout_id = gid_info->gl_timeout_id; 2412 mutex_exit(&gid_info->gl_mutex); 2413 IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: " 2414 "gl_timeout_id = 0x%x", timeout_id); 2415 if (untimeout(timeout_id) == -1) { 2416 IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: " 2417 "untimeout gl_timeout_id failed"); 2418 } 2419 mutex_enter(&gid_info->gl_mutex); 2420 gid_info->gl_timeout_id = 0; 2421 } 2422 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 2423 2424 iou_info = IBDM_IN_IBMFMSG2IOU(msg); 2425 ibdm_dump_iounitinfo(iou_info); 2426 num_iocs = iou_info->iou_num_ctrl_slots; 2427 /* 2428 * check if number of IOCs reported is zero? if yes, return. 2429 * when num_iocs are reported zero internal IOC database needs 2430 * to be updated. To ensure that save the number of IOCs in 2431 * the new field "gl_num_iocs". Use a new field instead of 2432 * "giou_info->iou_num_ctrl_slots" as that would prevent 2433 * an unnecessary kmem_alloc/kmem_free when num_iocs is 0. 2434 */ 2435 if (num_iocs == 0 && gid_info->gl_num_iocs == 0) { 2436 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's"); 2437 mutex_exit(&gid_info->gl_mutex); 2438 return; 2439 } 2440 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs); 2441 2442 /* 2443 * if there is an existing gl_iou (IOU has been probed before) 2444 * check if the "iou_changeid" is same as saved entry in 2445 * "giou_info->iou_changeid". 2446 * (note: this logic can prevent IOC enumeration if a given 2447 * vendor doesn't support setting iou_changeid field for its IOU) 2448 * 2449 * if there is an existing gl_iou and iou_changeid has changed : 2450 * free up existing gl_iou info and its related structures. 2451 * reallocate gl_iou info all over again. 2452 * if we donot free this up; then this leads to memory leaks 2453 */ 2454 if (gid_info->gl_iou) { 2455 giou_info = &gid_info->gl_iou->iou_info; 2456 if (b2h16(iou_info->iou_changeid) == 2457 giou_info->iou_changeid) { 2458 IBTF_DPRINTF_L3("ibdm", 2459 "\thandle_iounitinfo: no IOCs changed"); 2460 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 2461 mutex_exit(&gid_info->gl_mutex); 2462 return; 2463 } 2464 2465 /* 2466 * Store the iou info as prev_iou to be used after 2467 * sweep is done. 
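 *
 *    Note on the allocation that follows: the IOU info and its IOC
 *    array are carved out of a single kmem_zalloc'ed buffer, roughly
 *
 *        size = sizeof (ibdm_iou_info_t) +
 *            num_iocs * sizeof (ibdm_ioc_info_t);
 *        gl_iou = kmem_zalloc(size, KM_SLEEP);
 *        gl_iou->iou_ioc_info = (ibdm_ioc_info_t *)
 *            ((char *)gl_iou + sizeof (ibdm_iou_info_t));
 *
 *    so releasing gl_iou releases the per IOC entries with it, which
 *    is why the old IOU must be saved or freed before reallocating.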
2468 */ 2469 ASSERT(gid_info->gl_prev_iou == NULL); 2470 IBTF_DPRINTF_L4(ibdm_string, 2471 "\thandle_iounitinfo: setting gl_prev_iou %p", 2472 gid_info->gl_prev_iou); 2473 gid_info->gl_prev_iou = gid_info->gl_iou; 2474 ibdm.ibdm_prev_iou = 1; 2475 gid_info->gl_iou = NULL; 2476 } 2477 2478 size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t); 2479 gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP); 2480 giou_info = &gid_info->gl_iou->iou_info; 2481 gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *) 2482 ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t)); 2483 2484 giou_info->iou_num_ctrl_slots = gid_info->gl_num_iocs = num_iocs; 2485 giou_info->iou_flag = iou_info->iou_flag; 2486 bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128); 2487 giou_info->iou_changeid = b2h16(iou_info->iou_changeid); 2488 gid_info->gl_pending_cmds++; /* for diag code */ 2489 mutex_exit(&gid_info->gl_mutex); 2490 2491 if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) { 2492 mutex_enter(&gid_info->gl_mutex); 2493 gid_info->gl_pending_cmds--; 2494 mutex_exit(&gid_info->gl_mutex); 2495 } 2496 /* 2497 * Parallelize getting IOC controller profiles from here. 2498 * Allocate IBMF packets and send commands to get IOC profile for 2499 * each IOC present on the IOU. 2500 */ 2501 for (ii = 0; ii < num_iocs; ii++) { 2502 /* 2503 * Check whether IOC is present in the slot 2504 * Series of nibbles (in the field iou_ctrl_list) represents 2505 * a slot in the IOU. 2506 * Byte format: 76543210 2507 * Bits 0-3 of first byte represent Slot 2 2508 * bits 4-7 of first byte represent slot 1, 2509 * bits 0-3 of second byte represent slot 4 and so on 2510 * Each 4-bit nibble has the following meaning 2511 * 0x0 : IOC not installed 2512 * 0x1 : IOC is present 2513 * 0xf : Slot does not exist 2514 * and all other values are reserved. 2515 */ 2516 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 2517 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 2518 if ((ii % 2) == 0) 2519 slot_info = (slot_info >> 4); 2520 2521 if ((slot_info & 0xf) != 1) { 2522 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2523 "No IOC is present in the slot = %d", ii); 2524 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 2525 continue; 2526 } 2527 2528 mutex_enter(&gid_info->gl_mutex); 2529 ibdm_bump_transactionID(gid_info); 2530 mutex_exit(&gid_info->gl_mutex); 2531 2532 /* 2533 * Re use the already allocated packet (for IOUnitinfo) to 2534 * send the first IOC controller attribute. 
Allocate new 2535 * IBMF packets for the rest of the IOC's 2536 */ 2537 if (first != B_TRUE) { 2538 msg = NULL; 2539 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2540 &msg) != IBMF_SUCCESS) { 2541 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2542 "IBMF packet allocation failed"); 2543 continue; 2544 } 2545 2546 } 2547 2548 /* allocate send buffers for all messages */ 2549 ibdm_alloc_send_buffers(msg); 2550 2551 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2552 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2553 if (gid_info->gl_redirected == B_TRUE) { 2554 if (gid_info->gl_redirect_dlid != 0) { 2555 msg->im_local_addr.ia_remote_lid = 2556 gid_info->gl_redirect_dlid; 2557 } 2558 msg->im_local_addr.ia_remote_qno = 2559 gid_info->gl_redirect_QP; 2560 msg->im_local_addr.ia_p_key = 2561 gid_info->gl_redirect_pkey; 2562 msg->im_local_addr.ia_q_key = 2563 gid_info->gl_redirect_qkey; 2564 } else { 2565 msg->im_local_addr.ia_remote_qno = 1; 2566 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2567 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2568 } 2569 2570 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2571 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2572 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2573 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2574 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2575 hdr->Status = 0; 2576 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2577 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 2578 hdr->AttributeModifier = h2b32(ii + 1); 2579 2580 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_INVALID; 2581 cb_args = &ioc_info->ioc_cb_args; 2582 cb_args->cb_gid_info = gid_info; 2583 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2584 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 2585 cb_args->cb_ioc_num = ii; 2586 2587 mutex_enter(&gid_info->gl_mutex); 2588 gid_info->gl_pending_cmds++; /* for diag code */ 2589 mutex_exit(&gid_info->gl_mutex); 2590 2591 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2592 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2593 2594 IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:" 2595 "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii); 2596 2597 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2598 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2599 IBTF_DPRINTF_L2("ibdm", 2600 "\thandle_iounitinfo: msg transport failed"); 2601 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2602 } 2603 (*flag) |= IBDM_IBMF_PKT_REUSED; 2604 first = B_FALSE; 2605 gid_info->gl_iou->iou_niocs_probe_in_progress++; 2606 } 2607 } 2608 2609 2610 /* 2611 * ibdm_handle_ioc_profile() 2612 * Invoked by the IBMF when the IOCControllerProfile request 2613 * gets completed 2614 */ 2615 static void 2616 ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl, 2617 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2618 { 2619 int first = B_TRUE, reprobe = 0; 2620 uint_t ii, ioc_no, srv_start; 2621 uint_t nserv_entries; 2622 timeout_id_t timeout_id; 2623 ib_mad_hdr_t *hdr; 2624 ibdm_ioc_info_t *ioc_info; 2625 ibdm_timeout_cb_args_t *cb_args; 2626 ib_dm_ioc_ctrl_profile_t *ioc, *gioc; 2627 2628 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2629 " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2630 2631 ioc = IBDM_IN_IBMFMSG2IOC(msg); 2632 /* 2633 * Check whether we know this IOC already 2634 * This will return NULL if reprobe is in progress 2635 * IBDM_IOC_STATE_REPROBE_PROGRESS will be set. 2636 * Do not hold mutexes here. 
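 *
 *    A short orientation for the code below: the IOC slot addressed by
 *    this response is recovered from the MAD attribute modifier (slots
 *    are 1 based on the wire, 0 based in gl_iou), i.e.
 *
 *        ioc_no   = IBDM_IN_IBMFMSG_ATTRMOD(msg);
 *        ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1];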
2637 */ 2638 if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) { 2639 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2640 "IOC guid %llx is present", ioc->ioc_guid); 2641 return; 2642 } 2643 ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2644 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1); 2645 2646 /* Make sure that IOC index is within the valid range */ 2647 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 2648 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: " 2649 "IOC index Out of range, index %d", ioc_no); 2650 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2651 return; 2652 } 2653 ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1]; 2654 ioc_info->ioc_iou_info = gid_info->gl_iou; 2655 2656 mutex_enter(&gid_info->gl_mutex); 2657 if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) { 2658 reprobe = 1; 2659 ioc_info->ioc_prev_serv = ioc_info->ioc_serv; 2660 ioc_info->ioc_serv = NULL; 2661 ioc_info->ioc_prev_serv_cnt = 2662 ioc_info->ioc_profile.ioc_service_entries; 2663 } else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) { 2664 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response " 2665 "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state); 2666 mutex_exit(&gid_info->gl_mutex); 2667 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2668 return; 2669 } 2670 ioc_info->ioc_cb_args.cb_req_type = 0; 2671 if (ioc_info->ioc_timeout_id) { 2672 timeout_id = ioc_info->ioc_timeout_id; 2673 mutex_exit(&gid_info->gl_mutex); 2674 IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: " 2675 "ioc_timeout_id = 0x%x", timeout_id); 2676 if (untimeout(timeout_id) == -1) { 2677 IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: " 2678 "untimeout ioc_timeout_id failed"); 2679 } 2680 mutex_enter(&gid_info->gl_mutex); 2681 ioc_info->ioc_timeout_id = 0; 2682 } 2683 2684 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS; 2685 if (reprobe == 0) { 2686 ioc_info->ioc_iou_guid = gid_info->gl_nodeguid; 2687 ioc_info->ioc_nodeguid = gid_info->gl_nodeguid; 2688 } 2689 2690 /* 2691 * Save all the IOC information in the global structures. 2692 * Note that the wire format is big endian and SPARC processors are 2693 * also big endian, so no conversion is actually required there. 2694 * The conversion routines used below are no-ops on SPARC machines, 2695 * whereas they do the byte swapping on little endian machines such 2696 * as Intel processors.
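 *
 *    Purely illustrative (not necessarily how the kernel headers
 *    define them): on a big endian host b2h16/b2h32/b2h64 can simply
 *    be identity macros, while a little endian host must byte swap,
 *    for example
 *
 *        #ifdef _BIG_ENDIAN
 *        #define b2h16(x) (x)
 *        #else
 *        #define b2h16(x) ((((x) & 0xff) << 8) | (((x) >> 8) & 0xff))
 *        #endif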
2697 */ 2698 gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile; 2699 2700 /* 2701 * Restrict updates to only the port GIDs and service entries during reprobe 2702 */ 2703 if (reprobe == 0) { 2704 gioc->ioc_guid = b2h64(ioc->ioc_guid); 2705 gioc->ioc_vendorid = 2706 ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK) 2707 >> IB_DM_VENDORID_SHIFT); 2708 gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid); 2709 gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver); 2710 gioc->ioc_subsys_vendorid = 2711 ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK) 2712 >> IB_DM_VENDORID_SHIFT); 2713 gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id); 2714 gioc->ioc_io_class = b2h16(ioc->ioc_io_class); 2715 gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass); 2716 gioc->ioc_protocol = b2h16(ioc->ioc_protocol); 2717 gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver); 2718 gioc->ioc_send_msg_qdepth = 2719 b2h16(ioc->ioc_send_msg_qdepth); 2720 gioc->ioc_rdma_read_qdepth = 2721 b2h16(ioc->ioc_rdma_read_qdepth); 2722 gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz); 2723 gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz); 2724 gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask; 2725 bcopy(ioc->ioc_id_string, gioc->ioc_id_string, 2726 IB_DM_IOC_ID_STRING_LEN); 2727 2728 ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode; 2729 ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid; 2730 ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK & 2731 gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE; 2732 2733 if (ioc_info->ioc_diagdeviceid == B_TRUE) { 2734 gid_info->gl_pending_cmds++; 2735 IBTF_DPRINTF_L3(ibdm_string, 2736 "\tibdm_handle_ioc_profile: " 2737 "%d: gid_info %p gl_state %d pending_cmds %d", 2738 __LINE__, gid_info, gid_info->gl_state, 2739 gid_info->gl_pending_cmds); 2740 } 2741 } 2742 gioc->ioc_service_entries = ioc->ioc_service_entries; 2743 mutex_exit(&gid_info->gl_mutex); 2744 2745 ibdm_dump_ioc_profile(gioc); 2746 2747 if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) { 2748 if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) { 2749 mutex_enter(&gid_info->gl_mutex); 2750 gid_info->gl_pending_cmds--; 2751 mutex_exit(&gid_info->gl_mutex); 2752 } 2753 } 2754 ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc( 2755 (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)), 2756 KM_SLEEP); 2757 2758 /* 2759 * A single request can return at most four service entries. If the 2760 * IOC has more than four service entries, calculate the number of 2761 * requests needed and send them in parallel.
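 *
 *    Worked example (assuming IBDM_MAX_SERV_ENTRIES_PER_REQ is 4, as
 *    the loop below relies on): an IOC advertising 10 service entries
 *    needs three requests covering entries 0-3, 4-7 and 8-9.  Each
 *    pass uses srv_start = ii * 4 and packs the IOC number together
 *    with the start/end indexes into the attribute modifier via
 *    ibdm_fill_srv_attr_mod().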
2762 */ 2763 nserv_entries = ioc->ioc_service_entries; 2764 ii = 0; 2765 while (nserv_entries) { 2766 mutex_enter(&gid_info->gl_mutex); 2767 gid_info->gl_pending_cmds++; 2768 ibdm_bump_transactionID(gid_info); 2769 mutex_exit(&gid_info->gl_mutex); 2770 2771 if (first != B_TRUE) { 2772 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2773 &msg) != IBMF_SUCCESS) { 2774 continue; 2775 } 2776 2777 } 2778 ibdm_alloc_send_buffers(msg); 2779 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2780 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2781 if (gid_info->gl_redirected == B_TRUE) { 2782 if (gid_info->gl_redirect_dlid != 0) { 2783 msg->im_local_addr.ia_remote_lid = 2784 gid_info->gl_redirect_dlid; 2785 } 2786 msg->im_local_addr.ia_remote_qno = 2787 gid_info->gl_redirect_QP; 2788 msg->im_local_addr.ia_p_key = 2789 gid_info->gl_redirect_pkey; 2790 msg->im_local_addr.ia_q_key = 2791 gid_info->gl_redirect_qkey; 2792 } else { 2793 msg->im_local_addr.ia_remote_qno = 1; 2794 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2795 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2796 } 2797 2798 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2799 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2800 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2801 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2802 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2803 hdr->Status = 0; 2804 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2805 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 2806 2807 srv_start = ii * 4; 2808 cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args; 2809 cb_args->cb_gid_info = gid_info; 2810 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2811 cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS; 2812 cb_args->cb_srvents_start = srv_start; 2813 cb_args->cb_ioc_num = ioc_no - 1; 2814 2815 if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) { 2816 nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ; 2817 cb_args->cb_srvents_end = (cb_args->cb_srvents_start + 2818 IBDM_MAX_SERV_ENTRIES_PER_REQ - 1); 2819 } else { 2820 cb_args->cb_srvents_end = 2821 (cb_args->cb_srvents_start + nserv_entries - 1); 2822 nserv_entries = 0; 2823 } 2824 ibdm_fill_srv_attr_mod(hdr, cb_args); 2825 2826 ioc_info->ioc_serv[srv_start].se_timeout_id = timeout( 2827 ibdm_pkt_timeout_hdlr, cb_args, 2828 IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2829 2830 IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:" 2831 "timeout %x, ioc %d srv %d", 2832 ioc_info->ioc_serv[srv_start].se_timeout_id, 2833 ioc_no - 1, srv_start); 2834 2835 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2836 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2837 IBTF_DPRINTF_L2("ibdm", 2838 "\thandle_ioc_profile: msg send failed"); 2839 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2840 } 2841 (*flag) |= IBDM_IBMF_PKT_REUSED; 2842 first = B_FALSE; 2843 ii++; 2844 } 2845 } 2846 2847 2848 /* 2849 * ibdm_handle_srventry_mad() 2850 */ 2851 static void 2852 ibdm_handle_srventry_mad(ibmf_msg_t *msg, 2853 ibdm_dp_gidinfo_t *gid_info, int *flag) 2854 { 2855 uint_t ii, ioc_no, attrmod; 2856 uint_t nentries, start, end; 2857 timeout_id_t timeout_id; 2858 ib_dm_srv_t *srv_ents; 2859 ibdm_ioc_info_t *ioc_info; 2860 ibdm_srvents_info_t *gsrv_ents; 2861 2862 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:" 2863 " IBMF msg %p gid info %p", msg, gid_info); 2864 2865 srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg); 2866 /* 2867 * Get the start and end index of the service entries 2868 * Upper 16 bits identify the IOC 2869 * Lower 16 bits specify the range of service entries 2870 * LSB specifies (Big 
endian) end of the range 2871 * MSB specifies (Big endian) start of the range 2872 */ 2873 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2874 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 2875 end = ((attrmod >> 8) & IBDM_8_BIT_MASK); 2876 start = (attrmod & IBDM_8_BIT_MASK); 2877 2878 /* Make sure that IOC index is with the valid range */ 2879 if ((ioc_no < 1) | 2880 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) { 2881 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2882 "IOC index Out of range, index %d", ioc_no); 2883 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2884 return; 2885 } 2886 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 2887 2888 /* 2889 * Make sure that the "start" and "end" service indexes are 2890 * with in the valid range 2891 */ 2892 nentries = ioc_info->ioc_profile.ioc_service_entries; 2893 if ((start > end) | (start >= nentries) | (end >= nentries)) { 2894 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2895 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries); 2896 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2897 return; 2898 } 2899 gsrv_ents = &ioc_info->ioc_serv[start]; 2900 mutex_enter(&gid_info->gl_mutex); 2901 if (gsrv_ents->se_state != IBDM_SE_INVALID) { 2902 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2903 "already known, ioc %d, srv %d, se_state %x", 2904 ioc_no - 1, start, gsrv_ents->se_state); 2905 mutex_exit(&gid_info->gl_mutex); 2906 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2907 return; 2908 } 2909 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0; 2910 if (ioc_info->ioc_serv[start].se_timeout_id) { 2911 IBTF_DPRINTF_L2("ibdm", 2912 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start); 2913 timeout_id = ioc_info->ioc_serv[start].se_timeout_id; 2914 mutex_exit(&gid_info->gl_mutex); 2915 IBTF_DPRINTF_L5("ibdm", "handle_srverntry_mad: " 2916 "se_timeout_id = 0x%x", timeout_id); 2917 if (untimeout(timeout_id) == -1) { 2918 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: " 2919 "untimeout se_timeout_id failed"); 2920 } 2921 mutex_enter(&gid_info->gl_mutex); 2922 ioc_info->ioc_serv[start].se_timeout_id = 0; 2923 } 2924 2925 gsrv_ents->se_state = IBDM_SE_VALID; 2926 mutex_exit(&gid_info->gl_mutex); 2927 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) { 2928 gsrv_ents->se_attr.srv_id = b2h64(srv_ents->srv_id); 2929 bcopy(srv_ents->srv_name, 2930 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN); 2931 ibdm_dump_service_entries(&gsrv_ents->se_attr); 2932 } 2933 } 2934 2935 2936 /* 2937 * ibdm_get_diagcode: 2938 * Send request to get IOU/IOC diag code 2939 * Returns IBDM_SUCCESS/IBDM_FAILURE 2940 */ 2941 static int 2942 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr) 2943 { 2944 ibmf_msg_t *msg; 2945 ib_mad_hdr_t *hdr; 2946 ibdm_ioc_info_t *ioc; 2947 ibdm_timeout_cb_args_t *cb_args; 2948 timeout_id_t *timeout_id; 2949 2950 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d", 2951 gid_info, attr); 2952 2953 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2954 &msg) != IBMF_SUCCESS) { 2955 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail"); 2956 return (IBDM_FAILURE); 2957 } 2958 2959 ibdm_alloc_send_buffers(msg); 2960 2961 mutex_enter(&gid_info->gl_mutex); 2962 ibdm_bump_transactionID(gid_info); 2963 mutex_exit(&gid_info->gl_mutex); 2964 2965 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2966 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2967 if (gid_info->gl_redirected == B_TRUE) { 2968 if (gid_info->gl_redirect_dlid != 0) { 2969 msg->im_local_addr.ia_remote_lid = 2970 
gid_info->gl_redirect_dlid; 2971 } 2972 2973 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 2974 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 2975 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 2976 } else { 2977 msg->im_local_addr.ia_remote_qno = 1; 2978 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2979 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2980 } 2981 2982 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2983 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2984 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2985 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2986 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2987 hdr->Status = 0; 2988 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2989 2990 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 2991 hdr->AttributeModifier = h2b32(attr); 2992 2993 if (attr == 0) { 2994 cb_args = &gid_info->gl_iou_cb_args; 2995 gid_info->gl_iou->iou_dc_valid = B_FALSE; 2996 cb_args->cb_ioc_num = 0; 2997 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE; 2998 timeout_id = &gid_info->gl_timeout_id; 2999 } else { 3000 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1)); 3001 ioc->ioc_dc_valid = B_FALSE; 3002 cb_args = &ioc->ioc_dc_cb_args; 3003 cb_args->cb_ioc_num = attr - 1; 3004 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE; 3005 timeout_id = &ioc->ioc_dc_timeout_id; 3006 } 3007 cb_args->cb_gid_info = gid_info; 3008 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 3009 cb_args->cb_srvents_start = 0; 3010 3011 3012 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3013 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3014 3015 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:" 3016 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num); 3017 3018 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3019 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3020 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed"); 3021 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3022 } 3023 return (IBDM_SUCCESS); 3024 } 3025 3026 /* 3027 * ibdm_handle_diagcode: 3028 * Process the DiagCode MAD response and update local DM 3029 * data structure. 
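 *
 *    The attribute modifier selects the target, mirroring the request
 *    side in ibdm_get_diagcode(): attrmod == 0 refers to the IOU
 *    itself, while attrmod == n (n >= 1) refers to IOC slot n, e.g.
 *
 *        ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1));
 *
 *    The same convention is applied below when storing the diag code
 *    and cancelling the matching timeout.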
3030 */ 3031 static void 3032 ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg, 3033 ibdm_dp_gidinfo_t *gid_info, int *flag) 3034 { 3035 uint16_t attrmod, *diagcode; 3036 ibdm_iou_info_t *iou; 3037 ibdm_ioc_info_t *ioc; 3038 timeout_id_t timeout_id; 3039 ibdm_timeout_cb_args_t *cb_args; 3040 3041 diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data; 3042 3043 mutex_enter(&gid_info->gl_mutex); 3044 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg); 3045 iou = gid_info->gl_iou; 3046 if (attrmod == 0) { 3047 if (iou->iou_dc_valid != B_FALSE) { 3048 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3049 IBTF_DPRINTF_L4("ibdm", 3050 "\thandle_diagcode: Duplicate IOU DiagCode"); 3051 mutex_exit(&gid_info->gl_mutex); 3052 return; 3053 } 3054 cb_args = &gid_info->gl_iou_cb_args; 3055 cb_args->cb_req_type = 0; 3056 iou->iou_diagcode = b2h16(*diagcode); 3057 iou->iou_dc_valid = B_TRUE; 3058 if (gid_info->gl_timeout_id) { 3059 timeout_id = gid_info->gl_timeout_id; 3060 mutex_exit(&gid_info->gl_mutex); 3061 IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: " 3062 "gl_timeout_id = 0x%x", timeout_id); 3063 if (untimeout(timeout_id) == -1) { 3064 IBTF_DPRINTF_L2("ibdm", "handle_diagcode: " 3065 "untimeout gl_timeout_id failed"); 3066 } 3067 mutex_enter(&gid_info->gl_mutex); 3068 gid_info->gl_timeout_id = 0; 3069 } 3070 } else { 3071 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1)); 3072 if (ioc->ioc_dc_valid != B_FALSE) { 3073 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3074 IBTF_DPRINTF_L4("ibdm", 3075 "\thandle_diagcode: Duplicate IOC DiagCode"); 3076 mutex_exit(&gid_info->gl_mutex); 3077 return; 3078 } 3079 cb_args = &ioc->ioc_dc_cb_args; 3080 cb_args->cb_req_type = 0; 3081 ioc->ioc_diagcode = b2h16(*diagcode); 3082 ioc->ioc_dc_valid = B_TRUE; 3083 timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id; 3084 if (timeout_id) { 3085 mutex_exit(&gid_info->gl_mutex); 3086 IBTF_DPRINTF_L5("ibdm", "handle_diagcode: " 3087 "timeout_id = 0x%x", timeout_id); 3088 if (untimeout(timeout_id) == -1) { 3089 IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: " 3090 "untimeout ioc_dc_timeout_id failed"); 3091 } 3092 mutex_enter(&gid_info->gl_mutex); 3093 iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0; 3094 } 3095 } 3096 mutex_exit(&gid_info->gl_mutex); 3097 3098 IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x" 3099 "attrmod : 0x%x", b2h16(*diagcode), attrmod); 3100 } 3101 3102 3103 /* 3104 * ibdm_is_ioc_present() 3105 * Return ibdm_ioc_info_t if IOC guid is found in the global gid list 3106 */ 3107 static ibdm_ioc_info_t * 3108 ibdm_is_ioc_present(ib_guid_t ioc_guid, 3109 ibdm_dp_gidinfo_t *gid_info, int *flag) 3110 { 3111 int ii; 3112 ibdm_ioc_info_t *ioc; 3113 ibdm_dp_gidinfo_t *head; 3114 ib_dm_io_unitinfo_t *iou; 3115 3116 mutex_enter(&ibdm.ibdm_mutex); 3117 head = ibdm.ibdm_dp_gidlist_head; 3118 while (head) { 3119 mutex_enter(&head->gl_mutex); 3120 if (head->gl_iou == NULL) { 3121 mutex_exit(&head->gl_mutex); 3122 head = head->gl_next; 3123 continue; 3124 } 3125 iou = &head->gl_iou->iou_info; 3126 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 3127 ioc = IBDM_GIDINFO2IOCINFO(head, ii); 3128 if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) && 3129 (ioc->ioc_profile.ioc_guid == ioc_guid)) { 3130 if (gid_info == head) { 3131 *flag |= IBDM_IBMF_PKT_DUP_RESP; 3132 } else if (ibdm_check_dgid(head->gl_dgid_lo, 3133 head->gl_dgid_hi) != NULL) { 3134 IBTF_DPRINTF_L4("ibdm", "\tis_ioc_" 3135 "present: gid not present"); 3136 ibdm_add_to_gl_gid(gid_info, head); 3137 } 3138 mutex_exit(&head->gl_mutex); 3139 
mutex_exit(&ibdm.ibdm_mutex); 3140 return (ioc); 3141 } 3142 } 3143 mutex_exit(&head->gl_mutex); 3144 head = head->gl_next; 3145 } 3146 mutex_exit(&ibdm.ibdm_mutex); 3147 return (NULL); 3148 } 3149 3150 3151 /* 3152 * ibdm_ibmf_send_cb() 3153 * IBMF invokes this callback routine after posting the DM MAD to 3154 * the HCA. 3155 */ 3156 /*ARGSUSED*/ 3157 static void 3158 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg) 3159 { 3160 ibdm_dump_ibmf_msg(ibmf_msg, 1); 3161 ibdm_free_send_buffers(ibmf_msg); 3162 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) { 3163 IBTF_DPRINTF_L4("ibdm", 3164 "\tibmf_send_cb: IBMF free msg failed"); 3165 } 3166 } 3167 3168 3169 /* 3170 * ibdm_ibmf_recv_cb() 3171 * Invoked by the IBMF when a response to the one of the DM requests 3172 * is received. 3173 */ 3174 /*ARGSUSED*/ 3175 static void 3176 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3177 { 3178 ibdm_taskq_args_t *taskq_args; 3179 3180 /* 3181 * If the taskq enable is set then dispatch a taskq to process 3182 * the MAD, otherwise just process it on this thread 3183 */ 3184 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) { 3185 ibdm_process_incoming_mad(ibmf_hdl, msg, arg); 3186 return; 3187 } 3188 3189 /* 3190 * create a taskq and dispatch it to process the incoming MAD 3191 */ 3192 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP); 3193 if (taskq_args == NULL) { 3194 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for" 3195 "taskq_args"); 3196 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3197 IBTF_DPRINTF_L4("ibmf_recv_cb", 3198 "\tibmf_recv_cb: IBMF free msg failed"); 3199 } 3200 return; 3201 } 3202 taskq_args->tq_ibmf_handle = ibmf_hdl; 3203 taskq_args->tq_ibmf_msg = msg; 3204 taskq_args->tq_args = arg; 3205 3206 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args, 3207 TQ_NOSLEEP) == 0) { 3208 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed"); 3209 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3210 IBTF_DPRINTF_L4("ibmf_recv_cb", 3211 "\tibmf_recv_cb: IBMF free msg failed"); 3212 } 3213 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3214 return; 3215 } 3216 3217 /* taskq_args are deleted in ibdm_recv_incoming_mad() */ 3218 } 3219 3220 3221 void 3222 ibdm_recv_incoming_mad(void *args) 3223 { 3224 ibdm_taskq_args_t *taskq_args; 3225 3226 taskq_args = (ibdm_taskq_args_t *)args; 3227 3228 IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: " 3229 "Processing incoming MAD via taskq"); 3230 3231 ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle, 3232 taskq_args->tq_ibmf_msg, taskq_args->tq_args); 3233 3234 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3235 } 3236 3237 3238 /* 3239 * Calls ibdm_process_incoming_mad with all function arguments extracted 3240 * from args 3241 */ 3242 /*ARGSUSED*/ 3243 static void 3244 ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3245 { 3246 int flag = 0; 3247 int ret; 3248 uint64_t transaction_id; 3249 ib_mad_hdr_t *hdr; 3250 ibdm_dp_gidinfo_t *gid_info = NULL; 3251 3252 IBTF_DPRINTF_L4("ibdm", 3253 "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg); 3254 ibdm_dump_ibmf_msg(msg, 0); 3255 3256 /* 3257 * IBMF calls this routine for every DM MAD that arrives at this port. 3258 * But we handle only the responses for requests we sent. We drop all 3259 * the DM packets that does not have response bit set in the MAD 3260 * header(this eliminates all the requests sent to this port). 
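 *
 *    Responses are matched back to the GID being probed through the
 *    transaction ID. Each GID owns a block of IDs, so the lookup below
 *    amounts to (sketch)
 *
 *        (gid_info->gl_transactionID & IBDM_GID_TRANSACTIONID_MASK) ==
 *            (b2h64(hdr->TransactionID) & IBDM_GID_TRANSACTIONID_MASK)
 *
 *    and responses that match no GID are dropped.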
3261 * We handle only DM class version 1 MAD's 3262 */ 3263 hdr = IBDM_IN_IBMFMSG_MADHDR(msg); 3264 if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) { 3265 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3266 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3267 "IBMF free msg failed DM request drop it"); 3268 } 3269 return; 3270 } 3271 3272 transaction_id = b2h64(hdr->TransactionID); 3273 3274 mutex_enter(&ibdm.ibdm_mutex); 3275 gid_info = ibdm.ibdm_dp_gidlist_head; 3276 while (gid_info) { 3277 if ((gid_info->gl_transactionID & 3278 IBDM_GID_TRANSACTIONID_MASK) == 3279 (transaction_id & IBDM_GID_TRANSACTIONID_MASK)) 3280 break; 3281 gid_info = gid_info->gl_next; 3282 } 3283 mutex_exit(&ibdm.ibdm_mutex); 3284 3285 if (gid_info == NULL) { 3286 /* Drop the packet */ 3287 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID" 3288 " does not match: 0x%llx", transaction_id); 3289 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3290 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3291 "IBMF free msg failed DM request drop it"); 3292 } 3293 return; 3294 } 3295 3296 /* Handle redirection for all the MAD's, except ClassPortInfo */ 3297 if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) && 3298 (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) { 3299 ret = ibdm_handle_redirection(msg, gid_info, &flag); 3300 if (ret == IBDM_SUCCESS) { 3301 return; 3302 } 3303 } else { 3304 uint_t gl_state; 3305 3306 mutex_enter(&gid_info->gl_mutex); 3307 gl_state = gid_info->gl_state; 3308 mutex_exit(&gid_info->gl_mutex); 3309 3310 switch (gl_state) { 3311 3312 case IBDM_SET_CLASSPORTINFO: 3313 ibdm_handle_setclassportinfo( 3314 ibmf_hdl, msg, gid_info, &flag); 3315 break; 3316 3317 case IBDM_GET_CLASSPORTINFO: 3318 ibdm_handle_classportinfo( 3319 ibmf_hdl, msg, gid_info, &flag); 3320 break; 3321 3322 case IBDM_GET_IOUNITINFO: 3323 ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag); 3324 break; 3325 3326 case IBDM_GET_IOC_DETAILS: 3327 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3328 3329 case IB_DM_ATTR_SERVICE_ENTRIES: 3330 ibdm_handle_srventry_mad(msg, gid_info, &flag); 3331 break; 3332 3333 case IB_DM_ATTR_IOC_CTRL_PROFILE: 3334 ibdm_handle_ioc_profile( 3335 ibmf_hdl, msg, gid_info, &flag); 3336 break; 3337 3338 case IB_DM_ATTR_DIAG_CODE: 3339 ibdm_handle_diagcode(msg, gid_info, &flag); 3340 break; 3341 3342 default: 3343 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3344 "Error state, wrong attribute :-("); 3345 (void) ibmf_free_msg(ibmf_hdl, &msg); 3346 return; 3347 } 3348 break; 3349 default: 3350 IBTF_DPRINTF_L2("ibdm", 3351 "process_incoming_mad: Dropping the packet" 3352 " gl_state %x", gl_state); 3353 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3354 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3355 "IBMF free msg failed DM request drop it"); 3356 } 3357 return; 3358 } 3359 } 3360 3361 if ((flag & IBDM_IBMF_PKT_DUP_RESP) || 3362 (flag & IBDM_IBMF_PKT_UNEXP_RESP)) { 3363 IBTF_DPRINTF_L2("ibdm", 3364 "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag); 3365 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3366 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3367 "IBMF free msg failed DM request drop it"); 3368 } 3369 return; 3370 } 3371 3372 mutex_enter(&gid_info->gl_mutex); 3373 if (gid_info->gl_pending_cmds < 1) { 3374 IBTF_DPRINTF_L2("ibdm", 3375 "\tprocess_incoming_mad: pending commands negative"); 3376 } 3377 if (--gid_info->gl_pending_cmds) { 3378 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: " 3379 "gid_info %p pending cmds %d", 3380 
gid_info, gid_info->gl_pending_cmds); 3381 mutex_exit(&gid_info->gl_mutex); 3382 } else { 3383 uint_t prev_state; 3384 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: Probing DONE"); 3385 prev_state = gid_info->gl_state; 3386 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 3387 if (prev_state == IBDM_SET_CLASSPORTINFO) { 3388 IBTF_DPRINTF_L4("ibdm", 3389 "\tprocess_incoming_mad: " 3390 "Setclassportinfo for Cisco FC GW is done."); 3391 gid_info->gl_flag &= ~IBDM_CISCO_PROBE; 3392 gid_info->gl_flag |= IBDM_CISCO_PROBE_DONE; 3393 mutex_exit(&gid_info->gl_mutex); 3394 cv_broadcast(&gid_info->gl_probe_cv); 3395 } else { 3396 mutex_exit(&gid_info->gl_mutex); 3397 ibdm_notify_newgid_iocs(gid_info); 3398 mutex_enter(&ibdm.ibdm_mutex); 3399 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3400 IBTF_DPRINTF_L4("ibdm", 3401 "\tprocess_incoming_mad: Wakeup"); 3402 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3403 cv_broadcast(&ibdm.ibdm_probe_cv); 3404 } 3405 mutex_exit(&ibdm.ibdm_mutex); 3406 } 3407 } 3408 3409 /* 3410 * Do not deallocate the IBMF packet if atleast one request 3411 * is posted. IBMF packet is reused. 3412 */ 3413 if (!(flag & IBDM_IBMF_PKT_REUSED)) { 3414 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3415 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3416 "IBMF free msg failed DM request drop it"); 3417 } 3418 } 3419 } 3420 3421 3422 /* 3423 * ibdm_verify_mad_status() 3424 * Verifies the MAD status 3425 * Returns IBDM_SUCCESS if status is correct 3426 * Returns IBDM_FAILURE for bogus MAD status 3427 */ 3428 static int 3429 ibdm_verify_mad_status(ib_mad_hdr_t *hdr) 3430 { 3431 int ret = 0; 3432 3433 if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) || 3434 (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) { 3435 return (IBDM_FAILURE); 3436 } 3437 3438 if (b2h16(hdr->Status) == 0) 3439 ret = IBDM_SUCCESS; 3440 else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED) 3441 ret = IBDM_SUCCESS; 3442 else { 3443 IBTF_DPRINTF_L2("ibdm", 3444 "\tverify_mad_status: Status : 0x%x", b2h16(hdr->Status)); 3445 ret = IBDM_FAILURE; 3446 } 3447 return (ret); 3448 } 3449 3450 3451 3452 /* 3453 * ibdm_handle_redirection() 3454 * Returns IBDM_SUCCESS/IBDM_FAILURE 3455 */ 3456 static int 3457 ibdm_handle_redirection(ibmf_msg_t *msg, 3458 ibdm_dp_gidinfo_t *gid_info, int *flag) 3459 { 3460 int attrmod, ioc_no, start; 3461 void *data; 3462 timeout_id_t *timeout_id; 3463 ib_mad_hdr_t *hdr; 3464 ibdm_ioc_info_t *ioc = NULL; 3465 ibdm_timeout_cb_args_t *cb_args; 3466 ib_mad_classportinfo_t *cpi; 3467 3468 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter"); 3469 mutex_enter(&gid_info->gl_mutex); 3470 switch (gid_info->gl_state) { 3471 case IBDM_GET_IOUNITINFO: 3472 cb_args = &gid_info->gl_iou_cb_args; 3473 timeout_id = &gid_info->gl_timeout_id; 3474 break; 3475 3476 case IBDM_GET_IOC_DETAILS: 3477 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3478 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3479 3480 case IB_DM_ATTR_DIAG_CODE: 3481 if (attrmod == 0) { 3482 cb_args = &gid_info->gl_iou_cb_args; 3483 timeout_id = &gid_info->gl_timeout_id; 3484 break; 3485 } 3486 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3487 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3488 "IOC# Out of range %d", attrmod); 3489 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3490 mutex_exit(&gid_info->gl_mutex); 3491 return (IBDM_FAILURE); 3492 } 3493 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3494 cb_args = &ioc->ioc_dc_cb_args; 3495 timeout_id = &ioc->ioc_dc_timeout_id; 3496 break; 3497 3498 case 
IB_DM_ATTR_IOC_CTRL_PROFILE: 3499 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3500 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3501 "IOC# Out of range %d", attrmod); 3502 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3503 mutex_exit(&gid_info->gl_mutex); 3504 return (IBDM_FAILURE); 3505 } 3506 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3507 cb_args = &ioc->ioc_cb_args; 3508 timeout_id = &ioc->ioc_timeout_id; 3509 break; 3510 3511 case IB_DM_ATTR_SERVICE_ENTRIES: 3512 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3513 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 3514 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3515 "IOC# Out of range %d", ioc_no); 3516 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3517 mutex_exit(&gid_info->gl_mutex); 3518 return (IBDM_FAILURE); 3519 } 3520 start = (attrmod & IBDM_8_BIT_MASK); 3521 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3522 if (start > ioc->ioc_profile.ioc_service_entries) { 3523 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3524 " SE index Out of range %d", start); 3525 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3526 mutex_exit(&gid_info->gl_mutex); 3527 return (IBDM_FAILURE); 3528 } 3529 cb_args = &ioc->ioc_serv[start].se_cb_args; 3530 timeout_id = &ioc->ioc_serv[start].se_timeout_id; 3531 break; 3532 3533 default: 3534 /* ERROR State */ 3535 IBTF_DPRINTF_L2("ibdm", 3536 "\thandle_redirection: wrong attribute :-("); 3537 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3538 mutex_exit(&gid_info->gl_mutex); 3539 return (IBDM_FAILURE); 3540 } 3541 break; 3542 default: 3543 /* ERROR State */ 3544 IBTF_DPRINTF_L2("ibdm", 3545 "\thandle_redirection: Error state :-("); 3546 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3547 mutex_exit(&gid_info->gl_mutex); 3548 return (IBDM_FAILURE); 3549 } 3550 if ((*timeout_id) != 0) { 3551 mutex_exit(&gid_info->gl_mutex); 3552 if (untimeout(*timeout_id) == -1) { 3553 IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: " 3554 "untimeout failed %x", *timeout_id); 3555 } else { 3556 IBTF_DPRINTF_L5("ibdm", 3557 "\thandle_redirection: timeout %x", *timeout_id); 3558 } 3559 mutex_enter(&gid_info->gl_mutex); 3560 *timeout_id = 0; 3561 } 3562 3563 data = msg->im_msgbufs_recv.im_bufs_cl_data; 3564 cpi = (ib_mad_classportinfo_t *)data; 3565 3566 gid_info->gl_resp_timeout = 3567 (b2h32(cpi->RespTimeValue) & 0x1F); 3568 3569 gid_info->gl_redirected = B_TRUE; 3570 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 3571 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 3572 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 3573 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 3574 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 3575 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 3576 3577 if (gid_info->gl_redirect_dlid != 0) { 3578 msg->im_local_addr.ia_remote_lid = 3579 gid_info->gl_redirect_dlid; 3580 } 3581 ibdm_bump_transactionID(gid_info); 3582 mutex_exit(&gid_info->gl_mutex); 3583 3584 ibdm_alloc_send_buffers(msg); 3585 3586 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3587 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3588 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3589 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3590 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3591 hdr->Status = 0; 3592 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3593 hdr->AttributeID = 3594 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID; 3595 hdr->AttributeModifier = 3596 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier; 3597 3598 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3599 
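	/*
	 * Redirection note: the ClassPortInfo received above carried the
	 * redirect indication, so the original request is rebuilt in the
	 * same (reused) IBMF message with the identical AttributeID and
	 * AttributeModifier, retargeted at the redirected LID/QP.  The
	 * redirected P_Key and Q_Key are filled in below, a new response
	 * timeout is armed, and the MAD is resent via ibmf_msg_transport().
	 */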
msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3600 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3601 3602 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3603 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3604 3605 IBTF_DPRINTF_L5("ibdm", "\thandle_redirect:" 3606 "timeout %x", *timeout_id); 3607 3608 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3609 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3610 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:" 3611 "message transport failed"); 3612 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3613 } 3614 (*flag) |= IBDM_IBMF_PKT_REUSED; 3615 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit"); 3616 return (IBDM_SUCCESS); 3617 } 3618 3619 3620 /* 3621 * ibdm_pkt_timeout_hdlr 3622 * This timeout handler is registed for every IBMF packet that is 3623 * sent through the IBMF. It gets called when no response is received 3624 * within the specified time for the packet. No retries for the failed 3625 * commands currently. Drops the failed IBMF packet and update the 3626 * pending list commands. 3627 */ 3628 static void 3629 ibdm_pkt_timeout_hdlr(void *arg) 3630 { 3631 int probe_done = B_FALSE; 3632 ibdm_iou_info_t *iou; 3633 ibdm_ioc_info_t *ioc; 3634 ibdm_timeout_cb_args_t *cb_args = arg; 3635 ibdm_dp_gidinfo_t *gid_info; 3636 int srv_ent; 3637 uint_t new_gl_state; 3638 3639 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p " 3640 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3641 cb_args->cb_req_type, cb_args->cb_ioc_num, 3642 cb_args->cb_srvents_start); 3643 3644 gid_info = cb_args->cb_gid_info; 3645 mutex_enter(&gid_info->gl_mutex); 3646 3647 if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) || 3648 (cb_args->cb_req_type == 0)) { 3649 3650 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed" 3651 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type, 3652 cb_args->cb_ioc_num, cb_args->cb_srvents_start); 3653 3654 if (gid_info->gl_timeout_id) 3655 gid_info->gl_timeout_id = 0; 3656 mutex_exit(&gid_info->gl_mutex); 3657 return; 3658 } 3659 if (cb_args->cb_retry_count) { 3660 cb_args->cb_retry_count--; 3661 if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) { 3662 if (gid_info->gl_timeout_id) 3663 gid_info->gl_timeout_id = 0; 3664 mutex_exit(&gid_info->gl_mutex); 3665 return; 3666 } 3667 cb_args->cb_retry_count = 0; 3668 } 3669 3670 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p" 3671 " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3672 cb_args->cb_req_type, cb_args->cb_ioc_num, 3673 cb_args->cb_srvents_start); 3674 3675 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3676 switch (cb_args->cb_req_type) { 3677 3678 case IBDM_REQ_TYPE_CLASSPORTINFO: 3679 case IBDM_REQ_TYPE_IOUINFO: 3680 new_gl_state = IBDM_GID_PROBING_FAILED; 3681 if (--gid_info->gl_pending_cmds == 0) 3682 probe_done = B_TRUE; 3683 if (gid_info->gl_timeout_id) 3684 gid_info->gl_timeout_id = 0; 3685 mutex_exit(&gid_info->gl_mutex); 3686 ibdm_delete_glhca_list(gid_info); 3687 mutex_enter(&gid_info->gl_mutex); 3688 break; 3689 case IBDM_REQ_TYPE_IOCINFO: 3690 iou = gid_info->gl_iou; 3691 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3692 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3693 if (--gid_info->gl_pending_cmds == 0) 3694 probe_done = B_TRUE; 3695 #ifndef __lock_lint 3696 if (ioc->ioc_timeout_id) 3697 ioc->ioc_timeout_id = 0; 3698 #endif 3699 break; 3700 case IBDM_REQ_TYPE_SRVENTS: 3701 iou = gid_info->gl_iou; 3702 ioc = 
&iou->iou_ioc_info[cb_args->cb_ioc_num]; 3703 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3704 if (--gid_info->gl_pending_cmds == 0) 3705 probe_done = B_TRUE; 3706 srv_ent = cb_args->cb_srvents_start; 3707 #ifndef __lock_lint 3708 if (ioc->ioc_serv[srv_ent].se_timeout_id) 3709 ioc->ioc_serv[srv_ent].se_timeout_id = 0; 3710 #endif 3711 break; 3712 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3713 iou = gid_info->gl_iou; 3714 iou->iou_dc_valid = B_FALSE; 3715 if (--gid_info->gl_pending_cmds == 0) 3716 probe_done = B_TRUE; 3717 if (gid_info->gl_timeout_id) 3718 gid_info->gl_timeout_id = 0; 3719 break; 3720 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3721 iou = gid_info->gl_iou; 3722 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3723 ioc->ioc_dc_valid = B_FALSE; 3724 if (--gid_info->gl_pending_cmds == 0) 3725 probe_done = B_TRUE; 3726 #ifndef __lock_lint 3727 if (ioc->ioc_dc_timeout_id) 3728 ioc->ioc_dc_timeout_id = 0; 3729 #endif 3730 break; 3731 default: /* ERROR State */ 3732 IBTF_DPRINTF_L2("ibdm", 3733 "\tpkt_timeout_hdlr: wrong request type."); 3734 new_gl_state = IBDM_GID_PROBING_FAILED; 3735 if (gid_info->gl_timeout_id) 3736 gid_info->gl_timeout_id = 0; 3737 break; 3738 } 3739 if (probe_done == B_TRUE) { 3740 gid_info->gl_state = new_gl_state; 3741 mutex_exit(&gid_info->gl_mutex); 3742 ibdm_notify_newgid_iocs(gid_info); 3743 mutex_enter(&ibdm.ibdm_mutex); 3744 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3745 IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup"); 3746 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3747 cv_broadcast(&ibdm.ibdm_probe_cv); 3748 } 3749 mutex_exit(&ibdm.ibdm_mutex); 3750 } else 3751 mutex_exit(&gid_info->gl_mutex); 3752 } 3753 3754 3755 /* 3756 * ibdm_retry_command() 3757 * Retries the failed command. 3758 * Returns IBDM_FAILURE/IBDM_SUCCESS 3759 */ 3760 static int 3761 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args) 3762 { 3763 int ret; 3764 ibmf_msg_t *msg; 3765 ib_mad_hdr_t *hdr; 3766 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info; 3767 timeout_id_t *timeout_id; 3768 ibdm_ioc_info_t *ioc; 3769 int ioc_no; 3770 3771 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p " 3772 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3773 cb_args->cb_req_type, cb_args->cb_ioc_num, 3774 cb_args->cb_srvents_start); 3775 3776 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg); 3777 3778 3779 /* 3780 * Reset the gid if alloc_msg failed with BAD_HANDLE 3781 * ibdm_reset_gidinfo reinits the gid_info 3782 */ 3783 if (ret == IBMF_BAD_HANDLE) { 3784 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad", 3785 gid_info); 3786 3787 mutex_exit(&gid_info->gl_mutex); 3788 ibdm_reset_gidinfo(gid_info); 3789 mutex_enter(&gid_info->gl_mutex); 3790 3791 /* Retry alloc */ 3792 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, 3793 &msg); 3794 } 3795 3796 if (ret != IBDM_SUCCESS) { 3797 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p " 3798 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3799 cb_args->cb_req_type, cb_args->cb_ioc_num, 3800 cb_args->cb_srvents_start); 3801 return (IBDM_FAILURE); 3802 } 3803 3804 ibdm_alloc_send_buffers(msg); 3805 3806 ibdm_bump_transactionID(gid_info); 3807 3808 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3809 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3810 if (gid_info->gl_redirected == B_TRUE) { 3811 if (gid_info->gl_redirect_dlid != 0) { 3812 msg->im_local_addr.ia_remote_lid = 3813 gid_info->gl_redirect_dlid; 3814 } 3815 msg->im_local_addr.ia_remote_qno = 
gid_info->gl_redirect_QP; 3816 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3817 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3818 } else { 3819 msg->im_local_addr.ia_remote_qno = 1; 3820 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 3821 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3822 } 3823 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3824 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3825 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3826 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3827 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3828 hdr->Status = 0; 3829 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3830 3831 switch (cb_args->cb_req_type) { 3832 case IBDM_REQ_TYPE_CLASSPORTINFO: 3833 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 3834 hdr->AttributeModifier = 0; 3835 timeout_id = &gid_info->gl_timeout_id; 3836 break; 3837 case IBDM_REQ_TYPE_IOUINFO: 3838 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 3839 hdr->AttributeModifier = 0; 3840 timeout_id = &gid_info->gl_timeout_id; 3841 break; 3842 case IBDM_REQ_TYPE_IOCINFO: 3843 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 3844 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3845 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3846 timeout_id = &ioc->ioc_timeout_id; 3847 break; 3848 case IBDM_REQ_TYPE_SRVENTS: 3849 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 3850 ibdm_fill_srv_attr_mod(hdr, cb_args); 3851 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3852 timeout_id = 3853 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id; 3854 break; 3855 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3856 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3857 hdr->AttributeModifier = 0; 3858 timeout_id = &gid_info->gl_timeout_id; 3859 break; 3860 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3861 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3862 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3863 ioc_no = cb_args->cb_ioc_num; 3864 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no]; 3865 timeout_id = &ioc->ioc_dc_timeout_id; 3866 break; 3867 } 3868 3869 mutex_exit(&gid_info->gl_mutex); 3870 3871 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3872 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3873 3874 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:" 3875 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num, 3876 cb_args->cb_srvents_start, *timeout_id); 3877 3878 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, 3879 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb, 3880 cb_args, 0) != IBMF_SUCCESS) { 3881 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p " 3882 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3883 cb_args->cb_req_type, cb_args->cb_ioc_num, 3884 cb_args->cb_srvents_start); 3885 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3886 } 3887 mutex_enter(&gid_info->gl_mutex); 3888 return (IBDM_SUCCESS); 3889 } 3890 3891 3892 /* 3893 * ibdm_update_ioc_port_gidlist() 3894 */ 3895 static void 3896 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest, 3897 ibdm_dp_gidinfo_t *gid_info) 3898 { 3899 int ii, ngid_ents; 3900 ibdm_gid_t *tmp; 3901 ibdm_hca_list_t *gid_hca_head, *temp; 3902 ibdm_hca_list_t *ioc_head = NULL; 3903 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 3904 3905 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter"); 3906 3907 ngid_ents = gid_info->gl_ngids; 3908 dest->ioc_nportgids = ngid_ents; 3909 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) * 3910 ngid_ents, KM_SLEEP); 3911 tmp = gid_info->gl_gid; 3912 for (ii = 0; (ii < 
ngid_ents) && (tmp); ii++) { 3913 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi; 3914 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo; 3915 tmp = tmp->gid_next; 3916 } 3917 3918 gid_hca_head = gid_info->gl_hca_list; 3919 while (gid_hca_head) { 3920 temp = ibdm_dup_hca_attr(gid_hca_head); 3921 temp->hl_next = ioc_head; 3922 ioc_head = temp; 3923 gid_hca_head = gid_hca_head->hl_next; 3924 } 3925 dest->ioc_hca_list = ioc_head; 3926 } 3927 3928 3929 /* 3930 * ibdm_alloc_send_buffers() 3931 * Allocates memory for the IBMF send buffer to send and/or receive 3932 * the Device Management MAD packet. 3933 */ 3934 static void 3935 ibdm_alloc_send_buffers(ibmf_msg_t *msgp) 3936 { 3937 msgp->im_msgbufs_send.im_bufs_mad_hdr = 3938 kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP); 3939 3940 msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *) 3941 msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t); 3942 msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDM_DM_MAD_HDR_SZ; 3943 3944 msgp->im_msgbufs_send.im_bufs_cl_data = 3945 ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr + IBDM_DM_MAD_HDR_SZ); 3946 msgp->im_msgbufs_send.im_bufs_cl_data_len = 3947 IBDM_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDM_DM_MAD_HDR_SZ; 3948 } 3949 3950 3951 /* 3952 * ibdm_free_send_buffers() 3953 * De-allocates memory for the IBMF send buffer 3954 */ 3955 static void 3956 ibdm_free_send_buffers(ibmf_msg_t *msgp) 3957 { 3958 if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL) 3959 kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE); 3960 } 3961 3962 /* 3963 * ibdm_probe_ioc() 3964 * 1. Gets the node records for the node GUID. This detects all the ports 3965 * on the IOU. 3966 * 2. Selectively probes all the IOCs, given its node GUID 3967 * 3. In case of reprobe, only the IOC to be reprobed is sent the IOC 3968 * Controller Profile asynchronously 3969 */ 3970 /*ARGSUSED*/ 3971 static void 3972 ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag) 3973 { 3974 int ii, nrecords; 3975 size_t nr_len = 0, pi_len = 0; 3976 ib_gid_t sgid, dgid; 3977 ibdm_hca_list_t *hca_list = NULL; 3978 sa_node_record_t *nr, *tmp; 3979 ibdm_port_attr_t *port = NULL; 3980 ibdm_dp_gidinfo_t *reprobe_gid, *new_gid, *node_gid; 3981 ibdm_dp_gidinfo_t *temp_gidinfo; 3982 ibdm_gid_t *temp_gid; 3983 sa_portinfo_record_t *pi; 3984 3985 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%llx, %llx, %x): Begin", 3986 nodeguid, ioc_guid, reprobe_flag); 3987 3988 /* Rescan the GID list for any removed GIDs for reprobe */ 3989 if (reprobe_flag) 3990 ibdm_rescan_gidlist(&ioc_guid); 3991 3992 mutex_enter(&ibdm.ibdm_hl_mutex); 3993 for (ibdm_get_next_port(&hca_list, &port, 1); port; 3994 ibdm_get_next_port(&hca_list, &port, 1)) { 3995 reprobe_gid = new_gid = node_gid = NULL; 3996 3997 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid); 3998 if (nr == NULL) { 3999 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records"); 4000 continue; 4001 } 4002 nrecords = (nr_len / sizeof (sa_node_record_t)); 4003 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 4004 if ((pi = ibdm_get_portinfo( 4005 port->pa_sa_hdl, &pi_len, tmp->LID)) == NULL) { 4006 IBTF_DPRINTF_L4("ibdm", 4007 "\tibdm_get_portinfo: no portinfo recs"); 4008 continue; 4009 } 4010 4011 /* 4012 * If Device Management is not supported on 4013 * this port, skip the rest. 4014 */ 4015 if (!(pi->PortInfo.CapabilityMask & 4016 SM_CAP_MASK_IS_DM_SUPPD)) { 4017 kmem_free(pi, pi_len); 4018 continue; 4019 } 4020 4021 /* 4022 * For reprobes: Check if the GID is already in 4023 * the list.
If so, set the state to SKIPPED 4024 */ 4025 if (((temp_gidinfo = ibdm_find_gid(nodeguid, 4026 tmp->NodeInfo.PortGUID)) != NULL) && 4027 temp_gidinfo->gl_state == 4028 IBDM_GID_PROBING_COMPLETE) { 4029 ASSERT(reprobe_gid == NULL); 4030 ibdm_addto_glhcalist(temp_gidinfo, 4031 hca_list); 4032 reprobe_gid = temp_gidinfo; 4033 kmem_free(pi, pi_len); 4034 continue; 4035 } else if (temp_gidinfo != NULL) { 4036 kmem_free(pi, pi_len); 4037 ibdm_addto_glhcalist(temp_gidinfo, 4038 hca_list); 4039 continue; 4040 } 4041 4042 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : " 4043 "create_gid : prefix %llx, guid %llx\n", 4044 pi->PortInfo.GidPrefix, 4045 tmp->NodeInfo.PortGUID); 4046 4047 sgid.gid_prefix = port->pa_sn_prefix; 4048 sgid.gid_guid = port->pa_port_guid; 4049 dgid.gid_prefix = pi->PortInfo.GidPrefix; 4050 dgid.gid_guid = tmp->NodeInfo.PortGUID; 4051 new_gid = ibdm_create_gid_info(port, sgid, 4052 dgid); 4053 if (new_gid == NULL) { 4054 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4055 "create_gid_info failed\n"); 4056 kmem_free(pi, pi_len); 4057 continue; 4058 } 4059 if (node_gid == NULL) { 4060 node_gid = new_gid; 4061 ibdm_add_to_gl_gid(node_gid, node_gid); 4062 } else { 4063 IBTF_DPRINTF_L4("ibdm", 4064 "\tprobe_ioc: new gid"); 4065 temp_gid = kmem_zalloc( 4066 sizeof (ibdm_gid_t), KM_SLEEP); 4067 temp_gid->gid_dgid_hi = 4068 new_gid->gl_dgid_hi; 4069 temp_gid->gid_dgid_lo = 4070 new_gid->gl_dgid_lo; 4071 temp_gid->gid_next = node_gid->gl_gid; 4072 node_gid->gl_gid = temp_gid; 4073 node_gid->gl_ngids++; 4074 } 4075 new_gid->gl_nodeguid = nodeguid; 4076 new_gid->gl_portguid = dgid.gid_guid; 4077 ibdm_addto_glhcalist(new_gid, hca_list); 4078 4079 /* 4080 * Set the state to skipped as all these 4081 * gids point to the same node. 4082 * We (re)probe only one GID below and reset 4083 * state appropriately 4084 */ 4085 new_gid->gl_state = IBDM_GID_PROBING_SKIPPED; 4086 new_gid->gl_devid = (*tmp).NodeInfo.DeviceID; 4087 kmem_free(pi, pi_len); 4088 } 4089 kmem_free(nr, nr_len); 4090 4091 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d " 4092 "reprobe_gid %p new_gid %p node_gid %p", 4093 reprobe_flag, reprobe_gid, new_gid, node_gid); 4094 4095 if (reprobe_flag != 0 && reprobe_gid != NULL) { 4096 int niocs, jj; 4097 ibdm_ioc_info_t *tmp_ioc; 4098 int ioc_matched = 0; 4099 4100 mutex_exit(&ibdm.ibdm_hl_mutex); 4101 mutex_enter(&reprobe_gid->gl_mutex); 4102 reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS; 4103 niocs = 4104 reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots; 4105 reprobe_gid->gl_pending_cmds++; 4106 mutex_exit(&reprobe_gid->gl_mutex); 4107 4108 for (jj = 0; jj < niocs; jj++) { 4109 tmp_ioc = 4110 IBDM_GIDINFO2IOCINFO(reprobe_gid, jj); 4111 if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid) 4112 continue; 4113 4114 ioc_matched = 1; 4115 4116 /* 4117 * Explicitly set gl_reprobe_flag to 0 so that 4118 * IBnex is not notified on completion 4119 */ 4120 mutex_enter(&reprobe_gid->gl_mutex); 4121 reprobe_gid->gl_reprobe_flag = 0; 4122 mutex_exit(&reprobe_gid->gl_mutex); 4123 4124 mutex_enter(&ibdm.ibdm_mutex); 4125 ibdm.ibdm_ngid_probes_in_progress++; 4126 mutex_exit(&ibdm.ibdm_mutex); 4127 if (ibdm_send_ioc_profile(reprobe_gid, jj) != 4128 IBDM_SUCCESS) { 4129 IBTF_DPRINTF_L4("ibdm", 4130 "\tprobe_ioc: " 4131 "send_ioc_profile failed " 4132 "for ioc %d", jj); 4133 ibdm_gid_decr_pending(reprobe_gid); 4134 break; 4135 } 4136 mutex_enter(&ibdm.ibdm_mutex); 4137 ibdm_wait_probe_completion(); 4138 mutex_exit(&ibdm.ibdm_mutex); 4139 break; 4140 } 4141 if (ioc_matched == 0) 4142 
ibdm_gid_decr_pending(reprobe_gid); 4143 else { 4144 mutex_enter(&ibdm.ibdm_hl_mutex); 4145 break; 4146 } 4147 } else if (new_gid != NULL) { 4148 mutex_exit(&ibdm.ibdm_hl_mutex); 4149 node_gid = node_gid ? node_gid : new_gid; 4150 4151 /* 4152 * New or reinserted GID : Enable notification 4153 * to IBnex 4154 */ 4155 mutex_enter(&node_gid->gl_mutex); 4156 node_gid->gl_reprobe_flag = 1; 4157 mutex_exit(&node_gid->gl_mutex); 4158 4159 ibdm_probe_gid(node_gid); 4160 4161 mutex_enter(&ibdm.ibdm_hl_mutex); 4162 } 4163 } 4164 mutex_exit(&ibdm.ibdm_hl_mutex); 4165 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n"); 4166 } 4167 4168 4169 /* 4170 * ibdm_probe_gid() 4171 * Selectively probes the GID 4172 */ 4173 static void 4174 ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info) 4175 { 4176 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:"); 4177 4178 /* 4179 * A Cisco FC GW needs the special handling to get IOUnitInfo. 4180 */ 4181 mutex_enter(&gid_info->gl_mutex); 4182 if (ibdm_is_cisco_switch(gid_info)) { 4183 gid_info->gl_pending_cmds++; 4184 gid_info->gl_state = IBDM_SET_CLASSPORTINFO; 4185 mutex_exit(&gid_info->gl_mutex); 4186 4187 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) { 4188 4189 mutex_enter(&gid_info->gl_mutex); 4190 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4191 --gid_info->gl_pending_cmds; 4192 mutex_exit(&gid_info->gl_mutex); 4193 4194 /* free the hca_list on this gid_info */ 4195 ibdm_delete_glhca_list(gid_info); 4196 gid_info = gid_info->gl_next; 4197 return; 4198 } 4199 4200 mutex_enter(&gid_info->gl_mutex); 4201 ibdm_wait_cisco_probe_completion(gid_info); 4202 4203 IBTF_DPRINTF_L4("ibdm", 4204 "\tprobe_gid: CISCO Wakeup signal received"); 4205 } 4206 4207 /* move on to the 'GET_CLASSPORTINFO' stage */ 4208 gid_info->gl_pending_cmds++; 4209 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 4210 mutex_exit(&gid_info->gl_mutex); 4211 4212 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 4213 4214 mutex_enter(&gid_info->gl_mutex); 4215 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4216 --gid_info->gl_pending_cmds; 4217 mutex_exit(&gid_info->gl_mutex); 4218 4219 /* free the hca_list on this gid_info */ 4220 ibdm_delete_glhca_list(gid_info); 4221 gid_info = gid_info->gl_next; 4222 return; 4223 } 4224 4225 mutex_enter(&ibdm.ibdm_mutex); 4226 ibdm.ibdm_ngid_probes_in_progress++; 4227 gid_info = gid_info->gl_next; 4228 ibdm_wait_probe_completion(); 4229 mutex_exit(&ibdm.ibdm_mutex); 4230 4231 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received"); 4232 } 4233 4234 4235 /* 4236 * ibdm_create_gid_info() 4237 * Allocates a gid_info structure and initializes 4238 * Returns pointer to the structure on success 4239 * and NULL on failure 4240 */ 4241 static ibdm_dp_gidinfo_t * 4242 ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid) 4243 { 4244 uint8_t ii, npaths; 4245 sa_path_record_t *path; 4246 size_t len; 4247 ibdm_pkey_tbl_t *pkey_tbl; 4248 ibdm_dp_gidinfo_t *gid_info = NULL; 4249 int ret; 4250 4251 IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin"); 4252 npaths = 1; 4253 4254 /* query for reversible paths */ 4255 if (port->pa_sa_hdl) 4256 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, 4257 sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, 4258 &len, &path); 4259 else 4260 return (NULL); 4261 4262 if (ret == IBMF_SUCCESS && path) { 4263 ibdm_dump_path_info(path); 4264 4265 gid_info = kmem_zalloc( 4266 sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 4267 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 4268 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, 
NULL); 4269 gid_info->gl_dgid_hi = path->DGID.gid_prefix; 4270 gid_info->gl_dgid_lo = path->DGID.gid_guid; 4271 gid_info->gl_sgid_hi = path->SGID.gid_prefix; 4272 gid_info->gl_sgid_lo = path->SGID.gid_guid; 4273 gid_info->gl_p_key = path->P_Key; 4274 gid_info->gl_sa_hdl = port->pa_sa_hdl; 4275 gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl; 4276 gid_info->gl_slid = path->SLID; 4277 gid_info->gl_dlid = path->DLID; 4278 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 4279 << IBDM_GID_TRANSACTIONID_SHIFT; 4280 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 4281 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 4282 << IBDM_GID_TRANSACTIONID_SHIFT; 4283 4284 gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 4285 for (ii = 0; ii < port->pa_npkeys; ii++) { 4286 if (port->pa_pkey_tbl == NULL) 4287 break; 4288 4289 pkey_tbl = &port->pa_pkey_tbl[ii]; 4290 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 4291 (pkey_tbl->pt_qp_hdl != NULL)) { 4292 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 4293 break; 4294 } 4295 } 4296 kmem_free(path, len); 4297 4298 /* 4299 * QP handle for GID not initialized. No matching Pkey 4300 * was found!! ibdm should *not* hit this case. Flag an 4301 * error and drop the GID if ibdm does encounter this. 4302 */ 4303 if (gid_info->gl_qp_hdl == NULL) { 4304 IBTF_DPRINTF_L2(ibdm_string, 4305 "\tcreate_gid_info: No matching Pkey"); 4306 ibdm_delete_gidinfo(gid_info); 4307 return (NULL); 4308 } 4309 4310 ibdm.ibdm_ngids++; 4311 if (ibdm.ibdm_dp_gidlist_head == NULL) { 4312 ibdm.ibdm_dp_gidlist_head = gid_info; 4313 ibdm.ibdm_dp_gidlist_tail = gid_info; 4314 } else { 4315 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 4316 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 4317 ibdm.ibdm_dp_gidlist_tail = gid_info; 4318 } 4319 } 4320 4321 return (gid_info); 4322 } 4323 4324 4325 /* 4326 * ibdm_get_node_records 4327 * Sends a SA query to get the NODE record 4328 * Returns pointer to the sa_node_record_t on success 4329 * and NULL on failure 4330 */ 4331 static sa_node_record_t * 4332 ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid) 4333 { 4334 sa_node_record_t req, *resp = NULL; 4335 ibmf_saa_access_args_t args; 4336 int ret; 4337 4338 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin"); 4339 4340 bzero(&req, sizeof (sa_node_record_t)); 4341 req.NodeInfo.NodeGUID = guid; 4342 4343 args.sq_attr_id = SA_NODERECORD_ATTRID; 4344 args.sq_access_type = IBMF_SAA_RETRIEVE; 4345 args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID; 4346 args.sq_template = &req; 4347 args.sq_callback = NULL; 4348 args.sq_callback_arg = NULL; 4349 4350 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4351 if (ret != IBMF_SUCCESS) { 4352 IBTF_DPRINTF_L2("ibdm", "\tget_node_records:" 4353 " SA Retrieve Failed: %d", ret); 4354 return (NULL); 4355 } 4356 if ((resp == NULL) || (*length == 0)) { 4357 IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records"); 4358 return (NULL); 4359 } 4360 4361 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx " 4362 "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID); 4363 4364 return (resp); 4365 } 4366 4367 4368 /* 4369 * ibdm_get_portinfo() 4370 * Sends a SA query to get the PortInfo record 4371 * Returns pointer to the sa_portinfo_record_t on success 4372 * and NULL on failure 4373 */ 4374 static sa_portinfo_record_t * 4375 ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid) 4376 { 4377 sa_portinfo_record_t req, *resp = NULL; 4378 ibmf_saa_access_args_t args; 
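	/*
	 * This is a synchronous SA query: the template below matches on
	 * EndportLID only (SA_PORTINFO_COMPMASK_PORTLID), and on success
	 * ibmf_sa_access() returns an allocated PortInfo record that the
	 * caller must later kmem_free().
	 */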
4379 int ret; 4380 4381 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin"); 4382 4383 bzero(&req, sizeof (sa_portinfo_record_t)); 4384 req.EndportLID = lid; 4385 4386 args.sq_attr_id = SA_PORTINFORECORD_ATTRID; 4387 args.sq_access_type = IBMF_SAA_RETRIEVE; 4388 args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 4389 args.sq_template = &req; 4390 args.sq_callback = NULL; 4391 args.sq_callback_arg = NULL; 4392 4393 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4394 if (ret != IBMF_SUCCESS) { 4395 IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:" 4396 " SA Retrieve Failed: 0x%X", ret); 4397 return (NULL); 4398 } 4399 if ((*length == 0) || (resp == NULL)) 4400 return (NULL); 4401 4402 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x", 4403 resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask); 4404 return (resp); 4405 } 4406 4407 4408 /* 4409 * ibdm_ibnex_register_callback 4410 * IB nexus callback routine for HCA attach and detach notification 4411 */ 4412 void 4413 ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback) 4414 { 4415 IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks"); 4416 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4417 ibdm.ibdm_ibnex_callback = ibnex_dm_callback; 4418 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4419 } 4420 4421 4422 /* 4423 * ibdm_ibnex_unregister_callbacks 4424 */ 4425 void 4426 ibdm_ibnex_unregister_callback() 4427 { 4428 IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks"); 4429 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4430 ibdm.ibdm_ibnex_callback = NULL; 4431 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4432 } 4433 4434 4435 /* 4436 * ibdm_ibnex_get_waittime() 4437 * Calculates the wait time based on the last HCA attach time 4438 */ 4439 time_t 4440 ibdm_ibnex_get_waittime(ib_guid_t hca_guid, int *dft_wait) 4441 { 4442 int ii; 4443 time_t temp, wait_time = 0; 4444 ibdm_hca_list_t *hca; 4445 4446 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime hcaguid:%llx" 4447 "\tport settling time %d", hca_guid, *dft_wait); 4448 4449 mutex_enter(&ibdm.ibdm_hl_mutex); 4450 hca = ibdm.ibdm_hca_list_head; 4451 4452 if (hca_guid) { 4453 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4454 if ((hca_guid == hca->hl_hca_guid) && 4455 (hca->hl_nports != hca->hl_nports_active)) { 4456 wait_time = 4457 ddi_get_time() - hca->hl_attach_time; 4458 wait_time = ((wait_time >= *dft_wait) ? 4459 0 : (*dft_wait - wait_time)); 4460 break; 4461 } 4462 hca = hca->hl_next; 4463 } 4464 mutex_exit(&ibdm.ibdm_hl_mutex); 4465 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime %llx", wait_time); 4466 return (wait_time); 4467 } 4468 4469 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4470 if (hca->hl_nports != hca->hl_nports_active) { 4471 temp = ddi_get_time() - hca->hl_attach_time; 4472 temp = ((temp >= *dft_wait) ? 0 : (*dft_wait - temp)); 4473 wait_time = (temp > wait_time) ? 
temp : wait_time; 4474 } 4475 } 4476 mutex_exit(&ibdm.ibdm_hl_mutex); 4477 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime %llx", wait_time); 4478 return (wait_time); 4479 } 4480 4481 4482 /* 4483 * ibdm_ibnex_probe_hcaport 4484 * Probes the presence of HCA port (with HCA dip and port number) 4485 * Returns port attributes structure on SUCCESS 4486 */ 4487 ibdm_port_attr_t * 4488 ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num) 4489 { 4490 int ii, jj; 4491 ibdm_hca_list_t *hca_list; 4492 ibdm_port_attr_t *port_attr; 4493 4494 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:"); 4495 4496 mutex_enter(&ibdm.ibdm_hl_mutex); 4497 hca_list = ibdm.ibdm_hca_list_head; 4498 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4499 if (hca_list->hl_hca_guid == hca_guid) { 4500 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4501 if (hca_list->hl_port_attr[jj].pa_port_num == 4502 port_num) { 4503 break; 4504 } 4505 } 4506 if (jj != hca_list->hl_nports) 4507 break; 4508 } 4509 hca_list = hca_list->hl_next; 4510 } 4511 if (ii == ibdm.ibdm_hca_count) { 4512 IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found"); 4513 mutex_exit(&ibdm.ibdm_hl_mutex); 4514 return (NULL); 4515 } 4516 port_attr = (ibdm_port_attr_t *)kmem_zalloc( 4517 sizeof (ibdm_port_attr_t), KM_SLEEP); 4518 bcopy((char *)&hca_list->hl_port_attr[jj], 4519 port_attr, sizeof (ibdm_port_attr_t)); 4520 ibdm_update_port_attr(port_attr); 4521 4522 mutex_exit(&ibdm.ibdm_hl_mutex); 4523 return (port_attr); 4524 } 4525 4526 4527 /* 4528 * ibdm_ibnex_get_port_attrs 4529 * Scan all HCAs for a matching port_guid. 4530 * Returns "port attributes" structure on success. 4531 */ 4532 ibdm_port_attr_t * 4533 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid) 4534 { 4535 int ii, jj; 4536 ibdm_hca_list_t *hca_list; 4537 ibdm_port_attr_t *port_attr; 4538 4539 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:"); 4540 4541 mutex_enter(&ibdm.ibdm_hl_mutex); 4542 hca_list = ibdm.ibdm_hca_list_head; 4543 4544 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4545 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4546 if (hca_list->hl_port_attr[jj].pa_port_guid == 4547 port_guid) { 4548 break; 4549 } 4550 } 4551 if (jj != hca_list->hl_nports) 4552 break; 4553 hca_list = hca_list->hl_next; 4554 } 4555 4556 if (ii == ibdm.ibdm_hca_count) { 4557 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found"); 4558 mutex_exit(&ibdm.ibdm_hl_mutex); 4559 return (NULL); 4560 } 4561 4562 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t), 4563 KM_SLEEP); 4564 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr, 4565 sizeof (ibdm_port_attr_t)); 4566 ibdm_update_port_attr(port_attr); 4567 4568 mutex_exit(&ibdm.ibdm_hl_mutex); 4569 return (port_attr); 4570 } 4571 4572 4573 /* 4574 * ibdm_ibnex_free_port_attr() 4575 */ 4576 void 4577 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr) 4578 { 4579 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:"); 4580 if (port_attr) { 4581 if (port_attr->pa_pkey_tbl != NULL) { 4582 kmem_free(port_attr->pa_pkey_tbl, 4583 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t))); 4584 } 4585 kmem_free(port_attr, sizeof (ibdm_port_attr_t)); 4586 } 4587 } 4588 4589 4590 /* 4591 * ibdm_ibnex_get_hca_list() 4592 * Returns portinfo for all the port for all the HCA's 4593 */ 4594 void 4595 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count) 4596 { 4597 ibdm_hca_list_t *head = NULL, *temp, *temp1; 4598 int ii; 4599 4600 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:"); 4601 4602 mutex_enter(&ibdm.ibdm_hl_mutex); 4603 temp = 
ibdm.ibdm_hca_list_head; 4604 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4605 temp1 = ibdm_dup_hca_attr(temp); 4606 temp1->hl_next = head; 4607 head = temp1; 4608 temp = temp->hl_next; 4609 } 4610 *count = ibdm.ibdm_hca_count; 4611 *hca = head; 4612 mutex_exit(&ibdm.ibdm_hl_mutex); 4613 } 4614 4615 4616 /* 4617 * ibdm_ibnex_get_hca_info_by_guid() 4618 */ 4619 ibdm_hca_list_t * 4620 ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid) 4621 { 4622 ibdm_hca_list_t *head = NULL, *hca = NULL; 4623 4624 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip"); 4625 4626 mutex_enter(&ibdm.ibdm_hl_mutex); 4627 head = ibdm.ibdm_hca_list_head; 4628 while (head) { 4629 if (head->hl_hca_guid == hca_guid) { 4630 hca = ibdm_dup_hca_attr(head); 4631 hca->hl_next = NULL; 4632 break; 4633 } 4634 head = head->hl_next; 4635 } 4636 mutex_exit(&ibdm.ibdm_hl_mutex); 4637 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip %p", hca); 4638 return (hca); 4639 } 4640 4641 4642 /* 4643 * ibdm_dup_hca_attr() 4644 * Allocate a new HCA attribute strucuture and initialize 4645 * hca attribute structure with the incoming HCA attributes 4646 * returned the allocated hca attributes. 4647 */ 4648 static ibdm_hca_list_t * 4649 ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca) 4650 { 4651 int len; 4652 ibdm_hca_list_t *out_hca; 4653 4654 len = sizeof (ibdm_hca_list_t) + 4655 (in_hca->hl_nports * sizeof (ibdm_port_attr_t)); 4656 IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len); 4657 out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP); 4658 bcopy((char *)in_hca, 4659 (char *)out_hca, sizeof (ibdm_hca_list_t)); 4660 if (in_hca->hl_nports) { 4661 out_hca->hl_port_attr = (ibdm_port_attr_t *) 4662 ((char *)out_hca + sizeof (ibdm_hca_list_t)); 4663 bcopy((char *)in_hca->hl_port_attr, 4664 (char *)out_hca->hl_port_attr, 4665 (in_hca->hl_nports * sizeof (ibdm_port_attr_t))); 4666 for (len = 0; len < out_hca->hl_nports; len++) 4667 ibdm_update_port_attr(&out_hca->hl_port_attr[len]); 4668 } 4669 return (out_hca); 4670 } 4671 4672 4673 /* 4674 * ibdm_ibnex_free_hca_list() 4675 * Free one/more HCA lists 4676 */ 4677 void 4678 ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list) 4679 { 4680 int ii; 4681 size_t len; 4682 ibdm_hca_list_t *temp; 4683 ibdm_port_attr_t *port; 4684 4685 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:"); 4686 ASSERT(hca_list); 4687 while (hca_list) { 4688 temp = hca_list; 4689 hca_list = hca_list->hl_next; 4690 for (ii = 0; ii < temp->hl_nports; ii++) { 4691 port = &temp->hl_port_attr[ii]; 4692 len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 4693 if (len != 0) 4694 kmem_free(port->pa_pkey_tbl, len); 4695 } 4696 len = sizeof (ibdm_hca_list_t) + (temp->hl_nports * 4697 sizeof (ibdm_port_attr_t)); 4698 kmem_free(temp, len); 4699 } 4700 } 4701 4702 4703 /* 4704 * ibdm_ibnex_probe_iocguid() 4705 * Probes the IOC on the fabric and returns the IOC information 4706 * if present. 
Otherwise, NULL is returned 4707 */ 4708 /* ARGSUSED */ 4709 ibdm_ioc_info_t * 4710 ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag) 4711 { 4712 int k; 4713 ibdm_ioc_info_t *ioc_info; 4714 ibdm_dp_gidinfo_t *gid_info; 4715 4716 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin", 4717 iou, ioc_guid, reprobe_flag); 4718 /* Check whether we know this already */ 4719 ioc_info = ibdm_ibnex_get_ioc_info(ioc_guid); 4720 if (ioc_info == NULL) { 4721 mutex_enter(&ibdm.ibdm_mutex); 4722 while (ibdm.ibdm_busy & IBDM_BUSY) 4723 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4724 ibdm.ibdm_busy |= IBDM_BUSY; 4725 mutex_exit(&ibdm.ibdm_mutex); 4726 ibdm_probe_ioc(iou, ioc_guid, 0); 4727 mutex_enter(&ibdm.ibdm_mutex); 4728 ibdm.ibdm_busy &= ~IBDM_BUSY; 4729 cv_broadcast(&ibdm.ibdm_busy_cv); 4730 mutex_exit(&ibdm.ibdm_mutex); 4731 ioc_info = ibdm_ibnex_get_ioc_info(ioc_guid); 4732 } else if (reprobe_flag) { /* Handle Reprobe for the IOC */ 4733 /* Free the ioc_list before reprobe; and cancel any timers */ 4734 mutex_enter(&ibdm.ibdm_mutex); 4735 if (ioc_info->ioc_timeout_id) { 4736 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4737 "ioc_timeout_id = 0x%x", 4738 ioc_info->ioc_timeout_id); 4739 if (untimeout(ioc_info->ioc_timeout_id) == -1) { 4740 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4741 "untimeout ioc_timeout_id failed"); 4742 } 4743 ioc_info->ioc_timeout_id = 0; 4744 } 4745 if (ioc_info->ioc_dc_timeout_id) { 4746 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4747 "ioc_dc_timeout_id = 0x%x", 4748 ioc_info->ioc_dc_timeout_id); 4749 if (untimeout(ioc_info->ioc_dc_timeout_id) == -1) { 4750 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4751 "untimeout ioc_dc_timeout_id failed"); 4752 } 4753 ioc_info->ioc_dc_timeout_id = 0; 4754 } 4755 for (k = 0; k < ioc_info->ioc_profile.ioc_service_entries; k++) 4756 if (ioc_info->ioc_serv[k].se_timeout_id) { 4757 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4758 "ioc_info->ioc_serv[k].se_timeout_id = %x", 4759 k, ioc_info->ioc_serv[k].se_timeout_id); 4760 if (untimeout(ioc_info->ioc_serv[k]. 4761 se_timeout_id) == -1) { 4762 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4763 "untimeout se_timeout_id %d " 4764 "failed", k); 4765 } 4766 ioc_info->ioc_serv[k].se_timeout_id = 0; 4767 } 4768 mutex_exit(&ibdm.ibdm_mutex); 4769 ibdm_ibnex_free_ioc_list(ioc_info); 4770 4771 mutex_enter(&ibdm.ibdm_mutex); 4772 while (ibdm.ibdm_busy & IBDM_BUSY) 4773 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4774 ibdm.ibdm_busy |= IBDM_BUSY; 4775 mutex_exit(&ibdm.ibdm_mutex); 4776 4777 ibdm_probe_ioc(iou, ioc_guid, 1); 4778 4779 /* 4780 * Skip if gl_reprobe_flag is set, this will be 4781 * a re-inserted / new GID, for which notifications 4782 * have already been send. 
4783 */ 4784 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 4785 gid_info = gid_info->gl_next) { 4786 uint8_t ii, niocs; 4787 ibdm_ioc_info_t *ioc; 4788 4789 if (gid_info->gl_iou == NULL) 4790 continue; 4791 4792 if (gid_info->gl_reprobe_flag) { 4793 gid_info->gl_reprobe_flag = 0; 4794 continue; 4795 } 4796 4797 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 4798 for (ii = 0; ii < niocs; ii++) { 4799 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 4800 if (ioc->ioc_profile.ioc_guid == ioc_guid) { 4801 mutex_enter(&ibdm.ibdm_mutex); 4802 ibdm_reprobe_update_port_srv(ioc, 4803 gid_info); 4804 mutex_exit(&ibdm.ibdm_mutex); 4805 } 4806 } 4807 } 4808 mutex_enter(&ibdm.ibdm_mutex); 4809 ibdm.ibdm_busy &= ~IBDM_BUSY; 4810 cv_broadcast(&ibdm.ibdm_busy_cv); 4811 mutex_exit(&ibdm.ibdm_mutex); 4812 4813 ioc_info = ibdm_ibnex_get_ioc_info(ioc_guid); 4814 } 4815 return (ioc_info); 4816 } 4817 4818 4819 /* 4820 * ibdm_ibnex_get_ioc_info() 4821 * Returns pointer to ibdm_ioc_info_t if it finds 4822 * matching record for the ioc_guid, otherwise NULL 4823 * is returned 4824 */ 4825 ibdm_ioc_info_t * 4826 ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid) 4827 { 4828 int ii; 4829 ibdm_ioc_info_t *ioc = NULL, *tmp = NULL; 4830 ibdm_dp_gidinfo_t *gid_list; 4831 ib_dm_io_unitinfo_t *iou; 4832 4833 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid); 4834 4835 mutex_enter(&ibdm.ibdm_mutex); 4836 while (ibdm.ibdm_busy & IBDM_BUSY) 4837 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4838 ibdm.ibdm_busy |= IBDM_BUSY; 4839 4840 gid_list = ibdm.ibdm_dp_gidlist_head; 4841 while (gid_list) { 4842 mutex_enter(&gid_list->gl_mutex); 4843 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 4844 mutex_exit(&gid_list->gl_mutex); 4845 gid_list = gid_list->gl_next; 4846 continue; 4847 } 4848 if (gid_list->gl_iou == NULL) { 4849 IBTF_DPRINTF_L2("ibdm", 4850 "\tget_ioc_info: No IOU info"); 4851 mutex_exit(&gid_list->gl_mutex); 4852 gid_list = gid_list->gl_next; 4853 continue; 4854 } 4855 iou = &gid_list->gl_iou->iou_info; 4856 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 4857 tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii); 4858 if ((tmp->ioc_profile.ioc_guid == ioc_guid) && 4859 (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) { 4860 ioc = ibdm_dup_ioc_info(tmp, gid_list); 4861 mutex_exit(&gid_list->gl_mutex); 4862 ibdm.ibdm_busy &= ~IBDM_BUSY; 4863 cv_broadcast(&ibdm.ibdm_busy_cv); 4864 mutex_exit(&ibdm.ibdm_mutex); 4865 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End"); 4866 return (ioc); 4867 } 4868 } 4869 if (ii == iou->iou_num_ctrl_slots) 4870 ioc = NULL; 4871 4872 mutex_exit(&gid_list->gl_mutex); 4873 gid_list = gid_list->gl_next; 4874 } 4875 4876 ibdm.ibdm_busy &= ~IBDM_BUSY; 4877 cv_broadcast(&ibdm.ibdm_busy_cv); 4878 mutex_exit(&ibdm.ibdm_mutex); 4879 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End"); 4880 return (ioc); 4881 } 4882 4883 4884 /* 4885 * ibdm_ibnex_get_ioc_count() 4886 * Returns number of ibdm_ioc_info_t it finds 4887 */ 4888 int 4889 ibdm_ibnex_get_ioc_count(void) 4890 { 4891 int count = 0, k; 4892 ibdm_ioc_info_t *ioc; 4893 ibdm_dp_gidinfo_t *gid_list; 4894 4895 mutex_enter(&ibdm.ibdm_mutex); 4896 ibdm_sweep_fabric(0); 4897 4898 while (ibdm.ibdm_busy & IBDM_BUSY) 4899 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4900 ibdm.ibdm_busy |= IBDM_BUSY; 4901 4902 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 4903 gid_list = gid_list->gl_next) { 4904 mutex_enter(&gid_list->gl_mutex); 4905 if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) || 4906 (gid_list->gl_iou == NULL)) { 
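			/*
			 * Skip this GID: probing has not completed or no IOU
			 * info was retrieved for it.  Only IOCs in the
			 * PROBE_SUCCESS state are counted further below.
			 */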
4907 mutex_exit(&gid_list->gl_mutex); 4908 continue; 4909 } 4910 for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots; 4911 k++) { 4912 ioc = IBDM_GIDINFO2IOCINFO(gid_list, k); 4913 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) 4914 ++count; 4915 } 4916 mutex_exit(&gid_list->gl_mutex); 4917 } 4918 ibdm.ibdm_busy &= ~IBDM_BUSY; 4919 cv_broadcast(&ibdm.ibdm_busy_cv); 4920 mutex_exit(&ibdm.ibdm_mutex); 4921 4922 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count); 4923 return (count); 4924 } 4925 4926 4927 /* 4928 * ibdm_ibnex_get_ioc_list() 4929 * Returns information about all the IOCs present on the fabric. 4930 * Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL. 4931 * Does not sweep fabric if DONOT_PROBE is set 4932 */ 4933 ibdm_ioc_info_t * 4934 ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag) 4935 { 4936 int ii; 4937 ibdm_ioc_info_t *ioc_list = NULL, *tmp, *ioc; 4938 ibdm_dp_gidinfo_t *gid_list; 4939 ib_dm_io_unitinfo_t *iou; 4940 4941 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter"); 4942 4943 mutex_enter(&ibdm.ibdm_mutex); 4944 if (list_flag != IBDM_IBNEX_DONOT_PROBE) 4945 ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL); 4946 4947 while (ibdm.ibdm_busy & IBDM_BUSY) 4948 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4949 ibdm.ibdm_busy |= IBDM_BUSY; 4950 4951 gid_list = ibdm.ibdm_dp_gidlist_head; 4952 while (gid_list) { 4953 mutex_enter(&gid_list->gl_mutex); 4954 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 4955 mutex_exit(&gid_list->gl_mutex); 4956 gid_list = gid_list->gl_next; 4957 continue; 4958 } 4959 if (gid_list->gl_iou == NULL) { 4960 IBTF_DPRINTF_L2("ibdm", 4961 "\tget_ioc_list: No IOU info"); 4962 mutex_exit(&gid_list->gl_mutex); 4963 gid_list = gid_list->gl_next; 4964 continue; 4965 } 4966 iou = &gid_list->gl_iou->iou_info; 4967 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 4968 ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii); 4969 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 4970 tmp = ibdm_dup_ioc_info(ioc, gid_list); 4971 tmp->ioc_next = ioc_list; 4972 ioc_list = tmp; 4973 } 4974 } 4975 mutex_exit(&gid_list->gl_mutex); 4976 gid_list = gid_list->gl_next; 4977 } 4978 ibdm.ibdm_busy &= ~IBDM_BUSY; 4979 cv_broadcast(&ibdm.ibdm_busy_cv); 4980 mutex_exit(&ibdm.ibdm_mutex); 4981 4982 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End"); 4983 return (ioc_list); 4984 } 4985 4986 /* 4987 * ibdm_dup_ioc_info() 4988 * Duplicate the IOC information and return the IOC 4989 * information. 
4990 */ 4991 static ibdm_ioc_info_t * 4992 ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list) 4993 { 4994 ibdm_ioc_info_t *out_ioc; 4995 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc)); 4996 ASSERT(MUTEX_HELD(&gid_list->gl_mutex)); 4997 4998 out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP); 4999 bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t)); 5000 ibdm_update_ioc_port_gidlist(out_ioc, gid_list); 5001 out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid; 5002 out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode; 5003 5004 return (out_ioc); 5005 } 5006 5007 5008 /* 5009 * ibdm_free_ioc_list() 5010 * Deallocate memory for IOC list structure 5011 */ 5012 void 5013 ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc) 5014 { 5015 ibdm_ioc_info_t *temp; 5016 5017 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:"); 5018 while (ioc) { 5019 temp = ioc; 5020 ioc = ioc->ioc_next; 5021 kmem_free(temp->ioc_gid_list, 5022 (sizeof (ibdm_gid_t) * temp->ioc_nportgids)); 5023 if (temp->ioc_hca_list) 5024 ibdm_ibnex_free_hca_list(temp->ioc_hca_list); 5025 kmem_free(temp, sizeof (ibdm_ioc_info_t)); 5026 } 5027 } 5028 5029 5030 /* 5031 * ibdm_ibnex_update_pkey_tbls 5032 * Updates the DM P_Key database. 5033 * NOTE: Two cases are handled here: P_Key being added or removed. 5034 * 5035 * Arguments : NONE 5036 * Return Values : NONE 5037 */ 5038 void 5039 ibdm_ibnex_update_pkey_tbls(void) 5040 { 5041 int h, pp, pidx; 5042 uint_t nports; 5043 uint_t size; 5044 ib_pkey_t new_pkey; 5045 ib_pkey_t *orig_pkey; 5046 ibdm_hca_list_t *hca_list; 5047 ibdm_port_attr_t *port; 5048 ibt_hca_portinfo_t *pinfop; 5049 5050 IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:"); 5051 5052 mutex_enter(&ibdm.ibdm_hl_mutex); 5053 hca_list = ibdm.ibdm_hca_list_head; 5054 5055 for (h = 0; h < ibdm.ibdm_hca_count; h++) { 5056 5057 /* This updates P_Key Tables for all ports of this HCA */ 5058 (void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop, 5059 &nports, &size); 5060 5061 /* number of ports shouldn't have changed */ 5062 ASSERT(nports == hca_list->hl_nports); 5063 5064 for (pp = 0; pp < hca_list->hl_nports; pp++) { 5065 port = &hca_list->hl_port_attr[pp]; 5066 5067 /* 5068 * First figure out the P_Keys from IBTL. 5069 * Three things could have happened: 5070 * New P_Keys added 5071 * Existing P_Keys removed 5072 * Both of the above two 5073 * 5074 * Loop through the P_Key Indices and check if a 5075 * give P_Key_Ix matches that of the one seen by 5076 * IBDM. If they match no action is needed. 5077 * 5078 * If they don't match: 5079 * 1. if orig_pkey is invalid and new_pkey is valid 5080 * ---> add new_pkey to DM database 5081 * 2. if orig_pkey is valid and new_pkey is invalid 5082 * ---> remove orig_pkey from DM database 5083 * 3. if orig_pkey and new_pkey are both valid: 5084 * ---> remov orig_pkey from DM database 5085 * ---> add new_pkey to DM database 5086 * 4. if orig_pkey and new_pkey are both invalid: 5087 * ---> do nothing. Updated DM database. 
5088 */ 5089 5090 for (pidx = 0; pidx < port->pa_npkeys; pidx++) { 5091 new_pkey = pinfop[pp].p_pkey_tbl[pidx]; 5092 orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey; 5093 5094 /* keys match - do nothing */ 5095 if (*orig_pkey == new_pkey) 5096 continue; 5097 5098 if (IBDM_INVALID_PKEY(*orig_pkey) && 5099 !IBDM_INVALID_PKEY(new_pkey)) { 5100 /* P_Key was added */ 5101 IBTF_DPRINTF_L5("ibdm", 5102 "\tibnex_update_pkey_tbls: new " 5103 "P_Key added = 0x%x", new_pkey); 5104 *orig_pkey = new_pkey; 5105 ibdm_port_attr_ibmf_init(port, 5106 new_pkey, pp); 5107 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 5108 IBDM_INVALID_PKEY(new_pkey)) { 5109 /* P_Key was removed */ 5110 IBTF_DPRINTF_L5("ibdm", 5111 "\tibnex_update_pkey_tbls: P_Key " 5112 "removed = 0x%x", *orig_pkey); 5113 *orig_pkey = new_pkey; 5114 (void) ibdm_port_attr_ibmf_fini(port, 5115 pidx); 5116 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 5117 !IBDM_INVALID_PKEY(new_pkey)) { 5118 /* P_Key were replaced */ 5119 IBTF_DPRINTF_L5("ibdm", 5120 "\tibnex_update_pkey_tbls: P_Key " 5121 "replaced 0x%x with 0x%x", 5122 *orig_pkey, new_pkey); 5123 (void) ibdm_port_attr_ibmf_fini(port, 5124 pidx); 5125 *orig_pkey = new_pkey; 5126 ibdm_port_attr_ibmf_init(port, 5127 new_pkey, pp); 5128 } else { 5129 /* 5130 * P_Keys are invalid 5131 * set anyway to reflect if 5132 * INVALID_FULL was changed to 5133 * INVALID_LIMITED or vice-versa. 5134 */ 5135 *orig_pkey = new_pkey; 5136 } /* end of else */ 5137 5138 } /* loop of p_key index */ 5139 5140 } /* loop of #ports of HCA */ 5141 5142 ibt_free_portinfo(pinfop, size); 5143 hca_list = hca_list->hl_next; 5144 5145 } /* loop for all HCAs in the system */ 5146 5147 mutex_exit(&ibdm.ibdm_hl_mutex); 5148 } 5149 5150 5151 /* 5152 * ibdm_send_ioc_profile() 5153 * Send IOC Controller Profile request. When the request is completed 5154 * IBMF calls ibdm_process_incoming_mad routine to inform about 5155 * the completion. 5156 */ 5157 static int 5158 ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no) 5159 { 5160 ibmf_msg_t *msg; 5161 ib_mad_hdr_t *hdr; 5162 ibdm_ioc_info_t *ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]); 5163 ibdm_timeout_cb_args_t *cb_args; 5164 5165 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: " 5166 "gid info 0x%p, ioc_no = %d", gid_info, ioc_no); 5167 5168 /* 5169 * Send command to get IOC profile. 5170 * Allocate a IBMF packet and initialize the packet. 
5171 */ 5172 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 5173 &msg) != IBMF_SUCCESS) { 5174 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: pkt alloc fail"); 5175 return (IBDM_FAILURE); 5176 } 5177 5178 ibdm_alloc_send_buffers(msg); 5179 5180 mutex_enter(&gid_info->gl_mutex); 5181 ibdm_bump_transactionID(gid_info); 5182 mutex_exit(&gid_info->gl_mutex); 5183 5184 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 5185 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 5186 if (gid_info->gl_redirected == B_TRUE) { 5187 if (gid_info->gl_redirect_dlid != 0) { 5188 msg->im_local_addr.ia_remote_lid = 5189 gid_info->gl_redirect_dlid; 5190 } 5191 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 5192 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 5193 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 5194 } else { 5195 msg->im_local_addr.ia_remote_qno = 1; 5196 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 5197 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 5198 } 5199 5200 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 5201 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 5202 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 5203 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 5204 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 5205 hdr->Status = 0; 5206 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 5207 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 5208 hdr->AttributeModifier = h2b32(ioc_no + 1); 5209 5210 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 5211 cb_args = &ioc_info->ioc_cb_args; 5212 cb_args->cb_gid_info = gid_info; 5213 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 5214 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 5215 cb_args->cb_ioc_num = ioc_no; 5216 5217 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 5218 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 5219 5220 IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:" 5221 "timeout %x", ioc_info->ioc_timeout_id); 5222 5223 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 5224 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 5225 IBTF_DPRINTF_L2("ibdm", 5226 "\tsend_ioc_profile: msg transport failed"); 5227 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 5228 } 5229 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 5230 return (IBDM_SUCCESS); 5231 } 5232 5233 5234 /* 5235 * ibdm_port_reachable 5236 * Returns B_TRUE if the port GID is reachable by sending 5237 * a SA query to get the NODE record for this port GUID. 5238 */ 5239 static boolean_t 5240 ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid) 5241 { 5242 sa_node_record_t *resp; 5243 size_t length; 5244 5245 /* 5246 * Verify if it's reachable by getting the node record. 5247 */ 5248 if (ibdm_get_node_record_by_port(sa_hdl, guid, &resp, &length) == 5249 IBDM_SUCCESS) { 5250 kmem_free(resp, length); 5251 return (B_TRUE); 5252 } 5253 return (B_FALSE); 5254 } 5255 5256 /* 5257 * ibdm_get_node_record_by_port 5258 * Sends a SA query to get the NODE record for port GUID 5259 * Returns IBDM_SUCCESS if the port GID is reachable. 5260 * 5261 * Note: the caller must be responsible for freeing the resource 5262 * by calling kmem_free(resp, length) later. 
5263 */ 5264 static int 5265 ibdm_get_node_record_by_port(ibmf_saa_handle_t sa_hdl, ib_guid_t guid, 5266 sa_node_record_t **resp, size_t *length) 5267 { 5268 sa_node_record_t req; 5269 ibmf_saa_access_args_t args; 5270 int ret; 5271 ASSERT(resp != NULL && length != NULL); 5272 5273 IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx", 5274 guid); 5275 5276 bzero(&req, sizeof (sa_node_record_t)); 5277 req.NodeInfo.PortGUID = guid; 5278 5279 args.sq_attr_id = SA_NODERECORD_ATTRID; 5280 args.sq_access_type = IBMF_SAA_RETRIEVE; 5281 args.sq_component_mask = SA_NODEINFO_COMPMASK_PORTGUID; 5282 args.sq_template = &req; 5283 args.sq_callback = NULL; 5284 args.sq_callback_arg = NULL; 5285 5286 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) resp); 5287 if (ret != IBMF_SUCCESS) { 5288 IBTF_DPRINTF_L2("ibdm", "\tport_reachable:" 5289 " SA Retrieve Failed: %d", ret); 5290 return (IBDM_FAILURE); 5291 } 5292 /* 5293 * There is one NodeRecord on each endport on a subnet. 5294 */ 5295 ASSERT(*length == sizeof (sa_node_record_t)); 5296 5297 if (*resp == NULL || *length == 0) { 5298 IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records"); 5299 return (IBDM_FAILURE); 5300 } 5301 5302 return (IBDM_SUCCESS); 5303 } 5304 5305 5306 /* 5307 * Update the gidlist for all affected IOCs when GID becomes 5308 * available/unavailable. 5309 * 5310 * Parameters : 5311 * gidinfo - Incoming / Outgoing GID. 5312 * add_flag - 1 for GID added, 0 for GID removed. 5313 * - (-1) : IOC gid list updated, ioc_list required. 5314 * 5315 * This function gets the GID for the node GUID corresponding to the 5316 * port GID. Gets the IOU info 5317 */ 5318 static ibdm_ioc_info_t * 5319 ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag) 5320 { 5321 ibdm_dp_gidinfo_t *node_gid = NULL; 5322 uint8_t niocs, ii; 5323 ibdm_ioc_info_t *ioc, *ioc_list = NULL, *tmp; 5324 5325 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist"); 5326 5327 switch (avail_flag) { 5328 case 1 : 5329 node_gid = ibdm_check_dest_nodeguid(gid_info); 5330 break; 5331 case 0 : 5332 node_gid = ibdm_handle_gid_rm(gid_info); 5333 break; 5334 case -1 : 5335 node_gid = gid_info; 5336 break; 5337 default : 5338 break; 5339 } 5340 5341 if (node_gid == NULL) { 5342 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: " 5343 "No node GID found, port gid 0x%p, avail_flag %d", 5344 gid_info, avail_flag); 5345 return (NULL); 5346 } 5347 5348 mutex_enter(&node_gid->gl_mutex); 5349 if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE && 5350 node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) || 5351 node_gid->gl_iou == NULL) { 5352 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist " 5353 "gl_state %x, gl_iou %p", node_gid->gl_state, 5354 node_gid->gl_iou); 5355 mutex_exit(&node_gid->gl_mutex); 5356 return (NULL); 5357 } 5358 5359 niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots; 5360 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x", 5361 niocs); 5362 for (ii = 0; ii < niocs; ii++) { 5363 ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii); 5364 /* 5365 * Skip IOCs for which probe is not complete or 5366 * reprobe is progress 5367 */ 5368 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5369 tmp = ibdm_dup_ioc_info(ioc, node_gid); 5370 tmp->ioc_info_updated.ib_gid_prop_updated = 1; 5371 tmp->ioc_next = ioc_list; 5372 ioc_list = tmp; 5373 } 5374 } 5375 mutex_exit(&node_gid->gl_mutex); 5376 5377 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p", 5378 ioc_list); 5379 return (ioc_list); 5380 } 5381 5382 /* 5383 * ibdm_saa_event_cb : 5384 * Event handling which 
does *not* require ibdm_hl_mutex to be
 *	held is executed in the same thread. This is to prevent
 *	deadlocks with HCA port down notifications, which hold the
 *	ibdm_hl_mutex.
 *
 *	The GID_AVAILABLE event is handled here. A taskq is spawned to
 *	handle GID_UNAVAILABLE.
 *
 *	A new mutex, ibdm_ibnex_mutex, has been introduced to protect
 *	ibnex_callback. This has been done to prevent any possible
 *	deadlock (described above) while handling GID_AVAILABLE.
 *
 *	IBMF calls the event callback for an HCA port. The SA handle
 *	for this port remains valid until the callback returns, so it
 *	is safe for IBDM to call back into IBMF using that SA handle.
 *
 *	IBDM additionally checks (SA handle != NULL) before
 *	calling IBMF.
 */
/*ARGSUSED*/
static void
ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle,
    ibmf_saa_subnet_event_t ibmf_saa_event,
    ibmf_saa_event_details_t *event_details, void *callback_arg)
{
	ibdm_saa_event_arg_t	*event_arg;
	ib_gid_t		sgid, dgid;
	ibdm_port_attr_t	*hca_port;
	ibdm_dp_gidinfo_t	*gid_info, *node_gid_info = NULL;
	sa_node_record_t	*nrec;
	size_t			length;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg));

	hca_port = (ibdm_port_attr_t *)callback_arg;

	IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n",
	    ibmf_saa_handle, ibmf_saa_event, event_details,
	    callback_arg);
#ifdef DEBUG
	if (ibdm_ignore_saa_event)
		return;
#endif

	if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) {
		/*
		 * Ensure no other probe / sweep fabric is in
		 * progress.
		 */
		mutex_enter(&ibdm.ibdm_mutex);
		while (ibdm.ibdm_busy & IBDM_BUSY)
			cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
		ibdm.ibdm_busy |= IBDM_BUSY;
		mutex_exit(&ibdm.ibdm_mutex);

		/*
		 * If we already know about this GID, return.
		 * GID_AVAILABLE may be reported for multiple HCA
		 * ports.
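		 *
		 * For example, when a remote port joins a subnet that is
		 * reachable through two local HCA ports, each port's SA
		 * session may deliver its own GID_AVAILABLE for the same
		 * (gid_prefix, gid_guid); only the first callback creates
		 * a gid_info entry, and later ones release the busy flag
		 * and return here.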
5443 */ 5444 if ((ibdm_check_dgid(event_details->ie_gid.gid_guid, 5445 event_details->ie_gid.gid_prefix)) != NULL) { 5446 mutex_enter(&ibdm.ibdm_mutex); 5447 ibdm.ibdm_busy &= ~IBDM_BUSY; 5448 cv_broadcast(&ibdm.ibdm_busy_cv); 5449 mutex_exit(&ibdm.ibdm_mutex); 5450 return; 5451 } 5452 5453 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5454 "Insertion notified", 5455 event_details->ie_gid.gid_prefix, 5456 event_details->ie_gid.gid_guid); 5457 5458 /* This is a new gid, insert it to GID list */ 5459 sgid.gid_prefix = hca_port->pa_sn_prefix; 5460 sgid.gid_guid = hca_port->pa_port_guid; 5461 dgid.gid_prefix = event_details->ie_gid.gid_prefix; 5462 dgid.gid_guid = event_details->ie_gid.gid_guid; 5463 gid_info = ibdm_create_gid_info(hca_port, sgid, dgid); 5464 if (gid_info == NULL) { 5465 IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: " 5466 "create_gid_info returned NULL"); 5467 mutex_enter(&ibdm.ibdm_mutex); 5468 ibdm.ibdm_busy &= ~IBDM_BUSY; 5469 cv_broadcast(&ibdm.ibdm_busy_cv); 5470 mutex_exit(&ibdm.ibdm_mutex); 5471 return; 5472 } 5473 mutex_enter(&gid_info->gl_mutex); 5474 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 5475 mutex_exit(&gid_info->gl_mutex); 5476 5477 /* Get the node GUID */ 5478 if (ibdm_get_node_record_by_port(ibmf_saa_handle, dgid.gid_guid, 5479 &nrec, &length) != IBDM_SUCCESS) { 5480 /* 5481 * Set the state to PROBE_NOT_DONE for the 5482 * next sweep to probe it 5483 */ 5484 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: " 5485 "Skipping GID : port GUID not found"); 5486 mutex_enter(&gid_info->gl_mutex); 5487 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5488 mutex_exit(&gid_info->gl_mutex); 5489 mutex_enter(&ibdm.ibdm_mutex); 5490 ibdm.ibdm_busy &= ~IBDM_BUSY; 5491 cv_broadcast(&ibdm.ibdm_busy_cv); 5492 mutex_exit(&ibdm.ibdm_mutex); 5493 return; 5494 } 5495 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 5496 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 5497 kmem_free(nrec, length); 5498 gid_info->gl_portguid = dgid.gid_guid; 5499 5500 /* 5501 * Get the gid info with the same node GUID. 5502 */ 5503 mutex_enter(&ibdm.ibdm_mutex); 5504 node_gid_info = ibdm.ibdm_dp_gidlist_head; 5505 while (node_gid_info) { 5506 if (node_gid_info->gl_nodeguid == 5507 gid_info->gl_nodeguid && 5508 node_gid_info->gl_iou != NULL) { 5509 break; 5510 } 5511 node_gid_info = node_gid_info->gl_next; 5512 } 5513 mutex_exit(&ibdm.ibdm_mutex); 5514 5515 /* 5516 * Handling a new GID requires filling of gl_hca_list. 5517 * This require ibdm hca_list to be parsed and hence 5518 * holding the ibdm_hl_mutex. Spawning a new thread to 5519 * handle this. 
5520 */ 5521 if (node_gid_info == NULL) { 5522 if (taskq_dispatch(system_taskq, 5523 ibdm_saa_handle_new_gid, (void *)gid_info, 5524 TQ_NOSLEEP) == NULL) { 5525 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5526 "new_gid taskq_dispatch failed"); 5527 return; 5528 } 5529 } 5530 5531 mutex_enter(&ibdm.ibdm_mutex); 5532 ibdm.ibdm_busy &= ~IBDM_BUSY; 5533 cv_broadcast(&ibdm.ibdm_busy_cv); 5534 mutex_exit(&ibdm.ibdm_mutex); 5535 return; 5536 } 5537 5538 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE) 5539 return; 5540 5541 event_arg = (ibdm_saa_event_arg_t *)kmem_alloc( 5542 sizeof (ibdm_saa_event_arg_t), KM_SLEEP); 5543 event_arg->ibmf_saa_handle = ibmf_saa_handle; 5544 event_arg->ibmf_saa_event = ibmf_saa_event; 5545 bcopy(event_details, &event_arg->event_details, 5546 sizeof (ibmf_saa_event_details_t)); 5547 event_arg->callback_arg = callback_arg; 5548 5549 if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq, 5550 (void *)event_arg, TQ_NOSLEEP) == NULL) { 5551 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5552 "taskq_dispatch failed"); 5553 ibdm_free_saa_event_arg(event_arg); 5554 return; 5555 } 5556 } 5557 5558 /* 5559 * Handle a new GID discovered by GID_AVAILABLE saa event. 5560 */ 5561 void 5562 ibdm_saa_handle_new_gid(void *arg) 5563 { 5564 ibdm_dp_gidinfo_t *gid_info; 5565 ibdm_hca_list_t *hca_list = NULL; 5566 ibdm_port_attr_t *port = NULL; 5567 ibdm_ioc_info_t *ioc_list = NULL; 5568 5569 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg); 5570 5571 gid_info = (ibdm_dp_gidinfo_t *)arg; 5572 5573 /* 5574 * Ensure that no other sweep / probe has completed 5575 * probing this gid. 5576 */ 5577 mutex_enter(&gid_info->gl_mutex); 5578 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 5579 mutex_exit(&gid_info->gl_mutex); 5580 return; 5581 } 5582 mutex_exit(&gid_info->gl_mutex); 5583 5584 /* 5585 * Parse HCAs to fill gl_hca_list 5586 */ 5587 mutex_enter(&ibdm.ibdm_hl_mutex); 5588 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5589 ibdm_get_next_port(&hca_list, &port, 1)) { 5590 if (ibdm_port_reachable(port->pa_sa_hdl, 5591 gid_info->gl_portguid) == B_TRUE) { 5592 ibdm_addto_glhcalist(gid_info, hca_list); 5593 } 5594 } 5595 mutex_exit(&ibdm.ibdm_hl_mutex); 5596 5597 /* 5598 * Ensure no other probe / sweep fabric is in 5599 * progress. 
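	 *
	 * The serialization pattern used throughout this file is, in
	 * outline (a summary of the code below, not additional logic):
	 *
	 *	mutex_enter(&ibdm.ibdm_mutex);
	 *	while (ibdm.ibdm_busy & IBDM_BUSY)
	 *		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	 *	ibdm.ibdm_busy |= IBDM_BUSY;
	 *	mutex_exit(&ibdm.ibdm_mutex);
	 *
	 *	... probe / sweep work ...
	 *
	 *	mutex_enter(&ibdm.ibdm_mutex);
	 *	ibdm.ibdm_busy &= ~IBDM_BUSY;
	 *	cv_broadcast(&ibdm.ibdm_busy_cv);
	 *	mutex_exit(&ibdm.ibdm_mutex);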
	 */
	mutex_enter(&ibdm.ibdm_mutex);
	while (ibdm.ibdm_busy & IBDM_BUSY)
		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
	ibdm.ibdm_busy |= IBDM_BUSY;
	mutex_exit(&ibdm.ibdm_mutex);

	/*
	 * This is a new IOU; probe it to discover the IOCs behind it.
	 */
	IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: "
	    "new GID : probing");
	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_ngid_probes_in_progress++;
	mutex_exit(&ibdm.ibdm_mutex);
	mutex_enter(&gid_info->gl_mutex);
	gid_info->gl_reprobe_flag = 0;
	gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE;
	mutex_exit(&gid_info->gl_mutex);
	ibdm_probe_gid_thread((void *)gid_info);

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm_wait_probe_completion();
	mutex_exit(&ibdm.ibdm_mutex);

	if (gid_info->gl_iou == NULL) {
		mutex_enter(&ibdm.ibdm_mutex);
		ibdm.ibdm_busy &= ~IBDM_BUSY;
		cv_broadcast(&ibdm.ibdm_busy_cv);
		mutex_exit(&ibdm.ibdm_mutex);
		return;
	}

	/*
	 * Update the GID list in all IOCs affected by this GID.
	 */
	ioc_list = ibdm_update_ioc_gidlist(gid_info, 1);

	/*
	 * Pass on the IOCs with updated GIDs to IBnexus.
	 */
	if (ioc_list) {
		mutex_enter(&ibdm.ibdm_ibnex_mutex);
		if (ibdm.ibdm_ibnex_callback != NULL) {
			(*ibdm.ibdm_ibnex_callback)((void *)ioc_list,
			    IBDM_EVENT_IOC_PROP_UPDATE);
		}
		mutex_exit(&ibdm.ibdm_ibnex_mutex);
	}

	mutex_enter(&ibdm.ibdm_mutex);
	ibdm.ibdm_busy &= ~IBDM_BUSY;
	cv_broadcast(&ibdm.ibdm_busy_cv);
	mutex_exit(&ibdm.ibdm_mutex);
}

/*
 * ibdm_saa_event_taskq :
 *	GID_UNAVAILABLE event handling requires ibdm_hl_mutex to be
 *	held, so it is done in a taskq to prevent deadlocks with HCA
 *	port down notifications, which hold
 *	ibdm_hl_mutex.
5662 */ 5663 void 5664 ibdm_saa_event_taskq(void *arg) 5665 { 5666 ibdm_saa_event_arg_t *event_arg; 5667 ibmf_saa_handle_t ibmf_saa_handle; 5668 ibmf_saa_subnet_event_t ibmf_saa_event; 5669 ibmf_saa_event_details_t *event_details; 5670 void *callback_arg; 5671 5672 ibdm_dp_gidinfo_t *gid_info; 5673 ibdm_port_attr_t *hca_port, *port = NULL; 5674 ibdm_hca_list_t *hca_list = NULL; 5675 int sa_handle_valid = 0; 5676 ibdm_ioc_info_t *ioc_list = NULL; 5677 5678 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5679 5680 event_arg = (ibdm_saa_event_arg_t *)arg; 5681 ibmf_saa_handle = event_arg->ibmf_saa_handle; 5682 ibmf_saa_event = event_arg->ibmf_saa_event; 5683 event_details = &event_arg->event_details; 5684 callback_arg = event_arg->callback_arg; 5685 5686 ASSERT(callback_arg != NULL); 5687 ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE); 5688 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)", 5689 ibmf_saa_handle, ibmf_saa_event, event_details, 5690 callback_arg); 5691 5692 hca_port = (ibdm_port_attr_t *)callback_arg; 5693 5694 /* Check if the port_attr is still valid */ 5695 mutex_enter(&ibdm.ibdm_hl_mutex); 5696 for (ibdm_get_next_port(&hca_list, &port, 0); port; 5697 ibdm_get_next_port(&hca_list, &port, 0)) { 5698 if (port == hca_port && port->pa_port_guid == 5699 hca_port->pa_port_guid) { 5700 if (ibmf_saa_handle == hca_port->pa_sa_hdl) 5701 sa_handle_valid = 1; 5702 break; 5703 } 5704 } 5705 mutex_exit(&ibdm.ibdm_hl_mutex); 5706 if (sa_handle_valid == 0) { 5707 ibdm_free_saa_event_arg(event_arg); 5708 return; 5709 } 5710 5711 if (hca_port && (hca_port->pa_sa_hdl == NULL || 5712 ibmf_saa_handle != hca_port->pa_sa_hdl)) { 5713 ibdm_free_saa_event_arg(event_arg); 5714 return; 5715 } 5716 hca_list = NULL; 5717 port = NULL; 5718 5719 /* 5720 * Check if the GID is visible to other HCA ports. 5721 * Return if so. 5722 */ 5723 mutex_enter(&ibdm.ibdm_hl_mutex); 5724 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5725 ibdm_get_next_port(&hca_list, &port, 1)) { 5726 if (ibdm_port_reachable(port->pa_sa_hdl, 5727 event_details->ie_gid.gid_guid) == B_TRUE) { 5728 mutex_exit(&ibdm.ibdm_hl_mutex); 5729 ibdm_free_saa_event_arg(event_arg); 5730 return; 5731 } 5732 } 5733 mutex_exit(&ibdm.ibdm_hl_mutex); 5734 5735 /* 5736 * Ensure no other probe / sweep fabric is in 5737 * progress. 5738 */ 5739 mutex_enter(&ibdm.ibdm_mutex); 5740 while (ibdm.ibdm_busy & IBDM_BUSY) 5741 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5742 ibdm.ibdm_busy |= IBDM_BUSY; 5743 mutex_exit(&ibdm.ibdm_mutex); 5744 5745 /* 5746 * If this GID is no longer in GID list, return 5747 * GID_UNAVAILABLE may be reported for multiple HCA 5748 * ports. 
5749 */ 5750 mutex_enter(&ibdm.ibdm_mutex); 5751 gid_info = ibdm.ibdm_dp_gidlist_head; 5752 while (gid_info) { 5753 if (gid_info->gl_portguid == 5754 event_details->ie_gid.gid_guid) { 5755 break; 5756 } 5757 gid_info = gid_info->gl_next; 5758 } 5759 mutex_exit(&ibdm.ibdm_mutex); 5760 if (gid_info == NULL) { 5761 mutex_enter(&ibdm.ibdm_mutex); 5762 ibdm.ibdm_busy &= ~IBDM_BUSY; 5763 cv_broadcast(&ibdm.ibdm_busy_cv); 5764 mutex_exit(&ibdm.ibdm_mutex); 5765 ibdm_free_saa_event_arg(event_arg); 5766 return; 5767 } 5768 5769 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5770 "Unavailable notification", 5771 event_details->ie_gid.gid_prefix, 5772 event_details->ie_gid.gid_guid); 5773 5774 /* 5775 * Update GID list in all IOCs affected by this 5776 */ 5777 if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED || 5778 gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) 5779 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 5780 5781 /* 5782 * Remove GID from the global GID list 5783 * Handle the case where all port GIDs for an 5784 * IOU have been hot-removed. Check both gid_info 5785 * & ioc_info for checking ngids. 5786 */ 5787 mutex_enter(&ibdm.ibdm_mutex); 5788 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 5789 mutex_enter(&gid_info->gl_mutex); 5790 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 5791 mutex_exit(&gid_info->gl_mutex); 5792 } 5793 if (gid_info->gl_prev != NULL) 5794 gid_info->gl_prev->gl_next = gid_info->gl_next; 5795 if (gid_info->gl_next != NULL) 5796 gid_info->gl_next->gl_prev = gid_info->gl_prev; 5797 5798 if (gid_info == ibdm.ibdm_dp_gidlist_head) 5799 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 5800 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 5801 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 5802 ibdm.ibdm_ngids--; 5803 5804 ibdm.ibdm_busy &= ~IBDM_BUSY; 5805 cv_broadcast(&ibdm.ibdm_busy_cv); 5806 mutex_exit(&ibdm.ibdm_mutex); 5807 5808 /* free the hca_list on this gid_info */ 5809 ibdm_delete_glhca_list(gid_info); 5810 5811 mutex_destroy(&gid_info->gl_mutex); 5812 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 5813 5814 /* 5815 * Pass on the IOCs with updated GIDs to IBnexus 5816 */ 5817 if (ioc_list) { 5818 IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE " 5819 "IOC_PROP_UPDATE for %p\n", ioc_list); 5820 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5821 if (ibdm.ibdm_ibnex_callback != NULL) { 5822 (*ibdm.ibdm_ibnex_callback)((void *) 5823 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 5824 } 5825 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5826 } 5827 5828 ibdm_free_saa_event_arg(event_arg); 5829 } 5830 5831 5832 static int 5833 ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev) 5834 { 5835 ibdm_gid_t *scan_new, *scan_prev; 5836 int cmp_failed = 0; 5837 5838 ASSERT(new != NULL); 5839 ASSERT(prev != NULL); 5840 5841 /* 5842 * Search for each new gid anywhere in the prev GID list. 5843 * Note that the gid list could have been re-ordered. 
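	 *
	 * For example (hypothetical values), new = {A, B} compared against
	 * prev = {B, A} matches (returns 0), while new = {A, C} against
	 * prev = {B, A} returns 1 because C is not present in prev. The
	 * comparison is one-way: extra entries in prev alone do not cause
	 * a mismatch; callers also compare the GID counts.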
5844 */ 5845 for (scan_new = new; scan_new; scan_new = scan_new->gid_next) { 5846 for (scan_prev = prev, cmp_failed = 1; scan_prev; 5847 scan_prev = scan_prev->gid_next) { 5848 if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi && 5849 scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) { 5850 cmp_failed = 0; 5851 break; 5852 } 5853 } 5854 5855 if (cmp_failed) 5856 return (1); 5857 } 5858 return (0); 5859 } 5860 5861 /* 5862 * This is always called in a single thread 5863 * This function updates the gid_list and serv_list of IOC 5864 * The current gid_list is in ioc_info_t(contains only port 5865 * guids for which probe is done) & gidinfo_t(other port gids) 5866 * The gids in both locations are used for comparision. 5867 */ 5868 static void 5869 ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo) 5870 { 5871 ibdm_gid_t *cur_gid_list; 5872 uint_t cur_nportgids; 5873 5874 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 5875 5876 ioc->ioc_info_updated.ib_prop_updated = 0; 5877 5878 5879 /* Current GID list in gid_info only */ 5880 cur_gid_list = gidinfo->gl_gid; 5881 cur_nportgids = gidinfo->gl_ngids; 5882 5883 if (ioc->ioc_prev_serv_cnt != 5884 ioc->ioc_profile.ioc_service_entries || 5885 ibdm_serv_cmp(&ioc->ioc_serv[0], &ioc->ioc_prev_serv[0], 5886 ioc->ioc_prev_serv_cnt)) 5887 ioc->ioc_info_updated.ib_srv_prop_updated = 1; 5888 5889 if (ioc->ioc_prev_nportgids != cur_nportgids || 5890 ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) { 5891 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 5892 } else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) { 5893 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 5894 } 5895 5896 /* Zero out previous entries */ 5897 ibdm_free_gid_list(ioc->ioc_prev_gid_list); 5898 if (ioc->ioc_prev_serv) 5899 kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt * 5900 sizeof (ibdm_srvents_info_t)); 5901 ioc->ioc_prev_serv_cnt = 0; 5902 ioc->ioc_prev_nportgids = 0; 5903 ioc->ioc_prev_serv = NULL; 5904 ioc->ioc_prev_gid_list = NULL; 5905 } 5906 5907 /* 5908 * Handle GID removal. This returns gid_info of an GID for the same 5909 * node GUID, if found. For an GID with IOU information, the same 5910 * gid_info is returned if no gid_info with same node_guid is found. 5911 */ 5912 static ibdm_dp_gidinfo_t * 5913 ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid) 5914 { 5915 ibdm_dp_gidinfo_t *gid_list; 5916 5917 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid); 5918 5919 if (rm_gid->gl_iou == NULL) { 5920 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou"); 5921 /* 5922 * Search for a GID with same node_guid and 5923 * gl_iou != NULL 5924 */ 5925 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 5926 gid_list = gid_list->gl_next) { 5927 if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid 5928 == rm_gid->gl_nodeguid)) 5929 break; 5930 } 5931 5932 if (gid_list) 5933 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 5934 5935 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 5936 return (gid_list); 5937 } else { 5938 /* 5939 * Search for a GID with same node_guid and 5940 * gl_iou == NULL 5941 */ 5942 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou"); 5943 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 5944 gid_list = gid_list->gl_next) { 5945 if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid 5946 == rm_gid->gl_nodeguid)) 5947 break; 5948 } 5949 5950 if (gid_list) { 5951 /* 5952 * Copy the following fields from rm_gid : 5953 * 1. gl_state 5954 * 2. gl_iou 5955 * 3. 
gl_gid & gl_ngids 5956 * 5957 * Note : Function is synchronized by 5958 * ibdm_busy flag. 5959 * 5960 * Note : Redirect info is initialized if 5961 * any MADs for the GID fail 5962 */ 5963 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm " 5964 "copying info to GID with gl_iou != NULl"); 5965 gid_list->gl_state = rm_gid->gl_state; 5966 gid_list->gl_iou = rm_gid->gl_iou; 5967 gid_list->gl_gid = rm_gid->gl_gid; 5968 gid_list->gl_ngids = rm_gid->gl_ngids; 5969 5970 /* Remove the GID from gl_gid list */ 5971 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 5972 } else { 5973 /* 5974 * Handle a case where all GIDs to the IOU have 5975 * been removed. 5976 */ 5977 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID " 5978 "to IOU"); 5979 5980 ibdm_rmfrom_glgid_list(rm_gid, rm_gid); 5981 return (rm_gid); 5982 } 5983 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 5984 return (gid_list); 5985 } 5986 } 5987 5988 static void 5989 ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info, 5990 ibdm_dp_gidinfo_t *rm_gid) 5991 { 5992 ibdm_gid_t *tmp, *prev; 5993 5994 IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)", 5995 gid_info, rm_gid); 5996 5997 for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) { 5998 if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi && 5999 tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) { 6000 if (prev == NULL) 6001 gid_info->gl_gid = tmp->gid_next; 6002 else 6003 prev->gid_next = tmp->gid_next; 6004 6005 kmem_free(tmp, sizeof (ibdm_gid_t)); 6006 gid_info->gl_ngids--; 6007 break; 6008 } else { 6009 prev = tmp; 6010 tmp = tmp->gid_next; 6011 } 6012 } 6013 } 6014 6015 static void 6016 ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest) 6017 { 6018 ibdm_gid_t *head = NULL, *new, *tail; 6019 6020 /* First copy the destination */ 6021 for (; dest; dest = dest->gid_next) { 6022 new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 6023 new->gid_dgid_hi = dest->gid_dgid_hi; 6024 new->gid_dgid_lo = dest->gid_dgid_lo; 6025 new->gid_next = head; 6026 head = new; 6027 } 6028 6029 /* Insert this to the source */ 6030 if (*src_ptr == NULL) 6031 *src_ptr = head; 6032 else { 6033 for (tail = *src_ptr; tail->gid_next != NULL; 6034 tail = tail->gid_next) 6035 ; 6036 6037 tail->gid_next = head; 6038 } 6039 } 6040 6041 static void 6042 ibdm_free_gid_list(ibdm_gid_t *head) 6043 { 6044 ibdm_gid_t *delete; 6045 6046 for (delete = head; delete; ) { 6047 head = delete->gid_next; 6048 kmem_free(delete, sizeof (ibdm_gid_t)); 6049 delete = head; 6050 } 6051 } 6052 6053 /* 6054 * This function rescans the DM capable GIDs (gl_state is 6055 * GID_PROBE_COMPLETE or IBDM_GID_PROBING_SKIPPED.This 6056 * basically checks if the DM capable GID is reachable. If 6057 * not this is handled the same way as GID_UNAVAILABLE, 6058 * except that notifications are not send to IBnexus. 6059 * 6060 * This function also initializes the ioc_prev_list for 6061 * a particular IOC (when called from probe_ioc, with 6062 * ioc_guidp != NULL) or all IOCs for the gid (called from 6063 * sweep_fabric, ioc_guidp == NULL). 
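 *
 * Illustrative call forms (matching the description above; see
 * probe_ioc and sweep_fabric for the actual call sites):
 *
 *	ibdm_rescan_gidlist(&ioc_guid);		one IOC, from probe_ioc
 *	ibdm_rescan_gidlist(NULL);		all IOCs, from sweep_fabric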
6064 */ 6065 static void 6066 ibdm_rescan_gidlist(ib_guid_t *ioc_guidp) 6067 { 6068 ibdm_dp_gidinfo_t *gid_info, *tmp; 6069 int ii, niocs, found; 6070 ibdm_hca_list_t *hca_list = NULL; 6071 ibdm_port_attr_t *port = NULL; 6072 ibdm_ioc_info_t *ioc_list; 6073 6074 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6075 found = 0; 6076 if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED && 6077 gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) { 6078 gid_info = gid_info->gl_next; 6079 continue; 6080 } 6081 6082 /* 6083 * Check if the GID is visible to any HCA ports. 6084 * Return if so. 6085 */ 6086 mutex_enter(&ibdm.ibdm_hl_mutex); 6087 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6088 ibdm_get_next_port(&hca_list, &port, 1)) { 6089 if (ibdm_port_reachable(port->pa_sa_hdl, 6090 gid_info->gl_dgid_lo) == B_TRUE) { 6091 found = 1; 6092 break; 6093 } 6094 } 6095 mutex_exit(&ibdm.ibdm_hl_mutex); 6096 6097 if (found) { 6098 if (gid_info->gl_iou == NULL) { 6099 gid_info = gid_info->gl_next; 6100 continue; 6101 } 6102 6103 /* Intialize the ioc_prev_gid_list */ 6104 niocs = 6105 gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 6106 for (ii = 0; ii < niocs; ii++) { 6107 ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii); 6108 6109 if (ioc_guidp == NULL || (*ioc_guidp == 6110 ioc_list->ioc_profile.ioc_guid)) { 6111 /* Add info of GIDs in gid_info also */ 6112 ibdm_addto_gidlist( 6113 &ioc_list->ioc_prev_gid_list, 6114 gid_info->gl_gid); 6115 ioc_list->ioc_prev_nportgids = 6116 gid_info->gl_ngids; 6117 } 6118 } 6119 gid_info = gid_info->gl_next; 6120 continue; 6121 } 6122 6123 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 6124 "deleted port GUID %llx", 6125 gid_info->gl_dgid_lo); 6126 6127 /* 6128 * Update GID list in all IOCs affected by this 6129 */ 6130 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 6131 6132 /* 6133 * Remove GID from the global GID list 6134 * Handle the case where all port GIDs for an 6135 * IOU have been hot-removed. 6136 */ 6137 mutex_enter(&ibdm.ibdm_mutex); 6138 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 6139 mutex_enter(&gid_info->gl_mutex); 6140 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 6141 mutex_exit(&gid_info->gl_mutex); 6142 } 6143 6144 tmp = gid_info->gl_next; 6145 if (gid_info->gl_prev != NULL) 6146 gid_info->gl_prev->gl_next = gid_info->gl_next; 6147 if (gid_info->gl_next != NULL) 6148 gid_info->gl_next->gl_prev = gid_info->gl_prev; 6149 6150 if (gid_info == ibdm.ibdm_dp_gidlist_head) 6151 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 6152 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 6153 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 6154 ibdm.ibdm_ngids--; 6155 mutex_exit(&ibdm.ibdm_mutex); 6156 6157 /* free the hca_list on this gid_info */ 6158 ibdm_delete_glhca_list(gid_info); 6159 6160 mutex_destroy(&gid_info->gl_mutex); 6161 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 6162 6163 gid_info = tmp; 6164 6165 /* 6166 * Pass on the IOCs with updated GIDs to IBnexus 6167 */ 6168 if (ioc_list) { 6169 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 6170 "IOC_PROP_UPDATE for %p\n", ioc_list); 6171 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6172 if (ibdm.ibdm_ibnex_callback != NULL) { 6173 (*ibdm.ibdm_ibnex_callback)((void *) 6174 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6175 } 6176 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6177 } 6178 } 6179 } 6180 6181 /* 6182 * This function notifies IBnex of IOCs on this GID. 6183 * Notification is for GIDs with gl_reprobe_flag set. 
6184 * The flag is set when IOC probe / fabric sweep 6185 * probes a GID starting from CLASS port info. 6186 * 6187 * IBnexus will have information of a reconnected IOC 6188 * if it had probed it before. If this is a new IOC, 6189 * IBnexus ignores the notification. 6190 * 6191 * This function should be called with no locks held. 6192 */ 6193 static void 6194 ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info) 6195 { 6196 ibdm_ioc_info_t *ioc_list; 6197 6198 if (gid_info->gl_reprobe_flag == 0 || 6199 gid_info->gl_iou == NULL) 6200 return; 6201 6202 ioc_list = ibdm_update_ioc_gidlist(gid_info, -1); 6203 6204 /* 6205 * Pass on the IOCs with updated GIDs to IBnexus 6206 */ 6207 if (ioc_list) { 6208 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6209 if (ibdm.ibdm_ibnex_callback != NULL) { 6210 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 6211 IBDM_EVENT_IOC_PROP_UPDATE); 6212 } 6213 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6214 } 6215 } 6216 6217 6218 static void 6219 ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg) 6220 { 6221 if (arg != NULL) 6222 kmem_free(arg, sizeof (ibdm_saa_event_arg_t)); 6223 } 6224 6225 /* 6226 * This function parses the list of HCAs and HCA ports 6227 * to return the port_attr of the next HCA port. A port 6228 * connected to IB fabric (port_state active) is returned, 6229 * if connected_flag is set. 6230 */ 6231 static void 6232 ibdm_get_next_port(ibdm_hca_list_t **inp_hcap, 6233 ibdm_port_attr_t **inp_portp, int connect_flag) 6234 { 6235 int ii; 6236 ibdm_port_attr_t *port, *next_port = NULL; 6237 ibdm_port_attr_t *inp_port; 6238 ibdm_hca_list_t *hca_list; 6239 int found = 0; 6240 6241 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6242 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)", 6243 inp_hcap, inp_portp, connect_flag); 6244 6245 hca_list = *inp_hcap; 6246 inp_port = *inp_portp; 6247 6248 if (hca_list == NULL) 6249 hca_list = ibdm.ibdm_hca_list_head; 6250 6251 for (; hca_list; hca_list = hca_list->hl_next) { 6252 for (ii = 0; ii < hca_list->hl_nports; ii++) { 6253 port = &hca_list->hl_port_attr[ii]; 6254 6255 /* 6256 * inp_port != NULL; 6257 * Skip till we find the matching port 6258 */ 6259 if (inp_port && !found) { 6260 if (inp_port == port) 6261 found = 1; 6262 continue; 6263 } 6264 6265 if (!connect_flag) { 6266 next_port = port; 6267 break; 6268 } 6269 6270 if (port->pa_sa_hdl == NULL) 6271 ibdm_initialize_port(port); 6272 if (port->pa_sa_hdl == NULL) 6273 (void) ibdm_fini_port(port); 6274 else if (next_port == NULL && 6275 port->pa_sa_hdl != NULL && 6276 port->pa_state == IBT_PORT_ACTIVE) { 6277 next_port = port; 6278 break; 6279 } 6280 } 6281 6282 if (next_port) 6283 break; 6284 } 6285 6286 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : " 6287 "returns hca_list %p port %p", hca_list, next_port); 6288 *inp_hcap = hca_list; 6289 *inp_portp = next_port; 6290 } 6291 6292 static void 6293 ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid) 6294 { 6295 ibdm_gid_t *tmp; 6296 6297 tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 6298 tmp->gid_dgid_hi = addgid->gl_dgid_hi; 6299 tmp->gid_dgid_lo = addgid->gl_dgid_lo; 6300 6301 mutex_enter(&nodegid->gl_mutex); 6302 tmp->gid_next = nodegid->gl_gid; 6303 nodegid->gl_gid = tmp; 6304 nodegid->gl_ngids++; 6305 mutex_exit(&nodegid->gl_mutex); 6306 } 6307 6308 static void 6309 ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info, 6310 ibdm_hca_list_t *hca) 6311 { 6312 ibdm_hca_list_t *head, *prev = NULL, *temp; 6313 6314 IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) " 6315 ": gl_hca_list %p", 
gid_info, hca, gid_info->gl_hca_list); 6316 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6317 6318 mutex_enter(&gid_info->gl_mutex); 6319 head = gid_info->gl_hca_list; 6320 if (head == NULL) { 6321 head = ibdm_dup_hca_attr(hca); 6322 head->hl_next = NULL; 6323 gid_info->gl_hca_list = head; 6324 mutex_exit(&gid_info->gl_mutex); 6325 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6326 "gid %p, gl_hca_list %p", gid_info, 6327 gid_info->gl_hca_list); 6328 return; 6329 } 6330 6331 /* Check if already in the list */ 6332 while (head) { 6333 if (head->hl_hca_guid == hca->hl_hca_guid) { 6334 mutex_exit(&gid_info->gl_mutex); 6335 IBTF_DPRINTF_L4(ibdm_string, 6336 "\taddto_glhcalist : gid %p hca %p dup", 6337 gid_info, hca); 6338 return; 6339 } 6340 prev = head; 6341 head = head->hl_next; 6342 } 6343 6344 /* Add this HCA to gl_hca_list */ 6345 temp = ibdm_dup_hca_attr(hca); 6346 temp->hl_next = NULL; 6347 prev->hl_next = temp; 6348 mutex_exit(&gid_info->gl_mutex); 6349 6350 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6351 "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list); 6352 } 6353 6354 static void 6355 ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info) 6356 { 6357 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6358 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6359 6360 mutex_enter(&gid_info->gl_mutex); 6361 if (gid_info->gl_hca_list) 6362 ibdm_ibnex_free_hca_list(gid_info->gl_hca_list); 6363 gid_info->gl_hca_list = NULL; 6364 mutex_exit(&gid_info->gl_mutex); 6365 } 6366 6367 6368 static void 6369 ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl) 6370 { 6371 IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)", 6372 port_sa_hdl); 6373 6374 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6375 ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6376 6377 /* Check : Not busy in another probe / sweep */ 6378 mutex_enter(&ibdm.ibdm_mutex); 6379 if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) { 6380 ibdm_dp_gidinfo_t *gid_info; 6381 6382 ibdm.ibdm_busy |= IBDM_BUSY; 6383 mutex_exit(&ibdm.ibdm_mutex); 6384 6385 /* 6386 * Check if any GID is using the SA & IBMF handle 6387 * of HCA port going down. Reset ibdm_dp_gidinfo_t 6388 * using another HCA port which can reach the GID. 6389 * This is for DM capable GIDs only, no need to do 6390 * this for others 6391 * 6392 * Delete the GID if no alternate HCA port to reach 6393 * it is found. 6394 */ 6395 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6396 ibdm_dp_gidinfo_t *tmp; 6397 6398 IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr " 6399 "checking gidinfo %p", gid_info); 6400 6401 if (gid_info->gl_sa_hdl == port_sa_hdl) { 6402 IBTF_DPRINTF_L3(ibdm_string, 6403 "\tevent_hdlr: down HCA port hdl " 6404 "matches gid %p", gid_info); 6405 6406 /* 6407 * The non-DM GIDs can come back 6408 * with a new subnet prefix, when 6409 * the HCA port commes up again. To 6410 * avoid issues, delete non-DM 6411 * capable GIDs, if the gid was 6412 * discovered using the HCA port 6413 * going down. This is ensured by 6414 * setting gl_disconnected to 1. 
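				 *
				 * (The check below treats a gid_info whose
				 * gl_nodeguid is still 0 as such a non-DM
				 * capable GID and marks it for deletion;
				 * DM-capable GIDs are re-initialized via
				 * ibdm_reset_gidinfo() instead.)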
6415 */ 6416 if (gid_info->gl_nodeguid == 0) 6417 gid_info->gl_disconnected = 1; 6418 else 6419 ibdm_reset_gidinfo(gid_info); 6420 6421 if (gid_info->gl_disconnected) { 6422 IBTF_DPRINTF_L3(ibdm_string, 6423 "\tevent_hdlr: deleting" 6424 " gid %p", gid_info); 6425 tmp = gid_info; 6426 gid_info = gid_info->gl_next; 6427 ibdm_delete_gidinfo(tmp); 6428 } else 6429 gid_info = gid_info->gl_next; 6430 } else 6431 gid_info = gid_info->gl_next; 6432 } 6433 6434 mutex_enter(&ibdm.ibdm_mutex); 6435 ibdm.ibdm_busy &= ~IBDM_BUSY; 6436 cv_signal(&ibdm.ibdm_busy_cv); 6437 } 6438 mutex_exit(&ibdm.ibdm_mutex); 6439 } 6440 6441 static void 6442 ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6443 { 6444 ibdm_hca_list_t *hca_list = NULL; 6445 ibdm_port_attr_t *port = NULL; 6446 int gid_reinited = 0; 6447 sa_node_record_t *nr, *tmp; 6448 sa_portinfo_record_t *pi; 6449 size_t nr_len = 0, pi_len = 0; 6450 size_t path_len; 6451 ib_gid_t sgid, dgid; 6452 int ret, ii, nrecords; 6453 sa_path_record_t *path; 6454 uint8_t npaths = 1; 6455 ibdm_pkey_tbl_t *pkey_tbl; 6456 6457 IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo); 6458 6459 /* 6460 * Get list of all the ports reachable from the local known HCA 6461 * ports which are active 6462 */ 6463 mutex_enter(&ibdm.ibdm_hl_mutex); 6464 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6465 ibdm_get_next_port(&hca_list, &port, 1)) { 6466 6467 6468 /* 6469 * Get the path and re-populate the gidinfo. 6470 * Getting the path is the same probe_ioc 6471 * Init the gid info as in ibdm_create_gidinfo() 6472 */ 6473 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, 6474 gidinfo->gl_nodeguid); 6475 if (nr == NULL) { 6476 IBTF_DPRINTF_L4(ibdm_string, 6477 "\treset_gidinfo : no records"); 6478 continue; 6479 } 6480 6481 nrecords = (nr_len / sizeof (sa_node_record_t)); 6482 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 6483 if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid) 6484 break; 6485 } 6486 6487 if (ii == nrecords) { 6488 IBTF_DPRINTF_L4(ibdm_string, 6489 "\treset_gidinfo : no record for portguid"); 6490 kmem_free(nr, nr_len); 6491 continue; 6492 } 6493 6494 pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID); 6495 if (pi == NULL) { 6496 IBTF_DPRINTF_L4(ibdm_string, 6497 "\treset_gidinfo : no portinfo"); 6498 kmem_free(nr, nr_len); 6499 continue; 6500 } 6501 6502 sgid.gid_prefix = port->pa_sn_prefix; 6503 sgid.gid_guid = port->pa_port_guid; 6504 dgid.gid_prefix = pi->PortInfo.GidPrefix; 6505 dgid.gid_guid = tmp->NodeInfo.PortGUID; 6506 6507 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid, 6508 IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path); 6509 6510 if ((ret != IBMF_SUCCESS) || path == NULL) { 6511 IBTF_DPRINTF_L4(ibdm_string, 6512 "\treset_gidinfo : no paths"); 6513 kmem_free(pi, pi_len); 6514 kmem_free(nr, nr_len); 6515 continue; 6516 } 6517 6518 gidinfo->gl_dgid_hi = path->DGID.gid_prefix; 6519 gidinfo->gl_dgid_lo = path->DGID.gid_guid; 6520 gidinfo->gl_sgid_hi = path->SGID.gid_prefix; 6521 gidinfo->gl_sgid_lo = path->SGID.gid_guid; 6522 gidinfo->gl_p_key = path->P_Key; 6523 gidinfo->gl_sa_hdl = port->pa_sa_hdl; 6524 gidinfo->gl_ibmf_hdl = port->pa_ibmf_hdl; 6525 gidinfo->gl_slid = path->SLID; 6526 gidinfo->gl_dlid = path->DLID; 6527 /* Reset redirect info, next MAD will set if redirected */ 6528 gidinfo->gl_redirected = 0; 6529 gidinfo->gl_devid = (*tmp).NodeInfo.DeviceID; 6530 6531 gidinfo->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 6532 for (ii = 0; ii < port->pa_npkeys; ii++) { 6533 if (port->pa_pkey_tbl == NULL) 
6534 break; 6535 6536 pkey_tbl = &port->pa_pkey_tbl[ii]; 6537 if ((gidinfo->gl_p_key == pkey_tbl->pt_pkey) && 6538 (pkey_tbl->pt_qp_hdl != NULL)) { 6539 gidinfo->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 6540 break; 6541 } 6542 } 6543 6544 if (gidinfo->gl_qp_hdl == NULL) 6545 IBTF_DPRINTF_L2(ibdm_string, 6546 "\treset_gid_info: No matching Pkey"); 6547 else 6548 gid_reinited = 1; 6549 6550 kmem_free(path, path_len); 6551 kmem_free(pi, pi_len); 6552 kmem_free(nr, nr_len); 6553 break; 6554 } 6555 mutex_exit(&ibdm.ibdm_hl_mutex); 6556 6557 if (!gid_reinited) 6558 gidinfo->gl_disconnected = 1; 6559 } 6560 6561 static void 6562 ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6563 { 6564 ibdm_ioc_info_t *ioc_list; 6565 int in_gidlist = 0; 6566 6567 /* 6568 * Check if gidinfo has been inserted into the 6569 * ibdm_dp_gidlist_head list. gl_next or gl_prev 6570 * != NULL, if gidinfo is the list. 6571 */ 6572 if (gidinfo->gl_prev != NULL || 6573 gidinfo->gl_next != NULL || 6574 ibdm.ibdm_dp_gidlist_head == gidinfo) 6575 in_gidlist = 1; 6576 6577 ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0); 6578 6579 /* 6580 * Remove GID from the global GID list 6581 * Handle the case where all port GIDs for an 6582 * IOU have been hot-removed. 6583 */ 6584 mutex_enter(&ibdm.ibdm_mutex); 6585 if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) { 6586 mutex_enter(&gidinfo->gl_mutex); 6587 (void) ibdm_free_iou_info(gidinfo, &gidinfo->gl_iou); 6588 mutex_exit(&gidinfo->gl_mutex); 6589 } 6590 6591 /* Delete gl_hca_list */ 6592 mutex_exit(&ibdm.ibdm_mutex); 6593 ibdm_delete_glhca_list(gidinfo); 6594 mutex_enter(&ibdm.ibdm_mutex); 6595 6596 if (in_gidlist) { 6597 if (gidinfo->gl_prev != NULL) 6598 gidinfo->gl_prev->gl_next = gidinfo->gl_next; 6599 if (gidinfo->gl_next != NULL) 6600 gidinfo->gl_next->gl_prev = gidinfo->gl_prev; 6601 6602 if (gidinfo == ibdm.ibdm_dp_gidlist_head) 6603 ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next; 6604 if (gidinfo == ibdm.ibdm_dp_gidlist_tail) 6605 ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev; 6606 ibdm.ibdm_ngids--; 6607 } 6608 mutex_exit(&ibdm.ibdm_mutex); 6609 6610 mutex_destroy(&gidinfo->gl_mutex); 6611 cv_destroy(&gidinfo->gl_probe_cv); 6612 kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t)); 6613 6614 /* 6615 * Pass on the IOCs with updated GIDs to IBnexus 6616 */ 6617 if (ioc_list) { 6618 IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo " 6619 "IOC_PROP_UPDATE for %p\n", ioc_list); 6620 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6621 if (ibdm.ibdm_ibnex_callback != NULL) { 6622 (*ibdm.ibdm_ibnex_callback)((void *) 6623 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6624 } 6625 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6626 } 6627 } 6628 6629 6630 static void 6631 ibdm_fill_srv_attr_mod(ib_mad_hdr_t *hdr, ibdm_timeout_cb_args_t *cb_args) 6632 { 6633 uint32_t attr_mod; 6634 6635 attr_mod = (cb_args->cb_ioc_num + 1) << 16; 6636 attr_mod |= cb_args->cb_srvents_start; 6637 attr_mod |= (cb_args->cb_srvents_end) << 8; 6638 hdr->AttributeModifier = h2b32(attr_mod); 6639 } 6640 6641 static void 6642 ibdm_bump_transactionID(ibdm_dp_gidinfo_t *gid_info) 6643 { 6644 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 6645 gid_info->gl_transactionID++; 6646 if (gid_info->gl_transactionID == gid_info->gl_max_transactionID) { 6647 IBTF_DPRINTF_L4(ibdm_string, 6648 "\tbump_transactionID(%p), wrapup", gid_info); 6649 gid_info->gl_transactionID = gid_info->gl_min_transactionID; 6650 } 6651 } 6652 6653 /* 6654 * gl_prev_iou is set for *non-reprobe* sweeep requests, which 6655 * detected that ChangeID in IOU info has changed. 
The service entries may also have changed. Check whether the service
 * entries of each IOC have changed with respect to the previous IOU
 * and, if so, notify IB nexus.
 */
static ibdm_ioc_info_t *
ibdm_handle_prev_iou()
{
	ibdm_dp_gidinfo_t	*gid_info;
	ibdm_ioc_info_t		*ioc_list_head = NULL, *ioc_list;
	ibdm_ioc_info_t		*prev_ioc, *ioc;
	int			ii, jj, niocs, prev_niocs;

	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));

	IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou enter");
	for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
	    gid_info = gid_info->gl_next) {
		if (gid_info->gl_prev_iou == NULL)
			continue;

		IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou gid %p",
		    gid_info);
		niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
		prev_niocs =
		    gid_info->gl_prev_iou->iou_info.iou_num_ctrl_slots;
		for (ii = 0; ii < niocs; ii++) {
			ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);

			/* Find the matching IOC in the previous IOU info */
			for (jj = 0; jj < prev_niocs; jj++) {
				prev_ioc = (ibdm_ioc_info_t *)
				    &gid_info->gl_prev_iou->iou_ioc_info[jj];
				if (prev_ioc->ioc_profile.ioc_guid ==
				    ioc->ioc_profile.ioc_guid)
					break;
			}
			if (jj == prev_niocs)
				prev_ioc = NULL;
			if (ioc == NULL || prev_ioc == NULL)
				continue;
			if ((ioc->ioc_profile.ioc_service_entries !=
			    prev_ioc->ioc_profile.ioc_service_entries) ||
			    ibdm_serv_cmp(&ioc->ioc_serv[0],
			    &prev_ioc->ioc_serv[0],
			    ioc->ioc_profile.ioc_service_entries) != 0) {
				IBTF_DPRINTF_L4(ibdm_string,
				    "\thandle_prev_iou modified IOC: "
				    "current ioc %p, old ioc %p",
				    ioc, prev_ioc);
				mutex_enter(&gid_info->gl_mutex);
				ioc_list = ibdm_dup_ioc_info(ioc, gid_info);
				mutex_exit(&gid_info->gl_mutex);
				ioc_list->ioc_info_updated.ib_prop_updated
				    = 0;
				ioc_list->ioc_info_updated.ib_srv_prop_updated
				    = 1;

				/*
				 * Prepend to the result list so that
				 * previously linked entries are not lost.
				 */
				ioc_list->ioc_next = ioc_list_head;
				ioc_list_head = ioc_list;
			}
		}

		mutex_enter(&gid_info->gl_mutex);
		(void) ibdm_free_iou_info(gid_info, &gid_info->gl_prev_iou);
		mutex_exit(&gid_info->gl_mutex);
	}
	IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou ret %p",
	    ioc_list_head);
	return (ioc_list_head);
}

/*
 * Compares two service entry lists; returns 0 if they are the same and
 * 1 if they do not match.
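 *
 * Unlike ibdm_cmp_gid_list(), this comparison is positional: entry ii
 * of serv1 is checked against entry ii of serv2 (service id and service
 * name), so a re-ordered but otherwise identical list is reported as a
 * mismatch.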
6733 */ 6734 static int 6735 ibdm_serv_cmp(ibdm_srvents_info_t *serv1, ibdm_srvents_info_t *serv2, 6736 int nserv) 6737 { 6738 int ii; 6739 6740 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: enter"); 6741 for (ii = 0; ii < nserv; ii++, serv1++, serv2++) { 6742 if (serv1->se_attr.srv_id != serv2->se_attr.srv_id || 6743 bcmp(serv1->se_attr.srv_name, 6744 serv2->se_attr.srv_name, 6745 IB_DM_MAX_SVC_NAME_LEN) != 0) { 6746 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 1"); 6747 return (1); 6748 } 6749 } 6750 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 0"); 6751 return (0); 6752 } 6753 6754 /* For debugging purpose only */ 6755 #ifdef DEBUG 6756 void 6757 ibdm_dump_mad_hdr(ib_mad_hdr_t *mad_hdr) 6758 { 6759 IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info"); 6760 IBTF_DPRINTF_L4("ibdm", "\t\t ---------------"); 6761 6762 IBTF_DPRINTF_L4("ibdm", "\tBase version : 0x%x" 6763 "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass); 6764 IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x" 6765 "\tR Method : 0x%x", 6766 mad_hdr->ClassVersion, mad_hdr->R_Method); 6767 IBTF_DPRINTF_L4("ibdm", "\tMAD Status : 0x%x" 6768 "\tTransaction ID : 0x%llx", 6769 b2h16(mad_hdr->Status), b2h64(mad_hdr->TransactionID)); 6770 IBTF_DPRINTF_L4("ibdm", "\t Attribute ID : 0x%x" 6771 "\tAttribute Modified : 0x%lx", 6772 b2h16(mad_hdr->AttributeID), b2h32(mad_hdr->AttributeModifier)); 6773 } 6774 6775 6776 void 6777 ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag) 6778 { 6779 ib_mad_hdr_t *mad_hdr; 6780 6781 IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info"); 6782 IBTF_DPRINTF_L4("ibdm", "\t\t ------------------"); 6783 6784 IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x" 6785 " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid, 6786 ibmf_msg->im_local_addr.ia_remote_lid, 6787 ibmf_msg->im_local_addr.ia_remote_qno); 6788 IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x", 6789 ibmf_msg->im_local_addr.ia_p_key, ibmf_msg->im_local_addr.ia_q_key); 6790 6791 if (flag) 6792 mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg); 6793 else 6794 mad_hdr = IBDM_IN_IBMFMSG_MADHDR(ibmf_msg); 6795 6796 ibdm_dump_mad_hdr(mad_hdr); 6797 } 6798 6799 void 6800 ibdm_dump_path_info(sa_path_record_t *path) 6801 { 6802 IBTF_DPRINTF_L4("ibdm", "\t\t Path information"); 6803 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------"); 6804 6805 IBTF_DPRINTF_L4("ibdm", "\t DGID hi : %llx\tDGID lo : %llx", 6806 path->DGID.gid_prefix, path->DGID.gid_guid); 6807 IBTF_DPRINTF_L4("ibdm", "\t SGID hi : %llx\tSGID lo : %llx", 6808 path->SGID.gid_prefix, path->SGID.gid_guid); 6809 IBTF_DPRINTF_L4("ibdm", "\t SLID : %x\tDlID : %x", 6810 path->SLID, path->DLID); 6811 IBTF_DPRINTF_L4("ibdm", "\t P Key : %x", path->P_Key); 6812 } 6813 6814 6815 void 6816 ibdm_dump_classportinfo(ib_mad_classportinfo_t *classportinfo) 6817 { 6818 IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO"); 6819 IBTF_DPRINTF_L4("ibdm", "\t\t --------------"); 6820 6821 IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x", 6822 ((b2h32(classportinfo->RespTimeValue)) & 0x1F)); 6823 6824 IBTF_DPRINTF_L4("ibdm", "\t Redirected QP : 0x%x", 6825 (b2h32(classportinfo->RedirectQP))); 6826 IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY : 0x%x", 6827 b2h16(classportinfo->RedirectP_Key)); 6828 IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY : 0x%x", 6829 b2h16(classportinfo->RedirectQ_Key)); 6830 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi : 0x%llx", 6831 b2h64(classportinfo->RedirectGID_hi)); 6832 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo : 0x%llx", 6833 
b2h64(classportinfo->RedirectGID_lo)); 6834 IBTF_DPRINTF_L4("ibdm", "\t TrapGID hi : 0x%llx", 6835 b2h64(classportinfo->TrapGID_hi)); 6836 IBTF_DPRINTF_L4("ibdm", "\t TrapGID lo : 0x%llx", 6837 b2h64(classportinfo->TrapGID_lo)); 6838 IBTF_DPRINTF_L4("ibdm", "\t TrapTC : 0x%x", 6839 b2h32(classportinfo->TrapTC)); 6840 IBTF_DPRINTF_L4("ibdm", "\t TrapLID : 0x%x", 6841 b2h16(classportinfo->TrapLID)); 6842 IBTF_DPRINTF_L4("ibdm", "\t TrapP_Key : 0x%x", 6843 b2h16(classportinfo->TrapP_Key)); 6844 IBTF_DPRINTF_L4("ibdm", "\t TrapHL : 0x%x", 6845 b2h16(classportinfo->TrapHL)); 6846 IBTF_DPRINTF_L4("ibdm", "\t TrapQ_Key : 0x%x", 6847 b2h32(classportinfo->TrapQ_Key)); 6848 } 6849 6850 6851 void 6852 ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info) 6853 { 6854 IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo"); 6855 IBTF_DPRINTF_L4("ibdm", "\t\t ------------"); 6856 6857 IBTF_DPRINTF_L4("ibdm", "\tChange ID : 0x%x", 6858 b2h16(iou_info->iou_changeid)); 6859 IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots : %d", 6860 iou_info->iou_num_ctrl_slots); 6861 IBTF_DPRINTF_L4("ibdm", "\tIOU flag : 0x%x", 6862 iou_info->iou_flag); 6863 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 0 : 0x%x", 6864 iou_info->iou_ctrl_list[0]); 6865 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 1 : 0x%x", 6866 iou_info->iou_ctrl_list[1]); 6867 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 2 : 0x%x", 6868 iou_info->iou_ctrl_list[2]); 6869 } 6870 6871 6872 void 6873 ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc) 6874 { 6875 IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile"); 6876 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------------"); 6877 6878 IBTF_DPRINTF_L4("ibdm", "\tIOC Guid : %llx", ioc->ioc_guid); 6879 IBTF_DPRINTF_L4("ibdm", "\tVendorID : 0x%x", ioc->ioc_vendorid); 6880 IBTF_DPRINTF_L4("ibdm", "\tDevice Id : 0x%x", ioc->ioc_deviceid); 6881 IBTF_DPRINTF_L4("ibdm", "\tDevice Ver : 0x%x", ioc->ioc_device_ver); 6882 IBTF_DPRINTF_L4("ibdm", "\tSubsys ID : 0x%x", ioc->ioc_subsys_id); 6883 IBTF_DPRINTF_L4("ibdm", "\tIO class : 0x%x", ioc->ioc_io_class); 6884 IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass); 6885 IBTF_DPRINTF_L4("ibdm", "\tProtocol : 0x%x", ioc->ioc_protocol); 6886 IBTF_DPRINTF_L4("ibdm", "\tProtocolV : 0x%x", ioc->ioc_protocol_ver); 6887 IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth : %d", ioc->ioc_send_msg_qdepth); 6888 IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d", 6889 ioc->ioc_rdma_read_qdepth); 6890 IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz : %d", ioc->ioc_send_msg_sz); 6891 IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz); 6892 IBTF_DPRINTF_L4("ibdm", "\topcal mask : 0x%x", 6893 ioc->ioc_ctrl_opcap_mask); 6894 IBTF_DPRINTF_L4("ibdm", "\tsrventries : %x", ioc->ioc_service_entries); 6895 } 6896 6897 6898 void 6899 ibdm_dump_service_entries(ib_dm_srv_t *srv_ents) 6900 { 6901 IBTF_DPRINTF_L4("ibdm", 6902 "\thandle_srventry_mad: service id : %llx", srv_ents->srv_id); 6903 6904 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad: " 6905 "Service Name : %s", srv_ents->srv_name); 6906 } 6907 6908 int ibdm_allow_sweep_fabric_timestamp = 1; 6909 6910 void 6911 ibdm_dump_sweep_fabric_timestamp(int flag) 6912 { 6913 static hrtime_t x; 6914 if (flag) { 6915 if (ibdm_allow_sweep_fabric_timestamp) { 6916 IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete " 6917 "sweep %lld ms", ((gethrtime() - x)/ 1000000)); 6918 } 6919 x = 0; 6920 } else 6921 x = gethrtime(); 6922 } 6923 #endif 6924
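
#ifdef DEBUG
/*
 * Illustrative helper only (a sketch, not called anywhere in this
 * driver): decodes the ServiceEntries AttributeModifier packed by
 * ibdm_fill_srv_attr_mod() above. The layout assumed here is taken
 * from that function: bits 31..16 hold (IOC slot number + 1),
 * bits 15..8 the last service entry index, and bits 7..0 the first
 * service entry index.
 */
void
ibdm_dump_srv_attr_mod(ib_mad_hdr_t *hdr)
{
	uint32_t	attr_mod;

	attr_mod = b2h32(hdr->AttributeModifier);
	IBTF_DPRINTF_L4("ibdm", "\tsrv attr_mod : ioc slot %d, "
	    "service entries %d..%d",
	    ((attr_mod >> 16) & 0xffff) - 1,
	    (attr_mod & 0xff), ((attr_mod >> 8) & 0xff));
}
#endif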