/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * ibdm.c
 *
 * This file contains the InfiniBand Device Manager (IBDM) support functions.
 * The IB nexus driver is the only client of the IBDM module.
 *
 * IBDM registers with IBTF for HCA arrival/removal notification.
 * IBDM registers with SA access to send DM MADs to discover the IOCs behind
 * the IOUs.
 *
 * The IB nexus driver registers with IBDM to find the information about the
 * HCAs and the IOCs (behind the IOUs) present on the IB fabric.
 */

#include <sys/systm.h>
#include <sys/taskq.h>
#include <sys/ib/mgt/ibdm/ibdm_impl.h>
#include <sys/ib/mgt/ibmf/ibmf_impl.h>
#include <sys/ib/ibtl/impl/ibtl_ibnex.h>
#include <sys/modctl.h>

/* Function Prototype declarations */
static int	ibdm_free_iou_info(ibdm_dp_gidinfo_t *, ibdm_iou_info_t **);
static int	ibdm_fini(void);
static int	ibdm_init(void);
static int	ibdm_get_reachable_ports(ibdm_port_attr_t *,
		    ibdm_hca_list_t *);
static ibdm_dp_gidinfo_t	*ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t);
static ibdm_dp_gidinfo_t	*ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *);
static boolean_t	ibdm_is_cisco(ib_guid_t);
static boolean_t	ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *);
static void	ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *);
static int	ibdm_set_classportinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_send_classportinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *);
static int	ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t,
		    ib_guid_t *, ib_guid_t *);
static int	ibdm_retry_command(ibdm_timeout_cb_args_t *);
static int	ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int);
static int	ibdm_verify_mad_status(ib_mad_hdr_t *);
static int	ibdm_handle_redirection(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_wait_probe_completion(void);
static void	ibdm_sweep_fabric(int);
static void	ibdm_probe_gid_thread(void *);
static void	ibdm_wakeup_probe_gid_cv(void);
static void	ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int);
static int	ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int);
static void	ibdm_update_port_attr(ibdm_port_attr_t *);
static void	ibdm_handle_hca_attach(ib_guid_t);
static void	ibdm_handle_srventry_mad(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_recv_incoming_mad(void *);
static void	ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t
*, void *); 82 static void ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *); 83 static void ibdm_pkt_timeout_hdlr(void *arg); 84 static void ibdm_initialize_port(ibdm_port_attr_t *); 85 static void ibdm_update_port_pkeys(ibdm_port_attr_t *port); 86 static void ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 87 static void ibdm_probe_gid(ibdm_dp_gidinfo_t *); 88 static void ibdm_alloc_send_buffers(ibmf_msg_t *); 89 static void ibdm_free_send_buffers(ibmf_msg_t *); 90 static void ibdm_handle_hca_detach(ib_guid_t); 91 static void ibdm_handle_port_change_event(ibt_async_event_t *); 92 static int ibdm_fini_port(ibdm_port_attr_t *); 93 static int ibdm_uninit_hca(ibdm_hca_list_t *); 94 static void ibdm_handle_setclassportinfo(ibmf_handle_t, ibmf_msg_t *, 95 ibdm_dp_gidinfo_t *, int *); 96 static void ibdm_handle_iounitinfo(ibmf_handle_t, 97 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 98 static void ibdm_handle_ioc_profile(ibmf_handle_t, 99 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 100 static void ibdm_event_hdlr(void *, ibt_hca_hdl_t, 101 ibt_async_code_t, ibt_async_event_t *); 102 static void ibdm_handle_classportinfo(ibmf_handle_t, 103 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 104 static void ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *, 105 ibdm_dp_gidinfo_t *); 106 107 static ibdm_hca_list_t *ibdm_dup_hca_attr(ibdm_hca_list_t *); 108 static ibdm_ioc_info_t *ibdm_dup_ioc_info(ibdm_ioc_info_t *, 109 ibdm_dp_gidinfo_t *gid_list); 110 static void ibdm_probe_ioc(ib_guid_t, ib_guid_t, int); 111 static ibdm_ioc_info_t *ibdm_is_ioc_present(ib_guid_t, 112 ibdm_dp_gidinfo_t *, int *); 113 static ibdm_port_attr_t *ibdm_get_port_attr(ibt_async_event_t *, 114 ibdm_hca_list_t **); 115 static sa_node_record_t *ibdm_get_node_records(ibmf_saa_handle_t, 116 size_t *, ib_guid_t); 117 static int ibdm_get_node_record_by_port(ibmf_saa_handle_t, 118 ib_guid_t, sa_node_record_t **, size_t *); 119 static sa_portinfo_record_t *ibdm_get_portinfo(ibmf_saa_handle_t, size_t *, 120 ib_lid_t); 121 static ibdm_dp_gidinfo_t *ibdm_create_gid_info(ibdm_port_attr_t *, 122 ib_gid_t, ib_gid_t); 123 static ibdm_dp_gidinfo_t *ibdm_find_gid(ib_guid_t, ib_guid_t); 124 static int ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t); 125 static ibdm_ioc_info_t *ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int); 126 static void ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t, 127 ibmf_saa_event_details_t *, void *); 128 static void ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *, 129 ibdm_dp_gidinfo_t *); 130 static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *); 131 static void ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *, 132 ibdm_dp_gidinfo_t *); 133 static void ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *); 134 static void ibdm_free_gid_list(ibdm_gid_t *); 135 static void ibdm_rescan_gidlist(ib_guid_t *ioc_guid); 136 static void ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *); 137 static void ibdm_saa_event_taskq(void *); 138 static void ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *); 139 static void ibdm_get_next_port(ibdm_hca_list_t **, 140 ibdm_port_attr_t **, int); 141 static void ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *, 142 ibdm_dp_gidinfo_t *); 143 static void ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *, 144 ibdm_hca_list_t *); 145 static void ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *); 146 static void ibdm_saa_handle_new_gid(void *); 147 static void ibdm_reset_all_dgids(ibmf_saa_handle_t); 148 static void ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *); 149 static void 
ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *); 150 static void ibdm_fill_srv_attr_mod(ib_mad_hdr_t *, ibdm_timeout_cb_args_t *); 151 static void ibdm_bump_transactionID(ibdm_dp_gidinfo_t *); 152 static ibdm_ioc_info_t *ibdm_handle_prev_iou(); 153 static int ibdm_serv_cmp(ibdm_srvents_info_t *, ibdm_srvents_info_t *, 154 int); 155 static ibdm_ioc_info_t *ibdm_get_ioc_info_with_gid(ib_guid_t, 156 ibdm_dp_gidinfo_t **); 157 158 int ibdm_dft_timeout = IBDM_DFT_TIMEOUT; 159 int ibdm_dft_retry_cnt = IBDM_DFT_NRETRIES; 160 #ifdef DEBUG 161 int ibdm_ignore_saa_event = 0; 162 #endif 163 164 /* Modload support */ 165 static struct modlmisc ibdm_modlmisc = { 166 &mod_miscops, 167 "InfiniBand Device Manager" 168 }; 169 170 struct modlinkage ibdm_modlinkage = { 171 MODREV_1, 172 (void *)&ibdm_modlmisc, 173 NULL 174 }; 175 176 static ibt_clnt_modinfo_t ibdm_ibt_modinfo = { 177 IBTI_V_CURR, 178 IBT_DM, 179 ibdm_event_hdlr, 180 NULL, 181 "ibdm" 182 }; 183 184 /* Global variables */ 185 ibdm_t ibdm; 186 int ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING; 187 char *ibdm_string = "ibdm"; 188 189 _NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv", 190 ibdm.ibdm_dp_gidlist_head)) 191 192 /* 193 * _init 194 * Loadable module init, called before any other module. 195 * Initialize mutex 196 * Register with IBTF 197 */ 198 int 199 _init(void) 200 { 201 int err; 202 203 IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm); 204 205 if ((err = ibdm_init()) != IBDM_SUCCESS) { 206 IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err); 207 (void) ibdm_fini(); 208 return (DDI_FAILURE); 209 } 210 211 if ((err = mod_install(&ibdm_modlinkage)) != 0) { 212 IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err); 213 (void) ibdm_fini(); 214 } 215 return (err); 216 } 217 218 219 int 220 _fini(void) 221 { 222 int err; 223 224 if ((err = ibdm_fini()) != IBDM_SUCCESS) { 225 IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err); 226 (void) ibdm_init(); 227 return (EBUSY); 228 } 229 230 if ((err = mod_remove(&ibdm_modlinkage)) != 0) { 231 IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err); 232 (void) ibdm_init(); 233 } 234 return (err); 235 } 236 237 238 int 239 _info(struct modinfo *modinfop) 240 { 241 return (mod_info(&ibdm_modlinkage, modinfop)); 242 } 243 244 245 /* 246 * ibdm_init(): 247 * Register with IBTF 248 * Allocate memory for the HCAs 249 * Allocate minor-nodes for the HCAs 250 */ 251 static int 252 ibdm_init(void) 253 { 254 int i, hca_count; 255 ib_guid_t *hca_guids; 256 ibt_status_t status; 257 258 IBTF_DPRINTF_L4("ibdm", "\tibdm_init:"); 259 if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) { 260 mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL); 261 mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL); 262 mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL); 263 cv_init(&ibdm.ibdm_port_settle_cv, NULL, CV_DRIVER, NULL); 264 mutex_enter(&ibdm.ibdm_mutex); 265 ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED; 266 } 267 268 if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) { 269 if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL, 270 (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) { 271 IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach " 272 "failed %x", status); 273 mutex_exit(&ibdm.ibdm_mutex); 274 return (IBDM_FAILURE); 275 } 276 277 ibdm.ibdm_state |= IBDM_IBT_ATTACHED; 278 mutex_exit(&ibdm.ibdm_mutex); 279 } 280 281 282 if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) { 283 hca_count = ibt_get_hca_list(&hca_guids); 284 IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", 
hca_count); 285 for (i = 0; i < hca_count; i++) 286 (void) ibdm_handle_hca_attach(hca_guids[i]); 287 if (hca_count) 288 ibt_free_hca_list(hca_guids, hca_count); 289 290 mutex_enter(&ibdm.ibdm_mutex); 291 ibdm.ibdm_state |= IBDM_HCA_ATTACHED; 292 mutex_exit(&ibdm.ibdm_mutex); 293 } 294 295 if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) { 296 cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL); 297 cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL); 298 mutex_enter(&ibdm.ibdm_mutex); 299 ibdm.ibdm_state |= IBDM_CVS_ALLOCED; 300 mutex_exit(&ibdm.ibdm_mutex); 301 } 302 return (IBDM_SUCCESS); 303 } 304 305 306 static int 307 ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info, ibdm_iou_info_t **ioup) 308 { 309 int ii, k, niocs; 310 size_t size; 311 ibdm_gid_t *delete, *head; 312 timeout_id_t timeout_id; 313 ibdm_ioc_info_t *ioc; 314 ibdm_iou_info_t *gl_iou = *ioup; 315 316 ASSERT(mutex_owned(&gid_info->gl_mutex)); 317 if (gl_iou == NULL) { 318 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU"); 319 return (0); 320 } 321 322 niocs = gl_iou->iou_info.iou_num_ctrl_slots; 323 IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d", 324 gid_info, niocs); 325 326 for (ii = 0; ii < niocs; ii++) { 327 ioc = (ibdm_ioc_info_t *)&gl_iou->iou_ioc_info[ii]; 328 329 /* handle the case where an ioc_timeout_id is scheduled */ 330 if (ioc->ioc_timeout_id) { 331 timeout_id = ioc->ioc_timeout_id; 332 ioc->ioc_timeout_id = 0; 333 mutex_exit(&gid_info->gl_mutex); 334 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 335 "ioc_timeout_id = 0x%x", timeout_id); 336 if (untimeout(timeout_id) == -1) { 337 IBTF_DPRINTF_L2("ibdm", "free_iou_info: " 338 "untimeout ioc_timeout_id failed"); 339 mutex_enter(&gid_info->gl_mutex); 340 return (-1); 341 } 342 mutex_enter(&gid_info->gl_mutex); 343 } 344 345 /* handle the case where an ioc_dc_timeout_id is scheduled */ 346 if (ioc->ioc_dc_timeout_id) { 347 timeout_id = ioc->ioc_dc_timeout_id; 348 ioc->ioc_dc_timeout_id = 0; 349 mutex_exit(&gid_info->gl_mutex); 350 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 351 "ioc_dc_timeout_id = 0x%x", timeout_id); 352 if (untimeout(timeout_id) == -1) { 353 IBTF_DPRINTF_L2("ibdm", "free_iou_info: " 354 "untimeout ioc_dc_timeout_id failed"); 355 mutex_enter(&gid_info->gl_mutex); 356 return (-1); 357 } 358 mutex_enter(&gid_info->gl_mutex); 359 } 360 361 /* handle the case where serv[k].se_timeout_id is scheduled */ 362 for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) { 363 if (ioc->ioc_serv[k].se_timeout_id) { 364 timeout_id = ioc->ioc_serv[k].se_timeout_id; 365 ioc->ioc_serv[k].se_timeout_id = 0; 366 mutex_exit(&gid_info->gl_mutex); 367 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 368 "ioc->ioc_serv[%d].se_timeout_id = 0x%x", 369 k, timeout_id); 370 if (untimeout(timeout_id) == -1) { 371 IBTF_DPRINTF_L2("ibdm", "free_iou_info:" 372 " untimeout se_timeout_id failed"); 373 mutex_enter(&gid_info->gl_mutex); 374 return (-1); 375 } 376 mutex_enter(&gid_info->gl_mutex); 377 } 378 } 379 380 /* delete GID list in IOC */ 381 head = ioc->ioc_gid_list; 382 while (head) { 383 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: " 384 "Deleting gid_list struct %p", head); 385 delete = head; 386 head = head->gid_next; 387 kmem_free(delete, sizeof (ibdm_gid_t)); 388 } 389 ioc->ioc_gid_list = NULL; 390 391 /* delete ioc_serv */ 392 size = ioc->ioc_profile.ioc_service_entries * 393 sizeof (ibdm_srvents_info_t); 394 if (ioc->ioc_serv && size) { 395 kmem_free(ioc->ioc_serv, size); 396 ioc->ioc_serv = NULL; 397 } 398 } 399 /* 400 * Clear the IBDM_CISCO_PROBE_DONE flag 
to get the IO Unit information 401 * via the switch during the probe process. 402 */ 403 gid_info->gl_flag &= ~IBDM_CISCO_PROBE_DONE; 404 405 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC"); 406 size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t); 407 kmem_free(gl_iou, size); 408 *ioup = NULL; 409 return (0); 410 } 411 412 413 /* 414 * ibdm_fini(): 415 * Un-register with IBTF 416 * De allocate memory for the GID info 417 */ 418 static int 419 ibdm_fini() 420 { 421 int ii; 422 ibdm_hca_list_t *hca_list, *temp; 423 ibdm_dp_gidinfo_t *gid_info, *tmp; 424 ibdm_gid_t *head, *delete; 425 426 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini"); 427 428 mutex_enter(&ibdm.ibdm_hl_mutex); 429 if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) { 430 if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) { 431 IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed"); 432 mutex_exit(&ibdm.ibdm_hl_mutex); 433 return (IBDM_FAILURE); 434 } 435 ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED; 436 ibdm.ibdm_ibt_clnt_hdl = NULL; 437 } 438 439 hca_list = ibdm.ibdm_hca_list_head; 440 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count); 441 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 442 temp = hca_list; 443 hca_list = hca_list->hl_next; 444 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp); 445 if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) { 446 IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: " 447 "uninit_hca %p failed", temp); 448 mutex_exit(&ibdm.ibdm_hl_mutex); 449 return (IBDM_FAILURE); 450 } 451 } 452 mutex_exit(&ibdm.ibdm_hl_mutex); 453 454 mutex_enter(&ibdm.ibdm_mutex); 455 if (ibdm.ibdm_state & IBDM_HCA_ATTACHED) 456 ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED; 457 458 gid_info = ibdm.ibdm_dp_gidlist_head; 459 while (gid_info) { 460 mutex_enter(&gid_info->gl_mutex); 461 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 462 mutex_exit(&gid_info->gl_mutex); 463 ibdm_delete_glhca_list(gid_info); 464 465 tmp = gid_info; 466 gid_info = gid_info->gl_next; 467 mutex_destroy(&tmp->gl_mutex); 468 head = tmp->gl_gid; 469 while (head) { 470 IBTF_DPRINTF_L4("ibdm", 471 "\tibdm_fini: Deleting gid structs"); 472 delete = head; 473 head = head->gid_next; 474 kmem_free(delete, sizeof (ibdm_gid_t)); 475 } 476 kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t)); 477 } 478 mutex_exit(&ibdm.ibdm_mutex); 479 480 if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) { 481 ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED; 482 mutex_destroy(&ibdm.ibdm_mutex); 483 mutex_destroy(&ibdm.ibdm_hl_mutex); 484 mutex_destroy(&ibdm.ibdm_ibnex_mutex); 485 cv_destroy(&ibdm.ibdm_port_settle_cv); 486 } 487 if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) { 488 ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED; 489 cv_destroy(&ibdm.ibdm_probe_cv); 490 cv_destroy(&ibdm.ibdm_busy_cv); 491 } 492 return (IBDM_SUCCESS); 493 } 494 495 496 /* 497 * ibdm_event_hdlr() 498 * 499 * IBDM registers this asynchronous event handler at the time of 500 * ibt_attach. IBDM support the following async events. For other 501 * event, simply returns success. 502 * IBT_HCA_ATTACH_EVENT: 503 * Retrieves the information about all the port that are 504 * present on this HCA, allocates the port attributes 505 * structure and calls IB nexus callback routine with 506 * the port attributes structure as an input argument. 
507 * IBT_HCA_DETACH_EVENT: 508 * Retrieves the information about all the ports that are 509 * present on this HCA and calls IB nexus callback with 510 * port guid as an argument 511 * IBT_EVENT_PORT_UP: 512 * Register with IBMF and SA access 513 * Setup IBMF receive callback routine 514 * IBT_EVENT_PORT_DOWN: 515 * Un-Register with IBMF and SA access 516 * Teardown IBMF receive callback routine 517 */ 518 /*ARGSUSED*/ 519 static void 520 ibdm_event_hdlr(void *clnt_hdl, 521 ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event) 522 { 523 ibdm_hca_list_t *hca_list; 524 ibdm_port_attr_t *port; 525 ibmf_saa_handle_t port_sa_hdl; 526 527 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code); 528 529 switch (code) { 530 case IBT_HCA_ATTACH_EVENT: /* New HCA registered with IBTF */ 531 ibdm_handle_hca_attach(event->ev_hca_guid); 532 break; 533 534 case IBT_HCA_DETACH_EVENT: /* HCA unregistered with IBTF */ 535 ibdm_handle_hca_detach(event->ev_hca_guid); 536 mutex_enter(&ibdm.ibdm_ibnex_mutex); 537 if (ibdm.ibdm_ibnex_callback != NULL) { 538 (*ibdm.ibdm_ibnex_callback)((void *) 539 &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED); 540 } 541 mutex_exit(&ibdm.ibdm_ibnex_mutex); 542 break; 543 544 case IBT_EVENT_PORT_UP: 545 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP"); 546 mutex_enter(&ibdm.ibdm_hl_mutex); 547 port = ibdm_get_port_attr(event, &hca_list); 548 if (port == NULL) { 549 IBTF_DPRINTF_L2("ibdm", 550 "\tevent_hdlr: HCA not present"); 551 mutex_exit(&ibdm.ibdm_hl_mutex); 552 break; 553 } 554 ibdm_initialize_port(port); 555 hca_list->hl_nports_active++; 556 cv_broadcast(&ibdm.ibdm_port_settle_cv); 557 mutex_exit(&ibdm.ibdm_hl_mutex); 558 559 /* Inform IB nexus driver */ 560 mutex_enter(&ibdm.ibdm_ibnex_mutex); 561 if (ibdm.ibdm_ibnex_callback != NULL) { 562 (*ibdm.ibdm_ibnex_callback)((void *) 563 &event->ev_hca_guid, IBDM_EVENT_PORT_UP); 564 } 565 mutex_exit(&ibdm.ibdm_ibnex_mutex); 566 break; 567 568 case IBT_ERROR_PORT_DOWN: 569 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN"); 570 mutex_enter(&ibdm.ibdm_hl_mutex); 571 port = ibdm_get_port_attr(event, &hca_list); 572 if (port == NULL) { 573 IBTF_DPRINTF_L2("ibdm", 574 "\tevent_hdlr: HCA not present"); 575 mutex_exit(&ibdm.ibdm_hl_mutex); 576 break; 577 } 578 hca_list->hl_nports_active--; 579 port_sa_hdl = port->pa_sa_hdl; 580 (void) ibdm_fini_port(port); 581 port->pa_state = IBT_PORT_DOWN; 582 cv_broadcast(&ibdm.ibdm_port_settle_cv); 583 mutex_exit(&ibdm.ibdm_hl_mutex); 584 ibdm_reset_all_dgids(port_sa_hdl); 585 break; 586 587 case IBT_PORT_CHANGE_EVENT: 588 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_CHANGE"); 589 if (event->ev_port_flags & IBT_PORT_CHANGE_PKEY) 590 ibdm_handle_port_change_event(event); 591 break; 592 593 default: /* Ignore all other events/errors */ 594 break; 595 } 596 } 597 598 static void 599 ibdm_handle_port_change_event(ibt_async_event_t *event) 600 { 601 ibdm_port_attr_t *port; 602 ibdm_hca_list_t *hca_list; 603 604 IBTF_DPRINTF_L2("ibdm", "\tibdm_handle_port_change_event:" 605 " HCA guid %llx", event->ev_hca_guid); 606 mutex_enter(&ibdm.ibdm_hl_mutex); 607 port = ibdm_get_port_attr(event, &hca_list); 608 if (port == NULL) { 609 IBTF_DPRINTF_L2("ibdm", "\tevent_hdlr: HCA not present"); 610 mutex_exit(&ibdm.ibdm_hl_mutex); 611 return; 612 } 613 ibdm_update_port_pkeys(port); 614 cv_broadcast(&ibdm.ibdm_port_settle_cv); 615 mutex_exit(&ibdm.ibdm_hl_mutex); 616 617 /* Inform IB nexus driver */ 618 mutex_enter(&ibdm.ibdm_ibnex_mutex); 619 if (ibdm.ibdm_ibnex_callback != NULL) { 620 
(*ibdm.ibdm_ibnex_callback)((void *) 621 &event->ev_hca_guid, IBDM_EVENT_PORT_PKEY_CHANGE); 622 } 623 mutex_exit(&ibdm.ibdm_ibnex_mutex); 624 } 625 626 /* 627 * ibdm_update_port_pkeys() 628 * Update the pkey table 629 * Update the port attributes 630 */ 631 static void 632 ibdm_update_port_pkeys(ibdm_port_attr_t *port) 633 { 634 uint_t nports, size; 635 uint_t pkey_idx, opkey_idx; 636 uint16_t npkeys; 637 ibt_hca_portinfo_t *pinfop; 638 ib_pkey_t pkey; 639 ibdm_pkey_tbl_t *pkey_tbl; 640 ibdm_port_attr_t newport; 641 642 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_pkeys:"); 643 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 644 645 /* Check whether the port is active */ 646 if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL, 647 NULL) != IBT_SUCCESS) 648 return; 649 650 if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num, 651 &pinfop, &nports, &size) != IBT_SUCCESS) { 652 /* This should not occur */ 653 port->pa_npkeys = 0; 654 port->pa_pkey_tbl = NULL; 655 return; 656 } 657 658 npkeys = pinfop->p_pkey_tbl_sz; 659 pkey_tbl = kmem_zalloc(npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 660 newport.pa_pkey_tbl = pkey_tbl; 661 newport.pa_ibmf_hdl = port->pa_ibmf_hdl; 662 663 for (pkey_idx = 0; pkey_idx < npkeys; pkey_idx++) { 664 pkey = pkey_tbl[pkey_idx].pt_pkey = 665 pinfop->p_pkey_tbl[pkey_idx]; 666 /* 667 * Is this pkey present in the current table ? 668 */ 669 for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) { 670 if (pkey == port->pa_pkey_tbl[opkey_idx].pt_pkey) { 671 pkey_tbl[pkey_idx].pt_qp_hdl = 672 port->pa_pkey_tbl[opkey_idx].pt_qp_hdl; 673 port->pa_pkey_tbl[opkey_idx].pt_qp_hdl = NULL; 674 break; 675 } 676 } 677 678 if (opkey_idx == port->pa_npkeys) { 679 pkey = pkey_tbl[pkey_idx].pt_pkey; 680 if (IBDM_INVALID_PKEY(pkey)) { 681 pkey_tbl[pkey_idx].pt_qp_hdl = NULL; 682 continue; 683 } 684 ibdm_port_attr_ibmf_init(&newport, pkey, pkey_idx); 685 } 686 } 687 688 for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) { 689 if (port->pa_pkey_tbl[opkey_idx].pt_qp_hdl != NULL) { 690 if (ibdm_port_attr_ibmf_fini(port, opkey_idx) != 691 IBDM_SUCCESS) { 692 IBTF_DPRINTF_L2("ibdm", "\tupdate_port_pkeys: " 693 "ibdm_port_attr_ibmf_fini failed for " 694 "port pkey 0x%x", 695 port->pa_pkey_tbl[opkey_idx].pt_pkey); 696 } 697 } 698 } 699 700 if (port->pa_pkey_tbl != NULL) { 701 kmem_free(port->pa_pkey_tbl, 702 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 703 } 704 705 port->pa_npkeys = npkeys; 706 port->pa_pkey_tbl = pkey_tbl; 707 port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix; 708 port->pa_state = pinfop->p_linkstate; 709 ibt_free_portinfo(pinfop, size); 710 } 711 712 /* 713 * ibdm_initialize_port() 714 * Register with IBMF 715 * Register with SA access 716 * Register a receive callback routine with IBMF. IBMF invokes 717 * this routine whenever a MAD arrives at this port. 
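 *	(The receive callback is installed with ibmf_setup_async_cb() on the
 *	default QP here and again for each per-P_Key QP in
 *	ibdm_port_attr_ibmf_init(); if IBMF registration or the default-QP
 *	callback setup fails, the port is torn back down through
 *	ibdm_fini_port().)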
718 * Update the port attributes 719 */ 720 static void 721 ibdm_initialize_port(ibdm_port_attr_t *port) 722 { 723 int ii; 724 uint_t nports, size; 725 uint_t pkey_idx; 726 ib_pkey_t pkey; 727 ibt_hca_portinfo_t *pinfop; 728 ibmf_register_info_t ibmf_reg; 729 ibmf_saa_subnet_event_args_t event_args; 730 731 IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:"); 732 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 733 734 /* Check whether the port is active */ 735 if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL, 736 NULL) != IBT_SUCCESS) 737 return; 738 739 if (port->pa_sa_hdl != NULL) 740 return; 741 742 if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num, 743 &pinfop, &nports, &size) != IBT_SUCCESS) { 744 /* This should not occur */ 745 port->pa_npkeys = 0; 746 port->pa_pkey_tbl = NULL; 747 return; 748 } 749 port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix; 750 751 port->pa_state = pinfop->p_linkstate; 752 port->pa_npkeys = pinfop->p_pkey_tbl_sz; 753 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc( 754 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 755 756 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) 757 port->pa_pkey_tbl[pkey_idx].pt_pkey = 758 pinfop->p_pkey_tbl[pkey_idx]; 759 760 ibt_free_portinfo(pinfop, size); 761 762 event_args.is_event_callback = ibdm_saa_event_cb; 763 event_args.is_event_callback_arg = port; 764 if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args, 765 IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) { 766 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 767 "sa access registration failed"); 768 return; 769 } 770 ibmf_reg.ir_ci_guid = port->pa_hca_guid; 771 ibmf_reg.ir_port_num = port->pa_port_num; 772 ibmf_reg.ir_client_class = DEV_MGT_MANAGER; 773 774 if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL, 775 &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) { 776 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 777 "IBMF registration failed"); 778 (void) ibdm_fini_port(port); 779 return; 780 } 781 if (ibmf_setup_async_cb(port->pa_ibmf_hdl, IBMF_QP_HANDLE_DEFAULT, 782 ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) { 783 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 784 "IBMF setup recv cb failed"); 785 (void) ibdm_fini_port(port); 786 return; 787 } 788 789 for (ii = 0; ii < port->pa_npkeys; ii++) { 790 pkey = port->pa_pkey_tbl[ii].pt_pkey; 791 if (IBDM_INVALID_PKEY(pkey)) { 792 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 793 continue; 794 } 795 ibdm_port_attr_ibmf_init(port, pkey, ii); 796 } 797 } 798 799 800 /* 801 * ibdm_port_attr_ibmf_init: 802 * With IBMF - Alloc QP Handle and Setup Async callback 803 */ 804 static void 805 ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii) 806 { 807 int ret; 808 809 if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY, 810 IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) != 811 IBMF_SUCCESS) { 812 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: " 813 "IBMF failed to alloc qp %d", ret); 814 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 815 return; 816 } 817 818 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p", 819 port->pa_ibmf_hdl); 820 821 if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl, 822 port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) != 823 IBMF_SUCCESS) { 824 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: " 825 "IBMF setup recv cb failed %d", ret); 826 (void) ibmf_free_qp(port->pa_ibmf_hdl, 827 &port->pa_pkey_tbl[ii].pt_qp_hdl, 0); 828 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 829 } 830 } 831 832 833 /* 834 * 
ibdm_get_port_attr() 835 * Get port attributes from HCA guid and port number 836 * Return pointer to ibdm_port_attr_t on Success 837 * and NULL on failure 838 */ 839 static ibdm_port_attr_t * 840 ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval) 841 { 842 ibdm_hca_list_t *hca_list; 843 ibdm_port_attr_t *port_attr; 844 int ii; 845 846 IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port); 847 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 848 hca_list = ibdm.ibdm_hca_list_head; 849 while (hca_list) { 850 if (hca_list->hl_hca_guid == event->ev_hca_guid) { 851 for (ii = 0; ii < hca_list->hl_nports; ii++) { 852 port_attr = &hca_list->hl_port_attr[ii]; 853 if (port_attr->pa_port_num == event->ev_port) { 854 *retval = hca_list; 855 return (port_attr); 856 } 857 } 858 } 859 hca_list = hca_list->hl_next; 860 } 861 return (NULL); 862 } 863 864 865 /* 866 * ibdm_update_port_attr() 867 * Update the port attributes 868 */ 869 static void 870 ibdm_update_port_attr(ibdm_port_attr_t *port) 871 { 872 uint_t nports, size; 873 uint_t pkey_idx; 874 ibt_hca_portinfo_t *portinfop; 875 876 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin"); 877 if (ibt_query_hca_ports(port->pa_hca_hdl, 878 port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) { 879 /* This should not occur */ 880 port->pa_npkeys = 0; 881 port->pa_pkey_tbl = NULL; 882 return; 883 } 884 port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix; 885 886 port->pa_state = portinfop->p_linkstate; 887 888 /* 889 * PKey information in portinfo valid only if port is 890 * ACTIVE. Bail out if not. 891 */ 892 if (port->pa_state != IBT_PORT_ACTIVE) { 893 port->pa_npkeys = 0; 894 port->pa_pkey_tbl = NULL; 895 ibt_free_portinfo(portinfop, size); 896 return; 897 } 898 899 port->pa_npkeys = portinfop->p_pkey_tbl_sz; 900 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc( 901 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 902 903 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) { 904 port->pa_pkey_tbl[pkey_idx].pt_pkey = 905 portinfop->p_pkey_tbl[pkey_idx]; 906 } 907 ibt_free_portinfo(portinfop, size); 908 } 909 910 911 /* 912 * ibdm_handle_hca_attach() 913 */ 914 static void 915 ibdm_handle_hca_attach(ib_guid_t hca_guid) 916 { 917 uint_t size; 918 uint_t ii, nports; 919 ibt_status_t status; 920 ibt_hca_hdl_t hca_hdl; 921 ibt_hca_attr_t *hca_attr; 922 ibdm_hca_list_t *hca_list, *temp; 923 ibdm_port_attr_t *port_attr; 924 ibt_hca_portinfo_t *portinfop; 925 926 IBTF_DPRINTF_L4("ibdm", 927 "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid); 928 929 /* open the HCA first */ 930 if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid, 931 &hca_hdl)) != IBT_SUCCESS) { 932 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: " 933 "open_hca failed, status 0x%x", status); 934 return; 935 } 936 937 hca_attr = (ibt_hca_attr_t *) 938 kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP); 939 /* ibt_query_hca always returns IBT_SUCCESS */ 940 (void) ibt_query_hca(hca_hdl, hca_attr); 941 942 IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x," 943 " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id, 944 hca_attr->hca_version_id, hca_attr->hca_nports); 945 946 if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports, 947 &size)) != IBT_SUCCESS) { 948 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: " 949 "ibt_query_hca_ports failed, status 0x%x", status); 950 kmem_free(hca_attr, sizeof (ibt_hca_attr_t)); 951 (void) ibt_close_hca(hca_hdl); 952 return; 953 } 954 hca_list = (ibdm_hca_list_t *) 955 
kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP); 956 hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc( 957 (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP); 958 hca_list->hl_hca_guid = hca_attr->hca_node_guid; 959 hca_list->hl_nports = hca_attr->hca_nports; 960 hca_list->hl_attach_time = ddi_get_time(); 961 hca_list->hl_hca_hdl = hca_hdl; 962 963 /* 964 * Init a dummy port attribute for the HCA node 965 * This is for Per-HCA Node. Initialize port_attr : 966 * hca_guid & port_guid -> hca_guid 967 * npkeys, pkey_tbl is NULL 968 * port_num, sn_prefix is 0 969 * vendorid, product_id, dev_version from HCA 970 * pa_state is IBT_PORT_ACTIVE 971 */ 972 hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc( 973 sizeof (ibdm_port_attr_t), KM_SLEEP); 974 port_attr = hca_list->hl_hca_port_attr; 975 port_attr->pa_vendorid = hca_attr->hca_vendor_id; 976 port_attr->pa_productid = hca_attr->hca_device_id; 977 port_attr->pa_dev_version = hca_attr->hca_version_id; 978 port_attr->pa_hca_guid = hca_attr->hca_node_guid; 979 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl; 980 port_attr->pa_port_guid = hca_attr->hca_node_guid; 981 port_attr->pa_state = IBT_PORT_ACTIVE; 982 983 984 for (ii = 0; ii < nports; ii++) { 985 port_attr = &hca_list->hl_port_attr[ii]; 986 port_attr->pa_vendorid = hca_attr->hca_vendor_id; 987 port_attr->pa_productid = hca_attr->hca_device_id; 988 port_attr->pa_dev_version = hca_attr->hca_version_id; 989 port_attr->pa_hca_guid = hca_attr->hca_node_guid; 990 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl; 991 port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid; 992 port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix; 993 port_attr->pa_port_num = portinfop[ii].p_port_num; 994 port_attr->pa_state = portinfop[ii].p_linkstate; 995 996 /* 997 * Register with IBMF, SA access when the port is in 998 * ACTIVE state. Also register a callback routine 999 * with IBMF to receive incoming DM MAD's. 1000 * The IBDM event handler takes care of registration of 1001 * port which are not active. 
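		 * For ports that are ACTIVE at attach time, hl_nports_active
		 * is bumped and ibdm_port_settle_cv is broadcast under
		 * ibdm_hl_mutex, so threads waiting for port initialization
		 * to settle can re-evaluate their wait condition.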
1002 */ 1003 IBTF_DPRINTF_L4("ibdm", 1004 "\thandle_hca_attach: port guid %llx Port state 0x%x", 1005 port_attr->pa_port_guid, portinfop[ii].p_linkstate); 1006 1007 if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) { 1008 mutex_enter(&ibdm.ibdm_hl_mutex); 1009 hca_list->hl_nports_active++; 1010 ibdm_initialize_port(port_attr); 1011 cv_broadcast(&ibdm.ibdm_port_settle_cv); 1012 mutex_exit(&ibdm.ibdm_hl_mutex); 1013 } 1014 } 1015 mutex_enter(&ibdm.ibdm_hl_mutex); 1016 for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) { 1017 if (temp->hl_hca_guid == hca_guid) { 1018 IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX " 1019 "already seen by IBDM", hca_guid); 1020 mutex_exit(&ibdm.ibdm_hl_mutex); 1021 (void) ibdm_uninit_hca(hca_list); 1022 return; 1023 } 1024 } 1025 ibdm.ibdm_hca_count++; 1026 if (ibdm.ibdm_hca_list_head == NULL) { 1027 ibdm.ibdm_hca_list_head = hca_list; 1028 ibdm.ibdm_hca_list_tail = hca_list; 1029 } else { 1030 ibdm.ibdm_hca_list_tail->hl_next = hca_list; 1031 ibdm.ibdm_hca_list_tail = hca_list; 1032 } 1033 mutex_exit(&ibdm.ibdm_hl_mutex); 1034 mutex_enter(&ibdm.ibdm_ibnex_mutex); 1035 if (ibdm.ibdm_ibnex_callback != NULL) { 1036 (*ibdm.ibdm_ibnex_callback)((void *) 1037 &hca_guid, IBDM_EVENT_HCA_ADDED); 1038 } 1039 mutex_exit(&ibdm.ibdm_ibnex_mutex); 1040 1041 kmem_free(hca_attr, sizeof (ibt_hca_attr_t)); 1042 ibt_free_portinfo(portinfop, size); 1043 } 1044 1045 1046 /* 1047 * ibdm_handle_hca_detach() 1048 */ 1049 static void 1050 ibdm_handle_hca_detach(ib_guid_t hca_guid) 1051 { 1052 ibdm_hca_list_t *head, *prev = NULL; 1053 size_t len; 1054 ibdm_dp_gidinfo_t *gidinfo; 1055 1056 IBTF_DPRINTF_L4("ibdm", 1057 "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid); 1058 1059 /* Make sure no probes are running */ 1060 mutex_enter(&ibdm.ibdm_mutex); 1061 while (ibdm.ibdm_busy & IBDM_BUSY) 1062 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 1063 ibdm.ibdm_busy |= IBDM_BUSY; 1064 mutex_exit(&ibdm.ibdm_mutex); 1065 1066 mutex_enter(&ibdm.ibdm_hl_mutex); 1067 head = ibdm.ibdm_hca_list_head; 1068 while (head) { 1069 if (head->hl_hca_guid == hca_guid) { 1070 if (prev == NULL) 1071 ibdm.ibdm_hca_list_head = head->hl_next; 1072 else 1073 prev->hl_next = head->hl_next; 1074 if (ibdm.ibdm_hca_list_tail == head) 1075 ibdm.ibdm_hca_list_tail = prev; 1076 ibdm.ibdm_hca_count--; 1077 break; 1078 } 1079 prev = head; 1080 head = head->hl_next; 1081 } 1082 mutex_exit(&ibdm.ibdm_hl_mutex); 1083 if (ibdm_uninit_hca(head) != IBDM_SUCCESS) 1084 (void) ibdm_handle_hca_attach(hca_guid); 1085 1086 /* 1087 * Now clean up the HCA lists in the gidlist. 
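	 * Each gid_info carries its own list of local HCAs (gl_hca_list)
	 * through which that GID is reachable; the loop below unlinks the
	 * entry matching the departing hl_hca_guid and frees it together
	 * with its per-port attribute array (hl_nports entries).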
1088 */ 1089 for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo = 1090 gidinfo->gl_next) { 1091 prev = NULL; 1092 head = gidinfo->gl_hca_list; 1093 while (head) { 1094 if (head->hl_hca_guid == hca_guid) { 1095 if (prev == NULL) 1096 gidinfo->gl_hca_list = 1097 head->hl_next; 1098 else 1099 prev->hl_next = head->hl_next; 1100 1101 len = sizeof (ibdm_hca_list_t) + 1102 (head->hl_nports * 1103 sizeof (ibdm_port_attr_t)); 1104 kmem_free(head, len); 1105 1106 break; 1107 } 1108 prev = head; 1109 head = head->hl_next; 1110 } 1111 } 1112 1113 mutex_enter(&ibdm.ibdm_mutex); 1114 ibdm.ibdm_busy &= ~IBDM_BUSY; 1115 cv_broadcast(&ibdm.ibdm_busy_cv); 1116 mutex_exit(&ibdm.ibdm_mutex); 1117 } 1118 1119 1120 static int 1121 ibdm_uninit_hca(ibdm_hca_list_t *head) 1122 { 1123 int ii; 1124 ibdm_port_attr_t *port_attr; 1125 1126 for (ii = 0; ii < head->hl_nports; ii++) { 1127 port_attr = &head->hl_port_attr[ii]; 1128 if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) { 1129 IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x " 1130 "ibdm_fini_port() failed", head, ii); 1131 return (IBDM_FAILURE); 1132 } 1133 } 1134 if (head->hl_hca_hdl) 1135 if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS) { 1136 IBTF_DPRINTF_L2("ibdm", "uninit_hca: " 1137 "ibt_close_hca() failed"); 1138 return (IBDM_FAILURE); 1139 } 1140 kmem_free(head->hl_port_attr, 1141 head->hl_nports * sizeof (ibdm_port_attr_t)); 1142 kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t)); 1143 kmem_free(head, sizeof (ibdm_hca_list_t)); 1144 return (IBDM_SUCCESS); 1145 } 1146 1147 1148 /* 1149 * For each port on the HCA, 1150 * 1) Teardown IBMF receive callback function 1151 * 2) Unregister with IBMF 1152 * 3) Unregister with SA access 1153 */ 1154 static int 1155 ibdm_fini_port(ibdm_port_attr_t *port_attr) 1156 { 1157 int ii, ibmf_status; 1158 1159 for (ii = 0; ii < port_attr->pa_npkeys; ii++) { 1160 if (port_attr->pa_pkey_tbl == NULL) 1161 break; 1162 if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl) 1163 continue; 1164 if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) { 1165 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1166 "ibdm_port_attr_ibmf_fini failed for " 1167 "port pkey 0x%x", ii); 1168 return (IBDM_FAILURE); 1169 } 1170 } 1171 1172 if (port_attr->pa_ibmf_hdl) { 1173 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1174 IBMF_QP_HANDLE_DEFAULT, 0); 1175 if (ibmf_status != IBMF_SUCCESS) { 1176 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1177 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1178 return (IBDM_FAILURE); 1179 } 1180 1181 ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0); 1182 if (ibmf_status != IBMF_SUCCESS) { 1183 IBTF_DPRINTF_L2("ibdm", "\tfini_port: " 1184 "ibmf_unregister failed %d", ibmf_status); 1185 return (IBDM_FAILURE); 1186 } 1187 1188 port_attr->pa_ibmf_hdl = NULL; 1189 } 1190 1191 if (port_attr->pa_sa_hdl) { 1192 ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0); 1193 if (ibmf_status != IBMF_SUCCESS) { 1194 IBTF_DPRINTF_L2("ibdm", "\tfini_port: " 1195 "ibmf_sa_session_close failed %d", ibmf_status); 1196 return (IBDM_FAILURE); 1197 } 1198 port_attr->pa_sa_hdl = NULL; 1199 } 1200 1201 if (port_attr->pa_pkey_tbl != NULL) { 1202 kmem_free(port_attr->pa_pkey_tbl, 1203 port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 1204 port_attr->pa_pkey_tbl = NULL; 1205 port_attr->pa_npkeys = 0; 1206 } 1207 1208 return (IBDM_SUCCESS); 1209 } 1210 1211 1212 /* 1213 * ibdm_port_attr_ibmf_fini: 1214 * With IBMF - Tear down Async callback and free QP Handle 1215 */ 1216 static int 1217 
ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii) 1218 { 1219 int ibmf_status; 1220 1221 IBTF_DPRINTF_L5("ibdm", "\tport_attr_ibmf_fini:"); 1222 1223 if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) { 1224 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1225 port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1226 if (ibmf_status != IBMF_SUCCESS) { 1227 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1228 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1229 return (IBDM_FAILURE); 1230 } 1231 ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl, 1232 &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1233 if (ibmf_status != IBMF_SUCCESS) { 1234 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1235 "ibmf_free_qp failed %d", ibmf_status); 1236 return (IBDM_FAILURE); 1237 } 1238 port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 1239 } 1240 return (IBDM_SUCCESS); 1241 } 1242 1243 1244 /* 1245 * ibdm_gid_decr_pending: 1246 * decrement gl_pending_cmds. If zero wakeup sleeping threads 1247 */ 1248 static void 1249 ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo) 1250 { 1251 mutex_enter(&ibdm.ibdm_mutex); 1252 mutex_enter(&gidinfo->gl_mutex); 1253 if (--gidinfo->gl_pending_cmds == 0) { 1254 /* 1255 * Handle DGID getting removed. 1256 */ 1257 if (gidinfo->gl_disconnected) { 1258 mutex_exit(&gidinfo->gl_mutex); 1259 mutex_exit(&ibdm.ibdm_mutex); 1260 1261 IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: " 1262 "gidinfo %p hot removal", gidinfo); 1263 ibdm_delete_gidinfo(gidinfo); 1264 1265 mutex_enter(&ibdm.ibdm_mutex); 1266 ibdm.ibdm_ngid_probes_in_progress--; 1267 ibdm_wait_probe_completion(); 1268 mutex_exit(&ibdm.ibdm_mutex); 1269 return; 1270 } 1271 mutex_exit(&gidinfo->gl_mutex); 1272 mutex_exit(&ibdm.ibdm_mutex); 1273 ibdm_notify_newgid_iocs(gidinfo); 1274 mutex_enter(&ibdm.ibdm_mutex); 1275 mutex_enter(&gidinfo->gl_mutex); 1276 1277 ibdm.ibdm_ngid_probes_in_progress--; 1278 ibdm_wait_probe_completion(); 1279 } 1280 mutex_exit(&gidinfo->gl_mutex); 1281 mutex_exit(&ibdm.ibdm_mutex); 1282 } 1283 1284 1285 /* 1286 * ibdm_wait_probe_completion: 1287 * wait for probing to complete 1288 */ 1289 static void 1290 ibdm_wait_probe_completion(void) 1291 { 1292 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1293 if (ibdm.ibdm_ngid_probes_in_progress) { 1294 IBTF_DPRINTF_L4("ibdm", "\twait for probe complete"); 1295 ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS; 1296 while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS) 1297 cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex); 1298 } 1299 } 1300 1301 1302 /* 1303 * ibdm_wait_cisco_probe_completion: 1304 * wait for the reply from the Cisco FC GW switch after a setclassportinfo 1305 * request is sent. This wait can be achieved on each gid. 
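 *	A sketch of the handshake: the caller holds gl_mutex, sets
 *	IBDM_CISCO_PROBE in gl_flag and cv_wait()s on gl_probe_cv; the flag
 *	is expected to be cleared (and the cv signalled) by the response
 *	handling path once the switch answers the SetClassPortInfo request.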
 */
static void
ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *gidinfo)
{
	ASSERT(MUTEX_HELD(&gidinfo->gl_mutex));
	IBTF_DPRINTF_L4("ibdm", "\twait for cisco probe complete");
	gidinfo->gl_flag |= IBDM_CISCO_PROBE;
	while (gidinfo->gl_flag & IBDM_CISCO_PROBE)
		cv_wait(&gidinfo->gl_probe_cv, &gidinfo->gl_mutex);
}


/*
 * ibdm_wakeup_probe_gid_cv:
 *	wakeup waiting threads (based on ibdm_ngid_probes_in_progress)
 */
static void
ibdm_wakeup_probe_gid_cv(void)
{
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
	if (!ibdm.ibdm_ngid_probes_in_progress) {
		IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup");
		ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
		cv_broadcast(&ibdm.ibdm_probe_cv);
	}
}


/*
 * ibdm_sweep_fabric(reprobe_flag)
 *	Find all possible Managed IOUs and their IOCs that are visible
 *	to the host. The algorithm used is as follows:
 *
 *	Send a "bus walk" request for each port on the host HCA to SA access.
 *	SA returns the complete set of GIDs that are reachable from the
 *	source port. This is done in parallel.
 *
 *	Initialize the GID state to IBDM_GID_PROBE_NOT_DONE.
 *
 *	Sort the GID list and eliminate duplicate GIDs
 *	1) Use DGID for sorting
 *	2) Use PortGuid for sorting
 *		Send an SA query to retrieve the NodeRecord and
 *		extract the PortGuid from that.
 *
 *	Set the GID state to IBDM_GID_PROBE_FAILED for all the ports that
 *	do not support DM MADs
 *		Send a "Portinfo" query to get the port capabilities and
 *		then check for DM MAD support.
 *
 *	Send a "ClassPortInfo" request to all the GIDs in parallel,
 *	set the GID state to IBDM_GET_CLASSPORTINFO and wait on the
 *	cv_signal to complete.
 *
 *	When the DM agent on the remote GID sends back the response, IBMF
 *	invokes the DM callback routine.
 *
 *	If the response is proper, send an "IOUnitInfo" request and set the
 *	GID state to IBDM_GET_IOUNITINFO.
 *
 *	If the response is proper, send an "IocProfileInfo" request to
 *	all the IOCs simultaneously and set the GID state to
 *	IBDM_GET_IOC_DETAILS.
 *
 *	Send requests to get the Service entries simultaneously.
 *
 *	Signal the waiting thread once responses for all the commands
 *	have been received.
 *
 *	Set the GID state to IBDM_GID_PROBE_FAILED when an error response
 *	is received during the probing period.
 *
 * Note:
 *	ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds
 *	keep track of the number of commands in progress at any point of time.
 *	The MAD transaction ID is used to identify a particular GID.
 *	TBD: Consider registering the IBMF receive callback on demand
 *
 * Note: This routine must be called with ibdm.ibdm_mutex held
 *	TBD: Re-probe the failed GIDs (for certain failures) when the next
 *	fabric sweep is requested
 *
 * Parameters: If reprobe_flag is set, all IOCs will be reprobed.
 */
static void
ibdm_sweep_fabric(int reprobe_flag)
{
	int			ii;
	int			new_paths = 0;
	uint8_t			niocs;
	taskqid_t		tid;
	ibdm_ioc_info_t		*ioc;
	ibdm_hca_list_t		*hca_list = NULL;
	ibdm_port_attr_t	*port = NULL;
	ibdm_dp_gidinfo_t	*gid_info;

	IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter");
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));

	/*
	 * Check whether a sweep is already in progress.
If so, just 1406 * wait for the fabric sweep to complete 1407 */ 1408 while (ibdm.ibdm_busy & IBDM_BUSY) 1409 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 1410 ibdm.ibdm_busy |= IBDM_BUSY; 1411 mutex_exit(&ibdm.ibdm_mutex); 1412 1413 ibdm_dump_sweep_fabric_timestamp(0); 1414 1415 /* Rescan the GID list for any removed GIDs for reprobe */ 1416 if (reprobe_flag) 1417 ibdm_rescan_gidlist(NULL); 1418 1419 /* 1420 * Get list of all the ports reachable from the local known HCA 1421 * ports which are active 1422 */ 1423 mutex_enter(&ibdm.ibdm_hl_mutex); 1424 for (ibdm_get_next_port(&hca_list, &port, 1); port; 1425 ibdm_get_next_port(&hca_list, &port, 1)) { 1426 /* 1427 * Get PATHS to all the reachable ports from 1428 * SGID and update the global ibdm structure. 1429 */ 1430 new_paths = ibdm_get_reachable_ports(port, hca_list); 1431 ibdm.ibdm_ngids += new_paths; 1432 } 1433 mutex_exit(&ibdm.ibdm_hl_mutex); 1434 1435 mutex_enter(&ibdm.ibdm_mutex); 1436 ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids; 1437 mutex_exit(&ibdm.ibdm_mutex); 1438 1439 /* Send a request to probe GIDs asynchronously. */ 1440 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1441 gid_info = gid_info->gl_next) { 1442 mutex_enter(&gid_info->gl_mutex); 1443 gid_info->gl_reprobe_flag = reprobe_flag; 1444 mutex_exit(&gid_info->gl_mutex); 1445 1446 /* process newly encountered GIDs */ 1447 tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread, 1448 (void *)gid_info, TQ_NOSLEEP); 1449 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p" 1450 " taskq_id = %x", gid_info, tid); 1451 /* taskq failed to dispatch call it directly */ 1452 if (tid == NULL) 1453 ibdm_probe_gid_thread((void *)gid_info); 1454 } 1455 1456 mutex_enter(&ibdm.ibdm_mutex); 1457 ibdm_wait_probe_completion(); 1458 1459 /* 1460 * Update the properties, if reprobe_flag is set 1461 * Skip if gl_reprobe_flag is set, this will be 1462 * a re-inserted / new GID, for which notifications 1463 * have already been send. 1464 */ 1465 if (reprobe_flag) { 1466 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1467 gid_info = gid_info->gl_next) { 1468 if (gid_info->gl_iou == NULL) 1469 continue; 1470 if (gid_info->gl_reprobe_flag) { 1471 gid_info->gl_reprobe_flag = 0; 1472 continue; 1473 } 1474 1475 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1476 for (ii = 0; ii < niocs; ii++) { 1477 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1478 if (ioc) 1479 ibdm_reprobe_update_port_srv(ioc, 1480 gid_info); 1481 } 1482 } 1483 } else if (ibdm.ibdm_prev_iou) { 1484 ibdm_ioc_info_t *ioc_list; 1485 1486 /* 1487 * Get the list of IOCs which have changed. 1488 * If any IOCs have changed, Notify IBNexus 1489 */ 1490 ibdm.ibdm_prev_iou = 0; 1491 ioc_list = ibdm_handle_prev_iou(); 1492 if (ioc_list) { 1493 if (ibdm.ibdm_ibnex_callback != NULL) { 1494 (*ibdm.ibdm_ibnex_callback)( 1495 (void *)ioc_list, 1496 IBDM_EVENT_IOC_PROP_UPDATE); 1497 } 1498 } 1499 } 1500 1501 ibdm_dump_sweep_fabric_timestamp(1); 1502 1503 ibdm.ibdm_busy &= ~IBDM_BUSY; 1504 cv_broadcast(&ibdm.ibdm_busy_cv); 1505 IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT"); 1506 } 1507 1508 1509 /* 1510 * ibdm_is_cisco: 1511 * Check if this is a Cisco device or not. 1512 */ 1513 static boolean_t 1514 ibdm_is_cisco(ib_guid_t guid) 1515 { 1516 if ((guid >> IBDM_OUI_GUID_SHIFT) == IBDM_CISCO_COMPANY_ID) 1517 return (B_TRUE); 1518 return (B_FALSE); 1519 } 1520 1521 1522 /* 1523 * ibdm_is_cisco_switch: 1524 * Check if this switch is a CISCO switch or not. 
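 *	(The decision is based on the IEEE OUI carried in the upper bits of
 *	the node GUID, compared against IBDM_CISCO_COMPANY_ID, together with
 *	the device id; see the company_id/device_id check in the body below.)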
1525 * Note that if this switch is already activated, ibdm_is_cisco_switch() 1526 * returns B_FALSE not to re-activate it again. 1527 */ 1528 static boolean_t 1529 ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *gid_info) 1530 { 1531 int company_id, device_id; 1532 ASSERT(gid_info != 0); 1533 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 1534 1535 /* 1536 * If this switch is already activated, don't re-activate it. 1537 */ 1538 if (gid_info->gl_flag & IBDM_CISCO_PROBE_DONE) 1539 return (B_FALSE); 1540 1541 /* 1542 * Check if this switch is a Cisco FC GW or not. 1543 * Use the node guid (the OUI part) instead of the vendor id 1544 * since the vendor id is zero in practice. 1545 */ 1546 company_id = gid_info->gl_nodeguid >> IBDM_OUI_GUID_SHIFT; 1547 device_id = gid_info->gl_devid; 1548 1549 if (company_id == IBDM_CISCO_COMPANY_ID && 1550 device_id == IBDM_CISCO_DEVICE_ID) 1551 return (B_TRUE); 1552 return (B_FALSE); 1553 } 1554 1555 1556 /* 1557 * ibdm_probe_gid_thread: 1558 * thread that does the actual work for sweeping the fabric 1559 * for a given GID 1560 */ 1561 static void 1562 ibdm_probe_gid_thread(void *args) 1563 { 1564 int reprobe_flag; 1565 ib_guid_t node_guid; 1566 ib_guid_t port_guid; 1567 ibdm_dp_gidinfo_t *gid_info; 1568 1569 gid_info = (ibdm_dp_gidinfo_t *)args; 1570 reprobe_flag = gid_info->gl_reprobe_flag; 1571 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d", 1572 gid_info, reprobe_flag); 1573 ASSERT(gid_info != NULL); 1574 ASSERT(gid_info->gl_pending_cmds == 0); 1575 1576 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE && 1577 reprobe_flag == 0) { 1578 /* 1579 * This GID may have been already probed. Send 1580 * in a CLP to check if IOUnitInfo changed? 1581 * Explicitly set gl_reprobe_flag to 0 so that 1582 * IBnex is not notified on completion 1583 */ 1584 if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) { 1585 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: " 1586 "get new IOCs information"); 1587 mutex_enter(&gid_info->gl_mutex); 1588 gid_info->gl_pending_cmds++; 1589 gid_info->gl_state = IBDM_GET_IOUNITINFO; 1590 gid_info->gl_reprobe_flag = 0; 1591 mutex_exit(&gid_info->gl_mutex); 1592 if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) { 1593 mutex_enter(&gid_info->gl_mutex); 1594 --gid_info->gl_pending_cmds; 1595 mutex_exit(&gid_info->gl_mutex); 1596 mutex_enter(&ibdm.ibdm_mutex); 1597 --ibdm.ibdm_ngid_probes_in_progress; 1598 ibdm_wakeup_probe_gid_cv(); 1599 mutex_exit(&ibdm.ibdm_mutex); 1600 } 1601 } else { 1602 mutex_enter(&ibdm.ibdm_mutex); 1603 --ibdm.ibdm_ngid_probes_in_progress; 1604 ibdm_wakeup_probe_gid_cv(); 1605 mutex_exit(&ibdm.ibdm_mutex); 1606 } 1607 return; 1608 } else if (reprobe_flag && gid_info->gl_state == 1609 IBDM_GID_PROBING_COMPLETE) { 1610 /* 1611 * Reprobe all IOCs for the GID which has completed 1612 * probe. Skip other port GIDs to same IOU. 
1613 * Explicitly set gl_reprobe_flag to 0 so that 1614 * IBnex is not notified on completion 1615 */ 1616 ibdm_ioc_info_t *ioc_info; 1617 uint8_t niocs, ii; 1618 1619 ASSERT(gid_info->gl_iou); 1620 mutex_enter(&gid_info->gl_mutex); 1621 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1622 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 1623 gid_info->gl_pending_cmds += niocs; 1624 gid_info->gl_reprobe_flag = 0; 1625 mutex_exit(&gid_info->gl_mutex); 1626 for (ii = 0; ii < niocs; ii++) { 1627 uchar_t slot_info; 1628 ib_dm_io_unitinfo_t *giou_info; 1629 1630 /* 1631 * Check whether IOC is present in the slot 1632 * Series of nibbles (in the field 1633 * iou_ctrl_list) represents a slot in the 1634 * IOU. 1635 * Byte format: 76543210 1636 * Bits 0-3 of first byte represent Slot 2 1637 * bits 4-7 of first byte represent slot 1, 1638 * bits 0-3 of second byte represent slot 4 1639 * and so on 1640 * Each 4-bit nibble has the following meaning 1641 * 0x0 : IOC not installed 1642 * 0x1 : IOC is present 1643 * 0xf : Slot does not exist 1644 * and all other values are reserved. 1645 */ 1646 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1647 giou_info = &gid_info->gl_iou->iou_info; 1648 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 1649 if ((ii % 2) == 0) 1650 slot_info = (slot_info >> 4); 1651 1652 if ((slot_info & 0xf) != 1) { 1653 ioc_info->ioc_state = 1654 IBDM_IOC_STATE_PROBE_FAILED; 1655 ibdm_gid_decr_pending(gid_info); 1656 continue; 1657 } 1658 1659 if (ibdm_send_ioc_profile(gid_info, ii) != 1660 IBDM_SUCCESS) { 1661 ibdm_gid_decr_pending(gid_info); 1662 } 1663 } 1664 1665 return; 1666 } else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 1667 mutex_enter(&ibdm.ibdm_mutex); 1668 --ibdm.ibdm_ngid_probes_in_progress; 1669 ibdm_wakeup_probe_gid_cv(); 1670 mutex_exit(&ibdm.ibdm_mutex); 1671 return; 1672 } 1673 1674 /* 1675 * Check whether the destination GID supports DM agents. If 1676 * not, stop probing the GID and continue with the next GID 1677 * in the list. 1678 */ 1679 if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) { 1680 mutex_enter(&gid_info->gl_mutex); 1681 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1682 mutex_exit(&gid_info->gl_mutex); 1683 ibdm_delete_glhca_list(gid_info); 1684 mutex_enter(&ibdm.ibdm_mutex); 1685 --ibdm.ibdm_ngid_probes_in_progress; 1686 ibdm_wakeup_probe_gid_cv(); 1687 mutex_exit(&ibdm.ibdm_mutex); 1688 return; 1689 } 1690 1691 /* Get the nodeguid and portguid of the port */ 1692 if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid, 1693 &node_guid, &port_guid) != IBDM_SUCCESS) { 1694 mutex_enter(&gid_info->gl_mutex); 1695 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1696 mutex_exit(&gid_info->gl_mutex); 1697 ibdm_delete_glhca_list(gid_info); 1698 mutex_enter(&ibdm.ibdm_mutex); 1699 --ibdm.ibdm_ngid_probes_in_progress; 1700 ibdm_wakeup_probe_gid_cv(); 1701 mutex_exit(&ibdm.ibdm_mutex); 1702 return; 1703 } 1704 1705 /* 1706 * Check whether we already knew about this NodeGuid 1707 * If so, do not probe the GID and continue with the 1708 * next GID in the gid list. Set the GID state to 1709 * probing done. 
1710 */ 1711 mutex_enter(&ibdm.ibdm_mutex); 1712 gid_info->gl_nodeguid = node_guid; 1713 gid_info->gl_portguid = port_guid; 1714 if (ibdm_check_dest_nodeguid(gid_info) != NULL) { 1715 mutex_exit(&ibdm.ibdm_mutex); 1716 mutex_enter(&gid_info->gl_mutex); 1717 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 1718 mutex_exit(&gid_info->gl_mutex); 1719 ibdm_delete_glhca_list(gid_info); 1720 mutex_enter(&ibdm.ibdm_mutex); 1721 --ibdm.ibdm_ngid_probes_in_progress; 1722 ibdm_wakeup_probe_gid_cv(); 1723 mutex_exit(&ibdm.ibdm_mutex); 1724 return; 1725 } 1726 ibdm_add_to_gl_gid(gid_info, gid_info); 1727 mutex_exit(&ibdm.ibdm_mutex); 1728 1729 /* 1730 * New or reinserted GID : Enable notification to IBnex 1731 */ 1732 mutex_enter(&gid_info->gl_mutex); 1733 gid_info->gl_reprobe_flag = 1; 1734 1735 /* 1736 * A Cisco FC GW needs the special handling to get IOUnitInfo. 1737 */ 1738 if (ibdm_is_cisco_switch(gid_info)) { 1739 gid_info->gl_pending_cmds++; 1740 gid_info->gl_state = IBDM_SET_CLASSPORTINFO; 1741 mutex_exit(&gid_info->gl_mutex); 1742 1743 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) { 1744 mutex_enter(&gid_info->gl_mutex); 1745 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1746 --gid_info->gl_pending_cmds; 1747 mutex_exit(&gid_info->gl_mutex); 1748 1749 /* free the hca_list on this gid_info */ 1750 ibdm_delete_glhca_list(gid_info); 1751 1752 mutex_enter(&ibdm.ibdm_mutex); 1753 --ibdm.ibdm_ngid_probes_in_progress; 1754 ibdm_wakeup_probe_gid_cv(); 1755 mutex_exit(&ibdm.ibdm_mutex); 1756 1757 return; 1758 } 1759 1760 mutex_enter(&gid_info->gl_mutex); 1761 ibdm_wait_cisco_probe_completion(gid_info); 1762 1763 IBTF_DPRINTF_L4("ibdm", "\tibdm_probe_gid_thread: " 1764 "CISCO Wakeup signal received"); 1765 } 1766 1767 /* move on to the 'GET_CLASSPORTINFO' stage */ 1768 gid_info->gl_pending_cmds++; 1769 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 1770 mutex_exit(&gid_info->gl_mutex); 1771 1772 IBTF_DPRINTF_L3(ibdm_string, "\tibdm_probe_gid_thread: " 1773 "%d: gid_info %p gl_state %d pending_cmds %d", 1774 __LINE__, gid_info, gid_info->gl_state, 1775 gid_info->gl_pending_cmds); 1776 1777 /* 1778 * Send ClassPortInfo request to the GID asynchronously. 1779 */ 1780 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 1781 1782 mutex_enter(&gid_info->gl_mutex); 1783 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1784 --gid_info->gl_pending_cmds; 1785 mutex_exit(&gid_info->gl_mutex); 1786 1787 /* free the hca_list on this gid_info */ 1788 ibdm_delete_glhca_list(gid_info); 1789 1790 mutex_enter(&ibdm.ibdm_mutex); 1791 --ibdm.ibdm_ngid_probes_in_progress; 1792 ibdm_wakeup_probe_gid_cv(); 1793 mutex_exit(&ibdm.ibdm_mutex); 1794 1795 return; 1796 } 1797 } 1798 1799 1800 /* 1801 * ibdm_check_dest_nodeguid 1802 * Searches for the NodeGuid in the GID list 1803 * Returns matching gid_info if found and otherwise NULL 1804 * 1805 * This function is called to handle new GIDs discovered 1806 * during device sweep / probe or for GID_AVAILABLE event. 
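 *	When a match is found, the new DGID is simply appended to the
 *	existing entry's gl_gid list (and gl_ngids is incremented), so only
 *	one gid_info is probed per IOU node even when that node is reachable
 *	through multiple ports.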
1807 * 1808 * Parameter : 1809 * gid_info GID to check 1810 */ 1811 static ibdm_dp_gidinfo_t * 1812 ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info) 1813 { 1814 ibdm_dp_gidinfo_t *gid_list; 1815 ibdm_gid_t *tmp; 1816 1817 IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid"); 1818 1819 gid_list = ibdm.ibdm_dp_gidlist_head; 1820 while (gid_list) { 1821 if ((gid_list != gid_info) && 1822 (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) { 1823 IBTF_DPRINTF_L4("ibdm", 1824 "\tcheck_dest_nodeguid: NodeGuid is present"); 1825 1826 /* Add to gid_list */ 1827 tmp = kmem_zalloc(sizeof (ibdm_gid_t), 1828 KM_SLEEP); 1829 tmp->gid_dgid_hi = gid_info->gl_dgid_hi; 1830 tmp->gid_dgid_lo = gid_info->gl_dgid_lo; 1831 tmp->gid_next = gid_list->gl_gid; 1832 gid_list->gl_gid = tmp; 1833 gid_list->gl_ngids++; 1834 return (gid_list); 1835 } 1836 1837 gid_list = gid_list->gl_next; 1838 } 1839 1840 return (NULL); 1841 } 1842 1843 1844 /* 1845 * ibdm_is_dev_mgt_supported 1846 * Get the PortInfo attribute (SA Query) 1847 * Check "CompatabilityMask" field in the Portinfo. 1848 * Return IBDM_SUCCESS if DM MAD's supported (if bit 19 set) 1849 * by the port, otherwise IBDM_FAILURE 1850 */ 1851 static int 1852 ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info) 1853 { 1854 int ret; 1855 size_t length = 0; 1856 sa_portinfo_record_t req, *resp = NULL; 1857 ibmf_saa_access_args_t qargs; 1858 1859 bzero(&req, sizeof (sa_portinfo_record_t)); 1860 req.EndportLID = gid_info->gl_dlid; 1861 1862 qargs.sq_attr_id = SA_PORTINFORECORD_ATTRID; 1863 qargs.sq_access_type = IBMF_SAA_RETRIEVE; 1864 qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 1865 qargs.sq_template = &req; 1866 qargs.sq_callback = NULL; 1867 qargs.sq_callback_arg = NULL; 1868 1869 ret = ibmf_sa_access(gid_info->gl_sa_hdl, 1870 &qargs, 0, &length, (void **)&resp); 1871 1872 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) { 1873 IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:" 1874 "failed to get PORTINFO attribute %d", ret); 1875 return (IBDM_FAILURE); 1876 } 1877 1878 if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) { 1879 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!"); 1880 ret = IBDM_SUCCESS; 1881 } else { 1882 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: " 1883 "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask); 1884 ret = IBDM_FAILURE; 1885 } 1886 kmem_free(resp, length); 1887 return (ret); 1888 } 1889 1890 1891 /* 1892 * ibdm_get_node_port_guids() 1893 * Get the NodeInfoRecord of the port 1894 * Save NodeGuid and PortGUID values in the GID list structure. 
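 *
 * A typical call from the probe path looks roughly like this (sketch
 * only, mirroring the caller in ibdm_probe_gid_thread(), which also
 * marks the GID as IBDM_GID_PROBING_FAILED before bailing out):
 *
 *	ib_guid_t node_guid, port_guid;
 *
 *	if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl,
 *	    gid_info->gl_dlid, &node_guid, &port_guid) != IBDM_SUCCESS)
 *		return;
 *	gid_info->gl_nodeguid = node_guid;
 *	gid_info->gl_portguid = port_guid;
 *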
1895 * Return IBDM_SUCCESS/IBDM_FAILURE 1896 */ 1897 static int 1898 ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid, 1899 ib_guid_t *node_guid, ib_guid_t *port_guid) 1900 { 1901 int ret; 1902 size_t length = 0; 1903 sa_node_record_t req, *resp = NULL; 1904 ibmf_saa_access_args_t qargs; 1905 1906 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids"); 1907 1908 bzero(&req, sizeof (sa_node_record_t)); 1909 req.LID = dlid; 1910 1911 qargs.sq_attr_id = SA_NODERECORD_ATTRID; 1912 qargs.sq_access_type = IBMF_SAA_RETRIEVE; 1913 qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID; 1914 qargs.sq_template = &req; 1915 qargs.sq_callback = NULL; 1916 qargs.sq_callback_arg = NULL; 1917 1918 ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp); 1919 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) { 1920 IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:" 1921 " SA Retrieve Failed: %d", ret); 1922 return (IBDM_FAILURE); 1923 } 1924 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port" 1925 "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.NodeGUID); 1926 1927 *node_guid = resp->NodeInfo.NodeGUID; 1928 *port_guid = resp->NodeInfo.PortGUID; 1929 kmem_free(resp, length); 1930 return (IBDM_SUCCESS); 1931 } 1932 1933 1934 /* 1935 * ibdm_get_reachable_ports() 1936 * Get list of the destination GID (and its path records) by 1937 * querying the SA access. 1938 * 1939 * Returns Number paths 1940 */ 1941 static int 1942 ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca) 1943 { 1944 uint_t ii, jj, nrecs; 1945 uint_t npaths = 0; 1946 size_t length; 1947 ib_gid_t sgid; 1948 ibdm_pkey_tbl_t *pkey_tbl; 1949 sa_path_record_t *result; 1950 sa_path_record_t *precp; 1951 ibdm_dp_gidinfo_t *gid_info; 1952 1953 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 1954 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo); 1955 1956 sgid.gid_prefix = portinfo->pa_sn_prefix; 1957 sgid.gid_guid = portinfo->pa_port_guid; 1958 1959 /* get reversible paths */ 1960 if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl, 1961 sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result) 1962 != IBMF_SUCCESS) { 1963 IBTF_DPRINTF_L2("ibdm", 1964 "\tget_reachable_ports: Getting path records failed"); 1965 return (0); 1966 } 1967 1968 for (ii = 0; ii < nrecs; ii++) { 1969 sa_node_record_t *nrec; 1970 size_t length; 1971 1972 precp = &result[ii]; 1973 if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid, 1974 precp->DGID.gid_prefix)) != NULL) { 1975 IBTF_DPRINTF_L5("ibdm", "\tget_reachable_ports: " 1976 "Already exists nrecs %d, ii %d", nrecs, ii); 1977 ibdm_addto_glhcalist(gid_info, hca); 1978 continue; 1979 } 1980 /* 1981 * This is a new GID. 
Allocate a GID structure and 1982 * initialize the structure 1983 * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0) 1984 * by kmem_zalloc call 1985 */ 1986 gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 1987 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 1988 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL); 1989 gid_info->gl_dgid_hi = precp->DGID.gid_prefix; 1990 gid_info->gl_dgid_lo = precp->DGID.gid_guid; 1991 gid_info->gl_sgid_hi = precp->SGID.gid_prefix; 1992 gid_info->gl_sgid_lo = precp->SGID.gid_guid; 1993 gid_info->gl_p_key = precp->P_Key; 1994 gid_info->gl_sa_hdl = portinfo->pa_sa_hdl; 1995 gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl; 1996 gid_info->gl_slid = precp->SLID; 1997 gid_info->gl_dlid = precp->DLID; 1998 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 1999 << IBDM_GID_TRANSACTIONID_SHIFT; 2000 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 2001 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 2002 << IBDM_GID_TRANSACTIONID_SHIFT; 2003 gid_info->gl_SL = precp->SL; 2004 2005 /* 2006 * get the node record with this guid if the destination 2007 * device is a Cisco one. 2008 */ 2009 if (ibdm_is_cisco(precp->DGID.gid_guid) && 2010 (gid_info->gl_nodeguid == 0 || gid_info->gl_devid == 0) && 2011 ibdm_get_node_record_by_port(portinfo->pa_sa_hdl, 2012 precp->DGID.gid_guid, &nrec, &length) == IBDM_SUCCESS) { 2013 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 2014 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 2015 kmem_free(nrec, length); 2016 } 2017 2018 ibdm_addto_glhcalist(gid_info, hca); 2019 2020 ibdm_dump_path_info(precp); 2021 2022 gid_info->gl_qp_hdl = NULL; 2023 ASSERT(portinfo->pa_pkey_tbl != NULL && 2024 portinfo->pa_npkeys != 0); 2025 2026 for (jj = 0; jj < portinfo->pa_npkeys; jj++) { 2027 pkey_tbl = &portinfo->pa_pkey_tbl[jj]; 2028 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 2029 (pkey_tbl->pt_qp_hdl != NULL)) { 2030 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 2031 break; 2032 } 2033 } 2034 2035 /* 2036 * QP handle for GID not initialized. No matching Pkey 2037 * was found!! ibdm should *not* hit this case. Flag an 2038 * error and drop the GID if ibdm does encounter this. 2039 */ 2040 if (gid_info->gl_qp_hdl == NULL) { 2041 IBTF_DPRINTF_L2(ibdm_string, 2042 "\tget_reachable_ports: No matching Pkey"); 2043 ibdm_delete_gidinfo(gid_info); 2044 continue; 2045 } 2046 if (ibdm.ibdm_dp_gidlist_head == NULL) { 2047 ibdm.ibdm_dp_gidlist_head = gid_info; 2048 ibdm.ibdm_dp_gidlist_tail = gid_info; 2049 } else { 2050 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 2051 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 2052 ibdm.ibdm_dp_gidlist_tail = gid_info; 2053 } 2054 npaths++; 2055 } 2056 kmem_free(result, length); 2057 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths); 2058 return (npaths); 2059 } 2060 2061 2062 /* 2063 * ibdm_check_dgid() 2064 * Look in the global list to check whether we know this DGID already 2065 * Return IBDM_GID_PRESENT/IBDM_GID_NOT_PRESENT 2066 */ 2067 static ibdm_dp_gidinfo_t * 2068 ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix) 2069 { 2070 ibdm_dp_gidinfo_t *gid_list; 2071 2072 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 2073 gid_list = gid_list->gl_next) { 2074 if ((guid == gid_list->gl_dgid_lo) && 2075 (prefix == gid_list->gl_dgid_hi)) { 2076 break; 2077 } 2078 } 2079 return (gid_list); 2080 } 2081 2082 2083 /* 2084 * ibdm_find_gid() 2085 * Look in the global list to find a GID entry with matching 2086 * port & node GUID. 
2087 * Return pointer to gidinfo if found, else return NULL 2088 */ 2089 static ibdm_dp_gidinfo_t * 2090 ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid) 2091 { 2092 ibdm_dp_gidinfo_t *gid_list; 2093 2094 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n", 2095 nodeguid, portguid); 2096 2097 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 2098 gid_list = gid_list->gl_next) { 2099 if ((portguid == gid_list->gl_portguid) && 2100 (nodeguid == gid_list->gl_nodeguid)) { 2101 break; 2102 } 2103 } 2104 2105 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n", 2106 gid_list); 2107 return (gid_list); 2108 } 2109 2110 2111 /* 2112 * ibdm_set_classportinfo() 2113 * ibdm_set_classportinfo() is a function to activate a Cisco FC GW 2114 * by sending the setClassPortInfo request with the trapLID, trapGID 2115 * and etc. to the gateway since the gateway doesn't provide the IO 2116 * Unit Information othewise. This behavior is the Cisco specific one, 2117 * and this function is called to a Cisco FC GW only. 2118 * Returns IBDM_SUCCESS/IBDM_FAILURE 2119 */ 2120 static int 2121 ibdm_set_classportinfo(ibdm_dp_gidinfo_t *gid_info) 2122 { 2123 ibmf_msg_t *msg; 2124 ib_mad_hdr_t *hdr; 2125 ibdm_timeout_cb_args_t *cb_args; 2126 void *data; 2127 ib_mad_classportinfo_t *cpi; 2128 2129 IBTF_DPRINTF_L4("ibdm", 2130 "\tset_classportinfo: gid info 0x%p", gid_info); 2131 2132 /* 2133 * Send command to set classportinfo attribute. Allocate a IBMF 2134 * packet and initialize the packet. 2135 */ 2136 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2137 &msg) != IBMF_SUCCESS) { 2138 IBTF_DPRINTF_L4("ibdm", "\tset_classportinfo: pkt alloc fail"); 2139 return (IBDM_FAILURE); 2140 } 2141 2142 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2143 ibdm_alloc_send_buffers(msg); 2144 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2145 2146 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2147 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2148 msg->im_local_addr.ia_remote_qno = 1; 2149 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2150 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2151 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2152 2153 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2154 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2155 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2156 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2157 hdr->R_Method = IB_DM_DEVMGT_METHOD_SET; 2158 hdr->Status = 0; 2159 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2160 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2161 hdr->AttributeModifier = 0; 2162 2163 data = msg->im_msgbufs_send.im_bufs_cl_data; 2164 cpi = (ib_mad_classportinfo_t *)data; 2165 2166 /* 2167 * Set the classportinfo values to activate this Cisco FC GW. 
2168 */ 2169 cpi->TrapGID_hi = h2b64(gid_info->gl_sgid_hi); 2170 cpi->TrapGID_lo = h2b64(gid_info->gl_sgid_lo); 2171 cpi->TrapLID = h2b16(gid_info->gl_slid); 2172 cpi->TrapSL = gid_info->gl_SL; 2173 cpi->TrapP_Key = h2b16(gid_info->gl_p_key); 2174 cpi->TrapQP = h2b32((((ibmf_alt_qp_t *)gid_info->gl_qp_hdl)->isq_qpn)); 2175 cpi->TrapQ_Key = h2b32((((ibmf_alt_qp_t *) 2176 gid_info->gl_qp_hdl)->isq_qkey)); 2177 2178 cb_args = &gid_info->gl_cpi_cb_args; 2179 cb_args->cb_gid_info = gid_info; 2180 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2181 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2182 2183 mutex_enter(&gid_info->gl_mutex); 2184 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2185 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2186 mutex_exit(&gid_info->gl_mutex); 2187 2188 IBTF_DPRINTF_L5("ibdm", "\tset_classportinfo: " 2189 "timeout id %x", gid_info->gl_timeout_id); 2190 2191 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2192 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2193 IBTF_DPRINTF_L2("ibdm", 2194 "\tset_classportinfo: ibmf send failed"); 2195 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2196 } 2197 2198 return (IBDM_SUCCESS); 2199 } 2200 2201 2202 /* 2203 * ibdm_send_classportinfo() 2204 * Send classportinfo request. When the request is completed 2205 * IBMF calls ibdm_classportinfo_cb routine to inform about 2206 * the completion. 2207 * Returns IBDM_SUCCESS/IBDM_FAILURE 2208 */ 2209 static int 2210 ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info) 2211 { 2212 ibmf_msg_t *msg; 2213 ib_mad_hdr_t *hdr; 2214 ibdm_timeout_cb_args_t *cb_args; 2215 2216 IBTF_DPRINTF_L4("ibdm", 2217 "\tsend_classportinfo: gid info 0x%p", gid_info); 2218 2219 /* 2220 * Send command to get classportinfo attribute. Allocate a IBMF 2221 * packet and initialize the packet. 
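 *
 * Like every DM request in this file, the exchange is asynchronous and
 * follows roughly this pattern (sketch):
 *
 *	ibmf_alloc_msg() + ibdm_alloc_send_buffers()	allocate the MAD
 *	fill in im_local_addr and the DM MAD header
 *	timeout(ibdm_pkt_timeout_hdlr, cb_args,
 *	    IBDM_TIMEOUT_VALUE(ibdm_dft_timeout))	arm the retry timer
 *	ibmf_msg_transport(..., ibdm_ibmf_send_cb, ...)	post the request
 *	ibdm_ibmf_recv_cb() -> ibdm_handle_classportinfo()	consume the reply
 *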
2222 */ 2223 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2224 &msg) != IBMF_SUCCESS) { 2225 IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail"); 2226 return (IBDM_FAILURE); 2227 } 2228 2229 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2230 ibdm_alloc_send_buffers(msg); 2231 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2232 2233 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2234 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2235 msg->im_local_addr.ia_remote_qno = 1; 2236 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2237 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2238 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2239 2240 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2241 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2242 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2243 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2244 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2245 hdr->Status = 0; 2246 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2247 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 2248 hdr->AttributeModifier = 0; 2249 2250 cb_args = &gid_info->gl_cpi_cb_args; 2251 cb_args->cb_gid_info = gid_info; 2252 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2253 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 2254 2255 mutex_enter(&gid_info->gl_mutex); 2256 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2257 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2258 mutex_exit(&gid_info->gl_mutex); 2259 2260 IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: " 2261 "timeout id %x", gid_info->gl_timeout_id); 2262 2263 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2264 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2265 IBTF_DPRINTF_L2("ibdm", 2266 "\tsend_classportinfo: ibmf send failed"); 2267 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2268 } 2269 2270 return (IBDM_SUCCESS); 2271 } 2272 2273 2274 /* 2275 * ibdm_handle_setclassportinfo() 2276 * Invoked by the IBMF when setClassPortInfo request is completed. 2277 */ 2278 static void 2279 ibdm_handle_setclassportinfo(ibmf_handle_t ibmf_hdl, 2280 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2281 { 2282 void *data; 2283 timeout_id_t timeout_id; 2284 ib_mad_classportinfo_t *cpi; 2285 2286 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo:ibmf hdl " 2287 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2288 2289 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2290 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo: " 2291 "Not a ClassPortInfo resp"); 2292 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2293 return; 2294 } 2295 2296 /* 2297 * Verify whether timeout handler is created/active. 
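 *
 * (The gl_mutex is dropped across the untimeout() call below, following
 * the usual untimeout(9F) rule that the caller must not hold a lock which
 * the timeout handler itself may try to acquire.)
 *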
2298 * If created/ active, cancel the timeout handler 2299 */ 2300 mutex_enter(&gid_info->gl_mutex); 2301 if (gid_info->gl_state != IBDM_SET_CLASSPORTINFO) { 2302 IBTF_DPRINTF_L2("ibdm", "\thandle_setclassportinfo:DUP resp"); 2303 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2304 mutex_exit(&gid_info->gl_mutex); 2305 return; 2306 } 2307 ibdm_bump_transactionID(gid_info); 2308 2309 gid_info->gl_iou_cb_args.cb_req_type = 0; 2310 if (gid_info->gl_timeout_id) { 2311 timeout_id = gid_info->gl_timeout_id; 2312 mutex_exit(&gid_info->gl_mutex); 2313 IBTF_DPRINTF_L5("ibdm", "handle_setlassportinfo: " 2314 "gl_timeout_id = 0x%x", timeout_id); 2315 if (untimeout(timeout_id) == -1) { 2316 IBTF_DPRINTF_L2("ibdm", "handle_setclassportinfo: " 2317 "untimeout gl_timeout_id failed"); 2318 } 2319 mutex_enter(&gid_info->gl_mutex); 2320 gid_info->gl_timeout_id = 0; 2321 } 2322 mutex_exit(&gid_info->gl_mutex); 2323 2324 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2325 cpi = (ib_mad_classportinfo_t *)data; 2326 2327 ibdm_dump_classportinfo(cpi); 2328 } 2329 2330 2331 /* 2332 * ibdm_handle_classportinfo() 2333 * Invoked by the IBMF when the classportinfo request is completed. 2334 */ 2335 static void 2336 ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl, 2337 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2338 { 2339 void *data; 2340 timeout_id_t timeout_id; 2341 ib_mad_hdr_t *hdr; 2342 ib_mad_classportinfo_t *cpi; 2343 2344 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl " 2345 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2346 2347 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 2348 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: " 2349 "Not a ClassPortInfo resp"); 2350 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 2351 return; 2352 } 2353 2354 /* 2355 * Verify whether timeout handler is created/active. 2356 * If created/ active, cancel the timeout handler 2357 */ 2358 mutex_enter(&gid_info->gl_mutex); 2359 ibdm_bump_transactionID(gid_info); 2360 if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) { 2361 IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp"); 2362 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2363 mutex_exit(&gid_info->gl_mutex); 2364 return; 2365 } 2366 gid_info->gl_iou_cb_args.cb_req_type = 0; 2367 if (gid_info->gl_timeout_id) { 2368 timeout_id = gid_info->gl_timeout_id; 2369 mutex_exit(&gid_info->gl_mutex); 2370 IBTF_DPRINTF_L5("ibdm", "handle_ioclassportinfo: " 2371 "gl_timeout_id = 0x%x", timeout_id); 2372 if (untimeout(timeout_id) == -1) { 2373 IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: " 2374 "untimeout gl_timeout_id failed"); 2375 } 2376 mutex_enter(&gid_info->gl_mutex); 2377 gid_info->gl_timeout_id = 0; 2378 } 2379 gid_info->gl_state = IBDM_GET_IOUNITINFO; 2380 gid_info->gl_pending_cmds++; 2381 mutex_exit(&gid_info->gl_mutex); 2382 2383 data = msg->im_msgbufs_recv.im_bufs_cl_data; 2384 cpi = (ib_mad_classportinfo_t *)data; 2385 2386 /* 2387 * Cache the "RespTimeValue" and redirection information in the 2388 * global gid list data structure. This cached information will 2389 * be used to send any further requests to the GID. 2390 */ 2391 gid_info->gl_resp_timeout = 2392 (b2h32(cpi->RespTimeValue) & 0x1F); 2393 2394 gid_info->gl_redirected = ((IBDM_IN_IBMFMSG_STATUS(msg) & 2395 MAD_STATUS_REDIRECT_REQUIRED) ? 
B_TRUE: B_FALSE); 2396 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 2397 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 2398 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 2399 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 2400 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 2401 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 2402 gid_info->gl_redirectSL = cpi->RedirectSL; 2403 2404 ibdm_dump_classportinfo(cpi); 2405 2406 /* 2407 * Send IOUnitInfo request 2408 * Reuse previously allocated IBMF packet for sending ClassPortInfo 2409 * Check whether DM agent on the remote node requested redirection 2410 * If so, send the request to the redirect DGID/DLID/PKEY/QP. 2411 */ 2412 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2413 ibdm_alloc_send_buffers(msg); 2414 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2415 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2416 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2417 2418 if (gid_info->gl_redirected == B_TRUE) { 2419 if (gid_info->gl_redirect_dlid != 0) { 2420 msg->im_local_addr.ia_remote_lid = 2421 gid_info->gl_redirect_dlid; 2422 } 2423 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 2424 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 2425 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 2426 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 2427 } else { 2428 msg->im_local_addr.ia_remote_qno = 1; 2429 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2430 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2431 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2432 } 2433 2434 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2435 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2436 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2437 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2438 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2439 hdr->Status = 0; 2440 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2441 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2442 hdr->AttributeModifier = 0; 2443 2444 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2445 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2446 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2447 2448 mutex_enter(&gid_info->gl_mutex); 2449 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2450 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2451 mutex_exit(&gid_info->gl_mutex); 2452 2453 IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:" 2454 "timeout %x", gid_info->gl_timeout_id); 2455 2456 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL, 2457 ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) { 2458 IBTF_DPRINTF_L2("ibdm", 2459 "\thandle_classportinfo: msg transport failed"); 2460 ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args); 2461 } 2462 (*flag) |= IBDM_IBMF_PKT_REUSED; 2463 } 2464 2465 2466 /* 2467 * ibdm_send_iounitinfo: 2468 * Sends a DM request to get IOU unitinfo. 2469 */ 2470 static int 2471 ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info) 2472 { 2473 ibmf_msg_t *msg; 2474 ib_mad_hdr_t *hdr; 2475 2476 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info); 2477 2478 /* 2479 * Send command to get iounitinfo attribute. Allocate a IBMF 2480 * packet and initialize the packet. 
2481 */ 2482 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) != 2483 IBMF_SUCCESS) { 2484 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail"); 2485 return (IBDM_FAILURE); 2486 } 2487 2488 mutex_enter(&gid_info->gl_mutex); 2489 ibdm_bump_transactionID(gid_info); 2490 mutex_exit(&gid_info->gl_mutex); 2491 2492 2493 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2494 ibdm_alloc_send_buffers(msg); 2495 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2496 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2497 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2498 msg->im_local_addr.ia_remote_qno = 1; 2499 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2500 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2501 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2502 2503 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2504 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2505 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2506 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2507 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2508 hdr->Status = 0; 2509 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2510 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2511 hdr->AttributeModifier = 0; 2512 2513 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2514 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2515 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2516 2517 mutex_enter(&gid_info->gl_mutex); 2518 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2519 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2520 mutex_exit(&gid_info->gl_mutex); 2521 2522 IBTF_DPRINTF_L5("ibdm", "send_iouunitinfo:" 2523 "timeout %x", gid_info->gl_timeout_id); 2524 2525 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 2526 NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != 2527 IBMF_SUCCESS) { 2528 IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed"); 2529 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, 2530 msg, &gid_info->gl_iou_cb_args); 2531 } 2532 return (IBDM_SUCCESS); 2533 } 2534 2535 /* 2536 * ibdm_handle_iounitinfo() 2537 * Invoked by the IBMF when IO Unitinfo request is completed. 
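 *
 * In outline: the handler cancels the pending timeout, compares the
 * reported iou_changeid against any previously cached IOU, and only when
 * the IOU has really changed does it rebuild the cached copy and fan out
 * one IOCControllerProfile request per populated slot. The cached IOU
 * header and its IOC array come from a single allocation, roughly
 * (sketch):
 *
 *	size = sizeof (ibdm_iou_info_t) +
 *	    num_iocs * sizeof (ibdm_ioc_info_t);
 *	gid_info->gl_iou = kmem_zalloc(size, KM_SLEEP);
 *	gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *)
 *	    ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t));
 *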
2538 */ 2539 static void 2540 ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl, 2541 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2542 { 2543 int ii, first = B_TRUE; 2544 int num_iocs; 2545 size_t size; 2546 uchar_t slot_info; 2547 timeout_id_t timeout_id; 2548 ib_mad_hdr_t *hdr; 2549 ibdm_ioc_info_t *ioc_info; 2550 ib_dm_io_unitinfo_t *iou_info; 2551 ib_dm_io_unitinfo_t *giou_info; 2552 ibdm_timeout_cb_args_t *cb_args; 2553 2554 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo:" 2555 " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info); 2556 2557 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) { 2558 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: " 2559 "Unexpected response"); 2560 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2561 return; 2562 } 2563 2564 mutex_enter(&gid_info->gl_mutex); 2565 if (gid_info->gl_state != IBDM_GET_IOUNITINFO) { 2566 IBTF_DPRINTF_L4("ibdm", 2567 "\thandle_iounitinfo: DUP resp"); 2568 mutex_exit(&gid_info->gl_mutex); 2569 (*flag) = IBDM_IBMF_PKT_DUP_RESP; 2570 return; 2571 } 2572 gid_info->gl_iou_cb_args.cb_req_type = 0; 2573 if (gid_info->gl_timeout_id) { 2574 timeout_id = gid_info->gl_timeout_id; 2575 mutex_exit(&gid_info->gl_mutex); 2576 IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: " 2577 "gl_timeout_id = 0x%x", timeout_id); 2578 if (untimeout(timeout_id) == -1) { 2579 IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: " 2580 "untimeout gl_timeout_id failed"); 2581 } 2582 mutex_enter(&gid_info->gl_mutex); 2583 gid_info->gl_timeout_id = 0; 2584 } 2585 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 2586 2587 iou_info = IBDM_IN_IBMFMSG2IOU(msg); 2588 ibdm_dump_iounitinfo(iou_info); 2589 num_iocs = iou_info->iou_num_ctrl_slots; 2590 /* 2591 * check if number of IOCs reported is zero? if yes, return. 2592 * when num_iocs are reported zero internal IOC database needs 2593 * to be updated. To ensure that save the number of IOCs in 2594 * the new field "gl_num_iocs". Use a new field instead of 2595 * "giou_info->iou_num_ctrl_slots" as that would prevent 2596 * an unnecessary kmem_alloc/kmem_free when num_iocs is 0. 2597 */ 2598 if (num_iocs == 0 && gid_info->gl_num_iocs == 0) { 2599 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's"); 2600 mutex_exit(&gid_info->gl_mutex); 2601 return; 2602 } 2603 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs); 2604 2605 /* 2606 * if there is an existing gl_iou (IOU has been probed before) 2607 * check if the "iou_changeid" is same as saved entry in 2608 * "giou_info->iou_changeid". 2609 * (note: this logic can prevent IOC enumeration if a given 2610 * vendor doesn't support setting iou_changeid field for its IOU) 2611 * 2612 * if there is an existing gl_iou and iou_changeid has changed : 2613 * free up existing gl_iou info and its related structures. 2614 * reallocate gl_iou info all over again. 2615 * if we donot free this up; then this leads to memory leaks 2616 */ 2617 if (gid_info->gl_iou) { 2618 giou_info = &gid_info->gl_iou->iou_info; 2619 if (b2h16(iou_info->iou_changeid) == 2620 giou_info->iou_changeid) { 2621 IBTF_DPRINTF_L3("ibdm", 2622 "\thandle_iounitinfo: no IOCs changed"); 2623 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 2624 mutex_exit(&gid_info->gl_mutex); 2625 return; 2626 } 2627 2628 /* 2629 * Store the iou info as prev_iou to be used after 2630 * sweep is done. 
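 *
 * (Presumably kept, rather than freed, so that once the sweep completes
 * the old IOC list on gl_prev_iou can be compared with the freshly probed
 * one and IBnex can be told about IOCs that have gone away;
 * ibdm.ibdm_prev_iou flags that such a comparison is pending.)
 *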
2631 */ 2632 ASSERT(gid_info->gl_prev_iou == NULL); 2633 IBTF_DPRINTF_L4(ibdm_string, 2634 "\thandle_iounitinfo: setting gl_prev_iou %p", 2635 gid_info->gl_prev_iou); 2636 gid_info->gl_prev_iou = gid_info->gl_iou; 2637 ibdm.ibdm_prev_iou = 1; 2638 gid_info->gl_iou = NULL; 2639 } 2640 2641 size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t); 2642 gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP); 2643 giou_info = &gid_info->gl_iou->iou_info; 2644 gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *) 2645 ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t)); 2646 2647 giou_info->iou_num_ctrl_slots = gid_info->gl_num_iocs = num_iocs; 2648 giou_info->iou_flag = iou_info->iou_flag; 2649 bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128); 2650 giou_info->iou_changeid = b2h16(iou_info->iou_changeid); 2651 gid_info->gl_pending_cmds++; /* for diag code */ 2652 mutex_exit(&gid_info->gl_mutex); 2653 2654 if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) { 2655 mutex_enter(&gid_info->gl_mutex); 2656 gid_info->gl_pending_cmds--; 2657 mutex_exit(&gid_info->gl_mutex); 2658 } 2659 /* 2660 * Parallelize getting IOC controller profiles from here. 2661 * Allocate IBMF packets and send commands to get IOC profile for 2662 * each IOC present on the IOU. 2663 */ 2664 for (ii = 0; ii < num_iocs; ii++) { 2665 /* 2666 * Check whether IOC is present in the slot 2667 * Series of nibbles (in the field iou_ctrl_list) represents 2668 * a slot in the IOU. 2669 * Byte format: 76543210 2670 * Bits 0-3 of first byte represent Slot 2 2671 * bits 4-7 of first byte represent slot 1, 2672 * bits 0-3 of second byte represent slot 4 and so on 2673 * Each 4-bit nibble has the following meaning 2674 * 0x0 : IOC not installed 2675 * 0x1 : IOC is present 2676 * 0xf : Slot does not exist 2677 * and all other values are reserved. 2678 */ 2679 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 2680 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 2681 if ((ii % 2) == 0) 2682 slot_info = (slot_info >> 4); 2683 2684 if ((slot_info & 0xf) != 1) { 2685 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2686 "No IOC is present in the slot = %d", ii); 2687 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 2688 continue; 2689 } 2690 2691 mutex_enter(&gid_info->gl_mutex); 2692 ibdm_bump_transactionID(gid_info); 2693 mutex_exit(&gid_info->gl_mutex); 2694 2695 /* 2696 * Re use the already allocated packet (for IOUnitinfo) to 2697 * send the first IOC controller attribute. 
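 *
 * (For reference, the slot test described above boils down to the
 * following sketch, with ii being the zero-based slot index:
 *
 *	slot_info = giou_info->iou_ctrl_list[ii / 2];
 *	if ((ii % 2) == 0)
 *		slot_info >>= 4;
 *	installed = ((slot_info & 0xf) == 0x1);
 *
 * i.e. slot 1 is the high nibble of byte 0, slot 2 the low nibble of
 * byte 0, slot 3 the high nibble of byte 1, and so on.)
 *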
Allocate new 2698 * IBMF packets for the rest of the IOC's 2699 */ 2700 if (first != B_TRUE) { 2701 msg = NULL; 2702 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2703 &msg) != IBMF_SUCCESS) { 2704 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2705 "IBMF packet allocation failed"); 2706 continue; 2707 } 2708 2709 } 2710 2711 /* allocate send buffers for all messages */ 2712 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2713 ibdm_alloc_send_buffers(msg); 2714 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2715 2716 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2717 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2718 if (gid_info->gl_redirected == B_TRUE) { 2719 if (gid_info->gl_redirect_dlid != 0) { 2720 msg->im_local_addr.ia_remote_lid = 2721 gid_info->gl_redirect_dlid; 2722 } 2723 msg->im_local_addr.ia_remote_qno = 2724 gid_info->gl_redirect_QP; 2725 msg->im_local_addr.ia_p_key = 2726 gid_info->gl_redirect_pkey; 2727 msg->im_local_addr.ia_q_key = 2728 gid_info->gl_redirect_qkey; 2729 msg->im_local_addr.ia_service_level = 2730 gid_info->gl_redirectSL; 2731 } else { 2732 msg->im_local_addr.ia_remote_qno = 1; 2733 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2734 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2735 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2736 } 2737 2738 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2739 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2740 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2741 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2742 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2743 hdr->Status = 0; 2744 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2745 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 2746 hdr->AttributeModifier = h2b32(ii + 1); 2747 2748 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_INVALID; 2749 cb_args = &ioc_info->ioc_cb_args; 2750 cb_args->cb_gid_info = gid_info; 2751 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2752 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 2753 cb_args->cb_ioc_num = ii; 2754 2755 mutex_enter(&gid_info->gl_mutex); 2756 gid_info->gl_pending_cmds++; /* for diag code */ 2757 2758 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2759 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2760 mutex_exit(&gid_info->gl_mutex); 2761 2762 IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:" 2763 "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii); 2764 2765 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2766 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2767 IBTF_DPRINTF_L2("ibdm", 2768 "\thandle_iounitinfo: msg transport failed"); 2769 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2770 } 2771 (*flag) |= IBDM_IBMF_PKT_REUSED; 2772 first = B_FALSE; 2773 gid_info->gl_iou->iou_niocs_probe_in_progress++; 2774 } 2775 } 2776 2777 2778 /* 2779 * ibdm_handle_ioc_profile() 2780 * Invoked by the IBMF when the IOCControllerProfile request 2781 * gets completed 2782 */ 2783 static void 2784 ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl, 2785 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2786 { 2787 int first = B_TRUE, reprobe = 0; 2788 uint_t ii, ioc_no, srv_start; 2789 uint_t nserv_entries; 2790 timeout_id_t timeout_id; 2791 ib_mad_hdr_t *hdr; 2792 ibdm_ioc_info_t *ioc_info; 2793 ibdm_timeout_cb_args_t *cb_args; 2794 ib_dm_ioc_ctrl_profile_t *ioc, *gioc; 2795 2796 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2797 " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2798 2799 ioc = IBDM_IN_IBMFMSG2IOC(msg); 2800 /* 2801 * Check whether we know this IOC already 2802 * This 
will return NULL if a reprobe is in progress; in that case 2803 * IBDM_IOC_STATE_REPROBE_PROGRESS will be set. 2804 * Do not hold mutexes here. 2805 */ 2806 if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) { 2807 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2808 "IOC guid %llx is present", ioc->ioc_guid); 2809 return; 2810 } 2811 ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2812 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1); 2813 2814 /* Make sure that IOC index is within the valid range */ 2815 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 2816 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: " 2817 "IOC index Out of range, index %d", ioc_no); 2818 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2819 return; 2820 } 2821 ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1]; 2822 ioc_info->ioc_iou_info = gid_info->gl_iou; 2823 2824 mutex_enter(&gid_info->gl_mutex); 2825 if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) { 2826 reprobe = 1; 2827 ioc_info->ioc_prev_serv = ioc_info->ioc_serv; 2828 ioc_info->ioc_serv = NULL; 2829 ioc_info->ioc_prev_serv_cnt = 2830 ioc_info->ioc_profile.ioc_service_entries; 2831 } else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) { 2832 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response" 2833 "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state); 2834 mutex_exit(&gid_info->gl_mutex); 2835 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2836 return; 2837 } 2838 ioc_info->ioc_cb_args.cb_req_type = 0; 2839 if (ioc_info->ioc_timeout_id) { 2840 timeout_id = ioc_info->ioc_timeout_id; 2841 ioc_info->ioc_timeout_id = 0; 2842 mutex_exit(&gid_info->gl_mutex); 2843 IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: " 2844 "ioc_timeout_id = 0x%x", timeout_id); 2845 if (untimeout(timeout_id) == -1) { 2846 IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: " 2847 "untimeout ioc_timeout_id failed"); 2848 } 2849 mutex_enter(&gid_info->gl_mutex); 2850 } 2851 2852 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS; 2853 if (reprobe == 0) { 2854 ioc_info->ioc_iou_guid = gid_info->gl_nodeguid; 2855 ioc_info->ioc_nodeguid = gid_info->gl_nodeguid; 2856 } 2857 2858 /* 2859 * Save all the IOC information in the global structures. 2860 * Note that the wire format is big endian and the SPARC processor is 2861 * also big endian, so no conversion of the data fields is needed there. 2862 * The conversion routines used below are no-ops on SPARC 2863 * machines, whereas they do swap bytes on little endian 2864 * machines such as Intel processors.
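 *
 * A conversion helper such as b2h16() is typically nothing more than a
 * conditional byte swap, along these lines (illustrative sketch only; the
 * real definitions live in the common IB headers):
 *
 *	#ifdef _LITTLE_ENDIAN
 *	#define	b2h16(x)	((((x) & 0xff) << 8) | (((x) >> 8) & 0xff))
 *	#else
 *	#define	b2h16(x)	(x)
 *	#endif
 *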
2865 */ 2866 gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile; 2867 2868 /* 2869 * Restrict updates to onlyport GIDs and service entries during reprobe 2870 */ 2871 if (reprobe == 0) { 2872 gioc->ioc_guid = b2h64(ioc->ioc_guid); 2873 gioc->ioc_vendorid = 2874 ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK) 2875 >> IB_DM_VENDORID_SHIFT); 2876 gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid); 2877 gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver); 2878 gioc->ioc_subsys_vendorid = 2879 ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK) 2880 >> IB_DM_VENDORID_SHIFT); 2881 gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id); 2882 gioc->ioc_io_class = b2h16(ioc->ioc_io_class); 2883 gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass); 2884 gioc->ioc_protocol = b2h16(ioc->ioc_protocol); 2885 gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver); 2886 gioc->ioc_send_msg_qdepth = 2887 b2h16(ioc->ioc_send_msg_qdepth); 2888 gioc->ioc_rdma_read_qdepth = 2889 b2h16(ioc->ioc_rdma_read_qdepth); 2890 gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz); 2891 gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz); 2892 gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask; 2893 bcopy(ioc->ioc_id_string, gioc->ioc_id_string, 2894 IB_DM_IOC_ID_STRING_LEN); 2895 2896 ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode; 2897 ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid; 2898 ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK & 2899 gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE; 2900 2901 if (ioc_info->ioc_diagdeviceid == B_TRUE) { 2902 gid_info->gl_pending_cmds++; 2903 IBTF_DPRINTF_L3(ibdm_string, 2904 "\tibdm_handle_ioc_profile: " 2905 "%d: gid_info %p gl_state %d pending_cmds %d", 2906 __LINE__, gid_info, gid_info->gl_state, 2907 gid_info->gl_pending_cmds); 2908 } 2909 } 2910 gioc->ioc_service_entries = ioc->ioc_service_entries; 2911 mutex_exit(&gid_info->gl_mutex); 2912 2913 ibdm_dump_ioc_profile(gioc); 2914 2915 if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) { 2916 if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) { 2917 mutex_enter(&gid_info->gl_mutex); 2918 gid_info->gl_pending_cmds--; 2919 mutex_exit(&gid_info->gl_mutex); 2920 } 2921 } 2922 ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc( 2923 (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)), 2924 KM_SLEEP); 2925 2926 /* 2927 * In one single request, maximum number of requests that can be 2928 * obtained is 4. If number of service entries are more than four, 2929 * calculate number requests needed and send them parallelly. 
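 *
 * Sketch of the batching arithmetic, based on the decode in
 * ibdm_handle_srventry_mad() below: each request carries at most
 * IBDM_MAX_SERV_ENTRIES_PER_REQ (4) entries and identifies its target
 * through the AttributeModifier,
 *
 *	attrmod = (ioc_no << 16) | (srv_end << 8) | srv_start;
 *
 * with ioc_no counted from 1. An IOC with, say, 10 service entries is
 * therefore fetched with three requests covering entries 0-3, 4-7 and
 * 8-9.
 *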
2930 */ 2931 nserv_entries = ioc->ioc_service_entries; 2932 ii = 0; 2933 while (nserv_entries) { 2934 mutex_enter(&gid_info->gl_mutex); 2935 gid_info->gl_pending_cmds++; 2936 ibdm_bump_transactionID(gid_info); 2937 mutex_exit(&gid_info->gl_mutex); 2938 2939 if (first != B_TRUE) { 2940 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2941 &msg) != IBMF_SUCCESS) { 2942 continue; 2943 } 2944 2945 } 2946 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 2947 ibdm_alloc_send_buffers(msg); 2948 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 2949 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2950 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2951 if (gid_info->gl_redirected == B_TRUE) { 2952 if (gid_info->gl_redirect_dlid != 0) { 2953 msg->im_local_addr.ia_remote_lid = 2954 gid_info->gl_redirect_dlid; 2955 } 2956 msg->im_local_addr.ia_remote_qno = 2957 gid_info->gl_redirect_QP; 2958 msg->im_local_addr.ia_p_key = 2959 gid_info->gl_redirect_pkey; 2960 msg->im_local_addr.ia_q_key = 2961 gid_info->gl_redirect_qkey; 2962 msg->im_local_addr.ia_service_level = 2963 gid_info->gl_redirectSL; 2964 } else { 2965 msg->im_local_addr.ia_remote_qno = 1; 2966 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2967 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2968 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 2969 } 2970 2971 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2972 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2973 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2974 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2975 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2976 hdr->Status = 0; 2977 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2978 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 2979 2980 srv_start = ii * 4; 2981 cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args; 2982 cb_args->cb_gid_info = gid_info; 2983 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2984 cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS; 2985 cb_args->cb_srvents_start = srv_start; 2986 cb_args->cb_ioc_num = ioc_no - 1; 2987 2988 if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) { 2989 nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ; 2990 cb_args->cb_srvents_end = (cb_args->cb_srvents_start + 2991 IBDM_MAX_SERV_ENTRIES_PER_REQ - 1); 2992 } else { 2993 cb_args->cb_srvents_end = 2994 (cb_args->cb_srvents_start + nserv_entries - 1); 2995 nserv_entries = 0; 2996 } 2997 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 2998 ibdm_fill_srv_attr_mod(hdr, cb_args); 2999 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 3000 3001 mutex_enter(&gid_info->gl_mutex); 3002 ioc_info->ioc_serv[srv_start].se_timeout_id = timeout( 3003 ibdm_pkt_timeout_hdlr, cb_args, 3004 IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3005 mutex_exit(&gid_info->gl_mutex); 3006 3007 IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:" 3008 "timeout %x, ioc %d srv %d", 3009 ioc_info->ioc_serv[srv_start].se_timeout_id, 3010 ioc_no - 1, srv_start); 3011 3012 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 3013 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3014 IBTF_DPRINTF_L2("ibdm", 3015 "\thandle_ioc_profile: msg send failed"); 3016 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 3017 } 3018 (*flag) |= IBDM_IBMF_PKT_REUSED; 3019 first = B_FALSE; 3020 ii++; 3021 } 3022 } 3023 3024 3025 /* 3026 * ibdm_handle_srventry_mad() 3027 */ 3028 static void 3029 ibdm_handle_srventry_mad(ibmf_msg_t *msg, 3030 ibdm_dp_gidinfo_t *gid_info, int *flag) 3031 { 3032 uint_t ii, ioc_no, attrmod; 3033 uint_t nentries, start, end; 3034 timeout_id_t timeout_id; 3035 ib_dm_srv_t *srv_ents; 3036 
ibdm_ioc_info_t *ioc_info; 3037 ibdm_srvents_info_t *gsrv_ents; 3038 3039 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:" 3040 " IBMF msg %p gid info %p", msg, gid_info); 3041 3042 srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg); 3043 /* 3044 * Get the start and end index of the service entries 3045 * Upper 16 bits identify the IOC 3046 * Lower 16 bits specify the range of service entries 3047 * LSB specifies (Big endian) end of the range 3048 * MSB specifies (Big endian) start of the range 3049 */ 3050 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3051 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3052 end = ((attrmod >> 8) & IBDM_8_BIT_MASK); 3053 start = (attrmod & IBDM_8_BIT_MASK); 3054 3055 /* Make sure that IOC index is with the valid range */ 3056 if ((ioc_no < 1) | 3057 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) { 3058 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 3059 "IOC index Out of range, index %d", ioc_no); 3060 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3061 return; 3062 } 3063 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3064 3065 /* 3066 * Make sure that the "start" and "end" service indexes are 3067 * with in the valid range 3068 */ 3069 nentries = ioc_info->ioc_profile.ioc_service_entries; 3070 if ((start > end) | (start >= nentries) | (end >= nentries)) { 3071 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 3072 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries); 3073 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3074 return; 3075 } 3076 gsrv_ents = &ioc_info->ioc_serv[start]; 3077 mutex_enter(&gid_info->gl_mutex); 3078 if (gsrv_ents->se_state != IBDM_SE_INVALID) { 3079 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 3080 "already known, ioc %d, srv %d, se_state %x", 3081 ioc_no - 1, start, gsrv_ents->se_state); 3082 mutex_exit(&gid_info->gl_mutex); 3083 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3084 return; 3085 } 3086 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0; 3087 if (ioc_info->ioc_serv[start].se_timeout_id) { 3088 IBTF_DPRINTF_L2("ibdm", 3089 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start); 3090 timeout_id = ioc_info->ioc_serv[start].se_timeout_id; 3091 ioc_info->ioc_serv[start].se_timeout_id = 0; 3092 mutex_exit(&gid_info->gl_mutex); 3093 IBTF_DPRINTF_L5("ibdm", "handle_srverntry_mad: " 3094 "se_timeout_id = 0x%x", timeout_id); 3095 if (untimeout(timeout_id) == -1) { 3096 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: " 3097 "untimeout se_timeout_id failed"); 3098 } 3099 mutex_enter(&gid_info->gl_mutex); 3100 } 3101 3102 gsrv_ents->se_state = IBDM_SE_VALID; 3103 mutex_exit(&gid_info->gl_mutex); 3104 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) { 3105 gsrv_ents->se_attr.srv_id = b2h64(srv_ents->srv_id); 3106 bcopy(srv_ents->srv_name, 3107 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN); 3108 ibdm_dump_service_entries(&gsrv_ents->se_attr); 3109 } 3110 } 3111 3112 3113 /* 3114 * ibdm_get_diagcode: 3115 * Send request to get IOU/IOC diag code 3116 * Returns IBDM_SUCCESS/IBDM_FAILURE 3117 */ 3118 static int 3119 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr) 3120 { 3121 ibmf_msg_t *msg; 3122 ib_mad_hdr_t *hdr; 3123 ibdm_ioc_info_t *ioc; 3124 ibdm_timeout_cb_args_t *cb_args; 3125 timeout_id_t *timeout_id; 3126 3127 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d", 3128 gid_info, attr); 3129 3130 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 3131 &msg) != IBMF_SUCCESS) { 3132 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail"); 3133 return (IBDM_FAILURE); 3134 } 3135 3136 
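	/*
	 * The DiagCode attribute is addressed through the AttributeModifier:
	 * attr == 0 requests the IOU diag code, while attr == N (N >= 1)
	 * requests the diag code of the IOC in slot N (callers pass 0 or
	 * ioc_no accordingly). The two branches below differ only in which
	 * cb_args/timeout_id pair tracks the outstanding request.
	 */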
_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 3137 ibdm_alloc_send_buffers(msg); 3138 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 3139 3140 mutex_enter(&gid_info->gl_mutex); 3141 ibdm_bump_transactionID(gid_info); 3142 mutex_exit(&gid_info->gl_mutex); 3143 3144 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3145 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3146 if (gid_info->gl_redirected == B_TRUE) { 3147 if (gid_info->gl_redirect_dlid != 0) { 3148 msg->im_local_addr.ia_remote_lid = 3149 gid_info->gl_redirect_dlid; 3150 } 3151 3152 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3153 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3154 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3155 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3156 } else { 3157 msg->im_local_addr.ia_remote_qno = 1; 3158 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 3159 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3160 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 3161 } 3162 3163 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3164 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3165 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3166 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3167 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3168 hdr->Status = 0; 3169 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3170 3171 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3172 hdr->AttributeModifier = h2b32(attr); 3173 3174 if (attr == 0) { 3175 cb_args = &gid_info->gl_iou_cb_args; 3176 gid_info->gl_iou->iou_dc_valid = B_FALSE; 3177 cb_args->cb_ioc_num = 0; 3178 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE; 3179 timeout_id = &gid_info->gl_timeout_id; 3180 } else { 3181 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1)); 3182 ioc->ioc_dc_valid = B_FALSE; 3183 cb_args = &ioc->ioc_dc_cb_args; 3184 cb_args->cb_ioc_num = attr - 1; 3185 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE; 3186 timeout_id = &ioc->ioc_dc_timeout_id; 3187 } 3188 cb_args->cb_gid_info = gid_info; 3189 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 3190 cb_args->cb_srvents_start = 0; 3191 3192 mutex_enter(&gid_info->gl_mutex); 3193 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3194 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3195 mutex_exit(&gid_info->gl_mutex); 3196 3197 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:" 3198 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num); 3199 3200 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3201 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3202 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed"); 3203 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3204 } 3205 return (IBDM_SUCCESS); 3206 } 3207 3208 /* 3209 * ibdm_handle_diagcode: 3210 * Process the DiagCode MAD response and update local DM 3211 * data structure. 
3212 */ 3213 static void 3214 ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg, 3215 ibdm_dp_gidinfo_t *gid_info, int *flag) 3216 { 3217 uint16_t attrmod, *diagcode; 3218 ibdm_iou_info_t *iou; 3219 ibdm_ioc_info_t *ioc; 3220 timeout_id_t timeout_id; 3221 ibdm_timeout_cb_args_t *cb_args; 3222 3223 diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data; 3224 3225 mutex_enter(&gid_info->gl_mutex); 3226 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg); 3227 iou = gid_info->gl_iou; 3228 if (attrmod == 0) { 3229 if (iou->iou_dc_valid != B_FALSE) { 3230 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3231 IBTF_DPRINTF_L4("ibdm", 3232 "\thandle_diagcode: Duplicate IOU DiagCode"); 3233 mutex_exit(&gid_info->gl_mutex); 3234 return; 3235 } 3236 cb_args = &gid_info->gl_iou_cb_args; 3237 cb_args->cb_req_type = 0; 3238 iou->iou_diagcode = b2h16(*diagcode); 3239 iou->iou_dc_valid = B_TRUE; 3240 if (gid_info->gl_timeout_id) { 3241 timeout_id = gid_info->gl_timeout_id; 3242 mutex_exit(&gid_info->gl_mutex); 3243 IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: " 3244 "gl_timeout_id = 0x%x", timeout_id); 3245 if (untimeout(timeout_id) == -1) { 3246 IBTF_DPRINTF_L2("ibdm", "handle_diagcode: " 3247 "untimeout gl_timeout_id failed"); 3248 } 3249 mutex_enter(&gid_info->gl_mutex); 3250 gid_info->gl_timeout_id = 0; 3251 } 3252 } else { 3253 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1)); 3254 if (ioc->ioc_dc_valid != B_FALSE) { 3255 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 3256 IBTF_DPRINTF_L4("ibdm", 3257 "\thandle_diagcode: Duplicate IOC DiagCode"); 3258 mutex_exit(&gid_info->gl_mutex); 3259 return; 3260 } 3261 cb_args = &ioc->ioc_dc_cb_args; 3262 cb_args->cb_req_type = 0; 3263 ioc->ioc_diagcode = b2h16(*diagcode); 3264 ioc->ioc_dc_valid = B_TRUE; 3265 timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id; 3266 if (timeout_id) { 3267 iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0; 3268 mutex_exit(&gid_info->gl_mutex); 3269 IBTF_DPRINTF_L5("ibdm", "handle_diagcode: " 3270 "timeout_id = 0x%x", timeout_id); 3271 if (untimeout(timeout_id) == -1) { 3272 IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: " 3273 "untimeout ioc_dc_timeout_id failed"); 3274 } 3275 mutex_enter(&gid_info->gl_mutex); 3276 } 3277 } 3278 mutex_exit(&gid_info->gl_mutex); 3279 3280 IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x" 3281 "attrmod : 0x%x", b2h16(*diagcode), attrmod); 3282 } 3283 3284 3285 /* 3286 * ibdm_is_ioc_present() 3287 * Return ibdm_ioc_info_t if IOC guid is found in the global gid list 3288 */ 3289 static ibdm_ioc_info_t * 3290 ibdm_is_ioc_present(ib_guid_t ioc_guid, 3291 ibdm_dp_gidinfo_t *gid_info, int *flag) 3292 { 3293 int ii; 3294 ibdm_ioc_info_t *ioc; 3295 ibdm_dp_gidinfo_t *head; 3296 ib_dm_io_unitinfo_t *iou; 3297 3298 mutex_enter(&ibdm.ibdm_mutex); 3299 head = ibdm.ibdm_dp_gidlist_head; 3300 while (head) { 3301 mutex_enter(&head->gl_mutex); 3302 if (head->gl_iou == NULL) { 3303 mutex_exit(&head->gl_mutex); 3304 head = head->gl_next; 3305 continue; 3306 } 3307 iou = &head->gl_iou->iou_info; 3308 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 3309 ioc = IBDM_GIDINFO2IOCINFO(head, ii); 3310 if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) && 3311 (ioc->ioc_profile.ioc_guid == ioc_guid)) { 3312 if (gid_info == head) { 3313 *flag |= IBDM_IBMF_PKT_DUP_RESP; 3314 } else if (ibdm_check_dgid(head->gl_dgid_lo, 3315 head->gl_dgid_hi) != NULL) { 3316 IBTF_DPRINTF_L4("ibdm", "\tis_ioc_" 3317 "present: gid not present"); 3318 ibdm_add_to_gl_gid(gid_info, head); 3319 } 3320 mutex_exit(&head->gl_mutex); 3321 
mutex_exit(&ibdm.ibdm_mutex); 3322 return (ioc); 3323 } 3324 } 3325 mutex_exit(&head->gl_mutex); 3326 head = head->gl_next; 3327 } 3328 mutex_exit(&ibdm.ibdm_mutex); 3329 return (NULL); 3330 } 3331 3332 3333 /* 3334 * ibdm_ibmf_send_cb() 3335 * IBMF invokes this callback routine after posting the DM MAD to 3336 * the HCA. 3337 */ 3338 /*ARGSUSED*/ 3339 static void 3340 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg) 3341 { 3342 ibdm_dump_ibmf_msg(ibmf_msg, 1); 3343 ibdm_free_send_buffers(ibmf_msg); 3344 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) { 3345 IBTF_DPRINTF_L4("ibdm", 3346 "\tibmf_send_cb: IBMF free msg failed"); 3347 } 3348 } 3349 3350 3351 /* 3352 * ibdm_ibmf_recv_cb() 3353 * Invoked by the IBMF when a response to the one of the DM requests 3354 * is received. 3355 */ 3356 /*ARGSUSED*/ 3357 static void 3358 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3359 { 3360 ibdm_taskq_args_t *taskq_args; 3361 3362 /* 3363 * If the taskq enable is set then dispatch a taskq to process 3364 * the MAD, otherwise just process it on this thread 3365 */ 3366 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) { 3367 ibdm_process_incoming_mad(ibmf_hdl, msg, arg); 3368 return; 3369 } 3370 3371 /* 3372 * create a taskq and dispatch it to process the incoming MAD 3373 */ 3374 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP); 3375 if (taskq_args == NULL) { 3376 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for" 3377 "taskq_args"); 3378 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3379 IBTF_DPRINTF_L4("ibmf_recv_cb", 3380 "\tibmf_recv_cb: IBMF free msg failed"); 3381 } 3382 return; 3383 } 3384 taskq_args->tq_ibmf_handle = ibmf_hdl; 3385 taskq_args->tq_ibmf_msg = msg; 3386 taskq_args->tq_args = arg; 3387 3388 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args, 3389 TQ_NOSLEEP) == 0) { 3390 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed"); 3391 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3392 IBTF_DPRINTF_L4("ibmf_recv_cb", 3393 "\tibmf_recv_cb: IBMF free msg failed"); 3394 } 3395 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3396 return; 3397 } 3398 3399 /* taskq_args are deleted in ibdm_recv_incoming_mad() */ 3400 } 3401 3402 3403 void 3404 ibdm_recv_incoming_mad(void *args) 3405 { 3406 ibdm_taskq_args_t *taskq_args; 3407 3408 taskq_args = (ibdm_taskq_args_t *)args; 3409 3410 IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: " 3411 "Processing incoming MAD via taskq"); 3412 3413 ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle, 3414 taskq_args->tq_ibmf_msg, taskq_args->tq_args); 3415 3416 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 3417 } 3418 3419 3420 /* 3421 * Calls ibdm_process_incoming_mad with all function arguments extracted 3422 * from args 3423 */ 3424 /*ARGSUSED*/ 3425 static void 3426 ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 3427 { 3428 int flag = 0; 3429 int ret; 3430 uint64_t transaction_id; 3431 ib_mad_hdr_t *hdr; 3432 ibdm_dp_gidinfo_t *gid_info = NULL; 3433 3434 IBTF_DPRINTF_L4("ibdm", 3435 "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg); 3436 ibdm_dump_ibmf_msg(msg, 0); 3437 3438 /* 3439 * IBMF calls this routine for every DM MAD that arrives at this port. 3440 * But we handle only the responses for requests we sent. We drop all 3441 * the DM packets that does not have response bit set in the MAD 3442 * header(this eliminates all the requests sent to this port). 
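 *
 * Responses are matched back to their GID via the transaction ID: every
 * GID is given its own ID window when it is created in
 * ibdm_get_reachable_ports(), and the lookup below is essentially
 *
 *	(b2h64(hdr->TransactionID) & IBDM_GID_TRANSACTIONID_MASK) ==
 *	    (gid_info->gl_transactionID & IBDM_GID_TRANSACTIONID_MASK)
 *
 * for each entry on ibdm_dp_gidlist_head; a response that matches no GID
 * is dropped.
 *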
3443 * We handle only DM class version 1 MAD's 3444 */ 3445 hdr = IBDM_IN_IBMFMSG_MADHDR(msg); 3446 if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) { 3447 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3448 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3449 "IBMF free msg failed DM request drop it"); 3450 } 3451 return; 3452 } 3453 3454 transaction_id = b2h64(hdr->TransactionID); 3455 3456 mutex_enter(&ibdm.ibdm_mutex); 3457 gid_info = ibdm.ibdm_dp_gidlist_head; 3458 while (gid_info) { 3459 if ((gid_info->gl_transactionID & 3460 IBDM_GID_TRANSACTIONID_MASK) == 3461 (transaction_id & IBDM_GID_TRANSACTIONID_MASK)) 3462 break; 3463 gid_info = gid_info->gl_next; 3464 } 3465 mutex_exit(&ibdm.ibdm_mutex); 3466 3467 if (gid_info == NULL) { 3468 /* Drop the packet */ 3469 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID" 3470 " does not match: 0x%llx", transaction_id); 3471 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3472 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3473 "IBMF free msg failed DM request drop it"); 3474 } 3475 return; 3476 } 3477 3478 /* Handle redirection for all the MAD's, except ClassPortInfo */ 3479 if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) && 3480 (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) { 3481 ret = ibdm_handle_redirection(msg, gid_info, &flag); 3482 if (ret == IBDM_SUCCESS) { 3483 return; 3484 } 3485 } else { 3486 uint_t gl_state; 3487 3488 mutex_enter(&gid_info->gl_mutex); 3489 gl_state = gid_info->gl_state; 3490 mutex_exit(&gid_info->gl_mutex); 3491 3492 switch (gl_state) { 3493 3494 case IBDM_SET_CLASSPORTINFO: 3495 ibdm_handle_setclassportinfo( 3496 ibmf_hdl, msg, gid_info, &flag); 3497 break; 3498 3499 case IBDM_GET_CLASSPORTINFO: 3500 ibdm_handle_classportinfo( 3501 ibmf_hdl, msg, gid_info, &flag); 3502 break; 3503 3504 case IBDM_GET_IOUNITINFO: 3505 ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag); 3506 break; 3507 3508 case IBDM_GET_IOC_DETAILS: 3509 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3510 3511 case IB_DM_ATTR_SERVICE_ENTRIES: 3512 ibdm_handle_srventry_mad(msg, gid_info, &flag); 3513 break; 3514 3515 case IB_DM_ATTR_IOC_CTRL_PROFILE: 3516 ibdm_handle_ioc_profile( 3517 ibmf_hdl, msg, gid_info, &flag); 3518 break; 3519 3520 case IB_DM_ATTR_DIAG_CODE: 3521 ibdm_handle_diagcode(msg, gid_info, &flag); 3522 break; 3523 3524 default: 3525 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3526 "Error state, wrong attribute :-("); 3527 (void) ibmf_free_msg(ibmf_hdl, &msg); 3528 return; 3529 } 3530 break; 3531 default: 3532 IBTF_DPRINTF_L2("ibdm", 3533 "process_incoming_mad: Dropping the packet" 3534 " gl_state %x", gl_state); 3535 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3536 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3537 "IBMF free msg failed DM request drop it"); 3538 } 3539 return; 3540 } 3541 } 3542 3543 if ((flag & IBDM_IBMF_PKT_DUP_RESP) || 3544 (flag & IBDM_IBMF_PKT_UNEXP_RESP)) { 3545 IBTF_DPRINTF_L2("ibdm", 3546 "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag); 3547 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3548 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 3549 "IBMF free msg failed DM request drop it"); 3550 } 3551 return; 3552 } 3553 3554 mutex_enter(&gid_info->gl_mutex); 3555 if (gid_info->gl_pending_cmds < 1) { 3556 IBTF_DPRINTF_L2("ibdm", 3557 "\tprocess_incoming_mad: pending commands negative"); 3558 } 3559 if (--gid_info->gl_pending_cmds) { 3560 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: " 3561 "gid_info %p pending cmds %d", 3562 
gid_info, gid_info->gl_pending_cmds); 3563 mutex_exit(&gid_info->gl_mutex); 3564 } else { 3565 uint_t prev_state; 3566 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: Probing DONE"); 3567 prev_state = gid_info->gl_state; 3568 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 3569 if (prev_state == IBDM_SET_CLASSPORTINFO) { 3570 IBTF_DPRINTF_L4("ibdm", 3571 "\tprocess_incoming_mad: " 3572 "Setclassportinfo for Cisco FC GW is done."); 3573 gid_info->gl_flag &= ~IBDM_CISCO_PROBE; 3574 gid_info->gl_flag |= IBDM_CISCO_PROBE_DONE; 3575 mutex_exit(&gid_info->gl_mutex); 3576 cv_broadcast(&gid_info->gl_probe_cv); 3577 } else { 3578 mutex_exit(&gid_info->gl_mutex); 3579 ibdm_notify_newgid_iocs(gid_info); 3580 mutex_enter(&ibdm.ibdm_mutex); 3581 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3582 IBTF_DPRINTF_L4("ibdm", 3583 "\tprocess_incoming_mad: Wakeup"); 3584 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3585 cv_broadcast(&ibdm.ibdm_probe_cv); 3586 } 3587 mutex_exit(&ibdm.ibdm_mutex); 3588 } 3589 } 3590 3591 /* 3592 * Do not deallocate the IBMF packet if atleast one request 3593 * is posted. IBMF packet is reused. 3594 */ 3595 if (!(flag & IBDM_IBMF_PKT_REUSED)) { 3596 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3597 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3598 "IBMF free msg failed DM request drop it"); 3599 } 3600 } 3601 } 3602 3603 3604 /* 3605 * ibdm_verify_mad_status() 3606 * Verifies the MAD status 3607 * Returns IBDM_SUCCESS if status is correct 3608 * Returns IBDM_FAILURE for bogus MAD status 3609 */ 3610 static int 3611 ibdm_verify_mad_status(ib_mad_hdr_t *hdr) 3612 { 3613 int ret = 0; 3614 3615 if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) || 3616 (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) { 3617 return (IBDM_FAILURE); 3618 } 3619 3620 if (b2h16(hdr->Status) == 0) 3621 ret = IBDM_SUCCESS; 3622 else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED) 3623 ret = IBDM_SUCCESS; 3624 else { 3625 IBTF_DPRINTF_L2("ibdm", 3626 "\tverify_mad_status: Status : 0x%x", b2h16(hdr->Status)); 3627 ret = IBDM_FAILURE; 3628 } 3629 return (ret); 3630 } 3631 3632 3633 3634 /* 3635 * ibdm_handle_redirection() 3636 * Returns IBDM_SUCCESS/IBDM_FAILURE 3637 */ 3638 static int 3639 ibdm_handle_redirection(ibmf_msg_t *msg, 3640 ibdm_dp_gidinfo_t *gid_info, int *flag) 3641 { 3642 int attrmod, ioc_no, start; 3643 void *data; 3644 timeout_id_t *timeout_id; 3645 ib_mad_hdr_t *hdr; 3646 ibdm_ioc_info_t *ioc = NULL; 3647 ibdm_timeout_cb_args_t *cb_args; 3648 ib_mad_classportinfo_t *cpi; 3649 3650 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter"); 3651 mutex_enter(&gid_info->gl_mutex); 3652 switch (gid_info->gl_state) { 3653 case IBDM_GET_IOUNITINFO: 3654 cb_args = &gid_info->gl_iou_cb_args; 3655 timeout_id = &gid_info->gl_timeout_id; 3656 break; 3657 3658 case IBDM_GET_IOC_DETAILS: 3659 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3660 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3661 3662 case IB_DM_ATTR_DIAG_CODE: 3663 if (attrmod == 0) { 3664 cb_args = &gid_info->gl_iou_cb_args; 3665 timeout_id = &gid_info->gl_timeout_id; 3666 break; 3667 } 3668 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3669 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3670 "IOC# Out of range %d", attrmod); 3671 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3672 mutex_exit(&gid_info->gl_mutex); 3673 return (IBDM_FAILURE); 3674 } 3675 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3676 cb_args = &ioc->ioc_dc_cb_args; 3677 timeout_id = &ioc->ioc_dc_timeout_id; 3678 break; 3679 3680 case 
IB_DM_ATTR_IOC_CTRL_PROFILE: 3681 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3682 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3683 "IOC# Out of range %d", attrmod); 3684 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3685 mutex_exit(&gid_info->gl_mutex); 3686 return (IBDM_FAILURE); 3687 } 3688 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3689 cb_args = &ioc->ioc_cb_args; 3690 timeout_id = &ioc->ioc_timeout_id; 3691 break; 3692 3693 case IB_DM_ATTR_SERVICE_ENTRIES: 3694 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3695 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 3696 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3697 "IOC# Out of range %d", ioc_no); 3698 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3699 mutex_exit(&gid_info->gl_mutex); 3700 return (IBDM_FAILURE); 3701 } 3702 start = (attrmod & IBDM_8_BIT_MASK); 3703 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3704 if (start > ioc->ioc_profile.ioc_service_entries) { 3705 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3706 " SE index Out of range %d", start); 3707 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3708 mutex_exit(&gid_info->gl_mutex); 3709 return (IBDM_FAILURE); 3710 } 3711 cb_args = &ioc->ioc_serv[start].se_cb_args; 3712 timeout_id = &ioc->ioc_serv[start].se_timeout_id; 3713 break; 3714 3715 default: 3716 /* ERROR State */ 3717 IBTF_DPRINTF_L2("ibdm", 3718 "\thandle_redirection: wrong attribute :-("); 3719 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3720 mutex_exit(&gid_info->gl_mutex); 3721 return (IBDM_FAILURE); 3722 } 3723 break; 3724 default: 3725 /* ERROR State */ 3726 IBTF_DPRINTF_L2("ibdm", 3727 "\thandle_redirection: Error state :-("); 3728 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3729 mutex_exit(&gid_info->gl_mutex); 3730 return (IBDM_FAILURE); 3731 } 3732 if ((*timeout_id) != 0) { 3733 mutex_exit(&gid_info->gl_mutex); 3734 if (untimeout(*timeout_id) == -1) { 3735 IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: " 3736 "untimeout failed %x", *timeout_id); 3737 } else { 3738 IBTF_DPRINTF_L5("ibdm", 3739 "\thandle_redirection: timeout %x", *timeout_id); 3740 } 3741 mutex_enter(&gid_info->gl_mutex); 3742 *timeout_id = 0; 3743 } 3744 3745 data = msg->im_msgbufs_recv.im_bufs_cl_data; 3746 cpi = (ib_mad_classportinfo_t *)data; 3747 3748 gid_info->gl_resp_timeout = 3749 (b2h32(cpi->RespTimeValue) & 0x1F); 3750 3751 gid_info->gl_redirected = B_TRUE; 3752 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 3753 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 3754 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 3755 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 3756 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 3757 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 3758 gid_info->gl_redirectSL = cpi->RedirectSL; 3759 3760 if (gid_info->gl_redirect_dlid != 0) { 3761 msg->im_local_addr.ia_remote_lid = 3762 gid_info->gl_redirect_dlid; 3763 } 3764 ibdm_bump_transactionID(gid_info); 3765 mutex_exit(&gid_info->gl_mutex); 3766 3767 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg, *hdr)) 3768 ibdm_alloc_send_buffers(msg); 3769 3770 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3771 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3772 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3773 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3774 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3775 hdr->Status = 0; 3776 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3777 hdr->AttributeID = 3778 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID; 3779 hdr->AttributeModifier = 3780 
msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier; 3781 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg, *hdr)) 3782 3783 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3784 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3785 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3786 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 3787 3788 mutex_enter(&gid_info->gl_mutex); 3789 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3790 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3791 mutex_exit(&gid_info->gl_mutex); 3792 3793 IBTF_DPRINTF_L5("ibdm", "\thandle_redirect:" 3794 "timeout %x", *timeout_id); 3795 3796 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3797 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3798 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:" 3799 "message transport failed"); 3800 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3801 } 3802 (*flag) |= IBDM_IBMF_PKT_REUSED; 3803 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit"); 3804 return (IBDM_SUCCESS); 3805 } 3806 3807 3808 /* 3809 * ibdm_pkt_timeout_hdlr 3810 * This timeout handler is registed for every IBMF packet that is 3811 * sent through the IBMF. It gets called when no response is received 3812 * within the specified time for the packet. No retries for the failed 3813 * commands currently. Drops the failed IBMF packet and update the 3814 * pending list commands. 3815 */ 3816 static void 3817 ibdm_pkt_timeout_hdlr(void *arg) 3818 { 3819 ibdm_iou_info_t *iou; 3820 ibdm_ioc_info_t *ioc; 3821 ibdm_timeout_cb_args_t *cb_args = arg; 3822 ibdm_dp_gidinfo_t *gid_info; 3823 int srv_ent; 3824 uint_t new_gl_state; 3825 3826 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p " 3827 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3828 cb_args->cb_req_type, cb_args->cb_ioc_num, 3829 cb_args->cb_srvents_start); 3830 3831 gid_info = cb_args->cb_gid_info; 3832 mutex_enter(&gid_info->gl_mutex); 3833 3834 if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) || 3835 (cb_args->cb_req_type == 0)) { 3836 3837 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed" 3838 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type, 3839 cb_args->cb_ioc_num, cb_args->cb_srvents_start); 3840 3841 if (gid_info->gl_timeout_id) 3842 gid_info->gl_timeout_id = 0; 3843 mutex_exit(&gid_info->gl_mutex); 3844 return; 3845 } 3846 if (cb_args->cb_retry_count) { 3847 cb_args->cb_retry_count--; 3848 /* 3849 * A new timeout_id is set inside ibdm_retry_command(). 3850 * When the function returns an error, the timeout_id 3851 * is reset (to zero) in the switch statement below. 
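 * On success the request has been re-posted with a fresh TransactionID
 * and this handler re-armed, so we simply return here; only when the
 * retries are exhausted (or the resend itself fails) do we fall through
 * to the failure handling below.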
3852 */ 3853 if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) { 3854 mutex_exit(&gid_info->gl_mutex); 3855 return; 3856 } 3857 cb_args->cb_retry_count = 0; 3858 } 3859 3860 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p" 3861 " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3862 cb_args->cb_req_type, cb_args->cb_ioc_num, 3863 cb_args->cb_srvents_start); 3864 3865 switch (cb_args->cb_req_type) { 3866 3867 case IBDM_REQ_TYPE_CLASSPORTINFO: 3868 case IBDM_REQ_TYPE_IOUINFO: 3869 new_gl_state = IBDM_GID_PROBING_FAILED; 3870 if (gid_info->gl_timeout_id) 3871 gid_info->gl_timeout_id = 0; 3872 break; 3873 3874 case IBDM_REQ_TYPE_IOCINFO: 3875 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3876 iou = gid_info->gl_iou; 3877 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3878 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3879 if (ioc->ioc_timeout_id) 3880 ioc->ioc_timeout_id = 0; 3881 break; 3882 3883 case IBDM_REQ_TYPE_SRVENTS: 3884 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3885 iou = gid_info->gl_iou; 3886 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3887 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3888 srv_ent = cb_args->cb_srvents_start; 3889 if (ioc->ioc_serv[srv_ent].se_timeout_id) 3890 ioc->ioc_serv[srv_ent].se_timeout_id = 0; 3891 break; 3892 3893 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3894 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3895 iou = gid_info->gl_iou; 3896 iou->iou_dc_valid = B_FALSE; 3897 if (gid_info->gl_timeout_id) 3898 gid_info->gl_timeout_id = 0; 3899 break; 3900 3901 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3902 new_gl_state = IBDM_GID_PROBING_COMPLETE; 3903 iou = gid_info->gl_iou; 3904 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3905 ioc->ioc_dc_valid = B_FALSE; 3906 if (ioc->ioc_dc_timeout_id) 3907 ioc->ioc_dc_timeout_id = 0; 3908 break; 3909 3910 default: /* ERROR State */ 3911 new_gl_state = IBDM_GID_PROBING_FAILED; 3912 if (gid_info->gl_timeout_id) 3913 gid_info->gl_timeout_id = 0; 3914 IBTF_DPRINTF_L2("ibdm", 3915 "\tpkt_timeout_hdlr: wrong request type."); 3916 break; 3917 } 3918 3919 --gid_info->gl_pending_cmds; /* decrease the counter */ 3920 3921 if (gid_info->gl_pending_cmds == 0) { 3922 gid_info->gl_state = new_gl_state; 3923 mutex_exit(&gid_info->gl_mutex); 3924 /* 3925 * Delete this gid_info if the gid probe fails. 3926 */ 3927 if (new_gl_state == IBDM_GID_PROBING_FAILED) { 3928 ibdm_delete_glhca_list(gid_info); 3929 } 3930 ibdm_notify_newgid_iocs(gid_info); 3931 mutex_enter(&ibdm.ibdm_mutex); 3932 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3933 IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup"); 3934 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3935 cv_broadcast(&ibdm.ibdm_probe_cv); 3936 } 3937 mutex_exit(&ibdm.ibdm_mutex); 3938 } else { 3939 /* 3940 * Reset gl_pending_cmd if the extra timeout happens since 3941 * gl_pending_cmd becomes negative as a result. 3942 */ 3943 if (gid_info->gl_pending_cmds < 0) { 3944 gid_info->gl_pending_cmds = 0; 3945 IBTF_DPRINTF_L2("ibdm", 3946 "\tpkt_timeout_hdlr: extra timeout request." 3947 " reset gl_pending_cmds"); 3948 } 3949 mutex_exit(&gid_info->gl_mutex); 3950 /* 3951 * Delete this gid_info if the gid probe fails. 3952 */ 3953 if (new_gl_state == IBDM_GID_PROBING_FAILED) { 3954 ibdm_delete_glhca_list(gid_info); 3955 } 3956 } 3957 } 3958 3959 3960 /* 3961 * ibdm_retry_command() 3962 * Retries the failed command. 
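 *	Runs with gid_info->gl_mutex held on entry (ASSERTed below) and on
 *	return; the lock is dropped briefly around ibdm_reset_gidinfo() when
 *	the cached IBMF handle has gone bad, and again around the actual
 *	ibmf_msg_transport().  The rebuilt MAD reuses the addressing and
 *	redirection state cached in gid_info and re-arms the per-request
 *	timeout before it is sent.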
3963 * Returns IBDM_FAILURE/IBDM_SUCCESS 3964 */ 3965 static int 3966 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args) 3967 { 3968 int ret; 3969 ibmf_msg_t *msg; 3970 ib_mad_hdr_t *hdr; 3971 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info; 3972 timeout_id_t *timeout_id; 3973 ibdm_ioc_info_t *ioc; 3974 int ioc_no; 3975 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 3976 3977 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p " 3978 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3979 cb_args->cb_req_type, cb_args->cb_ioc_num, 3980 cb_args->cb_srvents_start); 3981 3982 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg); 3983 3984 3985 /* 3986 * Reset the gid if alloc_msg failed with BAD_HANDLE 3987 * ibdm_reset_gidinfo reinits the gid_info 3988 */ 3989 if (ret == IBMF_BAD_HANDLE) { 3990 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad", 3991 gid_info); 3992 3993 mutex_exit(&gid_info->gl_mutex); 3994 ibdm_reset_gidinfo(gid_info); 3995 mutex_enter(&gid_info->gl_mutex); 3996 3997 /* Retry alloc */ 3998 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, 3999 &msg); 4000 } 4001 4002 if (ret != IBDM_SUCCESS) { 4003 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p " 4004 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 4005 cb_args->cb_req_type, cb_args->cb_ioc_num, 4006 cb_args->cb_srvents_start); 4007 return (IBDM_FAILURE); 4008 } 4009 4010 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 4011 ibdm_alloc_send_buffers(msg); 4012 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 4013 4014 ibdm_bump_transactionID(gid_info); 4015 4016 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 4017 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 4018 if (gid_info->gl_redirected == B_TRUE) { 4019 if (gid_info->gl_redirect_dlid != 0) { 4020 msg->im_local_addr.ia_remote_lid = 4021 gid_info->gl_redirect_dlid; 4022 } 4023 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 4024 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 4025 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 4026 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 4027 } else { 4028 msg->im_local_addr.ia_remote_qno = 1; 4029 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 4030 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 4031 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 4032 } 4033 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 4034 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr)) 4035 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 4036 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 4037 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 4038 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 4039 hdr->Status = 0; 4040 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 4041 4042 switch (cb_args->cb_req_type) { 4043 case IBDM_REQ_TYPE_CLASSPORTINFO: 4044 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 4045 hdr->AttributeModifier = 0; 4046 timeout_id = &gid_info->gl_timeout_id; 4047 break; 4048 case IBDM_REQ_TYPE_IOUINFO: 4049 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 4050 hdr->AttributeModifier = 0; 4051 timeout_id = &gid_info->gl_timeout_id; 4052 break; 4053 case IBDM_REQ_TYPE_IOCINFO: 4054 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 4055 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 4056 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 4057 timeout_id = &ioc->ioc_timeout_id; 4058 break; 4059 case IBDM_REQ_TYPE_SRVENTS: 4060 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 4061 ibdm_fill_srv_attr_mod(hdr, cb_args); 
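		/*
		 * For ServiceEntries the 32-bit AttributeModifier encodes
		 * both the IOC slot (upper 16 bits) and the starting
		 * service-entry index (low bits), as decoded in
		 * ibdm_handle_redirection() above; ibdm_fill_srv_attr_mod()
		 * rebuilds that encoding from the fields saved in cb_args.
		 */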
4062 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 4063 timeout_id = 4064 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id; 4065 break; 4066 case IBDM_REQ_TYPE_IOU_DIAGCODE: 4067 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 4068 hdr->AttributeModifier = 0; 4069 timeout_id = &gid_info->gl_timeout_id; 4070 break; 4071 case IBDM_REQ_TYPE_IOC_DIAGCODE: 4072 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 4073 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 4074 ioc_no = cb_args->cb_ioc_num; 4075 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no]; 4076 timeout_id = &ioc->ioc_dc_timeout_id; 4077 break; 4078 } 4079 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hdr)) 4080 4081 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 4082 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 4083 4084 mutex_exit(&gid_info->gl_mutex); 4085 4086 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:" 4087 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num, 4088 cb_args->cb_srvents_start, *timeout_id); 4089 4090 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, 4091 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb, 4092 cb_args, 0) != IBMF_SUCCESS) { 4093 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p " 4094 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 4095 cb_args->cb_req_type, cb_args->cb_ioc_num, 4096 cb_args->cb_srvents_start); 4097 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 4098 } 4099 mutex_enter(&gid_info->gl_mutex); 4100 return (IBDM_SUCCESS); 4101 } 4102 4103 4104 /* 4105 * ibdm_update_ioc_port_gidlist() 4106 */ 4107 static void 4108 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest, 4109 ibdm_dp_gidinfo_t *gid_info) 4110 { 4111 int ii, ngid_ents; 4112 ibdm_gid_t *tmp; 4113 ibdm_hca_list_t *gid_hca_head, *temp; 4114 ibdm_hca_list_t *ioc_head = NULL; 4115 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 4116 4117 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter"); 4118 4119 ngid_ents = gid_info->gl_ngids; 4120 dest->ioc_nportgids = ngid_ents; 4121 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) * 4122 ngid_ents, KM_SLEEP); 4123 tmp = gid_info->gl_gid; 4124 for (ii = 0; (ii < ngid_ents) && (tmp); ii++) { 4125 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi; 4126 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo; 4127 tmp = tmp->gid_next; 4128 } 4129 4130 gid_hca_head = gid_info->gl_hca_list; 4131 while (gid_hca_head) { 4132 temp = ibdm_dup_hca_attr(gid_hca_head); 4133 temp->hl_next = ioc_head; 4134 ioc_head = temp; 4135 gid_hca_head = gid_hca_head->hl_next; 4136 } 4137 dest->ioc_hca_list = ioc_head; 4138 } 4139 4140 4141 /* 4142 * ibdm_alloc_send_buffers() 4143 * Allocates memory for the IBMF send buffer to send and/or receive 4144 * the Device Management MAD packet. 
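 *	The result is one IBDM_MAD_SIZE allocation laid out roughly as
 *	follows (offsets are computed below; a sketch only):
 *
 *	  im_bufs_mad_hdr -> | ib_mad_hdr_t | DM class hdr | class data |
 *	                                     ^im_bufs_cl_hdr ^im_bufs_cl_data
 *
 *	Nothing is sent from here; the caller fills in the headers and
 *	posts the message with ibmf_msg_transport().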
4145 */ 4146 static void 4147 ibdm_alloc_send_buffers(ibmf_msg_t *msgp) 4148 { 4149 msgp->im_msgbufs_send.im_bufs_mad_hdr = 4150 kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP); 4151 4152 msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *) 4153 msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t); 4154 msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDM_DM_MAD_HDR_SZ; 4155 4156 msgp->im_msgbufs_send.im_bufs_cl_data = 4157 ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr + IBDM_DM_MAD_HDR_SZ); 4158 msgp->im_msgbufs_send.im_bufs_cl_data_len = 4159 IBDM_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDM_DM_MAD_HDR_SZ; 4160 } 4161 4162 4163 /* 4164 * ibdm_free_send_buffers() 4165 * De-allocates memory for the IBMF send buffer 4166 */ 4167 static void 4168 ibdm_free_send_buffers(ibmf_msg_t *msgp) 4169 { 4170 if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL) 4171 kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE); 4172 } 4173 4174 /* 4175 * ibdm_probe_ioc() 4176 * 1. Gets the node records for the node GUID. This detects all the ports 4177 * of the IOU. 4178 * 2. Selectively probes all the IOCs, given its node GUID 4179 * 3. In case of a reprobe, only the IOC to be reprobed is sent the IOC 4180 * Controller Profile asynchronously 4181 */ 4182 /*ARGSUSED*/ 4183 static void 4184 ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag) 4185 { 4186 int ii, nrecords; 4187 size_t nr_len = 0, pi_len = 0; 4188 ib_gid_t sgid, dgid; 4189 ibdm_hca_list_t *hca_list = NULL; 4190 sa_node_record_t *nr, *tmp; 4191 ibdm_port_attr_t *port = NULL; 4192 ibdm_dp_gidinfo_t *reprobe_gid, *new_gid, *node_gid; 4193 ibdm_dp_gidinfo_t *temp_gidinfo; 4194 ibdm_gid_t *temp_gid; 4195 sa_portinfo_record_t *pi; 4196 4197 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%llx, %llx, %x): Begin", 4198 nodeguid, ioc_guid, reprobe_flag); 4199 4200 /* Rescan the GID list for any removed GIDs for reprobe */ 4201 if (reprobe_flag) 4202 ibdm_rescan_gidlist(&ioc_guid); 4203 4204 mutex_enter(&ibdm.ibdm_hl_mutex); 4205 for (ibdm_get_next_port(&hca_list, &port, 1); port; 4206 ibdm_get_next_port(&hca_list, &port, 1)) { 4207 reprobe_gid = new_gid = node_gid = NULL; 4208 4209 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid); 4210 if (nr == NULL) { 4211 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records"); 4212 continue; 4213 } 4214 nrecords = (nr_len / sizeof (sa_node_record_t)); 4215 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 4216 if ((pi = ibdm_get_portinfo( 4217 port->pa_sa_hdl, &pi_len, tmp->LID)) == NULL) { 4218 IBTF_DPRINTF_L4("ibdm", 4219 "\tibdm_get_portinfo: no portinfo recs"); 4220 continue; 4221 } 4222 4223 /* 4224 * If Device Management is not supported on 4225 * this port, skip the rest. 4226 */ 4227 if (!(pi->PortInfo.CapabilityMask & 4228 SM_CAP_MASK_IS_DM_SUPPD)) { 4229 kmem_free(pi, pi_len); 4230 continue; 4231 } 4232 4233 /* 4234 * For reprobes: Check if the GID is already in 4235 * the list.
If so, set the state to SKIPPED 4236 */ 4237 if (((temp_gidinfo = ibdm_find_gid(nodeguid, 4238 tmp->NodeInfo.PortGUID)) != NULL) && 4239 temp_gidinfo->gl_state == 4240 IBDM_GID_PROBING_COMPLETE) { 4241 ASSERT(reprobe_gid == NULL); 4242 ibdm_addto_glhcalist(temp_gidinfo, 4243 hca_list); 4244 reprobe_gid = temp_gidinfo; 4245 kmem_free(pi, pi_len); 4246 continue; 4247 } else if (temp_gidinfo != NULL) { 4248 kmem_free(pi, pi_len); 4249 ibdm_addto_glhcalist(temp_gidinfo, 4250 hca_list); 4251 continue; 4252 } 4253 4254 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : " 4255 "create_gid : prefix %llx, guid %llx\n", 4256 pi->PortInfo.GidPrefix, 4257 tmp->NodeInfo.PortGUID); 4258 4259 sgid.gid_prefix = port->pa_sn_prefix; 4260 sgid.gid_guid = port->pa_port_guid; 4261 dgid.gid_prefix = pi->PortInfo.GidPrefix; 4262 dgid.gid_guid = tmp->NodeInfo.PortGUID; 4263 new_gid = ibdm_create_gid_info(port, sgid, 4264 dgid); 4265 if (new_gid == NULL) { 4266 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4267 "create_gid_info failed\n"); 4268 kmem_free(pi, pi_len); 4269 continue; 4270 } 4271 if (node_gid == NULL) { 4272 node_gid = new_gid; 4273 ibdm_add_to_gl_gid(node_gid, node_gid); 4274 } else { 4275 IBTF_DPRINTF_L4("ibdm", 4276 "\tprobe_ioc: new gid"); 4277 temp_gid = kmem_zalloc( 4278 sizeof (ibdm_gid_t), KM_SLEEP); 4279 temp_gid->gid_dgid_hi = 4280 new_gid->gl_dgid_hi; 4281 temp_gid->gid_dgid_lo = 4282 new_gid->gl_dgid_lo; 4283 temp_gid->gid_next = node_gid->gl_gid; 4284 node_gid->gl_gid = temp_gid; 4285 node_gid->gl_ngids++; 4286 } 4287 new_gid->gl_nodeguid = nodeguid; 4288 new_gid->gl_portguid = dgid.gid_guid; 4289 ibdm_addto_glhcalist(new_gid, hca_list); 4290 4291 /* 4292 * Set the state to skipped as all these 4293 * gids point to the same node. 4294 * We (re)probe only one GID below and reset 4295 * state appropriately 4296 */ 4297 new_gid->gl_state = IBDM_GID_PROBING_SKIPPED; 4298 new_gid->gl_devid = (*tmp).NodeInfo.DeviceID; 4299 kmem_free(pi, pi_len); 4300 } 4301 kmem_free(nr, nr_len); 4302 4303 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d " 4304 "reprobe_gid %p new_gid %p node_gid %p", 4305 reprobe_flag, reprobe_gid, new_gid, node_gid); 4306 4307 if (reprobe_flag != 0 && reprobe_gid != NULL) { 4308 int niocs, jj; 4309 ibdm_ioc_info_t *tmp_ioc; 4310 int ioc_matched = 0; 4311 4312 mutex_exit(&ibdm.ibdm_hl_mutex); 4313 mutex_enter(&reprobe_gid->gl_mutex); 4314 reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS; 4315 niocs = 4316 reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots; 4317 reprobe_gid->gl_pending_cmds++; 4318 mutex_exit(&reprobe_gid->gl_mutex); 4319 4320 for (jj = 0; jj < niocs; jj++) { 4321 tmp_ioc = 4322 IBDM_GIDINFO2IOCINFO(reprobe_gid, jj); 4323 if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid) 4324 continue; 4325 4326 ioc_matched = 1; 4327 4328 /* 4329 * Explicitly set gl_reprobe_flag to 0 so that 4330 * IBnex is not notified on completion 4331 */ 4332 mutex_enter(&reprobe_gid->gl_mutex); 4333 reprobe_gid->gl_reprobe_flag = 0; 4334 mutex_exit(&reprobe_gid->gl_mutex); 4335 4336 mutex_enter(&ibdm.ibdm_mutex); 4337 ibdm.ibdm_ngid_probes_in_progress++; 4338 mutex_exit(&ibdm.ibdm_mutex); 4339 if (ibdm_send_ioc_profile(reprobe_gid, jj) != 4340 IBDM_SUCCESS) { 4341 IBTF_DPRINTF_L4("ibdm", 4342 "\tprobe_ioc: " 4343 "send_ioc_profile failed " 4344 "for ioc %d", jj); 4345 ibdm_gid_decr_pending(reprobe_gid); 4346 break; 4347 } 4348 mutex_enter(&ibdm.ibdm_mutex); 4349 ibdm_wait_probe_completion(); 4350 mutex_exit(&ibdm.ibdm_mutex); 4351 break; 4352 } 4353 if (ioc_matched == 0) 4354 
ibdm_gid_decr_pending(reprobe_gid); 4355 else { 4356 mutex_enter(&ibdm.ibdm_hl_mutex); 4357 break; 4358 } 4359 } else if (new_gid != NULL) { 4360 mutex_exit(&ibdm.ibdm_hl_mutex); 4361 node_gid = node_gid ? node_gid : new_gid; 4362 4363 /* 4364 * New or reinserted GID : Enable notification 4365 * to IBnex 4366 */ 4367 mutex_enter(&node_gid->gl_mutex); 4368 node_gid->gl_reprobe_flag = 1; 4369 mutex_exit(&node_gid->gl_mutex); 4370 4371 ibdm_probe_gid(node_gid); 4372 4373 mutex_enter(&ibdm.ibdm_hl_mutex); 4374 } 4375 } 4376 mutex_exit(&ibdm.ibdm_hl_mutex); 4377 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n"); 4378 } 4379 4380 4381 /* 4382 * ibdm_probe_gid() 4383 * Selectively probes the GID 4384 */ 4385 static void 4386 ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info) 4387 { 4388 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:"); 4389 4390 /* 4391 * A Cisco FC GW needs the special handling to get IOUnitInfo. 4392 */ 4393 mutex_enter(&gid_info->gl_mutex); 4394 if (ibdm_is_cisco_switch(gid_info)) { 4395 gid_info->gl_pending_cmds++; 4396 gid_info->gl_state = IBDM_SET_CLASSPORTINFO; 4397 mutex_exit(&gid_info->gl_mutex); 4398 4399 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) { 4400 4401 mutex_enter(&gid_info->gl_mutex); 4402 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4403 --gid_info->gl_pending_cmds; 4404 mutex_exit(&gid_info->gl_mutex); 4405 4406 /* free the hca_list on this gid_info */ 4407 ibdm_delete_glhca_list(gid_info); 4408 gid_info = gid_info->gl_next; 4409 return; 4410 } 4411 4412 mutex_enter(&gid_info->gl_mutex); 4413 ibdm_wait_cisco_probe_completion(gid_info); 4414 4415 IBTF_DPRINTF_L4("ibdm", 4416 "\tprobe_gid: CISCO Wakeup signal received"); 4417 } 4418 4419 /* move on to the 'GET_CLASSPORTINFO' stage */ 4420 gid_info->gl_pending_cmds++; 4421 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 4422 mutex_exit(&gid_info->gl_mutex); 4423 4424 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 4425 4426 mutex_enter(&gid_info->gl_mutex); 4427 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 4428 --gid_info->gl_pending_cmds; 4429 mutex_exit(&gid_info->gl_mutex); 4430 4431 /* free the hca_list on this gid_info */ 4432 ibdm_delete_glhca_list(gid_info); 4433 gid_info = gid_info->gl_next; 4434 return; 4435 } 4436 4437 mutex_enter(&ibdm.ibdm_mutex); 4438 ibdm.ibdm_ngid_probes_in_progress++; 4439 gid_info = gid_info->gl_next; 4440 ibdm_wait_probe_completion(); 4441 mutex_exit(&ibdm.ibdm_mutex); 4442 4443 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received"); 4444 } 4445 4446 4447 /* 4448 * ibdm_create_gid_info() 4449 * Allocates a gid_info structure and initializes 4450 * Returns pointer to the structure on success 4451 * and NULL on failure 4452 */ 4453 static ibdm_dp_gidinfo_t * 4454 ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid) 4455 { 4456 uint8_t ii, npaths; 4457 sa_path_record_t *path; 4458 size_t len; 4459 ibdm_pkey_tbl_t *pkey_tbl; 4460 ibdm_dp_gidinfo_t *gid_info = NULL; 4461 int ret; 4462 4463 IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin"); 4464 npaths = 1; 4465 4466 /* query for reversible paths */ 4467 if (port->pa_sa_hdl) 4468 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, 4469 sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, 4470 &len, &path); 4471 else 4472 return (NULL); 4473 4474 if (ret == IBMF_SUCCESS && path) { 4475 ibdm_dump_path_info(path); 4476 4477 gid_info = kmem_zalloc( 4478 sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 4479 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 4480 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, 
NULL); 4481 gid_info->gl_dgid_hi = path->DGID.gid_prefix; 4482 gid_info->gl_dgid_lo = path->DGID.gid_guid; 4483 gid_info->gl_sgid_hi = path->SGID.gid_prefix; 4484 gid_info->gl_sgid_lo = path->SGID.gid_guid; 4485 gid_info->gl_p_key = path->P_Key; 4486 gid_info->gl_sa_hdl = port->pa_sa_hdl; 4487 gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl; 4488 gid_info->gl_slid = path->SLID; 4489 gid_info->gl_dlid = path->DLID; 4490 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID) 4491 << IBDM_GID_TRANSACTIONID_SHIFT; 4492 gid_info->gl_min_transactionID = gid_info->gl_transactionID; 4493 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1) 4494 << IBDM_GID_TRANSACTIONID_SHIFT; 4495 gid_info->gl_SL = path->SL; 4496 4497 gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 4498 for (ii = 0; ii < port->pa_npkeys; ii++) { 4499 if (port->pa_pkey_tbl == NULL) 4500 break; 4501 4502 pkey_tbl = &port->pa_pkey_tbl[ii]; 4503 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 4504 (pkey_tbl->pt_qp_hdl != NULL)) { 4505 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 4506 break; 4507 } 4508 } 4509 kmem_free(path, len); 4510 4511 /* 4512 * QP handle for GID not initialized. No matching Pkey 4513 * was found!! ibdm should *not* hit this case. Flag an 4514 * error and drop the GID if ibdm does encounter this. 4515 */ 4516 if (gid_info->gl_qp_hdl == NULL) { 4517 IBTF_DPRINTF_L2(ibdm_string, 4518 "\tcreate_gid_info: No matching Pkey"); 4519 ibdm_delete_gidinfo(gid_info); 4520 return (NULL); 4521 } 4522 4523 ibdm.ibdm_ngids++; 4524 if (ibdm.ibdm_dp_gidlist_head == NULL) { 4525 ibdm.ibdm_dp_gidlist_head = gid_info; 4526 ibdm.ibdm_dp_gidlist_tail = gid_info; 4527 } else { 4528 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 4529 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 4530 ibdm.ibdm_dp_gidlist_tail = gid_info; 4531 } 4532 } 4533 4534 return (gid_info); 4535 } 4536 4537 4538 /* 4539 * ibdm_get_node_records 4540 * Sends a SA query to get the NODE record 4541 * Returns pointer to the sa_node_record_t on success 4542 * and NULL on failure 4543 */ 4544 static sa_node_record_t * 4545 ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid) 4546 { 4547 sa_node_record_t req, *resp = NULL; 4548 ibmf_saa_access_args_t args; 4549 int ret; 4550 4551 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin"); 4552 4553 bzero(&req, sizeof (sa_node_record_t)); 4554 req.NodeInfo.NodeGUID = guid; 4555 4556 args.sq_attr_id = SA_NODERECORD_ATTRID; 4557 args.sq_access_type = IBMF_SAA_RETRIEVE; 4558 args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID; 4559 args.sq_template = &req; 4560 args.sq_callback = NULL; 4561 args.sq_callback_arg = NULL; 4562 4563 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4564 if (ret != IBMF_SUCCESS) { 4565 IBTF_DPRINTF_L2("ibdm", "\tget_node_records:" 4566 " SA Retrieve Failed: %d", ret); 4567 return (NULL); 4568 } 4569 if ((resp == NULL) || (*length == 0)) { 4570 IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records"); 4571 return (NULL); 4572 } 4573 4574 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx " 4575 "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID); 4576 4577 return (resp); 4578 } 4579 4580 4581 /* 4582 * ibdm_get_portinfo() 4583 * Sends a SA query to get the PortInfo record 4584 * Returns pointer to the sa_portinfo_record_t on success 4585 * and NULL on failure 4586 */ 4587 static sa_portinfo_record_t * 4588 ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid) 4589 { 4590 sa_portinfo_record_t req, *resp = NULL; 
4591 ibmf_saa_access_args_t args; 4592 int ret; 4593 4594 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin"); 4595 4596 bzero(&req, sizeof (sa_portinfo_record_t)); 4597 req.EndportLID = lid; 4598 4599 args.sq_attr_id = SA_PORTINFORECORD_ATTRID; 4600 args.sq_access_type = IBMF_SAA_RETRIEVE; 4601 args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 4602 args.sq_template = &req; 4603 args.sq_callback = NULL; 4604 args.sq_callback_arg = NULL; 4605 4606 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 4607 if (ret != IBMF_SUCCESS) { 4608 IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:" 4609 " SA Retrieve Failed: 0x%X", ret); 4610 return (NULL); 4611 } 4612 if ((*length == 0) || (resp == NULL)) 4613 return (NULL); 4614 4615 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x", 4616 resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask); 4617 return (resp); 4618 } 4619 4620 4621 /* 4622 * ibdm_ibnex_register_callback 4623 * IB nexus callback routine for HCA attach and detach notification 4624 */ 4625 void 4626 ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback) 4627 { 4628 IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks"); 4629 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4630 ibdm.ibdm_ibnex_callback = ibnex_dm_callback; 4631 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4632 } 4633 4634 4635 /* 4636 * ibdm_ibnex_unregister_callbacks 4637 */ 4638 void 4639 ibdm_ibnex_unregister_callback() 4640 { 4641 IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks"); 4642 mutex_enter(&ibdm.ibdm_ibnex_mutex); 4643 ibdm.ibdm_ibnex_callback = NULL; 4644 mutex_exit(&ibdm.ibdm_ibnex_mutex); 4645 } 4646 4647 /* 4648 * ibdm_get_waittime() 4649 * Calculates the wait time based on the last HCA attach time 4650 */ 4651 static time_t 4652 ibdm_get_waittime(ib_guid_t hca_guid, int dft_wait) 4653 { 4654 int ii; 4655 time_t temp, wait_time = 0; 4656 ibdm_hca_list_t *hca; 4657 4658 IBTF_DPRINTF_L4("ibdm", "\tget_waittime hcaguid:%llx" 4659 "\tport settling time %d", hca_guid, dft_wait); 4660 4661 ASSERT(mutex_owned(&ibdm.ibdm_hl_mutex)); 4662 4663 hca = ibdm.ibdm_hca_list_head; 4664 4665 if (hca_guid) { 4666 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4667 if ((hca_guid == hca->hl_hca_guid) && 4668 (hca->hl_nports != hca->hl_nports_active)) { 4669 wait_time = 4670 ddi_get_time() - hca->hl_attach_time; 4671 wait_time = ((wait_time >= dft_wait) ? 4672 0 : (dft_wait - wait_time)); 4673 break; 4674 } 4675 hca = hca->hl_next; 4676 } 4677 IBTF_DPRINTF_L4("ibdm", "\tget_waittime %llx", wait_time); 4678 return (wait_time); 4679 } 4680 4681 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4682 if (hca->hl_nports != hca->hl_nports_active) { 4683 temp = ddi_get_time() - hca->hl_attach_time; 4684 temp = ((temp >= dft_wait) ? 0 : (dft_wait - temp)); 4685 wait_time = (temp > wait_time) ? 
temp : wait_time; 4686 } 4687 } 4688 IBTF_DPRINTF_L4("ibdm", "\tget_waittime %llx", wait_time); 4689 return (wait_time); 4690 } 4691 4692 void 4693 ibdm_ibnex_port_settle_wait(ib_guid_t hca_guid, int dft_wait) 4694 { 4695 time_t wait_time; 4696 4697 mutex_enter(&ibdm.ibdm_hl_mutex); 4698 4699 while ((wait_time = ibdm_get_waittime(hca_guid, dft_wait)) > 0) { 4700 (void) cv_timedwait(&ibdm.ibdm_port_settle_cv, 4701 &ibdm.ibdm_hl_mutex, 4702 ddi_get_lbolt() + drv_usectohz(wait_time * 1000000)); 4703 } 4704 4705 mutex_exit(&ibdm.ibdm_hl_mutex); 4706 } 4707 4708 4709 /* 4710 * ibdm_ibnex_probe_hcaport 4711 * Probes the presence of HCA port (with HCA dip and port number) 4712 * Returns port attributes structure on SUCCESS 4713 */ 4714 ibdm_port_attr_t * 4715 ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num) 4716 { 4717 int ii, jj; 4718 ibdm_hca_list_t *hca_list; 4719 ibdm_port_attr_t *port_attr; 4720 4721 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:"); 4722 4723 mutex_enter(&ibdm.ibdm_hl_mutex); 4724 hca_list = ibdm.ibdm_hca_list_head; 4725 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4726 if (hca_list->hl_hca_guid == hca_guid) { 4727 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4728 if (hca_list->hl_port_attr[jj].pa_port_num == 4729 port_num) { 4730 break; 4731 } 4732 } 4733 if (jj != hca_list->hl_nports) 4734 break; 4735 } 4736 hca_list = hca_list->hl_next; 4737 } 4738 if (ii == ibdm.ibdm_hca_count) { 4739 IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found"); 4740 mutex_exit(&ibdm.ibdm_hl_mutex); 4741 return (NULL); 4742 } 4743 port_attr = (ibdm_port_attr_t *)kmem_zalloc( 4744 sizeof (ibdm_port_attr_t), KM_SLEEP); 4745 bcopy((char *)&hca_list->hl_port_attr[jj], 4746 port_attr, sizeof (ibdm_port_attr_t)); 4747 ibdm_update_port_attr(port_attr); 4748 4749 mutex_exit(&ibdm.ibdm_hl_mutex); 4750 return (port_attr); 4751 } 4752 4753 4754 /* 4755 * ibdm_ibnex_get_port_attrs 4756 * Scan all HCAs for a matching port_guid. 4757 * Returns "port attributes" structure on success. 
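 *	The structure handed back is a KM_SLEEP-allocated copy of the
 *	matching hl_port_attr[] entry, refreshed via ibdm_update_port_attr();
 *	the caller is expected to release it with ibdm_ibnex_free_port_attr().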
4758 */ 4759 ibdm_port_attr_t * 4760 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid) 4761 { 4762 int ii, jj; 4763 ibdm_hca_list_t *hca_list; 4764 ibdm_port_attr_t *port_attr; 4765 4766 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:"); 4767 4768 mutex_enter(&ibdm.ibdm_hl_mutex); 4769 hca_list = ibdm.ibdm_hca_list_head; 4770 4771 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4772 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4773 if (hca_list->hl_port_attr[jj].pa_port_guid == 4774 port_guid) { 4775 break; 4776 } 4777 } 4778 if (jj != hca_list->hl_nports) 4779 break; 4780 hca_list = hca_list->hl_next; 4781 } 4782 4783 if (ii == ibdm.ibdm_hca_count) { 4784 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found"); 4785 mutex_exit(&ibdm.ibdm_hl_mutex); 4786 return (NULL); 4787 } 4788 4789 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t), 4790 KM_SLEEP); 4791 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr, 4792 sizeof (ibdm_port_attr_t)); 4793 ibdm_update_port_attr(port_attr); 4794 4795 mutex_exit(&ibdm.ibdm_hl_mutex); 4796 return (port_attr); 4797 } 4798 4799 4800 /* 4801 * ibdm_ibnex_free_port_attr() 4802 */ 4803 void 4804 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr) 4805 { 4806 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:"); 4807 if (port_attr) { 4808 if (port_attr->pa_pkey_tbl != NULL) { 4809 kmem_free(port_attr->pa_pkey_tbl, 4810 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t))); 4811 } 4812 kmem_free(port_attr, sizeof (ibdm_port_attr_t)); 4813 } 4814 } 4815 4816 4817 /* 4818 * ibdm_ibnex_get_hca_list() 4819 * Returns portinfo for all the port for all the HCA's 4820 */ 4821 void 4822 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count) 4823 { 4824 ibdm_hca_list_t *head = NULL, *temp, *temp1; 4825 int ii; 4826 4827 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:"); 4828 4829 mutex_enter(&ibdm.ibdm_hl_mutex); 4830 temp = ibdm.ibdm_hca_list_head; 4831 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4832 temp1 = ibdm_dup_hca_attr(temp); 4833 temp1->hl_next = head; 4834 head = temp1; 4835 temp = temp->hl_next; 4836 } 4837 *count = ibdm.ibdm_hca_count; 4838 *hca = head; 4839 mutex_exit(&ibdm.ibdm_hl_mutex); 4840 } 4841 4842 4843 /* 4844 * ibdm_ibnex_get_hca_info_by_guid() 4845 */ 4846 ibdm_hca_list_t * 4847 ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid) 4848 { 4849 ibdm_hca_list_t *head = NULL, *hca = NULL; 4850 4851 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip"); 4852 4853 mutex_enter(&ibdm.ibdm_hl_mutex); 4854 head = ibdm.ibdm_hca_list_head; 4855 while (head) { 4856 if (head->hl_hca_guid == hca_guid) { 4857 hca = ibdm_dup_hca_attr(head); 4858 hca->hl_next = NULL; 4859 break; 4860 } 4861 head = head->hl_next; 4862 } 4863 mutex_exit(&ibdm.ibdm_hl_mutex); 4864 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip %p", hca); 4865 return (hca); 4866 } 4867 4868 4869 /* 4870 * ibdm_dup_hca_attr() 4871 * Allocate a new HCA attribute strucuture and initialize 4872 * hca attribute structure with the incoming HCA attributes 4873 * returned the allocated hca attributes. 
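 *	The duplicate is a single allocation of sizeof (ibdm_hca_list_t) plus
 *	hl_nports * sizeof (ibdm_port_attr_t), with hl_port_attr pointing just
 *	past the list header; this is why ibdm_ibnex_free_hca_list() below can
 *	release the header and port array with one kmem_free() of the same
 *	length (the per-port P_Key tables are freed separately).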
4874 */ 4875 static ibdm_hca_list_t * 4876 ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca) 4877 { 4878 int len; 4879 ibdm_hca_list_t *out_hca; 4880 4881 len = sizeof (ibdm_hca_list_t) + 4882 (in_hca->hl_nports * sizeof (ibdm_port_attr_t)); 4883 IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len); 4884 out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP); 4885 bcopy((char *)in_hca, 4886 (char *)out_hca, sizeof (ibdm_hca_list_t)); 4887 if (in_hca->hl_nports) { 4888 out_hca->hl_port_attr = (ibdm_port_attr_t *) 4889 ((char *)out_hca + sizeof (ibdm_hca_list_t)); 4890 bcopy((char *)in_hca->hl_port_attr, 4891 (char *)out_hca->hl_port_attr, 4892 (in_hca->hl_nports * sizeof (ibdm_port_attr_t))); 4893 for (len = 0; len < out_hca->hl_nports; len++) 4894 ibdm_update_port_attr(&out_hca->hl_port_attr[len]); 4895 } 4896 return (out_hca); 4897 } 4898 4899 4900 /* 4901 * ibdm_ibnex_free_hca_list() 4902 * Free one/more HCA lists 4903 */ 4904 void 4905 ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list) 4906 { 4907 int ii; 4908 size_t len; 4909 ibdm_hca_list_t *temp; 4910 ibdm_port_attr_t *port; 4911 4912 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:"); 4913 ASSERT(hca_list); 4914 while (hca_list) { 4915 temp = hca_list; 4916 hca_list = hca_list->hl_next; 4917 for (ii = 0; ii < temp->hl_nports; ii++) { 4918 port = &temp->hl_port_attr[ii]; 4919 len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 4920 if (len != 0) 4921 kmem_free(port->pa_pkey_tbl, len); 4922 } 4923 len = sizeof (ibdm_hca_list_t) + (temp->hl_nports * 4924 sizeof (ibdm_port_attr_t)); 4925 kmem_free(temp, len); 4926 } 4927 } 4928 4929 4930 /* 4931 * ibdm_ibnex_probe_iocguid() 4932 * Probes the IOC on the fabric and returns the IOC information 4933 * if present. Otherwise, NULL is returned 4934 */ 4935 /* ARGSUSED */ 4936 ibdm_ioc_info_t * 4937 ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag) 4938 { 4939 int k; 4940 ibdm_ioc_info_t *ioc_info; 4941 ibdm_dp_gidinfo_t *gid_info; /* used as index and arg */ 4942 timeout_id_t *timeout_id; 4943 4944 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin", 4945 iou, ioc_guid, reprobe_flag); 4946 /* Check whether we know this already */ 4947 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4948 if (ioc_info == NULL) { 4949 mutex_enter(&ibdm.ibdm_mutex); 4950 while (ibdm.ibdm_busy & IBDM_BUSY) 4951 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4952 ibdm.ibdm_busy |= IBDM_BUSY; 4953 mutex_exit(&ibdm.ibdm_mutex); 4954 ibdm_probe_ioc(iou, ioc_guid, 0); 4955 mutex_enter(&ibdm.ibdm_mutex); 4956 ibdm.ibdm_busy &= ~IBDM_BUSY; 4957 cv_broadcast(&ibdm.ibdm_busy_cv); 4958 mutex_exit(&ibdm.ibdm_mutex); 4959 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 4960 } else if (reprobe_flag) { /* Handle Reprobe for the IOC */ 4961 ASSERT(gid_info != NULL); 4962 /* Free the ioc_list before reprobe; and cancel any timers */ 4963 mutex_enter(&ibdm.ibdm_mutex); 4964 mutex_enter(&gid_info->gl_mutex); 4965 if (ioc_info->ioc_timeout_id) { 4966 timeout_id = ioc_info->ioc_timeout_id; 4967 ioc_info->ioc_timeout_id = 0; 4968 mutex_exit(&gid_info->gl_mutex); 4969 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4970 "ioc_timeout_id = 0x%x", timeout_id); 4971 if (untimeout(timeout_id) == -1) { 4972 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4973 "untimeout ioc_timeout_id failed"); 4974 } 4975 mutex_enter(&gid_info->gl_mutex); 4976 } 4977 if (ioc_info->ioc_dc_timeout_id) { 4978 timeout_id = ioc_info->ioc_dc_timeout_id; 4979 ioc_info->ioc_dc_timeout_id = 0; 4980 
mutex_exit(&gid_info->gl_mutex); 4981 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4982 "ioc_dc_timeout_id = 0x%x", timeout_id); 4983 if (untimeout(timeout_id) == -1) { 4984 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4985 "untimeout ioc_dc_timeout_id failed"); 4986 } 4987 mutex_enter(&gid_info->gl_mutex); 4988 } 4989 for (k = 0; k < ioc_info->ioc_profile.ioc_service_entries; k++) 4990 if (ioc_info->ioc_serv[k].se_timeout_id) { 4991 timeout_id = ioc_info->ioc_serv[k]. 4992 se_timeout_id; 4993 ioc_info->ioc_serv[k].se_timeout_id = 0; 4994 mutex_exit(&gid_info->gl_mutex); 4995 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4996 "ioc_info->ioc_serv[k].se_timeout_id = %x", 4997 k, timeout_id); 4998 if (untimeout(timeout_id) == -1) { 4999 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 5000 "untimeout se_timeout_id %d " 5001 "failed", k); 5002 } 5003 mutex_enter(&gid_info->gl_mutex); 5004 } 5005 mutex_exit(&gid_info->gl_mutex); 5006 mutex_exit(&ibdm.ibdm_mutex); 5007 ibdm_ibnex_free_ioc_list(ioc_info); 5008 5009 mutex_enter(&ibdm.ibdm_mutex); 5010 while (ibdm.ibdm_busy & IBDM_BUSY) 5011 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5012 ibdm.ibdm_busy |= IBDM_BUSY; 5013 mutex_exit(&ibdm.ibdm_mutex); 5014 5015 ibdm_probe_ioc(iou, ioc_guid, 1); 5016 5017 /* 5018 * Skip if gl_reprobe_flag is set, this will be 5019 * a re-inserted / new GID, for which notifications 5020 * have already been send. 5021 */ 5022 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 5023 gid_info = gid_info->gl_next) { 5024 uint8_t ii, niocs; 5025 ibdm_ioc_info_t *ioc; 5026 5027 if (gid_info->gl_iou == NULL) 5028 continue; 5029 5030 if (gid_info->gl_reprobe_flag) { 5031 gid_info->gl_reprobe_flag = 0; 5032 continue; 5033 } 5034 5035 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 5036 for (ii = 0; ii < niocs; ii++) { 5037 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 5038 if (ioc->ioc_profile.ioc_guid == ioc_guid) { 5039 mutex_enter(&ibdm.ibdm_mutex); 5040 ibdm_reprobe_update_port_srv(ioc, 5041 gid_info); 5042 mutex_exit(&ibdm.ibdm_mutex); 5043 } 5044 } 5045 } 5046 mutex_enter(&ibdm.ibdm_mutex); 5047 ibdm.ibdm_busy &= ~IBDM_BUSY; 5048 cv_broadcast(&ibdm.ibdm_busy_cv); 5049 mutex_exit(&ibdm.ibdm_mutex); 5050 5051 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info); 5052 } 5053 return (ioc_info); 5054 } 5055 5056 5057 /* 5058 * ibdm_get_ioc_info_with_gid() 5059 * Returns pointer to ibdm_ioc_info_t if it finds 5060 * matching record for the ioc_guid. Otherwise NULL is returned. 5061 * The pointer to gid_info is set to the second argument in case that 5062 * the non-NULL value returns (and the second argument is not NULL). 5063 * 5064 * Note. use the same strings as "ibnex_get_ioc_info" in 5065 * IBTF_DPRINTF() to keep compatibility. 
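 *	The walk is serialized with the rest of ibdm through the IBDM_BUSY
 *	flag / ibdm_busy_cv handshake, and what is returned is a duplicate
 *	made by ibdm_dup_ioc_info(); callers release it with
 *	ibdm_ibnex_free_ioc_list().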
5066 */ 5067 static ibdm_ioc_info_t * 5068 ibdm_get_ioc_info_with_gid(ib_guid_t ioc_guid, 5069 ibdm_dp_gidinfo_t **gid_info) 5070 { 5071 int ii; 5072 ibdm_ioc_info_t *ioc = NULL, *tmp = NULL; 5073 ibdm_dp_gidinfo_t *gid_list; 5074 ib_dm_io_unitinfo_t *iou; 5075 5076 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid); 5077 5078 mutex_enter(&ibdm.ibdm_mutex); 5079 while (ibdm.ibdm_busy & IBDM_BUSY) 5080 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5081 ibdm.ibdm_busy |= IBDM_BUSY; 5082 5083 if (gid_info) 5084 *gid_info = NULL; /* clear the value of gid_info */ 5085 5086 gid_list = ibdm.ibdm_dp_gidlist_head; 5087 while (gid_list) { 5088 mutex_enter(&gid_list->gl_mutex); 5089 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 5090 mutex_exit(&gid_list->gl_mutex); 5091 gid_list = gid_list->gl_next; 5092 continue; 5093 } 5094 if (gid_list->gl_iou == NULL) { 5095 IBTF_DPRINTF_L2("ibdm", 5096 "\tget_ioc_info: No IOU info"); 5097 mutex_exit(&gid_list->gl_mutex); 5098 gid_list = gid_list->gl_next; 5099 continue; 5100 } 5101 iou = &gid_list->gl_iou->iou_info; 5102 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 5103 tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii); 5104 if ((tmp->ioc_profile.ioc_guid == ioc_guid) && 5105 (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) { 5106 ioc = ibdm_dup_ioc_info(tmp, gid_list); 5107 if (gid_info) 5108 *gid_info = gid_list; /* set this ptr */ 5109 mutex_exit(&gid_list->gl_mutex); 5110 ibdm.ibdm_busy &= ~IBDM_BUSY; 5111 cv_broadcast(&ibdm.ibdm_busy_cv); 5112 mutex_exit(&ibdm.ibdm_mutex); 5113 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End"); 5114 return (ioc); 5115 } 5116 } 5117 if (ii == iou->iou_num_ctrl_slots) 5118 ioc = NULL; 5119 5120 mutex_exit(&gid_list->gl_mutex); 5121 gid_list = gid_list->gl_next; 5122 } 5123 5124 ibdm.ibdm_busy &= ~IBDM_BUSY; 5125 cv_broadcast(&ibdm.ibdm_busy_cv); 5126 mutex_exit(&ibdm.ibdm_mutex); 5127 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End"); 5128 return (ioc); 5129 } 5130 5131 /* 5132 * ibdm_ibnex_get_ioc_info() 5133 * Returns pointer to ibdm_ioc_info_t if it finds 5134 * matching record for the ioc_guid, otherwise NULL 5135 * is returned 5136 * 5137 * Note. this is a wrapper function to ibdm_get_ioc_info_with_gid() now. 
5138 */ 5139 ibdm_ioc_info_t * 5140 ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid) 5141 { 5142 /* will not use the gid_info pointer, so the second arg is NULL */ 5143 return (ibdm_get_ioc_info_with_gid(ioc_guid, NULL)); 5144 } 5145 5146 /* 5147 * ibdm_ibnex_get_ioc_count() 5148 * Returns number of ibdm_ioc_info_t it finds 5149 */ 5150 int 5151 ibdm_ibnex_get_ioc_count(void) 5152 { 5153 int count = 0, k; 5154 ibdm_ioc_info_t *ioc; 5155 ibdm_dp_gidinfo_t *gid_list; 5156 5157 mutex_enter(&ibdm.ibdm_mutex); 5158 ibdm_sweep_fabric(0); 5159 5160 while (ibdm.ibdm_busy & IBDM_BUSY) 5161 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5162 ibdm.ibdm_busy |= IBDM_BUSY; 5163 5164 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 5165 gid_list = gid_list->gl_next) { 5166 mutex_enter(&gid_list->gl_mutex); 5167 if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) || 5168 (gid_list->gl_iou == NULL)) { 5169 mutex_exit(&gid_list->gl_mutex); 5170 continue; 5171 } 5172 for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots; 5173 k++) { 5174 ioc = IBDM_GIDINFO2IOCINFO(gid_list, k); 5175 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) 5176 ++count; 5177 } 5178 mutex_exit(&gid_list->gl_mutex); 5179 } 5180 ibdm.ibdm_busy &= ~IBDM_BUSY; 5181 cv_broadcast(&ibdm.ibdm_busy_cv); 5182 mutex_exit(&ibdm.ibdm_mutex); 5183 5184 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count); 5185 return (count); 5186 } 5187 5188 5189 /* 5190 * ibdm_ibnex_get_ioc_list() 5191 * Returns information about all the IOCs present on the fabric. 5192 * Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL. 5193 * Does not sweep fabric if DONOT_PROBE is set 5194 */ 5195 ibdm_ioc_info_t * 5196 ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag) 5197 { 5198 int ii; 5199 ibdm_ioc_info_t *ioc_list = NULL, *tmp, *ioc; 5200 ibdm_dp_gidinfo_t *gid_list; 5201 ib_dm_io_unitinfo_t *iou; 5202 5203 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter"); 5204 5205 mutex_enter(&ibdm.ibdm_mutex); 5206 if (list_flag != IBDM_IBNEX_DONOT_PROBE) 5207 ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL); 5208 5209 while (ibdm.ibdm_busy & IBDM_BUSY) 5210 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5211 ibdm.ibdm_busy |= IBDM_BUSY; 5212 5213 gid_list = ibdm.ibdm_dp_gidlist_head; 5214 while (gid_list) { 5215 mutex_enter(&gid_list->gl_mutex); 5216 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 5217 mutex_exit(&gid_list->gl_mutex); 5218 gid_list = gid_list->gl_next; 5219 continue; 5220 } 5221 if (gid_list->gl_iou == NULL) { 5222 IBTF_DPRINTF_L2("ibdm", 5223 "\tget_ioc_list: No IOU info"); 5224 mutex_exit(&gid_list->gl_mutex); 5225 gid_list = gid_list->gl_next; 5226 continue; 5227 } 5228 iou = &gid_list->gl_iou->iou_info; 5229 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 5230 ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii); 5231 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5232 tmp = ibdm_dup_ioc_info(ioc, gid_list); 5233 tmp->ioc_next = ioc_list; 5234 ioc_list = tmp; 5235 } 5236 } 5237 mutex_exit(&gid_list->gl_mutex); 5238 gid_list = gid_list->gl_next; 5239 } 5240 ibdm.ibdm_busy &= ~IBDM_BUSY; 5241 cv_broadcast(&ibdm.ibdm_busy_cv); 5242 mutex_exit(&ibdm.ibdm_mutex); 5243 5244 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End"); 5245 return (ioc_list); 5246 } 5247 5248 /* 5249 * ibdm_dup_ioc_info() 5250 * Duplicate the IOC information and return the IOC 5251 * information. 
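 *	Must be called with the owning gid_list->gl_mutex held (ASSERTed
 *	below).  Besides the flat bcopy, the duplicate gets its own
 *	ioc_gid_list and ioc_hca_list via ibdm_update_ioc_port_gidlist(),
 *	so it has to be released with ibdm_ibnex_free_ioc_list().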
5252 */ 5253 static ibdm_ioc_info_t * 5254 ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list) 5255 { 5256 ibdm_ioc_info_t *out_ioc; 5257 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc)); 5258 ASSERT(MUTEX_HELD(&gid_list->gl_mutex)); 5259 5260 out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP); 5261 bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t)); 5262 ibdm_update_ioc_port_gidlist(out_ioc, gid_list); 5263 out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid; 5264 out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode; 5265 5266 return (out_ioc); 5267 } 5268 5269 5270 /* 5271 * ibdm_free_ioc_list() 5272 * Deallocate memory for IOC list structure 5273 */ 5274 void 5275 ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc) 5276 { 5277 ibdm_ioc_info_t *temp; 5278 5279 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:"); 5280 while (ioc) { 5281 temp = ioc; 5282 ioc = ioc->ioc_next; 5283 kmem_free(temp->ioc_gid_list, 5284 (sizeof (ibdm_gid_t) * temp->ioc_nportgids)); 5285 if (temp->ioc_hca_list) 5286 ibdm_ibnex_free_hca_list(temp->ioc_hca_list); 5287 kmem_free(temp, sizeof (ibdm_ioc_info_t)); 5288 } 5289 } 5290 5291 5292 /* 5293 * ibdm_ibnex_update_pkey_tbls 5294 * Updates the DM P_Key database. 5295 * NOTE: Two cases are handled here: P_Key being added or removed. 5296 * 5297 * Arguments : NONE 5298 * Return Values : NONE 5299 */ 5300 void 5301 ibdm_ibnex_update_pkey_tbls(void) 5302 { 5303 int h, pp, pidx; 5304 uint_t nports; 5305 uint_t size; 5306 ib_pkey_t new_pkey; 5307 ib_pkey_t *orig_pkey; 5308 ibdm_hca_list_t *hca_list; 5309 ibdm_port_attr_t *port; 5310 ibt_hca_portinfo_t *pinfop; 5311 5312 IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:"); 5313 5314 mutex_enter(&ibdm.ibdm_hl_mutex); 5315 hca_list = ibdm.ibdm_hca_list_head; 5316 5317 for (h = 0; h < ibdm.ibdm_hca_count; h++) { 5318 5319 /* This updates P_Key Tables for all ports of this HCA */ 5320 (void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop, 5321 &nports, &size); 5322 5323 /* number of ports shouldn't have changed */ 5324 ASSERT(nports == hca_list->hl_nports); 5325 5326 for (pp = 0; pp < hca_list->hl_nports; pp++) { 5327 port = &hca_list->hl_port_attr[pp]; 5328 5329 /* 5330 * First figure out the P_Keys from IBTL. 5331 * Three things could have happened: 5332 * New P_Keys added 5333 * Existing P_Keys removed 5334 * Both of the above two 5335 * 5336 * Loop through the P_Key Indices and check if a 5337 * give P_Key_Ix matches that of the one seen by 5338 * IBDM. If they match no action is needed. 5339 * 5340 * If they don't match: 5341 * 1. if orig_pkey is invalid and new_pkey is valid 5342 * ---> add new_pkey to DM database 5343 * 2. if orig_pkey is valid and new_pkey is invalid 5344 * ---> remove orig_pkey from DM database 5345 * 3. if orig_pkey and new_pkey are both valid: 5346 * ---> remov orig_pkey from DM database 5347 * ---> add new_pkey to DM database 5348 * 4. if orig_pkey and new_pkey are both invalid: 5349 * ---> do nothing. Updated DM database. 
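 *		A compact way to read the four cases above (a sketch only;
 *		the actions are exactly those taken in the code below):
 *
 *		  orig_pkey   new_pkey    action
 *		  ---------   ---------   ------------------------------------
 *		  invalid     valid       record new_pkey, port_attr_ibmf_init
 *		  valid       invalid     record new_pkey, port_attr_ibmf_fini
 *		  valid       valid       fini(orig), record new_pkey, init(new)
 *		  invalid     invalid     just record new_pkey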
5350 */ 5351 5352 for (pidx = 0; pidx < port->pa_npkeys; pidx++) { 5353 new_pkey = pinfop[pp].p_pkey_tbl[pidx]; 5354 orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey; 5355 5356 /* keys match - do nothing */ 5357 if (*orig_pkey == new_pkey) 5358 continue; 5359 5360 if (IBDM_INVALID_PKEY(*orig_pkey) && 5361 !IBDM_INVALID_PKEY(new_pkey)) { 5362 /* P_Key was added */ 5363 IBTF_DPRINTF_L5("ibdm", 5364 "\tibnex_update_pkey_tbls: new " 5365 "P_Key added = 0x%x", new_pkey); 5366 *orig_pkey = new_pkey; 5367 ibdm_port_attr_ibmf_init(port, 5368 new_pkey, pp); 5369 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 5370 IBDM_INVALID_PKEY(new_pkey)) { 5371 /* P_Key was removed */ 5372 IBTF_DPRINTF_L5("ibdm", 5373 "\tibnex_update_pkey_tbls: P_Key " 5374 "removed = 0x%x", *orig_pkey); 5375 *orig_pkey = new_pkey; 5376 (void) ibdm_port_attr_ibmf_fini(port, 5377 pidx); 5378 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 5379 !IBDM_INVALID_PKEY(new_pkey)) { 5380 /* P_Key were replaced */ 5381 IBTF_DPRINTF_L5("ibdm", 5382 "\tibnex_update_pkey_tbls: P_Key " 5383 "replaced 0x%x with 0x%x", 5384 *orig_pkey, new_pkey); 5385 (void) ibdm_port_attr_ibmf_fini(port, 5386 pidx); 5387 *orig_pkey = new_pkey; 5388 ibdm_port_attr_ibmf_init(port, 5389 new_pkey, pp); 5390 } else { 5391 /* 5392 * P_Keys are invalid 5393 * set anyway to reflect if 5394 * INVALID_FULL was changed to 5395 * INVALID_LIMITED or vice-versa. 5396 */ 5397 *orig_pkey = new_pkey; 5398 } /* end of else */ 5399 5400 } /* loop of p_key index */ 5401 5402 } /* loop of #ports of HCA */ 5403 5404 ibt_free_portinfo(pinfop, size); 5405 hca_list = hca_list->hl_next; 5406 5407 } /* loop for all HCAs in the system */ 5408 5409 mutex_exit(&ibdm.ibdm_hl_mutex); 5410 } 5411 5412 5413 /* 5414 * ibdm_send_ioc_profile() 5415 * Send IOC Controller Profile request. When the request is completed 5416 * IBMF calls ibdm_process_incoming_mad routine to inform about 5417 * the completion. 5418 */ 5419 static int 5420 ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no) 5421 { 5422 ibmf_msg_t *msg; 5423 ib_mad_hdr_t *hdr; 5424 ibdm_ioc_info_t *ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]); 5425 ibdm_timeout_cb_args_t *cb_args; 5426 5427 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: " 5428 "gid info 0x%p, ioc_no = %d", gid_info, ioc_no); 5429 5430 /* 5431 * Send command to get IOC profile. 5432 * Allocate a IBMF packet and initialize the packet. 
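 *	The MAD built here is a DevMgt GET of IOC_CTRL_PROFILE whose
 *	AttributeModifier is the 1-based controller slot (ioc_no + 1); the
 *	GET_RESP flows back through ibdm_ibmf_recv_cb() and is parsed by
 *	ibdm_handle_ioc_profile().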
5433 */ 5434 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 5435 &msg) != IBMF_SUCCESS) { 5436 IBTF_DPRINTF_L2("ibdm", "\tsend_ioc_profile: pkt alloc fail"); 5437 return (IBDM_FAILURE); 5438 } 5439 5440 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg)) 5441 ibdm_alloc_send_buffers(msg); 5442 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg)) 5443 5444 mutex_enter(&gid_info->gl_mutex); 5445 ibdm_bump_transactionID(gid_info); 5446 mutex_exit(&gid_info->gl_mutex); 5447 5448 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 5449 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 5450 if (gid_info->gl_redirected == B_TRUE) { 5451 if (gid_info->gl_redirect_dlid != 0) { 5452 msg->im_local_addr.ia_remote_lid = 5453 gid_info->gl_redirect_dlid; 5454 } 5455 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 5456 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 5457 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 5458 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL; 5459 } else { 5460 msg->im_local_addr.ia_remote_qno = 1; 5461 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 5462 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 5463 msg->im_local_addr.ia_service_level = gid_info->gl_SL; 5464 } 5465 5466 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 5467 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 5468 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 5469 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 5470 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 5471 hdr->Status = 0; 5472 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 5473 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 5474 hdr->AttributeModifier = h2b32(ioc_no + 1); 5475 5476 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 5477 cb_args = &ioc_info->ioc_cb_args; 5478 cb_args->cb_gid_info = gid_info; 5479 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 5480 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 5481 cb_args->cb_ioc_num = ioc_no; 5482 5483 mutex_enter(&gid_info->gl_mutex); 5484 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 5485 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 5486 mutex_exit(&gid_info->gl_mutex); 5487 5488 IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:" 5489 "timeout %x", ioc_info->ioc_timeout_id); 5490 5491 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 5492 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 5493 IBTF_DPRINTF_L2("ibdm", 5494 "\tsend_ioc_profile: msg transport failed"); 5495 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 5496 } 5497 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 5498 return (IBDM_SUCCESS); 5499 } 5500 5501 5502 /* 5503 * ibdm_port_reachable 5504 * Returns B_TRUE if the port GID is reachable by sending 5505 * a SA query to get the NODE record for this port GUID. 5506 */ 5507 static boolean_t 5508 ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid) 5509 { 5510 sa_node_record_t *resp; 5511 size_t length; 5512 5513 /* 5514 * Verify if it's reachable by getting the node record. 5515 */ 5516 if (ibdm_get_node_record_by_port(sa_hdl, guid, &resp, &length) == 5517 IBDM_SUCCESS) { 5518 kmem_free(resp, length); 5519 return (B_TRUE); 5520 } 5521 return (B_FALSE); 5522 } 5523 5524 /* 5525 * ibdm_get_node_record_by_port 5526 * Sends a SA query to get the NODE record for port GUID 5527 * Returns IBDM_SUCCESS if the port GID is reachable. 5528 * 5529 * Note: the caller must be responsible for freeing the resource 5530 * by calling kmem_free(resp, length) later. 
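 *
 * A typical call sequence (cf. ibdm_port_reachable() above):
 *
 *	if (ibdm_get_node_record_by_port(sa_hdl, guid, &resp,
 *	    &length) == IBDM_SUCCESS) {
 *		... use resp->NodeInfo ...
 *		kmem_free(resp, length);
 *	}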
5531 */ 5532 static int 5533 ibdm_get_node_record_by_port(ibmf_saa_handle_t sa_hdl, ib_guid_t guid, 5534 sa_node_record_t **resp, size_t *length) 5535 { 5536 sa_node_record_t req; 5537 ibmf_saa_access_args_t args; 5538 int ret; 5539 ASSERT(resp != NULL && length != NULL); 5540 5541 IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx", 5542 guid); 5543 5544 bzero(&req, sizeof (sa_node_record_t)); 5545 req.NodeInfo.PortGUID = guid; 5546 5547 args.sq_attr_id = SA_NODERECORD_ATTRID; 5548 args.sq_access_type = IBMF_SAA_RETRIEVE; 5549 args.sq_component_mask = SA_NODEINFO_COMPMASK_PORTGUID; 5550 args.sq_template = &req; 5551 args.sq_callback = NULL; 5552 args.sq_callback_arg = NULL; 5553 5554 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) resp); 5555 if (ret != IBMF_SUCCESS) { 5556 IBTF_DPRINTF_L2("ibdm", "\tport_reachable:" 5557 " SA Retrieve Failed: %d", ret); 5558 return (IBDM_FAILURE); 5559 } 5560 if (*resp == NULL || *length == 0) { 5561 IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records"); 5562 return (IBDM_FAILURE); 5563 } 5564 /* 5565 * There is one NodeRecord on each endport on a subnet. 5566 */ 5567 ASSERT(*length == sizeof (sa_node_record_t)); 5568 5569 return (IBDM_SUCCESS); 5570 } 5571 5572 5573 /* 5574 * Update the gidlist for all affected IOCs when GID becomes 5575 * available/unavailable. 5576 * 5577 * Parameters : 5578 * gidinfo - Incoming / Outgoing GID. 5579 * add_flag - 1 for GID added, 0 for GID removed. 5580 * - (-1) : IOC gid list updated, ioc_list required. 5581 * 5582 * This function gets the GID for the node GUID corresponding to the 5583 * port GID. Gets the IOU info 5584 */ 5585 static ibdm_ioc_info_t * 5586 ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag) 5587 { 5588 ibdm_dp_gidinfo_t *node_gid = NULL; 5589 uint8_t niocs, ii; 5590 ibdm_ioc_info_t *ioc, *ioc_list = NULL, *tmp; 5591 5592 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist"); 5593 5594 switch (avail_flag) { 5595 case 1 : 5596 node_gid = ibdm_check_dest_nodeguid(gid_info); 5597 break; 5598 case 0 : 5599 node_gid = ibdm_handle_gid_rm(gid_info); 5600 break; 5601 case -1 : 5602 node_gid = gid_info; 5603 break; 5604 default : 5605 break; 5606 } 5607 5608 if (node_gid == NULL) { 5609 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: " 5610 "No node GID found, port gid 0x%p, avail_flag %d", 5611 gid_info, avail_flag); 5612 return (NULL); 5613 } 5614 5615 mutex_enter(&node_gid->gl_mutex); 5616 if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE && 5617 node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) || 5618 node_gid->gl_iou == NULL) { 5619 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist " 5620 "gl_state %x, gl_iou %p", node_gid->gl_state, 5621 node_gid->gl_iou); 5622 mutex_exit(&node_gid->gl_mutex); 5623 return (NULL); 5624 } 5625 5626 niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots; 5627 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x", 5628 niocs); 5629 for (ii = 0; ii < niocs; ii++) { 5630 ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii); 5631 /* 5632 * Skip IOCs for which probe is not complete or 5633 * reprobe is progress 5634 */ 5635 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 5636 tmp = ibdm_dup_ioc_info(ioc, node_gid); 5637 tmp->ioc_info_updated.ib_gid_prop_updated = 1; 5638 tmp->ioc_next = ioc_list; 5639 ioc_list = tmp; 5640 } 5641 } 5642 mutex_exit(&node_gid->gl_mutex); 5643 5644 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p", 5645 ioc_list); 5646 return (ioc_list); 5647 } 5648 5649 /* 5650 * ibdm_saa_event_cb : 5651 * Event handling which does 
*not* require ibdm_hl_mutex to be 5652 * held is executed in the same thread. This is to prevent 5653 * deadlocks with HCA port down notifications which hold the 5654 * ibdm_hl_mutex. 5655 * 5656 * GID_AVAILABLE event is handled here. A taskq is spawned to 5657 * handle GID_UNAVAILABLE. 5658 * 5659 * A new mutex ibdm_ibnex_mutex has been introduced to protect 5660 * ibnex_callback. This has been done to prevent any possible 5661 * deadlock (described above) while handling GID_AVAILABLE. 5662 * 5663 * IBMF calls the event callback for a HCA port. The SA handle 5664 * for this port is valid until the callback returns. 5665 * IBDM calling IBMF using the above SA handle should thus be valid. 5666 * 5667 * IBDM will additionally check (SA handle != NULL) before 5668 * calling IBMF. 5669 */ 5670 /*ARGSUSED*/ 5671 static void 5672 ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle, 5673 ibmf_saa_subnet_event_t ibmf_saa_event, 5674 ibmf_saa_event_details_t *event_details, void *callback_arg) 5675 { 5676 ibdm_saa_event_arg_t *event_arg; 5677 ib_gid_t sgid, dgid; 5678 ibdm_port_attr_t *hca_port; 5679 ibdm_dp_gidinfo_t *gid_info, *node_gid_info = NULL; 5680 sa_node_record_t *nrec; 5681 size_t length; 5682 5683 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5684 5685 hca_port = (ibdm_port_attr_t *)callback_arg; 5686 5687 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n", 5688 ibmf_saa_handle, ibmf_saa_event, event_details, 5689 callback_arg); 5690 #ifdef DEBUG 5691 if (ibdm_ignore_saa_event) 5692 return; 5693 #endif 5694 5695 if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) { 5696 /* 5697 * Ensure no other probe / sweep fabric is in 5698 * progress. 5699 */ 5700 mutex_enter(&ibdm.ibdm_mutex); 5701 while (ibdm.ibdm_busy & IBDM_BUSY) 5702 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5703 ibdm.ibdm_busy |= IBDM_BUSY; 5704 mutex_exit(&ibdm.ibdm_mutex); 5705 5706 /* 5707 * If we already know about this GID, return. 5708 * GID_AVAILABLE may be reported for multiple HCA 5709 * ports.
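 *
 * (ibdm_check_dgid() below matches on both the port GUID and
 * the subnet prefix, so a GID reported by several HCA ports is
 * added to IBDM's GID list only once.)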
5710 */ 5711 if ((ibdm_check_dgid(event_details->ie_gid.gid_guid, 5712 event_details->ie_gid.gid_prefix)) != NULL) { 5713 mutex_enter(&ibdm.ibdm_mutex); 5714 ibdm.ibdm_busy &= ~IBDM_BUSY; 5715 cv_broadcast(&ibdm.ibdm_busy_cv); 5716 mutex_exit(&ibdm.ibdm_mutex); 5717 return; 5718 } 5719 5720 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5721 "Insertion notified", 5722 event_details->ie_gid.gid_prefix, 5723 event_details->ie_gid.gid_guid); 5724 5725 /* This is a new gid, insert it to GID list */ 5726 sgid.gid_prefix = hca_port->pa_sn_prefix; 5727 sgid.gid_guid = hca_port->pa_port_guid; 5728 dgid.gid_prefix = event_details->ie_gid.gid_prefix; 5729 dgid.gid_guid = event_details->ie_gid.gid_guid; 5730 gid_info = ibdm_create_gid_info(hca_port, sgid, dgid); 5731 if (gid_info == NULL) { 5732 IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: " 5733 "create_gid_info returned NULL"); 5734 mutex_enter(&ibdm.ibdm_mutex); 5735 ibdm.ibdm_busy &= ~IBDM_BUSY; 5736 cv_broadcast(&ibdm.ibdm_busy_cv); 5737 mutex_exit(&ibdm.ibdm_mutex); 5738 return; 5739 } 5740 mutex_enter(&gid_info->gl_mutex); 5741 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 5742 mutex_exit(&gid_info->gl_mutex); 5743 5744 /* Get the node GUID */ 5745 if (ibdm_get_node_record_by_port(ibmf_saa_handle, dgid.gid_guid, 5746 &nrec, &length) != IBDM_SUCCESS) { 5747 /* 5748 * Set the state to PROBE_NOT_DONE for the 5749 * next sweep to probe it 5750 */ 5751 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: " 5752 "Skipping GID : port GUID not found"); 5753 mutex_enter(&gid_info->gl_mutex); 5754 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5755 mutex_exit(&gid_info->gl_mutex); 5756 mutex_enter(&ibdm.ibdm_mutex); 5757 ibdm.ibdm_busy &= ~IBDM_BUSY; 5758 cv_broadcast(&ibdm.ibdm_busy_cv); 5759 mutex_exit(&ibdm.ibdm_mutex); 5760 return; 5761 } 5762 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID; 5763 gid_info->gl_devid = nrec->NodeInfo.DeviceID; 5764 kmem_free(nrec, length); 5765 gid_info->gl_portguid = dgid.gid_guid; 5766 5767 /* 5768 * Get the gid info with the same node GUID. 5769 */ 5770 mutex_enter(&ibdm.ibdm_mutex); 5771 node_gid_info = ibdm.ibdm_dp_gidlist_head; 5772 while (node_gid_info) { 5773 if (node_gid_info->gl_nodeguid == 5774 gid_info->gl_nodeguid && 5775 node_gid_info->gl_iou != NULL) { 5776 break; 5777 } 5778 node_gid_info = node_gid_info->gl_next; 5779 } 5780 mutex_exit(&ibdm.ibdm_mutex); 5781 5782 /* 5783 * Handling a new GID requires filling of gl_hca_list. 5784 * This require ibdm hca_list to be parsed and hence 5785 * holding the ibdm_hl_mutex. Spawning a new thread to 5786 * handle this. 
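 *
 * (The dispatched handler is ibdm_saa_handle_new_gid() below,
 * which fills gl_hca_list while holding ibdm_hl_mutex and then
 * probes the new GID.)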
5787 */ 5788 if (node_gid_info == NULL) { 5789 if (taskq_dispatch(system_taskq, 5790 ibdm_saa_handle_new_gid, (void *)gid_info, 5791 TQ_NOSLEEP) == NULL) { 5792 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5793 "new_gid taskq_dispatch failed"); 5794 return; 5795 } 5796 } 5797 5798 mutex_enter(&ibdm.ibdm_mutex); 5799 ibdm.ibdm_busy &= ~IBDM_BUSY; 5800 cv_broadcast(&ibdm.ibdm_busy_cv); 5801 mutex_exit(&ibdm.ibdm_mutex); 5802 return; 5803 } 5804 5805 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE) 5806 return; 5807 5808 event_arg = (ibdm_saa_event_arg_t *)kmem_alloc( 5809 sizeof (ibdm_saa_event_arg_t), KM_SLEEP); 5810 event_arg->ibmf_saa_handle = ibmf_saa_handle; 5811 event_arg->ibmf_saa_event = ibmf_saa_event; 5812 bcopy(event_details, &event_arg->event_details, 5813 sizeof (ibmf_saa_event_details_t)); 5814 event_arg->callback_arg = callback_arg; 5815 5816 if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq, 5817 (void *)event_arg, TQ_NOSLEEP) == NULL) { 5818 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5819 "taskq_dispatch failed"); 5820 ibdm_free_saa_event_arg(event_arg); 5821 return; 5822 } 5823 } 5824 5825 /* 5826 * Handle a new GID discovered by GID_AVAILABLE saa event. 5827 */ 5828 void 5829 ibdm_saa_handle_new_gid(void *arg) 5830 { 5831 ibdm_dp_gidinfo_t *gid_info; 5832 ibdm_hca_list_t *hca_list = NULL; 5833 ibdm_port_attr_t *port = NULL; 5834 ibdm_ioc_info_t *ioc_list = NULL; 5835 5836 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg); 5837 5838 gid_info = (ibdm_dp_gidinfo_t *)arg; 5839 5840 /* 5841 * Ensure that no other sweep / probe has completed 5842 * probing this gid. 5843 */ 5844 mutex_enter(&gid_info->gl_mutex); 5845 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 5846 mutex_exit(&gid_info->gl_mutex); 5847 return; 5848 } 5849 mutex_exit(&gid_info->gl_mutex); 5850 5851 /* 5852 * Parse HCAs to fill gl_hca_list 5853 */ 5854 mutex_enter(&ibdm.ibdm_hl_mutex); 5855 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5856 ibdm_get_next_port(&hca_list, &port, 1)) { 5857 if (ibdm_port_reachable(port->pa_sa_hdl, 5858 gid_info->gl_portguid) == B_TRUE) { 5859 ibdm_addto_glhcalist(gid_info, hca_list); 5860 } 5861 } 5862 mutex_exit(&ibdm.ibdm_hl_mutex); 5863 5864 /* 5865 * Ensure no other probe / sweep fabric is in 5866 * progress. 
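 *
 * (This busy-bit/CV handshake is the serialization idiom used
 * throughout this file: wait for IBDM_BUSY to clear, set it,
 * drop ibdm_mutex while working, then clear the bit and
 * broadcast ibdm_busy_cv.)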
5867 */ 5868 mutex_enter(&ibdm.ibdm_mutex); 5869 while (ibdm.ibdm_busy & IBDM_BUSY) 5870 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5871 ibdm.ibdm_busy |= IBDM_BUSY; 5872 mutex_exit(&ibdm.ibdm_mutex); 5873 5874 /* 5875 * New IOU probe it, to check if new IOCs 5876 */ 5877 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: " 5878 "new GID : probing"); 5879 mutex_enter(&ibdm.ibdm_mutex); 5880 ibdm.ibdm_ngid_probes_in_progress++; 5881 mutex_exit(&ibdm.ibdm_mutex); 5882 mutex_enter(&gid_info->gl_mutex); 5883 gid_info->gl_reprobe_flag = 0; 5884 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5885 mutex_exit(&gid_info->gl_mutex); 5886 ibdm_probe_gid_thread((void *)gid_info); 5887 5888 mutex_enter(&ibdm.ibdm_mutex); 5889 ibdm_wait_probe_completion(); 5890 mutex_exit(&ibdm.ibdm_mutex); 5891 5892 if (gid_info->gl_iou == NULL) { 5893 mutex_enter(&ibdm.ibdm_mutex); 5894 ibdm.ibdm_busy &= ~IBDM_BUSY; 5895 cv_broadcast(&ibdm.ibdm_busy_cv); 5896 mutex_exit(&ibdm.ibdm_mutex); 5897 return; 5898 } 5899 5900 /* 5901 * Update GID list in all IOCs affected by this 5902 */ 5903 ioc_list = ibdm_update_ioc_gidlist(gid_info, 1); 5904 5905 /* 5906 * Pass on the IOCs with updated GIDs to IBnexus 5907 */ 5908 if (ioc_list) { 5909 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5910 if (ibdm.ibdm_ibnex_callback != NULL) { 5911 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 5912 IBDM_EVENT_IOC_PROP_UPDATE); 5913 } 5914 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5915 } 5916 5917 mutex_enter(&ibdm.ibdm_mutex); 5918 ibdm.ibdm_busy &= ~IBDM_BUSY; 5919 cv_broadcast(&ibdm.ibdm_busy_cv); 5920 mutex_exit(&ibdm.ibdm_mutex); 5921 } 5922 5923 /* 5924 * ibdm_saa_event_taskq : 5925 * GID_UNAVAILABLE Event handling requires ibdm_hl_mutex to be 5926 * held. The GID_UNAVAILABLE handling is done in a taskq to 5927 * prevent deadlocks with HCA port down notifications which hold 5928 * ibdm_hl_mutex. 
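 *
 * The argument is an ibdm_saa_event_arg_t allocated by
 * ibdm_saa_event_cb(); it is released on every return path
 * below via ibdm_free_saa_event_arg().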
5929 */ 5930 void 5931 ibdm_saa_event_taskq(void *arg) 5932 { 5933 ibdm_saa_event_arg_t *event_arg; 5934 ibmf_saa_handle_t ibmf_saa_handle; 5935 ibmf_saa_subnet_event_t ibmf_saa_event; 5936 ibmf_saa_event_details_t *event_details; 5937 void *callback_arg; 5938 5939 ibdm_dp_gidinfo_t *gid_info; 5940 ibdm_port_attr_t *hca_port, *port = NULL; 5941 ibdm_hca_list_t *hca_list = NULL; 5942 int sa_handle_valid = 0; 5943 ibdm_ioc_info_t *ioc_list = NULL; 5944 5945 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5946 5947 event_arg = (ibdm_saa_event_arg_t *)arg; 5948 ibmf_saa_handle = event_arg->ibmf_saa_handle; 5949 ibmf_saa_event = event_arg->ibmf_saa_event; 5950 event_details = &event_arg->event_details; 5951 callback_arg = event_arg->callback_arg; 5952 5953 ASSERT(callback_arg != NULL); 5954 ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE); 5955 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)", 5956 ibmf_saa_handle, ibmf_saa_event, event_details, 5957 callback_arg); 5958 5959 hca_port = (ibdm_port_attr_t *)callback_arg; 5960 5961 /* Check if the port_attr is still valid */ 5962 mutex_enter(&ibdm.ibdm_hl_mutex); 5963 for (ibdm_get_next_port(&hca_list, &port, 0); port; 5964 ibdm_get_next_port(&hca_list, &port, 0)) { 5965 if (port == hca_port && port->pa_port_guid == 5966 hca_port->pa_port_guid) { 5967 if (ibmf_saa_handle == hca_port->pa_sa_hdl) 5968 sa_handle_valid = 1; 5969 break; 5970 } 5971 } 5972 mutex_exit(&ibdm.ibdm_hl_mutex); 5973 if (sa_handle_valid == 0) { 5974 ibdm_free_saa_event_arg(event_arg); 5975 return; 5976 } 5977 5978 if (hca_port && (hca_port->pa_sa_hdl == NULL || 5979 ibmf_saa_handle != hca_port->pa_sa_hdl)) { 5980 ibdm_free_saa_event_arg(event_arg); 5981 return; 5982 } 5983 hca_list = NULL; 5984 port = NULL; 5985 5986 /* 5987 * Check if the GID is visible to other HCA ports. 5988 * Return if so. 5989 */ 5990 mutex_enter(&ibdm.ibdm_hl_mutex); 5991 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5992 ibdm_get_next_port(&hca_list, &port, 1)) { 5993 if (ibdm_port_reachable(port->pa_sa_hdl, 5994 event_details->ie_gid.gid_guid) == B_TRUE) { 5995 mutex_exit(&ibdm.ibdm_hl_mutex); 5996 ibdm_free_saa_event_arg(event_arg); 5997 return; 5998 } 5999 } 6000 mutex_exit(&ibdm.ibdm_hl_mutex); 6001 6002 /* 6003 * Ensure no other probe / sweep fabric is in 6004 * progress. 6005 */ 6006 mutex_enter(&ibdm.ibdm_mutex); 6007 while (ibdm.ibdm_busy & IBDM_BUSY) 6008 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 6009 ibdm.ibdm_busy |= IBDM_BUSY; 6010 mutex_exit(&ibdm.ibdm_mutex); 6011 6012 /* 6013 * If this GID is no longer in GID list, return 6014 * GID_UNAVAILABLE may be reported for multiple HCA 6015 * ports. 
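 *
 * If the GID is found, it is unlinked from the global GID list
 * below, its IOU info is freed once no port GIDs remain, and
 * any affected IOCs are reported to IB nexus with
 * IBDM_EVENT_IOC_PROP_UPDATE.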
6016 */ 6017 mutex_enter(&ibdm.ibdm_mutex); 6018 gid_info = ibdm.ibdm_dp_gidlist_head; 6019 while (gid_info) { 6020 if (gid_info->gl_portguid == 6021 event_details->ie_gid.gid_guid) { 6022 break; 6023 } 6024 gid_info = gid_info->gl_next; 6025 } 6026 mutex_exit(&ibdm.ibdm_mutex); 6027 if (gid_info == NULL) { 6028 mutex_enter(&ibdm.ibdm_mutex); 6029 ibdm.ibdm_busy &= ~IBDM_BUSY; 6030 cv_broadcast(&ibdm.ibdm_busy_cv); 6031 mutex_exit(&ibdm.ibdm_mutex); 6032 ibdm_free_saa_event_arg(event_arg); 6033 return; 6034 } 6035 6036 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 6037 "Unavailable notification", 6038 event_details->ie_gid.gid_prefix, 6039 event_details->ie_gid.gid_guid); 6040 6041 /* 6042 * Update GID list in all IOCs affected by this 6043 */ 6044 if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED || 6045 gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) 6046 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 6047 6048 /* 6049 * Remove GID from the global GID list 6050 * Handle the case where all port GIDs for an 6051 * IOU have been hot-removed. Check both gid_info 6052 * & ioc_info for checking ngids. 6053 */ 6054 mutex_enter(&ibdm.ibdm_mutex); 6055 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 6056 mutex_enter(&gid_info->gl_mutex); 6057 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 6058 mutex_exit(&gid_info->gl_mutex); 6059 } 6060 if (gid_info->gl_prev != NULL) 6061 gid_info->gl_prev->gl_next = gid_info->gl_next; 6062 if (gid_info->gl_next != NULL) 6063 gid_info->gl_next->gl_prev = gid_info->gl_prev; 6064 6065 if (gid_info == ibdm.ibdm_dp_gidlist_head) 6066 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 6067 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 6068 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 6069 ibdm.ibdm_ngids--; 6070 6071 ibdm.ibdm_busy &= ~IBDM_BUSY; 6072 cv_broadcast(&ibdm.ibdm_busy_cv); 6073 mutex_exit(&ibdm.ibdm_mutex); 6074 6075 /* free the hca_list on this gid_info */ 6076 ibdm_delete_glhca_list(gid_info); 6077 6078 mutex_destroy(&gid_info->gl_mutex); 6079 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 6080 6081 /* 6082 * Pass on the IOCs with updated GIDs to IBnexus 6083 */ 6084 if (ioc_list) { 6085 IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE " 6086 "IOC_PROP_UPDATE for %p\n", ioc_list); 6087 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6088 if (ibdm.ibdm_ibnex_callback != NULL) { 6089 (*ibdm.ibdm_ibnex_callback)((void *) 6090 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6091 } 6092 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6093 } 6094 6095 ibdm_free_saa_event_arg(event_arg); 6096 } 6097 6098 6099 static int 6100 ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev) 6101 { 6102 ibdm_gid_t *scan_new, *scan_prev; 6103 int cmp_failed = 0; 6104 6105 ASSERT(new != NULL); 6106 ASSERT(prev != NULL); 6107 6108 /* 6109 * Search for each new gid anywhere in the prev GID list. 6110 * Note that the gid list could have been re-ordered. 
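 *
 * Returns 0 when every entry of 'new' is found somewhere in
 * 'prev', and 1 as soon as one entry is missing. Callers are
 * expected to compare the list lengths separately (see
 * ibdm_reprobe_update_port_srv()).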
6111 */ 6112 for (scan_new = new; scan_new; scan_new = scan_new->gid_next) { 6113 for (scan_prev = prev, cmp_failed = 1; scan_prev; 6114 scan_prev = scan_prev->gid_next) { 6115 if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi && 6116 scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) { 6117 cmp_failed = 0; 6118 break; 6119 } 6120 } 6121 6122 if (cmp_failed) 6123 return (1); 6124 } 6125 return (0); 6126 } 6127 6128 /* 6129 * This is always called in a single thread 6130 * This function updates the gid_list and serv_list of IOC 6131 * The current gid_list is in ioc_info_t(contains only port 6132 * guids for which probe is done) & gidinfo_t(other port gids) 6133 * The gids in both locations are used for comparision. 6134 */ 6135 static void 6136 ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo) 6137 { 6138 ibdm_gid_t *cur_gid_list; 6139 uint_t cur_nportgids; 6140 6141 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 6142 6143 ioc->ioc_info_updated.ib_prop_updated = 0; 6144 6145 6146 /* Current GID list in gid_info only */ 6147 cur_gid_list = gidinfo->gl_gid; 6148 cur_nportgids = gidinfo->gl_ngids; 6149 6150 if (ioc->ioc_prev_serv_cnt != 6151 ioc->ioc_profile.ioc_service_entries || 6152 ibdm_serv_cmp(&ioc->ioc_serv[0], &ioc->ioc_prev_serv[0], 6153 ioc->ioc_prev_serv_cnt)) 6154 ioc->ioc_info_updated.ib_srv_prop_updated = 1; 6155 6156 if (ioc->ioc_prev_nportgids != cur_nportgids || 6157 ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) { 6158 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 6159 } else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) { 6160 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 6161 } 6162 6163 /* Zero out previous entries */ 6164 ibdm_free_gid_list(ioc->ioc_prev_gid_list); 6165 if (ioc->ioc_prev_serv) 6166 kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt * 6167 sizeof (ibdm_srvents_info_t)); 6168 ioc->ioc_prev_serv_cnt = 0; 6169 ioc->ioc_prev_nportgids = 0; 6170 ioc->ioc_prev_serv = NULL; 6171 ioc->ioc_prev_gid_list = NULL; 6172 } 6173 6174 /* 6175 * Handle GID removal. This returns gid_info of an GID for the same 6176 * node GUID, if found. For an GID with IOU information, the same 6177 * gid_info is returned if no gid_info with same node_guid is found. 6178 */ 6179 static ibdm_dp_gidinfo_t * 6180 ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid) 6181 { 6182 ibdm_dp_gidinfo_t *gid_list; 6183 6184 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid); 6185 6186 if (rm_gid->gl_iou == NULL) { 6187 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou"); 6188 /* 6189 * Search for a GID with same node_guid and 6190 * gl_iou != NULL 6191 */ 6192 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 6193 gid_list = gid_list->gl_next) { 6194 if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid 6195 == rm_gid->gl_nodeguid)) 6196 break; 6197 } 6198 6199 if (gid_list) 6200 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 6201 6202 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 6203 return (gid_list); 6204 } else { 6205 /* 6206 * Search for a GID with same node_guid and 6207 * gl_iou == NULL 6208 */ 6209 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou"); 6210 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 6211 gid_list = gid_list->gl_next) { 6212 if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid 6213 == rm_gid->gl_nodeguid)) 6214 break; 6215 } 6216 6217 if (gid_list) { 6218 /* 6219 * Copy the following fields from rm_gid : 6220 * 1. gl_state 6221 * 2. gl_iou 6222 * 3. 
gl_gid & gl_ngids 6223 * 6224 * Note : Function is synchronized by 6225 * ibdm_busy flag. 6226 * 6227 * Note : Redirect info is initialized if 6228 * any MADs for the GID fail 6229 */ 6230 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm " 6231 "copying info to GID with gl_iou != NULl"); 6232 gid_list->gl_state = rm_gid->gl_state; 6233 gid_list->gl_iou = rm_gid->gl_iou; 6234 gid_list->gl_gid = rm_gid->gl_gid; 6235 gid_list->gl_ngids = rm_gid->gl_ngids; 6236 6237 /* Remove the GID from gl_gid list */ 6238 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 6239 } else { 6240 /* 6241 * Handle a case where all GIDs to the IOU have 6242 * been removed. 6243 */ 6244 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID " 6245 "to IOU"); 6246 6247 ibdm_rmfrom_glgid_list(rm_gid, rm_gid); 6248 return (rm_gid); 6249 } 6250 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 6251 return (gid_list); 6252 } 6253 } 6254 6255 static void 6256 ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info, 6257 ibdm_dp_gidinfo_t *rm_gid) 6258 { 6259 ibdm_gid_t *tmp, *prev; 6260 6261 IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)", 6262 gid_info, rm_gid); 6263 6264 for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) { 6265 if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi && 6266 tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) { 6267 if (prev == NULL) 6268 gid_info->gl_gid = tmp->gid_next; 6269 else 6270 prev->gid_next = tmp->gid_next; 6271 6272 kmem_free(tmp, sizeof (ibdm_gid_t)); 6273 gid_info->gl_ngids--; 6274 break; 6275 } else { 6276 prev = tmp; 6277 tmp = tmp->gid_next; 6278 } 6279 } 6280 } 6281 6282 static void 6283 ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest) 6284 { 6285 ibdm_gid_t *head = NULL, *new, *tail; 6286 6287 /* First copy the destination */ 6288 for (; dest; dest = dest->gid_next) { 6289 new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 6290 new->gid_dgid_hi = dest->gid_dgid_hi; 6291 new->gid_dgid_lo = dest->gid_dgid_lo; 6292 new->gid_next = head; 6293 head = new; 6294 } 6295 6296 /* Insert this to the source */ 6297 if (*src_ptr == NULL) 6298 *src_ptr = head; 6299 else { 6300 for (tail = *src_ptr; tail->gid_next != NULL; 6301 tail = tail->gid_next) 6302 ; 6303 6304 tail->gid_next = head; 6305 } 6306 } 6307 6308 static void 6309 ibdm_free_gid_list(ibdm_gid_t *head) 6310 { 6311 ibdm_gid_t *delete; 6312 6313 for (delete = head; delete; ) { 6314 head = delete->gid_next; 6315 kmem_free(delete, sizeof (ibdm_gid_t)); 6316 delete = head; 6317 } 6318 } 6319 6320 /* 6321 * This function rescans the DM capable GIDs (gl_state is 6322 * GID_PROBE_COMPLETE or IBDM_GID_PROBING_SKIPPED.This 6323 * basically checks if the DM capable GID is reachable. If 6324 * not this is handled the same way as GID_UNAVAILABLE, 6325 * except that notifications are not send to IBnexus. 6326 * 6327 * This function also initializes the ioc_prev_list for 6328 * a particular IOC (when called from probe_ioc, with 6329 * ioc_guidp != NULL) or all IOCs for the gid (called from 6330 * sweep_fabric, ioc_guidp == NULL). 
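 *
 * A GID that is still reachable only has ioc_prev_gid_list and
 * ioc_prev_nportgids re-initialized from its gl_gid list below;
 * an unreachable GID is unlinked and freed in the same way as
 * in the GID_UNAVAILABLE taskq path.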
6331 */ 6332 static void 6333 ibdm_rescan_gidlist(ib_guid_t *ioc_guidp) 6334 { 6335 ibdm_dp_gidinfo_t *gid_info, *tmp; 6336 int ii, niocs, found; 6337 ibdm_hca_list_t *hca_list = NULL; 6338 ibdm_port_attr_t *port = NULL; 6339 ibdm_ioc_info_t *ioc_list; 6340 6341 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6342 found = 0; 6343 if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED && 6344 gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) { 6345 gid_info = gid_info->gl_next; 6346 continue; 6347 } 6348 6349 /* 6350 * Check if the GID is visible to any HCA ports. 6351 * Return if so. 6352 */ 6353 mutex_enter(&ibdm.ibdm_hl_mutex); 6354 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6355 ibdm_get_next_port(&hca_list, &port, 1)) { 6356 if (ibdm_port_reachable(port->pa_sa_hdl, 6357 gid_info->gl_dgid_lo) == B_TRUE) { 6358 found = 1; 6359 break; 6360 } 6361 } 6362 mutex_exit(&ibdm.ibdm_hl_mutex); 6363 6364 if (found) { 6365 if (gid_info->gl_iou == NULL) { 6366 gid_info = gid_info->gl_next; 6367 continue; 6368 } 6369 6370 /* Intialize the ioc_prev_gid_list */ 6371 niocs = 6372 gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 6373 for (ii = 0; ii < niocs; ii++) { 6374 ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii); 6375 6376 if (ioc_guidp == NULL || (*ioc_guidp == 6377 ioc_list->ioc_profile.ioc_guid)) { 6378 /* Add info of GIDs in gid_info also */ 6379 ibdm_addto_gidlist( 6380 &ioc_list->ioc_prev_gid_list, 6381 gid_info->gl_gid); 6382 ioc_list->ioc_prev_nportgids = 6383 gid_info->gl_ngids; 6384 } 6385 } 6386 gid_info = gid_info->gl_next; 6387 continue; 6388 } 6389 6390 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 6391 "deleted port GUID %llx", 6392 gid_info->gl_dgid_lo); 6393 6394 /* 6395 * Update GID list in all IOCs affected by this 6396 */ 6397 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 6398 6399 /* 6400 * Remove GID from the global GID list 6401 * Handle the case where all port GIDs for an 6402 * IOU have been hot-removed. 6403 */ 6404 mutex_enter(&ibdm.ibdm_mutex); 6405 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 6406 mutex_enter(&gid_info->gl_mutex); 6407 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou); 6408 mutex_exit(&gid_info->gl_mutex); 6409 } 6410 6411 tmp = gid_info->gl_next; 6412 if (gid_info->gl_prev != NULL) 6413 gid_info->gl_prev->gl_next = gid_info->gl_next; 6414 if (gid_info->gl_next != NULL) 6415 gid_info->gl_next->gl_prev = gid_info->gl_prev; 6416 6417 if (gid_info == ibdm.ibdm_dp_gidlist_head) 6418 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 6419 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 6420 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 6421 ibdm.ibdm_ngids--; 6422 mutex_exit(&ibdm.ibdm_mutex); 6423 6424 /* free the hca_list on this gid_info */ 6425 ibdm_delete_glhca_list(gid_info); 6426 6427 mutex_destroy(&gid_info->gl_mutex); 6428 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 6429 6430 gid_info = tmp; 6431 6432 /* 6433 * Pass on the IOCs with updated GIDs to IBnexus 6434 */ 6435 if (ioc_list) { 6436 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 6437 "IOC_PROP_UPDATE for %p\n", ioc_list); 6438 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6439 if (ibdm.ibdm_ibnex_callback != NULL) { 6440 (*ibdm.ibdm_ibnex_callback)((void *) 6441 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6442 } 6443 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6444 } 6445 } 6446 } 6447 6448 /* 6449 * This function notifies IBnex of IOCs on this GID. 6450 * Notification is for GIDs with gl_reprobe_flag set. 
6451 * The flag is set when IOC probe / fabric sweep 6452 * probes a GID starting from CLASS port info. 6453 * 6454 * IBnexus will have information of a reconnected IOC 6455 * if it had probed it before. If this is a new IOC, 6456 * IBnexus ignores the notification. 6457 * 6458 * This function should be called with no locks held. 6459 */ 6460 static void 6461 ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info) 6462 { 6463 ibdm_ioc_info_t *ioc_list; 6464 6465 if (gid_info->gl_reprobe_flag == 0 || 6466 gid_info->gl_iou == NULL) 6467 return; 6468 6469 ioc_list = ibdm_update_ioc_gidlist(gid_info, -1); 6470 6471 /* 6472 * Pass on the IOCs with updated GIDs to IBnexus 6473 */ 6474 if (ioc_list) { 6475 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6476 if (ibdm.ibdm_ibnex_callback != NULL) { 6477 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 6478 IBDM_EVENT_IOC_PROP_UPDATE); 6479 } 6480 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6481 } 6482 } 6483 6484 6485 static void 6486 ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg) 6487 { 6488 if (arg != NULL) 6489 kmem_free(arg, sizeof (ibdm_saa_event_arg_t)); 6490 } 6491 6492 /* 6493 * This function parses the list of HCAs and HCA ports 6494 * to return the port_attr of the next HCA port. A port 6495 * connected to IB fabric (port_state active) is returned, 6496 * if connected_flag is set. 6497 */ 6498 static void 6499 ibdm_get_next_port(ibdm_hca_list_t **inp_hcap, 6500 ibdm_port_attr_t **inp_portp, int connect_flag) 6501 { 6502 int ii; 6503 ibdm_port_attr_t *port, *next_port = NULL; 6504 ibdm_port_attr_t *inp_port; 6505 ibdm_hca_list_t *hca_list; 6506 int found = 0; 6507 6508 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6509 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)", 6510 inp_hcap, inp_portp, connect_flag); 6511 6512 hca_list = *inp_hcap; 6513 inp_port = *inp_portp; 6514 6515 if (hca_list == NULL) 6516 hca_list = ibdm.ibdm_hca_list_head; 6517 6518 for (; hca_list; hca_list = hca_list->hl_next) { 6519 for (ii = 0; ii < hca_list->hl_nports; ii++) { 6520 port = &hca_list->hl_port_attr[ii]; 6521 6522 /* 6523 * inp_port != NULL; 6524 * Skip till we find the matching port 6525 */ 6526 if (inp_port && !found) { 6527 if (inp_port == port) 6528 found = 1; 6529 continue; 6530 } 6531 6532 if (!connect_flag) { 6533 next_port = port; 6534 break; 6535 } 6536 6537 if (port->pa_sa_hdl == NULL) 6538 ibdm_initialize_port(port); 6539 if (port->pa_sa_hdl == NULL) 6540 (void) ibdm_fini_port(port); 6541 else if (next_port == NULL && 6542 port->pa_sa_hdl != NULL && 6543 port->pa_state == IBT_PORT_ACTIVE) { 6544 next_port = port; 6545 break; 6546 } 6547 } 6548 6549 if (next_port) 6550 break; 6551 } 6552 6553 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : " 6554 "returns hca_list %p port %p", hca_list, next_port); 6555 *inp_hcap = hca_list; 6556 *inp_portp = next_port; 6557 } 6558 6559 static void 6560 ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid) 6561 { 6562 ibdm_gid_t *tmp; 6563 6564 tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 6565 tmp->gid_dgid_hi = addgid->gl_dgid_hi; 6566 tmp->gid_dgid_lo = addgid->gl_dgid_lo; 6567 6568 mutex_enter(&nodegid->gl_mutex); 6569 tmp->gid_next = nodegid->gl_gid; 6570 nodegid->gl_gid = tmp; 6571 nodegid->gl_ngids++; 6572 mutex_exit(&nodegid->gl_mutex); 6573 } 6574 6575 static void 6576 ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info, 6577 ibdm_hca_list_t *hca) 6578 { 6579 ibdm_hca_list_t *head, *prev = NULL, *temp; 6580 6581 IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) " 6582 ": gl_hca_list %p", 
gid_info, hca, gid_info->gl_hca_list); 6583 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6584 6585 mutex_enter(&gid_info->gl_mutex); 6586 head = gid_info->gl_hca_list; 6587 if (head == NULL) { 6588 head = ibdm_dup_hca_attr(hca); 6589 head->hl_next = NULL; 6590 gid_info->gl_hca_list = head; 6591 mutex_exit(&gid_info->gl_mutex); 6592 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6593 "gid %p, gl_hca_list %p", gid_info, 6594 gid_info->gl_hca_list); 6595 return; 6596 } 6597 6598 /* Check if already in the list */ 6599 while (head) { 6600 if (head->hl_hca_guid == hca->hl_hca_guid) { 6601 mutex_exit(&gid_info->gl_mutex); 6602 IBTF_DPRINTF_L4(ibdm_string, 6603 "\taddto_glhcalist : gid %p hca %p dup", 6604 gid_info, hca); 6605 return; 6606 } 6607 prev = head; 6608 head = head->hl_next; 6609 } 6610 6611 /* Add this HCA to gl_hca_list */ 6612 temp = ibdm_dup_hca_attr(hca); 6613 temp->hl_next = NULL; 6614 prev->hl_next = temp; 6615 mutex_exit(&gid_info->gl_mutex); 6616 6617 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 6618 "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list); 6619 } 6620 6621 static void 6622 ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info) 6623 { 6624 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 6625 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6626 6627 mutex_enter(&gid_info->gl_mutex); 6628 if (gid_info->gl_hca_list) 6629 ibdm_ibnex_free_hca_list(gid_info->gl_hca_list); 6630 gid_info->gl_hca_list = NULL; 6631 mutex_exit(&gid_info->gl_mutex); 6632 } 6633 6634 6635 static void 6636 ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl) 6637 { 6638 IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)", 6639 port_sa_hdl); 6640 6641 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 6642 ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 6643 6644 /* Check : Not busy in another probe / sweep */ 6645 mutex_enter(&ibdm.ibdm_mutex); 6646 if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) { 6647 ibdm_dp_gidinfo_t *gid_info; 6648 6649 ibdm.ibdm_busy |= IBDM_BUSY; 6650 mutex_exit(&ibdm.ibdm_mutex); 6651 6652 /* 6653 * Check if any GID is using the SA & IBMF handle 6654 * of HCA port going down. Reset ibdm_dp_gidinfo_t 6655 * using another HCA port which can reach the GID. 6656 * This is for DM capable GIDs only, no need to do 6657 * this for others 6658 * 6659 * Delete the GID if no alternate HCA port to reach 6660 * it is found. 6661 */ 6662 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 6663 ibdm_dp_gidinfo_t *tmp; 6664 6665 IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr " 6666 "checking gidinfo %p", gid_info); 6667 6668 if (gid_info->gl_sa_hdl == port_sa_hdl) { 6669 IBTF_DPRINTF_L3(ibdm_string, 6670 "\tevent_hdlr: down HCA port hdl " 6671 "matches gid %p", gid_info); 6672 6673 /* 6674 * The non-DM GIDs can come back 6675 * with a new subnet prefix, when 6676 * the HCA port commes up again. To 6677 * avoid issues, delete non-DM 6678 * capable GIDs, if the gid was 6679 * discovered using the HCA port 6680 * going down. This is ensured by 6681 * setting gl_disconnected to 1. 
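 *
 * (Non-DM-capable GIDs are recognized here by gl_nodeguid == 0.
 * DM-capable GIDs are instead re-pathed through another active
 * HCA port by ibdm_reset_gidinfo() and deleted only if no
 * alternate port is found.)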
6682 */ 6683 if (gid_info->gl_nodeguid == 0) 6684 gid_info->gl_disconnected = 1; 6685 else 6686 ibdm_reset_gidinfo(gid_info); 6687 6688 if (gid_info->gl_disconnected) { 6689 IBTF_DPRINTF_L3(ibdm_string, 6690 "\tevent_hdlr: deleting" 6691 " gid %p", gid_info); 6692 tmp = gid_info; 6693 gid_info = gid_info->gl_next; 6694 ibdm_delete_gidinfo(tmp); 6695 } else 6696 gid_info = gid_info->gl_next; 6697 } else 6698 gid_info = gid_info->gl_next; 6699 } 6700 6701 mutex_enter(&ibdm.ibdm_mutex); 6702 ibdm.ibdm_busy &= ~IBDM_BUSY; 6703 cv_signal(&ibdm.ibdm_busy_cv); 6704 } 6705 mutex_exit(&ibdm.ibdm_mutex); 6706 } 6707 6708 static void 6709 ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6710 { 6711 ibdm_hca_list_t *hca_list = NULL; 6712 ibdm_port_attr_t *port = NULL; 6713 int gid_reinited = 0; 6714 sa_node_record_t *nr, *tmp; 6715 sa_portinfo_record_t *pi; 6716 size_t nr_len = 0, pi_len = 0; 6717 size_t path_len; 6718 ib_gid_t sgid, dgid; 6719 int ret, ii, nrecords; 6720 sa_path_record_t *path; 6721 uint8_t npaths = 1; 6722 ibdm_pkey_tbl_t *pkey_tbl; 6723 6724 IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo); 6725 6726 /* 6727 * Get list of all the ports reachable from the local known HCA 6728 * ports which are active 6729 */ 6730 mutex_enter(&ibdm.ibdm_hl_mutex); 6731 for (ibdm_get_next_port(&hca_list, &port, 1); port; 6732 ibdm_get_next_port(&hca_list, &port, 1)) { 6733 6734 6735 /* 6736 * Get the path and re-populate the gidinfo. 6737 * Getting the path is the same probe_ioc 6738 * Init the gid info as in ibdm_create_gidinfo() 6739 */ 6740 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, 6741 gidinfo->gl_nodeguid); 6742 if (nr == NULL) { 6743 IBTF_DPRINTF_L4(ibdm_string, 6744 "\treset_gidinfo : no records"); 6745 continue; 6746 } 6747 6748 nrecords = (nr_len / sizeof (sa_node_record_t)); 6749 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 6750 if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid) 6751 break; 6752 } 6753 6754 if (ii == nrecords) { 6755 IBTF_DPRINTF_L4(ibdm_string, 6756 "\treset_gidinfo : no record for portguid"); 6757 kmem_free(nr, nr_len); 6758 continue; 6759 } 6760 6761 pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID); 6762 if (pi == NULL) { 6763 IBTF_DPRINTF_L4(ibdm_string, 6764 "\treset_gidinfo : no portinfo"); 6765 kmem_free(nr, nr_len); 6766 continue; 6767 } 6768 6769 sgid.gid_prefix = port->pa_sn_prefix; 6770 sgid.gid_guid = port->pa_port_guid; 6771 dgid.gid_prefix = pi->PortInfo.GidPrefix; 6772 dgid.gid_guid = tmp->NodeInfo.PortGUID; 6773 6774 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid, 6775 IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path); 6776 6777 if ((ret != IBMF_SUCCESS) || path == NULL) { 6778 IBTF_DPRINTF_L4(ibdm_string, 6779 "\treset_gidinfo : no paths"); 6780 kmem_free(pi, pi_len); 6781 kmem_free(nr, nr_len); 6782 continue; 6783 } 6784 6785 gidinfo->gl_dgid_hi = path->DGID.gid_prefix; 6786 gidinfo->gl_dgid_lo = path->DGID.gid_guid; 6787 gidinfo->gl_sgid_hi = path->SGID.gid_prefix; 6788 gidinfo->gl_sgid_lo = path->SGID.gid_guid; 6789 gidinfo->gl_p_key = path->P_Key; 6790 gidinfo->gl_sa_hdl = port->pa_sa_hdl; 6791 gidinfo->gl_ibmf_hdl = port->pa_ibmf_hdl; 6792 gidinfo->gl_slid = path->SLID; 6793 gidinfo->gl_dlid = path->DLID; 6794 /* Reset redirect info, next MAD will set if redirected */ 6795 gidinfo->gl_redirected = 0; 6796 gidinfo->gl_devid = (*tmp).NodeInfo.DeviceID; 6797 gidinfo->gl_SL = path->SL; 6798 6799 gidinfo->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 6800 for (ii = 0; ii < port->pa_npkeys; ii++) { 6801 
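		/*
		 * Look up the IBMF QP handle created for this path's
		 * P_Key; gl_qp_hdl keeps the default handle set above
		 * when no matching entry exists in the port's P_Key
		 * table.
		 */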
if (port->pa_pkey_tbl == NULL) 6802 break; 6803 6804 pkey_tbl = &port->pa_pkey_tbl[ii]; 6805 if ((gidinfo->gl_p_key == pkey_tbl->pt_pkey) && 6806 (pkey_tbl->pt_qp_hdl != NULL)) { 6807 gidinfo->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 6808 break; 6809 } 6810 } 6811 6812 if (gidinfo->gl_qp_hdl == NULL) 6813 IBTF_DPRINTF_L2(ibdm_string, 6814 "\treset_gid_info: No matching Pkey"); 6815 else 6816 gid_reinited = 1; 6817 6818 kmem_free(path, path_len); 6819 kmem_free(pi, pi_len); 6820 kmem_free(nr, nr_len); 6821 break; 6822 } 6823 mutex_exit(&ibdm.ibdm_hl_mutex); 6824 6825 if (!gid_reinited) 6826 gidinfo->gl_disconnected = 1; 6827 } 6828 6829 static void 6830 ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6831 { 6832 ibdm_ioc_info_t *ioc_list; 6833 int in_gidlist = 0; 6834 6835 /* 6836 * Check if gidinfo has been inserted into the 6837 * ibdm_dp_gidlist_head list. gl_next or gl_prev 6838 * != NULL, if gidinfo is the list. 6839 */ 6840 if (gidinfo->gl_prev != NULL || 6841 gidinfo->gl_next != NULL || 6842 ibdm.ibdm_dp_gidlist_head == gidinfo) 6843 in_gidlist = 1; 6844 6845 ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0); 6846 6847 /* 6848 * Remove GID from the global GID list 6849 * Handle the case where all port GIDs for an 6850 * IOU have been hot-removed. 6851 */ 6852 mutex_enter(&ibdm.ibdm_mutex); 6853 if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) { 6854 mutex_enter(&gidinfo->gl_mutex); 6855 (void) ibdm_free_iou_info(gidinfo, &gidinfo->gl_iou); 6856 mutex_exit(&gidinfo->gl_mutex); 6857 } 6858 6859 /* Delete gl_hca_list */ 6860 mutex_exit(&ibdm.ibdm_mutex); 6861 ibdm_delete_glhca_list(gidinfo); 6862 mutex_enter(&ibdm.ibdm_mutex); 6863 6864 if (in_gidlist) { 6865 if (gidinfo->gl_prev != NULL) 6866 gidinfo->gl_prev->gl_next = gidinfo->gl_next; 6867 if (gidinfo->gl_next != NULL) 6868 gidinfo->gl_next->gl_prev = gidinfo->gl_prev; 6869 6870 if (gidinfo == ibdm.ibdm_dp_gidlist_head) 6871 ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next; 6872 if (gidinfo == ibdm.ibdm_dp_gidlist_tail) 6873 ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev; 6874 ibdm.ibdm_ngids--; 6875 } 6876 mutex_exit(&ibdm.ibdm_mutex); 6877 6878 mutex_destroy(&gidinfo->gl_mutex); 6879 cv_destroy(&gidinfo->gl_probe_cv); 6880 kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t)); 6881 6882 /* 6883 * Pass on the IOCs with updated GIDs to IBnexus 6884 */ 6885 if (ioc_list) { 6886 IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo " 6887 "IOC_PROP_UPDATE for %p\n", ioc_list); 6888 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6889 if (ibdm.ibdm_ibnex_callback != NULL) { 6890 (*ibdm.ibdm_ibnex_callback)((void *) 6891 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6892 } 6893 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6894 } 6895 } 6896 6897 6898 static void 6899 ibdm_fill_srv_attr_mod(ib_mad_hdr_t *hdr, ibdm_timeout_cb_args_t *cb_args) 6900 { 6901 uint32_t attr_mod; 6902 6903 attr_mod = (cb_args->cb_ioc_num + 1) << 16; 6904 attr_mod |= cb_args->cb_srvents_start; 6905 attr_mod |= (cb_args->cb_srvents_end) << 8; 6906 hdr->AttributeModifier = h2b32(attr_mod); 6907 } 6908 6909 static void 6910 ibdm_bump_transactionID(ibdm_dp_gidinfo_t *gid_info) 6911 { 6912 ASSERT(MUTEX_HELD(&gid_info->gl_mutex)); 6913 gid_info->gl_transactionID++; 6914 if (gid_info->gl_transactionID == gid_info->gl_max_transactionID) { 6915 IBTF_DPRINTF_L4(ibdm_string, 6916 "\tbump_transactionID(%p), wrapup", gid_info); 6917 gid_info->gl_transactionID = gid_info->gl_min_transactionID; 6918 } 6919 } 6920 6921 /* 6922 * gl_prev_iou is set for *non-reprobe* sweeep requests, which 6923 * detected that ChangeID in IOU info has 
changed. The service 6924 * entry also may have changed. Check if service entry in IOC 6925 * has changed wrt the prev iou, if so notify to IB Nexus. 6926 */ 6927 static ibdm_ioc_info_t * 6928 ibdm_handle_prev_iou() 6929 { 6930 ibdm_dp_gidinfo_t *gid_info; 6931 ibdm_ioc_info_t *ioc_list_head = NULL, *ioc_list; 6932 ibdm_ioc_info_t *prev_ioc, *ioc; 6933 int ii, jj, niocs, prev_niocs; 6934 6935 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 6936 6937 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou enter"); 6938 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 6939 gid_info = gid_info->gl_next) { 6940 if (gid_info->gl_prev_iou == NULL) 6941 continue; 6942 6943 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou gid %p", 6944 gid_info); 6945 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 6946 prev_niocs = 6947 gid_info->gl_prev_iou->iou_info.iou_num_ctrl_slots; 6948 for (ii = 0; ii < niocs; ii++) { 6949 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 6950 6951 /* Find matching IOC */ 6952 for (jj = 0; jj < prev_niocs; jj++) { 6953 prev_ioc = (ibdm_ioc_info_t *) 6954 &gid_info->gl_prev_iou->iou_ioc_info[jj]; 6955 if (prev_ioc->ioc_profile.ioc_guid == 6956 ioc->ioc_profile.ioc_guid) 6957 break; 6958 } 6959 if (jj == prev_niocs) 6960 prev_ioc = NULL; 6961 if (ioc == NULL || prev_ioc == NULL) 6962 continue; 6963 if ((ioc->ioc_profile.ioc_service_entries != 6964 prev_ioc->ioc_profile.ioc_service_entries) || 6965 ibdm_serv_cmp(&ioc->ioc_serv[0], 6966 &prev_ioc->ioc_serv[0], 6967 ioc->ioc_profile.ioc_service_entries) != 0) { 6968 IBTF_DPRINTF_L4(ibdm_string, 6969 "/thandle_prev_iou modified IOC: " 6970 "current ioc %p, old ioc %p", 6971 ioc, prev_ioc); 6972 mutex_enter(&gid_info->gl_mutex); 6973 ioc_list = ibdm_dup_ioc_info(ioc, gid_info); 6974 mutex_exit(&gid_info->gl_mutex); 6975 ioc_list->ioc_info_updated.ib_prop_updated 6976 = 0; 6977 ioc_list->ioc_info_updated.ib_srv_prop_updated 6978 = 1; 6979 6980 if (ioc_list_head == NULL) 6981 ioc_list_head = ioc_list; 6982 else { 6983 ioc_list_head->ioc_next = ioc_list; 6984 ioc_list_head = ioc_list; 6985 } 6986 } 6987 } 6988 6989 mutex_enter(&gid_info->gl_mutex); 6990 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_prev_iou); 6991 mutex_exit(&gid_info->gl_mutex); 6992 } 6993 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iouret %p", 6994 ioc_list_head); 6995 return (ioc_list_head); 6996 } 6997 6998 /* 6999 * Compares two service entries lists, returns 0 if same, returns 1 7000 * if no match. 
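 *
 * Entries are compared positionally: entry ii of serv1 must have
 * the same service ID and service name (IB_DM_MAX_SVC_NAME_LEN
 * bytes) as entry ii of serv2.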
7001 */ 7002 static int 7003 ibdm_serv_cmp(ibdm_srvents_info_t *serv1, ibdm_srvents_info_t *serv2, 7004 int nserv) 7005 { 7006 int ii; 7007 7008 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: enter"); 7009 for (ii = 0; ii < nserv; ii++, serv1++, serv2++) { 7010 if (serv1->se_attr.srv_id != serv2->se_attr.srv_id || 7011 bcmp(serv1->se_attr.srv_name, 7012 serv2->se_attr.srv_name, 7013 IB_DM_MAX_SVC_NAME_LEN) != 0) { 7014 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 1"); 7015 return (1); 7016 } 7017 } 7018 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 0"); 7019 return (0); 7020 } 7021 7022 /* For debugging purpose only */ 7023 #ifdef DEBUG 7024 void 7025 ibdm_dump_mad_hdr(ib_mad_hdr_t *mad_hdr) 7026 { 7027 IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info"); 7028 IBTF_DPRINTF_L4("ibdm", "\t\t ---------------"); 7029 7030 IBTF_DPRINTF_L4("ibdm", "\tBase version : 0x%x" 7031 "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass); 7032 IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x" 7033 "\tR Method : 0x%x", 7034 mad_hdr->ClassVersion, mad_hdr->R_Method); 7035 IBTF_DPRINTF_L4("ibdm", "\tMAD Status : 0x%x" 7036 "\tTransaction ID : 0x%llx", 7037 b2h16(mad_hdr->Status), b2h64(mad_hdr->TransactionID)); 7038 IBTF_DPRINTF_L4("ibdm", "\t Attribute ID : 0x%x" 7039 "\tAttribute Modified : 0x%lx", 7040 b2h16(mad_hdr->AttributeID), b2h32(mad_hdr->AttributeModifier)); 7041 } 7042 7043 7044 void 7045 ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag) 7046 { 7047 ib_mad_hdr_t *mad_hdr; 7048 7049 IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info"); 7050 IBTF_DPRINTF_L4("ibdm", "\t\t ------------------"); 7051 7052 IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x" 7053 " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid, 7054 ibmf_msg->im_local_addr.ia_remote_lid, 7055 ibmf_msg->im_local_addr.ia_remote_qno); 7056 IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x" 7057 " SL : 0x%x", ibmf_msg->im_local_addr.ia_p_key, 7058 ibmf_msg->im_local_addr.ia_q_key, 7059 ibmf_msg->im_local_addr.ia_service_level); 7060 7061 if (flag) 7062 mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg); 7063 else 7064 mad_hdr = IBDM_IN_IBMFMSG_MADHDR(ibmf_msg); 7065 7066 ibdm_dump_mad_hdr(mad_hdr); 7067 } 7068 7069 7070 void 7071 ibdm_dump_path_info(sa_path_record_t *path) 7072 { 7073 IBTF_DPRINTF_L4("ibdm", "\t\t Path information"); 7074 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------"); 7075 7076 IBTF_DPRINTF_L4("ibdm", "\t DGID hi : %llx\tDGID lo : %llx", 7077 path->DGID.gid_prefix, path->DGID.gid_guid); 7078 IBTF_DPRINTF_L4("ibdm", "\t SGID hi : %llx\tSGID lo : %llx", 7079 path->SGID.gid_prefix, path->SGID.gid_guid); 7080 IBTF_DPRINTF_L4("ibdm", "\t SLID : %x\t\tDlID : %x", 7081 path->SLID, path->DLID); 7082 IBTF_DPRINTF_L4("ibdm", "\t P Key : %x\t\tSL : %x", 7083 path->P_Key, path->SL); 7084 } 7085 7086 7087 void 7088 ibdm_dump_classportinfo(ib_mad_classportinfo_t *classportinfo) 7089 { 7090 IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO"); 7091 IBTF_DPRINTF_L4("ibdm", "\t\t --------------"); 7092 7093 IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x", 7094 ((b2h32(classportinfo->RespTimeValue)) & 0x1F)); 7095 7096 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi : 0x%llx", 7097 b2h64(classportinfo->RedirectGID_hi)); 7098 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo : 0x%llx", 7099 b2h64(classportinfo->RedirectGID_lo)); 7100 IBTF_DPRINTF_L4("ibdm", "\t Redirected TC : 0x%x", 7101 classportinfo->RedirectTC); 7102 IBTF_DPRINTF_L4("ibdm", "\t Redirected SL : 0x%x", 7103 
classportinfo->RedirectSL); 7104 IBTF_DPRINTF_L4("ibdm", "\t Redirected FL : 0x%x", 7105 classportinfo->RedirectFL); 7106 IBTF_DPRINTF_L4("ibdm", "\t Redirected LID : 0x%x", 7107 b2h16(classportinfo->RedirectLID)); 7108 IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY : 0x%x", 7109 b2h16(classportinfo->RedirectP_Key)); 7110 IBTF_DPRINTF_L4("ibdm", "\t Redirected QP : 0x%x", 7111 classportinfo->RedirectQP); 7112 IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY : 0x%x", 7113 b2h32(classportinfo->RedirectQ_Key)); 7114 IBTF_DPRINTF_L4("ibdm", "\t Trap GID hi : 0x%llx", 7115 b2h64(classportinfo->TrapGID_hi)); 7116 IBTF_DPRINTF_L4("ibdm", "\t Trap GID lo : 0x%llx", 7117 b2h64(classportinfo->TrapGID_lo)); 7118 IBTF_DPRINTF_L4("ibdm", "\t Trap TC : 0x%x", 7119 classportinfo->TrapTC); 7120 IBTF_DPRINTF_L4("ibdm", "\t Trap SL : 0x%x", 7121 classportinfo->TrapSL); 7122 IBTF_DPRINTF_L4("ibdm", "\t Trap FL : 0x%x", 7123 classportinfo->TrapFL); 7124 IBTF_DPRINTF_L4("ibdm", "\t Trap LID : 0x%x", 7125 b2h16(classportinfo->TrapLID)); 7126 IBTF_DPRINTF_L4("ibdm", "\t Trap P_Key : 0x%x", 7127 b2h16(classportinfo->TrapP_Key)); 7128 IBTF_DPRINTF_L4("ibdm", "\t Trap HL : 0x%x", 7129 classportinfo->TrapHL); 7130 IBTF_DPRINTF_L4("ibdm", "\t Trap QP : 0x%x", 7131 classportinfo->TrapQP); 7132 IBTF_DPRINTF_L4("ibdm", "\t Trap Q_Key : 0x%x", 7133 b2h32(classportinfo->TrapQ_Key)); 7134 } 7135 7136 7137 void 7138 ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info) 7139 { 7140 IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo"); 7141 IBTF_DPRINTF_L4("ibdm", "\t\t ------------"); 7142 7143 IBTF_DPRINTF_L4("ibdm", "\tChange ID : 0x%x", 7144 b2h16(iou_info->iou_changeid)); 7145 IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots : %d", 7146 iou_info->iou_num_ctrl_slots); 7147 IBTF_DPRINTF_L4("ibdm", "\tIOU flag : 0x%x", 7148 iou_info->iou_flag); 7149 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 0 : 0x%x", 7150 iou_info->iou_ctrl_list[0]); 7151 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 1 : 0x%x", 7152 iou_info->iou_ctrl_list[1]); 7153 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 2 : 0x%x", 7154 iou_info->iou_ctrl_list[2]); 7155 } 7156 7157 7158 void 7159 ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc) 7160 { 7161 IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile"); 7162 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------------"); 7163 7164 IBTF_DPRINTF_L4("ibdm", "\tIOC Guid : %llx", ioc->ioc_guid); 7165 IBTF_DPRINTF_L4("ibdm", "\tVendorID : 0x%x", ioc->ioc_vendorid); 7166 IBTF_DPRINTF_L4("ibdm", "\tDevice Id : 0x%x", ioc->ioc_deviceid); 7167 IBTF_DPRINTF_L4("ibdm", "\tDevice Ver : 0x%x", ioc->ioc_device_ver); 7168 IBTF_DPRINTF_L4("ibdm", "\tSubsys ID : 0x%x", ioc->ioc_subsys_id); 7169 IBTF_DPRINTF_L4("ibdm", "\tIO class : 0x%x", ioc->ioc_io_class); 7170 IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass); 7171 IBTF_DPRINTF_L4("ibdm", "\tProtocol : 0x%x", ioc->ioc_protocol); 7172 IBTF_DPRINTF_L4("ibdm", "\tProtocolV : 0x%x", ioc->ioc_protocol_ver); 7173 IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth : %d", ioc->ioc_send_msg_qdepth); 7174 IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d", 7175 ioc->ioc_rdma_read_qdepth); 7176 IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz : %d", ioc->ioc_send_msg_sz); 7177 IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz); 7178 IBTF_DPRINTF_L4("ibdm", "\topcal mask : 0x%x", 7179 ioc->ioc_ctrl_opcap_mask); 7180 IBTF_DPRINTF_L4("ibdm", "\tsrventries : %x", ioc->ioc_service_entries); 7181 } 7182 7183 7184 void 7185 ibdm_dump_service_entries(ib_dm_srv_t *srv_ents) 7186 { 7187 IBTF_DPRINTF_L4("ibdm", 7188 
"\thandle_srventry_mad: service id : %llx", srv_ents->srv_id); 7189 7190 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad: " 7191 "Service Name : %s", srv_ents->srv_name); 7192 } 7193 7194 int ibdm_allow_sweep_fabric_timestamp = 1; 7195 7196 void 7197 ibdm_dump_sweep_fabric_timestamp(int flag) 7198 { 7199 static hrtime_t x; 7200 if (flag) { 7201 if (ibdm_allow_sweep_fabric_timestamp) { 7202 IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete " 7203 "sweep %lld ms", ((gethrtime() - x)/ 1000000)); 7204 } 7205 x = 0; 7206 } else 7207 x = gethrtime(); 7208 } 7209 #endif 7210