/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * ibdm.c
 *
 * This file contains the InfiniBand Device Manager (IBDM) support functions.
 * The IB nexus driver is the only client of the IBDM module.
 *
 * IBDM registers with IBTF for HCA arrival/removal notification.
 * IBDM registers with SA access to send DM MADs to discover the IOCs behind
 * the IOUs.
 *
 * The IB nexus driver registers with IBDM to find the information about the
 * HCAs and IOCs (behind the IOU) present on the IB fabric.
 */

#include <sys/systm.h>
#include <sys/taskq.h>
#include <sys/ib/mgt/ibdm/ibdm_impl.h>
#include <sys/modctl.h>

/* Function Prototype declarations */
static int	ibdm_free_iou_info(ibdm_dp_gidinfo_t *);
static int	ibdm_fini(void);
static int	ibdm_init(void);
static int	ibdm_get_reachable_ports(ibdm_port_attr_t *,
		    ibdm_hca_list_t *);
static ibdm_dp_gidinfo_t	*ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t);
static ibdm_dp_gidinfo_t	*ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *);
static int	ibdm_send_classportinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *);
static int	ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *);
static int	ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t,
		    ib_guid_t *, ib_guid_t *);
static int	ibdm_retry_command(ibdm_timeout_cb_args_t *);
static int	ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int);
static int	ibdm_verify_mad_status(ib_mad_hdr_t *);
static int	ibdm_handle_redirection(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_wait_probe_completion(void);
static void	ibdm_sweep_fabric(int);
static void	ibdm_probe_gid_thread(void *);
static void	ibdm_wakeup_probe_gid_cv(void);
static void	ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int);
static int	ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int);
static void	ibdm_update_port_attr(ibdm_port_attr_t *);
static void	ibdm_handle_hca_attach(ib_guid_t);
static void	ibdm_handle_srventry_mad(ibmf_msg_t *,
		    ibdm_dp_gidinfo_t *, int *);
static void	ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_recv_incoming_mad(void *);
static void	ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *);
static void	ibdm_pkt_timeout_hdlr(void *arg);
static void	ibdm_initialize_port(ibdm_port_attr_t *);
static void	ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
static
void ibdm_probe_gid(ibdm_dp_gidinfo_t *); 84 static void ibdm_alloc_send_buffers(ibmf_msg_t *); 85 static void ibdm_free_send_buffers(ibmf_msg_t *); 86 static void ibdm_handle_hca_detach(ib_guid_t); 87 static int ibdm_fini_port(ibdm_port_attr_t *); 88 static int ibdm_uninit_hca(ibdm_hca_list_t *); 89 static void ibdm_handle_iounitinfo(ibmf_handle_t, 90 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 91 static void ibdm_handle_ioc_profile(ibmf_handle_t, 92 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 93 static void ibdm_event_hdlr(void *, ibt_hca_hdl_t, 94 ibt_async_code_t, ibt_async_event_t *); 95 static void ibdm_handle_classportinfo(ibmf_handle_t, 96 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *); 97 static void ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *, 98 ibdm_dp_gidinfo_t *); 99 100 static ibdm_hca_list_t *ibdm_dup_hca_attr(ibdm_hca_list_t *); 101 static ibdm_ioc_info_t *ibdm_dup_ioc_info(ibdm_ioc_info_t *, 102 ibdm_dp_gidinfo_t *gid_list); 103 static void ibdm_probe_ioc(ib_guid_t, ib_guid_t, int); 104 static ibdm_ioc_info_t *ibdm_is_ioc_present(ib_guid_t, 105 ibdm_dp_gidinfo_t *, int *); 106 static ibdm_port_attr_t *ibdm_get_port_attr(ibt_async_event_t *, 107 ibdm_hca_list_t **); 108 static sa_node_record_t *ibdm_get_node_records(ibmf_saa_handle_t, 109 size_t *, ib_guid_t); 110 static sa_portinfo_record_t *ibdm_get_portinfo(ibmf_saa_handle_t, size_t *, 111 ib_lid_t); 112 static ibdm_dp_gidinfo_t *ibdm_create_gid_info(ibdm_port_attr_t *, 113 ib_gid_t, ib_gid_t); 114 static ibdm_dp_gidinfo_t *ibdm_find_gid(ib_guid_t, ib_guid_t); 115 static int ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t); 116 static ibdm_ioc_info_t *ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int); 117 static void ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t, 118 ibmf_saa_event_details_t *, void *); 119 static void ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *, 120 ibdm_dp_gidinfo_t *); 121 static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *); 122 static void ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *, 123 ibdm_dp_gidinfo_t *); 124 static void ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *); 125 static void ibdm_free_gid_list(ibdm_gid_t *); 126 static void ibdm_rescan_gidlist(ib_guid_t *ioc_guid); 127 static void ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *); 128 static void ibdm_saa_event_taskq(void *); 129 static void ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *); 130 static void ibdm_get_next_port(ibdm_hca_list_t **, 131 ibdm_port_attr_t **, int); 132 static void ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *, 133 ibdm_dp_gidinfo_t *); 134 static void ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *, 135 ibdm_hca_list_t *); 136 static void ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *); 137 static void ibdm_saa_handle_new_gid(void *); 138 static void ibdm_reset_all_dgids(ibmf_saa_handle_t); 139 static void ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *); 140 static void ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *); 141 142 143 int ibdm_dft_timeout = IBDM_DFT_TIMEOUT; 144 int ibdm_dft_retry_cnt = IBDM_DFT_NRETRIES; 145 #ifdef DEBUG 146 int ibdm_ignore_saa_event = 0; 147 #endif 148 149 /* Modload support */ 150 static struct modlmisc ibdm_modlmisc = { 151 &mod_miscops, 152 "InfiniBand Device Manager %I%", 153 }; 154 155 struct modlinkage ibdm_modlinkage = { 156 MODREV_1, 157 (void *)&ibdm_modlmisc, 158 NULL 159 }; 160 161 static ibt_clnt_modinfo_t ibdm_ibt_modinfo = { 162 IBTI_V1, 163 IBT_DM, 164 ibdm_event_hdlr, 165 NULL, 166 "ibdm" 167 }; 168 169 /* Global variables */ 170 ibdm_t ibdm; 171 int ibdm_taskq_enable = 
IBDM_ENABLE_TASKQ_HANDLING; 172 char *ibdm_string = "ibdm"; 173 174 _NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv", 175 ibdm.ibdm_dp_gidlist_head)) 176 177 /* 178 * _init 179 * Loadable module init, called before any other module. 180 * Initialize mutex 181 * Register with IBTF 182 */ 183 int 184 _init(void) 185 { 186 int err; 187 188 IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm); 189 190 if ((err = ibdm_init()) != IBDM_SUCCESS) { 191 IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err); 192 (void) ibdm_fini(); 193 return (DDI_FAILURE); 194 } 195 196 if ((err = mod_install(&ibdm_modlinkage)) != 0) { 197 IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err); 198 (void) ibdm_fini(); 199 } 200 return (err); 201 } 202 203 204 int 205 _fini(void) 206 { 207 int err; 208 209 if ((err = ibdm_fini()) != IBDM_SUCCESS) { 210 IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err); 211 (void) ibdm_init(); 212 return (EBUSY); 213 } 214 215 if ((err = mod_remove(&ibdm_modlinkage)) != 0) { 216 IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err); 217 (void) ibdm_init(); 218 } 219 return (err); 220 } 221 222 223 int 224 _info(struct modinfo *modinfop) 225 { 226 return (mod_info(&ibdm_modlinkage, modinfop)); 227 } 228 229 230 /* 231 * ibdm_init(): 232 * Register with IBTF 233 * Allocate memory for the HCAs 234 * Allocate minor-nodes for the HCAs 235 */ 236 static int 237 ibdm_init(void) 238 { 239 int i, hca_count; 240 ib_guid_t *hca_guids; 241 ibt_status_t status; 242 243 IBTF_DPRINTF_L4("ibdm", "\tibdm_init:"); 244 if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) { 245 mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL); 246 mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL); 247 mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL); 248 mutex_enter(&ibdm.ibdm_mutex); 249 ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED; 250 } 251 252 if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) { 253 if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL, 254 (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) { 255 IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach " 256 "failed %x", status); 257 mutex_exit(&ibdm.ibdm_mutex); 258 return (IBDM_FAILURE); 259 } 260 261 ibdm.ibdm_state |= IBDM_IBT_ATTACHED; 262 mutex_exit(&ibdm.ibdm_mutex); 263 } 264 265 266 if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) { 267 hca_count = ibt_get_hca_list(&hca_guids); 268 IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count); 269 for (i = 0; i < hca_count; i++) 270 (void) ibdm_handle_hca_attach(hca_guids[i]); 271 if (hca_count) 272 ibt_free_hca_list(hca_guids, hca_count); 273 274 mutex_enter(&ibdm.ibdm_mutex); 275 ibdm.ibdm_state |= IBDM_HCA_ATTACHED; 276 mutex_exit(&ibdm.ibdm_mutex); 277 } 278 279 if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) { 280 cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL); 281 cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL); 282 mutex_enter(&ibdm.ibdm_mutex); 283 ibdm.ibdm_state |= IBDM_CVS_ALLOCED; 284 mutex_exit(&ibdm.ibdm_mutex); 285 } 286 return (IBDM_SUCCESS); 287 } 288 289 290 static int 291 ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info) 292 { 293 int ii, k, niocs; 294 size_t size; 295 ibdm_gid_t *delete, *head; 296 timeout_id_t timeout_id; 297 ibdm_ioc_info_t *ioc; 298 299 ASSERT(mutex_owned(&gid_info->gl_mutex)); 300 if (gid_info->gl_iou == NULL) { 301 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU"); 302 return (0); 303 } 304 305 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 306 IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = 
%p, niocs %d", 307 gid_info, niocs); 308 309 for (ii = 0; ii < niocs; ii++) { 310 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 311 312 /* handle the case where an ioc_timeout_id is scheduled */ 313 if (ioc->ioc_timeout_id) { 314 timeout_id = ioc->ioc_timeout_id; 315 mutex_exit(&gid_info->gl_mutex); 316 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 317 "ioc_timeout_id = 0x%x", timeout_id); 318 if (untimeout(timeout_id) == -1) { 319 IBTF_DPRINTF_L2("ibdm", "free_iou_info: " 320 "untimeout ioc_timeout_id failed"); 321 mutex_enter(&gid_info->gl_mutex); 322 return (-1); 323 } 324 mutex_enter(&gid_info->gl_mutex); 325 ioc->ioc_timeout_id = 0; 326 } 327 328 /* handle the case where an ioc_dc_timeout_id is scheduled */ 329 if (ioc->ioc_dc_timeout_id) { 330 timeout_id = ioc->ioc_dc_timeout_id; 331 mutex_exit(&gid_info->gl_mutex); 332 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 333 "ioc_dc_timeout_id = 0x%x", timeout_id); 334 if (untimeout(timeout_id) == -1) { 335 IBTF_DPRINTF_L2("ibdm", "free_iou_info: " 336 "untimeout ioc_dc_timeout_id failed"); 337 mutex_enter(&gid_info->gl_mutex); 338 return (-1); 339 } 340 mutex_enter(&gid_info->gl_mutex); 341 ioc->ioc_dc_timeout_id = 0; 342 } 343 344 /* handle the case where serv[k].se_timeout_id is scheduled */ 345 for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) { 346 if (ioc->ioc_serv[k].se_timeout_id) { 347 timeout_id = ioc->ioc_serv[k].se_timeout_id; 348 mutex_exit(&gid_info->gl_mutex); 349 IBTF_DPRINTF_L5("ibdm", "free_iou_info: " 350 "ioc->ioc_serv[%d].se_timeout_id = 0x%x", 351 k, timeout_id); 352 if (untimeout(timeout_id) == -1) { 353 IBTF_DPRINTF_L2("ibdm", "free_iou_info:" 354 " untimeout se_timeout_id failed"); 355 mutex_enter(&gid_info->gl_mutex); 356 return (-1); 357 } 358 mutex_enter(&gid_info->gl_mutex); 359 ioc->ioc_serv[k].se_timeout_id = 0; 360 } 361 } 362 363 /* delete GID list */ 364 head = ioc->ioc_gid_list; 365 while (head) { 366 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: " 367 "Deleting gid_list struct %p", head); 368 delete = head; 369 head = head->gid_next; 370 kmem_free(delete, sizeof (ibdm_gid_t)); 371 } 372 ioc->ioc_gid_list = NULL; 373 374 /* delete ioc_serv */ 375 size = ioc->ioc_profile.ioc_service_entries * 376 sizeof (ibdm_srvents_info_t); 377 if (ioc->ioc_serv && size) { 378 kmem_free(ioc->ioc_serv, size); 379 ioc->ioc_serv = NULL; 380 } 381 } 382 383 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC"); 384 size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t); 385 kmem_free(gid_info->gl_iou, size); 386 gid_info->gl_iou = NULL; 387 return (0); 388 } 389 390 391 /* 392 * ibdm_fini(): 393 * Un-register with IBTF 394 * De allocate memory for the GID info 395 */ 396 static int 397 ibdm_fini() 398 { 399 int ii; 400 ibdm_hca_list_t *hca_list, *temp; 401 ibdm_dp_gidinfo_t *gid_info, *tmp; 402 ibdm_gid_t *head, *delete; 403 404 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini"); 405 406 mutex_enter(&ibdm.ibdm_hl_mutex); 407 if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) { 408 if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) { 409 IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed"); 410 mutex_exit(&ibdm.ibdm_hl_mutex); 411 return (IBDM_FAILURE); 412 } 413 ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED; 414 ibdm.ibdm_ibt_clnt_hdl = NULL; 415 } 416 417 hca_list = ibdm.ibdm_hca_list_head; 418 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count); 419 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 420 temp = hca_list; 421 hca_list = hca_list->hl_next; 422 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp); 
		if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) {
			IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: "
			    "uninit_hca %p failed", temp);
			mutex_exit(&ibdm.ibdm_hl_mutex);
			return (IBDM_FAILURE);
		}
	}
	mutex_exit(&ibdm.ibdm_hl_mutex);

	mutex_enter(&ibdm.ibdm_mutex);
	if (ibdm.ibdm_state & IBDM_HCA_ATTACHED)
		ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED;

	gid_info = ibdm.ibdm_dp_gidlist_head;
	while (gid_info) {
		mutex_enter(&gid_info->gl_mutex);
		(void) ibdm_free_iou_info(gid_info);
		mutex_exit(&gid_info->gl_mutex);
		ibdm_delete_glhca_list(gid_info);

		tmp = gid_info;
		gid_info = gid_info->gl_next;
		mutex_destroy(&tmp->gl_mutex);
		head = tmp->gl_gid;
		while (head) {
			IBTF_DPRINTF_L4("ibdm",
			    "\tibdm_fini: Deleting gid structs");
			delete = head;
			head = head->gid_next;
			kmem_free(delete, sizeof (ibdm_gid_t));
		}
		kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t));
	}
	mutex_exit(&ibdm.ibdm_mutex);

	if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) {
		ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED;
		mutex_destroy(&ibdm.ibdm_mutex);
		mutex_destroy(&ibdm.ibdm_hl_mutex);
		mutex_destroy(&ibdm.ibdm_ibnex_mutex);
	}
	if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) {
		ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED;
		cv_destroy(&ibdm.ibdm_probe_cv);
		cv_destroy(&ibdm.ibdm_busy_cv);
	}
	return (IBDM_SUCCESS);
}


/*
 * ibdm_event_hdlr()
 *
 *	IBDM registers this asynchronous event handler at the time of
 *	ibt_attach. IBDM supports the following async events; all other
 *	events are simply ignored.
 *	IBT_HCA_ATTACH_EVENT:
 *		Retrieves the information about all the ports that are
 *		present on this HCA, allocates the port attributes
 *		structure and calls the IB nexus callback routine with
 *		the port attributes structure as an input argument.
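 *		(As part of this, ibdm_handle_hca_attach() also notifies
 *		the IB nexus, if its callback is registered, with
 *		IBDM_EVENT_HCA_ADDED once the HCA has been added to the
 *		global HCA list.)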
484 * IBT_HCA_DETACH_EVENT: 485 * Retrieves the information about all the ports that are 486 * present on this HCA and calls IB nexus callback with 487 * port guid as an argument 488 * IBT_EVENT_PORT_UP: 489 * Register with IBMF and SA access 490 * Setup IBMF receive callback routine 491 * IBT_EVENT_PORT_DOWN: 492 * Un-Register with IBMF and SA access 493 * Teardown IBMF receive callback routine 494 */ 495 /*ARGSUSED*/ 496 static void 497 ibdm_event_hdlr(void *clnt_hdl, 498 ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event) 499 { 500 ibdm_hca_list_t *hca_list; 501 ibdm_port_attr_t *port; 502 ibmf_saa_handle_t port_sa_hdl; 503 504 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code); 505 506 switch (code) { 507 case IBT_HCA_ATTACH_EVENT: /* New HCA registered with IBTF */ 508 ibdm_handle_hca_attach(event->ev_hca_guid); 509 break; 510 511 case IBT_HCA_DETACH_EVENT: /* HCA unregistered with IBTF */ 512 ibdm_handle_hca_detach(event->ev_hca_guid); 513 mutex_enter(&ibdm.ibdm_ibnex_mutex); 514 if (ibdm.ibdm_ibnex_callback != NULL) { 515 (*ibdm.ibdm_ibnex_callback)((void *) 516 &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED); 517 } 518 mutex_exit(&ibdm.ibdm_ibnex_mutex); 519 break; 520 521 case IBT_EVENT_PORT_UP: 522 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP"); 523 mutex_enter(&ibdm.ibdm_hl_mutex); 524 port = ibdm_get_port_attr(event, &hca_list); 525 if (port == NULL) { 526 IBTF_DPRINTF_L2("ibdm", 527 "\tevent_hdlr: HCA not present"); 528 mutex_exit(&ibdm.ibdm_hl_mutex); 529 break; 530 } 531 ibdm_initialize_port(port); 532 hca_list->hl_nports_active++; 533 mutex_exit(&ibdm.ibdm_hl_mutex); 534 break; 535 536 case IBT_ERROR_PORT_DOWN: 537 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN"); 538 mutex_enter(&ibdm.ibdm_hl_mutex); 539 port = ibdm_get_port_attr(event, &hca_list); 540 if (port == NULL) { 541 IBTF_DPRINTF_L2("ibdm", 542 "\tevent_hdlr: HCA not present"); 543 mutex_exit(&ibdm.ibdm_hl_mutex); 544 break; 545 } 546 hca_list->hl_nports_active--; 547 port_sa_hdl = port->pa_sa_hdl; 548 (void) ibdm_fini_port(port); 549 mutex_exit(&ibdm.ibdm_hl_mutex); 550 ibdm_reset_all_dgids(port_sa_hdl); 551 break; 552 553 default: /* Ignore all other events/errors */ 554 break; 555 } 556 } 557 558 559 /* 560 * ibdm_initialize_port() 561 * Register with IBMF 562 * Register with SA access 563 * Register a receive callback routine with IBMF. IBMF invokes 564 * this routine whenever a MAD arrives at this port. 
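 *	(The receive callback is ibdm_ibmf_recv_cb(), installed with
 *	ibmf_setup_async_cb() on the default QP here and again on each
 *	per-P_Key alternate QP in ibdm_port_attr_ibmf_init().)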
565 * Update the port attributes 566 */ 567 static void 568 ibdm_initialize_port(ibdm_port_attr_t *port) 569 { 570 int ii; 571 uint_t nports, size; 572 uint_t pkey_idx; 573 ib_pkey_t pkey; 574 ibt_hca_portinfo_t *pinfop; 575 ibmf_register_info_t ibmf_reg; 576 ibmf_saa_subnet_event_args_t event_args; 577 578 IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:"); 579 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 580 581 /* Check whether the port is active */ 582 if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL, 583 NULL) != IBT_SUCCESS) 584 return; 585 586 if (port->pa_sa_hdl != NULL) 587 return; 588 589 if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num, 590 &pinfop, &nports, &size) != IBT_SUCCESS) { 591 /* This should not occur */ 592 port->pa_npkeys = 0; 593 port->pa_pkey_tbl = NULL; 594 return; 595 } 596 port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix; 597 598 port->pa_state = pinfop->p_linkstate; 599 port->pa_npkeys = pinfop->p_pkey_tbl_sz; 600 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc( 601 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 602 603 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) 604 port->pa_pkey_tbl[pkey_idx].pt_pkey = 605 pinfop->p_pkey_tbl[pkey_idx]; 606 607 ibt_free_portinfo(pinfop, size); 608 609 event_args.is_event_callback = ibdm_saa_event_cb; 610 event_args.is_event_callback_arg = port; 611 if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args, 612 IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) { 613 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 614 "sa access registration failed"); 615 return; 616 } 617 ibmf_reg.ir_ci_guid = port->pa_hca_guid; 618 ibmf_reg.ir_port_num = port->pa_port_num; 619 ibmf_reg.ir_client_class = DEV_MGT_MANAGER; 620 621 if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL, 622 &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) { 623 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 624 "IBMF registration failed"); 625 (void) ibdm_fini_port(port); 626 return; 627 } 628 if (ibmf_setup_async_cb(port->pa_ibmf_hdl, IBMF_QP_HANDLE_DEFAULT, 629 ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) { 630 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: " 631 "IBMF setup recv cb failed"); 632 (void) ibdm_fini_port(port); 633 return; 634 } 635 636 for (ii = 0; ii < port->pa_npkeys; ii++) { 637 pkey = port->pa_pkey_tbl[ii].pt_pkey; 638 if (IBDM_INVALID_PKEY(pkey)) { 639 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 640 continue; 641 } 642 ibdm_port_attr_ibmf_init(port, pkey, ii); 643 } 644 } 645 646 647 /* 648 * ibdm_port_attr_ibmf_init: 649 * With IBMF - Alloc QP Handle and Setup Async callback 650 */ 651 static void 652 ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii) 653 { 654 int ret; 655 656 if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY, 657 IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) != 658 IBMF_SUCCESS) { 659 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: " 660 "IBMF failed to alloc qp %d", ret); 661 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 662 return; 663 } 664 665 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p", 666 port->pa_ibmf_hdl); 667 668 if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl, 669 port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) != 670 IBMF_SUCCESS) { 671 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: " 672 "IBMF setup recv cb failed %d", ret); 673 (void) ibmf_free_qp(port->pa_ibmf_hdl, 674 &port->pa_pkey_tbl[ii].pt_qp_hdl, 0); 675 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 676 } 677 } 678 679 680 /* 681 * 
ibdm_get_port_attr() 682 * Get port attributes from HCA guid and port number 683 * Return pointer to ibdm_port_attr_t on Success 684 * and NULL on failure 685 */ 686 static ibdm_port_attr_t * 687 ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval) 688 { 689 ibdm_hca_list_t *hca_list; 690 ibdm_port_attr_t *port_attr; 691 int ii; 692 693 IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port); 694 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 695 hca_list = ibdm.ibdm_hca_list_head; 696 while (hca_list) { 697 if (hca_list->hl_hca_guid == event->ev_hca_guid) { 698 for (ii = 0; ii < hca_list->hl_nports; ii++) { 699 port_attr = &hca_list->hl_port_attr[ii]; 700 if (port_attr->pa_port_num == event->ev_port) { 701 *retval = hca_list; 702 return (port_attr); 703 } 704 } 705 } 706 hca_list = hca_list->hl_next; 707 } 708 return (NULL); 709 } 710 711 712 /* 713 * ibdm_update_port_attr() 714 * Update the port attributes 715 */ 716 static void 717 ibdm_update_port_attr(ibdm_port_attr_t *port) 718 { 719 uint_t nports, size; 720 uint_t pkey_idx; 721 ibt_hca_portinfo_t *portinfop; 722 723 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin"); 724 if (ibt_query_hca_ports(port->pa_hca_hdl, 725 port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) { 726 /* This should not occur */ 727 port->pa_npkeys = 0; 728 port->pa_pkey_tbl = NULL; 729 return; 730 } 731 port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix; 732 733 port->pa_state = portinfop->p_linkstate; 734 735 /* 736 * PKey information in portinfo valid only if port is 737 * ACTIVE. Bail out if not. 738 */ 739 if (port->pa_state != IBT_PORT_ACTIVE) { 740 port->pa_npkeys = 0; 741 port->pa_pkey_tbl = NULL; 742 ibt_free_portinfo(portinfop, size); 743 return; 744 } 745 746 port->pa_npkeys = portinfop->p_pkey_tbl_sz; 747 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc( 748 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP); 749 750 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) { 751 port->pa_pkey_tbl[pkey_idx].pt_pkey = 752 portinfop->p_pkey_tbl[pkey_idx]; 753 } 754 ibt_free_portinfo(portinfop, size); 755 } 756 757 758 /* 759 * ibdm_handle_hca_attach() 760 */ 761 static void 762 ibdm_handle_hca_attach(ib_guid_t hca_guid) 763 { 764 uint_t size; 765 uint_t ii, nports; 766 ibt_status_t status; 767 ibt_hca_hdl_t hca_hdl; 768 ibt_hca_attr_t *hca_attr; 769 ibdm_hca_list_t *hca_list, *temp; 770 ibdm_port_attr_t *port_attr; 771 ibt_hca_portinfo_t *portinfop; 772 773 IBTF_DPRINTF_L4("ibdm", 774 "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid); 775 776 /* open the HCA first */ 777 if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid, 778 &hca_hdl)) != IBT_SUCCESS) { 779 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: " 780 "open_hca failed, status 0x%x", status); 781 return; 782 } 783 784 hca_attr = (ibt_hca_attr_t *) 785 kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP); 786 /* ibt_query_hca always returns IBT_SUCCESS */ 787 (void) ibt_query_hca(hca_hdl, hca_attr); 788 789 IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x," 790 " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id, 791 hca_attr->hca_version_id, hca_attr->hca_nports); 792 793 if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports, 794 &size)) != IBT_SUCCESS) { 795 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: " 796 "ibt_query_hca_ports failed, status 0x%x", status); 797 kmem_free(hca_attr, sizeof (ibt_hca_attr_t)); 798 (void) ibt_close_hca(hca_hdl); 799 return; 800 } 801 hca_list = (ibdm_hca_list_t *) 802 
kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP); 803 hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc( 804 (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP); 805 hca_list->hl_hca_guid = hca_attr->hca_node_guid; 806 hca_list->hl_nports = hca_attr->hca_nports; 807 hca_list->hl_attach_time = ddi_get_time(); 808 hca_list->hl_hca_hdl = hca_hdl; 809 810 /* 811 * Init a dummy port attribute for the HCA node 812 * This is for Per-HCA Node. Initialize port_attr : 813 * hca_guid & port_guid -> hca_guid 814 * npkeys, pkey_tbl is NULL 815 * port_num, sn_prefix is 0 816 * vendorid, product_id, dev_version from HCA 817 * pa_state is IBT_PORT_ACTIVE 818 */ 819 hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc( 820 sizeof (ibdm_port_attr_t), KM_SLEEP); 821 port_attr = hca_list->hl_hca_port_attr; 822 port_attr->pa_vendorid = hca_attr->hca_vendor_id; 823 port_attr->pa_productid = hca_attr->hca_device_id; 824 port_attr->pa_dev_version = hca_attr->hca_version_id; 825 port_attr->pa_hca_guid = hca_attr->hca_node_guid; 826 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl; 827 port_attr->pa_port_guid = hca_attr->hca_node_guid; 828 port_attr->pa_state = IBT_PORT_ACTIVE; 829 830 831 for (ii = 0; ii < nports; ii++) { 832 port_attr = &hca_list->hl_port_attr[ii]; 833 port_attr->pa_vendorid = hca_attr->hca_vendor_id; 834 port_attr->pa_productid = hca_attr->hca_device_id; 835 port_attr->pa_dev_version = hca_attr->hca_version_id; 836 port_attr->pa_hca_guid = hca_attr->hca_node_guid; 837 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl; 838 port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid; 839 port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix; 840 port_attr->pa_port_num = portinfop[ii].p_port_num; 841 port_attr->pa_state = portinfop[ii].p_linkstate; 842 843 /* 844 * Register with IBMF, SA access when the port is in 845 * ACTIVE state. Also register a callback routine 846 * with IBMF to receive incoming DM MAD's. 847 * The IBDM event handler takes care of registration of 848 * port which are not active. 
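		 * Specifically, when such a port later reports
		 * IBT_EVENT_PORT_UP, ibdm_event_hdlr() calls
		 * ibdm_initialize_port() under ibdm_hl_mutex, mirroring
		 * what is done below for ports that are already ACTIVE.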
849 */ 850 IBTF_DPRINTF_L4("ibdm", 851 "\thandle_hca_attach: port guid %llx Port state 0x%x", 852 port_attr->pa_port_guid, portinfop[ii].p_linkstate); 853 854 if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) { 855 mutex_enter(&ibdm.ibdm_hl_mutex); 856 hca_list->hl_nports_active++; 857 ibdm_initialize_port(port_attr); 858 mutex_exit(&ibdm.ibdm_hl_mutex); 859 } 860 } 861 mutex_enter(&ibdm.ibdm_hl_mutex); 862 for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) { 863 if (temp->hl_hca_guid == hca_guid) { 864 IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX " 865 "already seen by IBDM", hca_guid); 866 mutex_exit(&ibdm.ibdm_hl_mutex); 867 (void) ibdm_uninit_hca(hca_list); 868 return; 869 } 870 } 871 ibdm.ibdm_hca_count++; 872 if (ibdm.ibdm_hca_list_head == NULL) { 873 ibdm.ibdm_hca_list_head = hca_list; 874 ibdm.ibdm_hca_list_tail = hca_list; 875 } else { 876 ibdm.ibdm_hca_list_tail->hl_next = hca_list; 877 ibdm.ibdm_hca_list_tail = hca_list; 878 } 879 mutex_exit(&ibdm.ibdm_hl_mutex); 880 mutex_enter(&ibdm.ibdm_ibnex_mutex); 881 if (ibdm.ibdm_ibnex_callback != NULL) { 882 (*ibdm.ibdm_ibnex_callback)((void *) 883 &hca_guid, IBDM_EVENT_HCA_ADDED); 884 } 885 mutex_exit(&ibdm.ibdm_ibnex_mutex); 886 887 kmem_free(hca_attr, sizeof (ibt_hca_attr_t)); 888 ibt_free_portinfo(portinfop, size); 889 } 890 891 892 /* 893 * ibdm_handle_hca_detach() 894 */ 895 static void 896 ibdm_handle_hca_detach(ib_guid_t hca_guid) 897 { 898 ibdm_hca_list_t *head, *prev = NULL; 899 900 IBTF_DPRINTF_L4("ibdm", 901 "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid); 902 903 /* Make sure no probes are running */ 904 mutex_enter(&ibdm.ibdm_mutex); 905 while (ibdm.ibdm_busy & IBDM_BUSY) 906 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 907 ibdm.ibdm_busy |= IBDM_BUSY; 908 mutex_exit(&ibdm.ibdm_mutex); 909 910 mutex_enter(&ibdm.ibdm_hl_mutex); 911 head = ibdm.ibdm_hca_list_head; 912 while (head) { 913 if (head->hl_hca_guid == hca_guid) { 914 if (prev == NULL) 915 ibdm.ibdm_hca_list_head = head->hl_next; 916 else 917 prev->hl_next = head->hl_next; 918 ibdm.ibdm_hca_count--; 919 break; 920 } 921 prev = head; 922 head = head->hl_next; 923 } 924 mutex_exit(&ibdm.ibdm_hl_mutex); 925 if (ibdm_uninit_hca(head) != IBDM_SUCCESS) 926 (void) ibdm_handle_hca_attach(hca_guid); 927 928 mutex_enter(&ibdm.ibdm_mutex); 929 ibdm.ibdm_busy &= ~IBDM_BUSY; 930 cv_broadcast(&ibdm.ibdm_busy_cv); 931 mutex_exit(&ibdm.ibdm_mutex); 932 } 933 934 935 static int 936 ibdm_uninit_hca(ibdm_hca_list_t *head) 937 { 938 int ii; 939 ibdm_port_attr_t *port_attr; 940 941 for (ii = 0; ii < head->hl_nports; ii++) { 942 port_attr = &head->hl_port_attr[ii]; 943 if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) { 944 IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x " 945 "ibdm_fini_port() failed", head, ii); 946 return (IBDM_FAILURE); 947 } 948 } 949 if (head->hl_hca_hdl) 950 if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS) 951 return (IBDM_FAILURE); 952 kmem_free(head->hl_port_attr, 953 head->hl_nports * sizeof (ibdm_port_attr_t)); 954 kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t)); 955 kmem_free(head, sizeof (ibdm_hca_list_t)); 956 return (IBDM_SUCCESS); 957 } 958 959 960 /* 961 * For each port on the HCA, 962 * 1) Teardown IBMF receive callback function 963 * 2) Unregister with IBMF 964 * 3) Unregister with SA access 965 */ 966 static int 967 ibdm_fini_port(ibdm_port_attr_t *port_attr) 968 { 969 int ii, ibmf_status; 970 971 for (ii = 0; ii < port_attr->pa_npkeys; ii++) { 972 if (port_attr->pa_pkey_tbl == NULL) 973 break; 974 if 
(!port_attr->pa_pkey_tbl[ii].pt_qp_hdl) 975 continue; 976 if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) { 977 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 978 "ibdm_port_attr_ibmf_fini failed for " 979 "port pkey 0x%x", ii); 980 return (IBDM_FAILURE); 981 } 982 } 983 984 if (port_attr->pa_ibmf_hdl) { 985 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 986 IBMF_QP_HANDLE_DEFAULT, 0); 987 if (ibmf_status != IBMF_SUCCESS) { 988 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 989 "ibmf_tear_down_async_cb failed %d", ibmf_status); 990 return (IBDM_FAILURE); 991 } 992 993 ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0); 994 if (ibmf_status != IBMF_SUCCESS) { 995 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 996 "ibmf_unregister failed %d", ibmf_status); 997 return (IBDM_FAILURE); 998 } 999 1000 port_attr->pa_ibmf_hdl = NULL; 1001 } 1002 1003 if (port_attr->pa_sa_hdl) { 1004 ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0); 1005 if (ibmf_status != IBMF_SUCCESS) { 1006 IBTF_DPRINTF_L4("ibdm", "\tfini_port: " 1007 "ibmf_sa_session_close failed %d", ibmf_status); 1008 return (IBDM_FAILURE); 1009 } 1010 port_attr->pa_sa_hdl = NULL; 1011 } 1012 1013 if (port_attr->pa_pkey_tbl != NULL) { 1014 kmem_free(port_attr->pa_pkey_tbl, 1015 port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 1016 port_attr->pa_pkey_tbl = NULL; 1017 port_attr->pa_npkeys = 0; 1018 } 1019 1020 return (IBDM_SUCCESS); 1021 } 1022 1023 1024 /* 1025 * ibdm_port_attr_ibmf_fini: 1026 * With IBMF - Tear down Async callback and free QP Handle 1027 */ 1028 static int 1029 ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii) 1030 { 1031 int ibmf_status; 1032 1033 IBTF_DPRINTF_L5("ibdm", "\tport_attr_ibmf_fini:"); 1034 1035 if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) { 1036 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl, 1037 port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1038 if (ibmf_status != IBMF_SUCCESS) { 1039 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1040 "ibmf_tear_down_async_cb failed %d", ibmf_status); 1041 return (IBDM_FAILURE); 1042 } 1043 ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl, 1044 &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0); 1045 if (ibmf_status != IBMF_SUCCESS) { 1046 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: " 1047 "ibmf_free_qp failed %d", ibmf_status); 1048 return (IBDM_FAILURE); 1049 } 1050 port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL; 1051 } 1052 return (IBDM_SUCCESS); 1053 } 1054 1055 1056 /* 1057 * ibdm_gid_decr_pending: 1058 * decrement gl_pending_cmds. If zero wakeup sleeping threads 1059 */ 1060 static void 1061 ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo) 1062 { 1063 mutex_enter(&ibdm.ibdm_mutex); 1064 mutex_enter(&gidinfo->gl_mutex); 1065 if (--gidinfo->gl_pending_cmds == 0) { 1066 /* 1067 * Handle DGID getting removed. 
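		 * gl_disconnected is set when the SA reports that the
		 * DGID is gone; once the last outstanding command
		 * completes, the gidinfo is freed via
		 * ibdm_delete_gidinfo() and the probe count is dropped
		 * so that waiters can make progress.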
		 */
		if (gidinfo->gl_disconnected) {
			mutex_exit(&gidinfo->gl_mutex);
			mutex_exit(&ibdm.ibdm_mutex);

			IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: "
			    "gidinfo %p hot removal", gidinfo);
			ibdm_delete_gidinfo(gidinfo);

			mutex_enter(&ibdm.ibdm_mutex);
			ibdm.ibdm_ngid_probes_in_progress--;
			ibdm_wait_probe_completion();
			mutex_exit(&ibdm.ibdm_mutex);
			return;
		}
		mutex_exit(&gidinfo->gl_mutex);
		mutex_exit(&ibdm.ibdm_mutex);
		ibdm_notify_newgid_iocs(gidinfo);
		mutex_enter(&ibdm.ibdm_mutex);
		mutex_enter(&gidinfo->gl_mutex);

		ibdm.ibdm_ngid_probes_in_progress--;
		ibdm_wait_probe_completion();
	}
	mutex_exit(&gidinfo->gl_mutex);
	mutex_exit(&ibdm.ibdm_mutex);
}


/*
 * ibdm_wait_probe_completion:
 *	wait for probing to complete
 */
static void
ibdm_wait_probe_completion(void)
{
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
	if (ibdm.ibdm_ngid_probes_in_progress) {
		IBTF_DPRINTF_L4("ibdm", "\twait for probe complete");
		ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS;
		while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS)
			cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex);
	}
}


/*
 * ibdm_wakeup_probe_gid_cv:
 *	wakeup waiting threads (based on ibdm_ngid_probes_in_progress)
 */
static void
ibdm_wakeup_probe_gid_cv(void)
{
	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
	if (!ibdm.ibdm_ngid_probes_in_progress) {
		IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup");
		ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
		cv_broadcast(&ibdm.ibdm_probe_cv);
	}

}


/*
 * ibdm_sweep_fabric(reprobe_flag)
 *	Find all possible Managed IOUs and their IOCs that are visible
 *	to the host. The algorithm used is as follows:
 *
 *	Send a "bus walk" request for each port on the host HCA to SA access.
 *	SA returns the complete set of GIDs that are reachable from the
 *	source port. This is done in parallel.
 *
 *	Initialize GID state to IBDM_GID_PROBE_NOT_DONE
 *
 *	Sort the GID list and eliminate duplicate GIDs
 *		1) Use DGID for sorting
 *		2) Use PortGuid for sorting
 *			Send an SA query to retrieve the NodeRecord and
 *			extract the PortGuid from that.
 *
 *	Set GID state to IBDM_GID_PROBING_FAILED for all the ports that don't
 *	support DM MADs
 *		Send a "Portinfo" query to get the port capabilities and
 *		then check for DM MAD support
 *
 *	Send a "ClassPortInfo" request for all the GIDs in parallel,
 *	set the GID state to IBDM_GET_CLASSPORTINFO and wait on the
 *	cv_signal to complete.
 *
 *	When the DM agent on the remote GID sends back the response, IBMF
 *	invokes the DM callback routine.
 *
 *	If the response is proper, send an "IOUnitInfo" request and set
 *	the GID state to IBDM_GET_IOUNITINFO.
 *
 *	If the response is proper, send an "IocProfileInfo" request to
 *	all the IOCs simultaneously and set the GID state to
 *	IBDM_GET_IOC_DETAILS.
 *
 *	Send requests to get the Service entries simultaneously
 *
 *	Signal the waiting thread when responses have been received for all
 *	the commands.
 *
 *	Set the GID state to IBDM_GID_PROBING_FAILED when an error response
 *	is received during the probing period.
 *
 *	Note:
 *	ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds
 *	keep track of the number of commands in progress at any point of time.
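 *
 *	A rough sketch of the per-GID state progression driven by the
 *	steps above (illustrative only, not literal code):
 *
 *	    IBDM_GID_PROBE_NOT_DONE
 *		-> IBDM_GET_CLASSPORTINFO	(ClassPortInfo sent)
 *		-> IBDM_GET_IOUNITINFO		(IOUnitInfo sent)
 *		-> IBDM_GET_IOC_DETAILS		(IOCProfile/ServiceEntries)
 *		-> IBDM_GID_PROBING_COMPLETE
 *	    with IBDM_GID_PROBING_FAILED or IBDM_GID_PROBING_SKIPPED on an
 *	    error response or a duplicate NodeGUID, respectively.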
1176 * MAD transaction ID is used to identify a particular GID 1177 * TBD: Consider registering the IBMF receive callback on demand 1178 * 1179 * Note: This routine must be called with ibdm.ibdm_mutex held 1180 * TBD: Re probe the failure GID (for certain failures) when requested 1181 * for fabric sweep next time 1182 * 1183 * Parameters : If reprobe_flag is set, All IOCs will be reprobed. 1184 */ 1185 static void 1186 ibdm_sweep_fabric(int reprobe_flag) 1187 { 1188 int ii; 1189 int new_paths = 0; 1190 uint8_t niocs; 1191 taskqid_t tid; 1192 ibdm_ioc_info_t *ioc; 1193 ibdm_hca_list_t *hca_list = NULL; 1194 ibdm_port_attr_t *port = NULL; 1195 ibdm_dp_gidinfo_t *gid_info; 1196 1197 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter"); 1198 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 1199 1200 /* 1201 * Check whether a sweep already in progress. If so, just 1202 * wait for the fabric sweep to complete 1203 */ 1204 while (ibdm.ibdm_busy & IBDM_BUSY) 1205 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 1206 ibdm.ibdm_busy |= IBDM_BUSY; 1207 mutex_exit(&ibdm.ibdm_mutex); 1208 1209 ibdm_dump_sweep_fabric_timestamp(0); 1210 1211 /* Rescan the GID list for any removed GIDs for reprobe */ 1212 if (reprobe_flag) 1213 ibdm_rescan_gidlist(NULL); 1214 1215 /* 1216 * Get list of all the ports reachable from the local known HCA 1217 * ports which are active 1218 */ 1219 mutex_enter(&ibdm.ibdm_hl_mutex); 1220 for (ibdm_get_next_port(&hca_list, &port, 1); port; 1221 ibdm_get_next_port(&hca_list, &port, 1)) { 1222 /* 1223 * Get PATHS to all the reachable ports from 1224 * SGID and update the global ibdm structure. 1225 */ 1226 new_paths = ibdm_get_reachable_ports(port, hca_list); 1227 ibdm.ibdm_ngids += new_paths; 1228 } 1229 mutex_exit(&ibdm.ibdm_hl_mutex); 1230 1231 mutex_enter(&ibdm.ibdm_mutex); 1232 ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids; 1233 mutex_exit(&ibdm.ibdm_mutex); 1234 1235 /* Send a request to probe GIDs asynchronously. */ 1236 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1237 gid_info = gid_info->gl_next) { 1238 mutex_enter(&gid_info->gl_mutex); 1239 gid_info->gl_reprobe_flag = reprobe_flag; 1240 mutex_exit(&gid_info->gl_mutex); 1241 1242 /* process newly encountered GIDs */ 1243 tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread, 1244 (void *)gid_info, TQ_NOSLEEP); 1245 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p" 1246 " taskq_id = %x", gid_info, tid); 1247 /* taskq failed to dispatch call it directly */ 1248 if (tid == NULL) 1249 ibdm_probe_gid_thread((void *)gid_info); 1250 } 1251 1252 mutex_enter(&ibdm.ibdm_mutex); 1253 ibdm_wait_probe_completion(); 1254 1255 /* 1256 * Update the properties, if reprobe_flag is set 1257 * Skip if gl_reprobe_flag is set, this will be 1258 * a re-inserted / new GID, for which notifications 1259 * have already been send. 
1260 */ 1261 if (reprobe_flag) { 1262 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 1263 gid_info = gid_info->gl_next) { 1264 if (gid_info->gl_iou == NULL) 1265 continue; 1266 if (gid_info->gl_reprobe_flag) { 1267 gid_info->gl_reprobe_flag = 0; 1268 continue; 1269 } 1270 1271 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1272 for (ii = 0; ii < niocs; ii++) { 1273 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1274 if (ioc) 1275 ibdm_reprobe_update_port_srv(ioc, 1276 gid_info); 1277 } 1278 } 1279 } 1280 ibdm_dump_sweep_fabric_timestamp(1); 1281 1282 ibdm.ibdm_busy &= ~IBDM_BUSY; 1283 cv_broadcast(&ibdm.ibdm_busy_cv); 1284 IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT"); 1285 } 1286 1287 1288 /* 1289 * ibdm_probe_gid_thread: 1290 * thread that does the actual work for sweeping the fabric 1291 * for a given GID 1292 */ 1293 static void 1294 ibdm_probe_gid_thread(void *args) 1295 { 1296 int reprobe_flag; 1297 ib_guid_t node_guid; 1298 ib_guid_t port_guid; 1299 ibdm_dp_gidinfo_t *gid_info; 1300 1301 gid_info = (ibdm_dp_gidinfo_t *)args; 1302 reprobe_flag = gid_info->gl_reprobe_flag; 1303 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d", 1304 gid_info, reprobe_flag); 1305 ASSERT(gid_info != NULL); 1306 ASSERT(gid_info->gl_pending_cmds == 0); 1307 1308 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE && 1309 reprobe_flag == 0) { 1310 /* 1311 * This GID may have been already probed. Send 1312 * in a CLP to check if IOUnitInfo changed? 1313 * Explicitly set gl_reprobe_flag to 0 so that 1314 * IBnex is not notified on completion 1315 */ 1316 if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) { 1317 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: " 1318 "get new IOCs information"); 1319 mutex_enter(&gid_info->gl_mutex); 1320 gid_info->gl_pending_cmds++; 1321 gid_info->gl_state = IBDM_GET_IOUNITINFO; 1322 gid_info->gl_reprobe_flag = 0; 1323 mutex_exit(&gid_info->gl_mutex); 1324 if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) { 1325 mutex_enter(&gid_info->gl_mutex); 1326 gid_info->gl_pending_cmds = 0; 1327 mutex_exit(&gid_info->gl_mutex); 1328 mutex_enter(&ibdm.ibdm_mutex); 1329 --ibdm.ibdm_ngid_probes_in_progress; 1330 ibdm_wakeup_probe_gid_cv(); 1331 mutex_exit(&ibdm.ibdm_mutex); 1332 } 1333 } else { 1334 mutex_enter(&ibdm.ibdm_mutex); 1335 --ibdm.ibdm_ngid_probes_in_progress; 1336 ibdm_wakeup_probe_gid_cv(); 1337 mutex_exit(&ibdm.ibdm_mutex); 1338 } 1339 return; 1340 } else if (reprobe_flag && gid_info->gl_state == 1341 IBDM_GID_PROBING_COMPLETE) { 1342 /* 1343 * Reprobe all IOCs for the GID which has completed 1344 * probe. Skip other port GIDs to same IOU. 1345 * Explicitly set gl_reprobe_flag to 0 so that 1346 * IBnex is not notified on completion 1347 */ 1348 ibdm_ioc_info_t *ioc_info; 1349 uint8_t niocs, ii; 1350 1351 mutex_enter(&gid_info->gl_mutex); 1352 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 1353 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 1354 gid_info->gl_pending_cmds += niocs; 1355 gid_info->gl_reprobe_flag = 0; 1356 mutex_exit(&gid_info->gl_mutex); 1357 for (ii = 0; ii < niocs; ii++) { 1358 uchar_t slot_info; 1359 ib_dm_io_unitinfo_t *giou_info; 1360 1361 /* 1362 * Check whether IOC is present in the slot 1363 * Series of nibbles (in the field 1364 * iou_ctrl_list) represents a slot in the 1365 * IOU. 
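			 * (Illustrative decode for slot index ii, matching
			 * the nibble layout described next; this mirrors
			 * the code below and is not literal driver code:
			 *	slot_info = iou_ctrl_list[ii / 2];
			 *	nibble = (ii % 2) ? (slot_info & 0xf) :
			 *	    ((slot_info >> 4) & 0xf);
			 * )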
1366 * Byte format: 76543210 1367 * Bits 0-3 of first byte represent Slot 2 1368 * bits 4-7 of first byte represent slot 1, 1369 * bits 0-3 of second byte represent slot 4 1370 * and so on 1371 * Each 4-bit nibble has the following meaning 1372 * 0x0 : IOC not installed 1373 * 0x1 : IOC is present 1374 * 0xf : Slot does not exist 1375 * and all other values are reserved. 1376 */ 1377 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 1378 giou_info = &gid_info->gl_iou->iou_info; 1379 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 1380 if ((ii % 2) == 0) 1381 slot_info = (slot_info >> 4); 1382 1383 if ((slot_info & 0xf) != 1) { 1384 ioc_info->ioc_state = 1385 IBDM_IOC_STATE_PROBE_FAILED; 1386 ibdm_gid_decr_pending(gid_info); 1387 continue; 1388 } 1389 1390 if (ibdm_send_ioc_profile(gid_info, ii) != 1391 IBDM_SUCCESS) { 1392 ibdm_gid_decr_pending(gid_info); 1393 } 1394 } 1395 1396 return; 1397 } else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 1398 mutex_enter(&ibdm.ibdm_mutex); 1399 --ibdm.ibdm_ngid_probes_in_progress; 1400 ibdm_wakeup_probe_gid_cv(); 1401 mutex_exit(&ibdm.ibdm_mutex); 1402 return; 1403 } 1404 1405 mutex_enter(&gid_info->gl_mutex); 1406 gid_info->gl_pending_cmds++; 1407 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 1408 mutex_exit(&gid_info->gl_mutex); 1409 1410 /* 1411 * Check whether the destination GID supports DM agents. If 1412 * not, stop probing the GID and continue with the next GID 1413 * in the list. 1414 */ 1415 if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) { 1416 mutex_enter(&gid_info->gl_mutex); 1417 gid_info->gl_pending_cmds = 0; 1418 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1419 mutex_exit(&gid_info->gl_mutex); 1420 ibdm_delete_glhca_list(gid_info); 1421 mutex_enter(&ibdm.ibdm_mutex); 1422 --ibdm.ibdm_ngid_probes_in_progress; 1423 ibdm_wakeup_probe_gid_cv(); 1424 mutex_exit(&ibdm.ibdm_mutex); 1425 return; 1426 } 1427 1428 /* Get the nodeguid and portguid of the port */ 1429 if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid, 1430 &node_guid, &port_guid) != IBDM_SUCCESS) { 1431 mutex_enter(&gid_info->gl_mutex); 1432 gid_info->gl_pending_cmds = 0; 1433 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1434 mutex_exit(&gid_info->gl_mutex); 1435 ibdm_delete_glhca_list(gid_info); 1436 mutex_enter(&ibdm.ibdm_mutex); 1437 --ibdm.ibdm_ngid_probes_in_progress; 1438 ibdm_wakeup_probe_gid_cv(); 1439 mutex_exit(&ibdm.ibdm_mutex); 1440 return; 1441 } 1442 1443 /* 1444 * Check whether we already knew about this NodeGuid 1445 * If so, do not probe the GID and continue with the 1446 * next GID in the gid list. Set the GID state to 1447 * probing done. 
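	 * (ibdm_check_dest_nodeguid() still records this GID under the
	 * already-known gidinfo entry, so the extra path is remembered
	 * even though it is not probed again.)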
1448 */ 1449 mutex_enter(&ibdm.ibdm_mutex); 1450 gid_info->gl_nodeguid = node_guid; 1451 gid_info->gl_portguid = port_guid; 1452 if (ibdm_check_dest_nodeguid(gid_info) != NULL) { 1453 mutex_exit(&ibdm.ibdm_mutex); 1454 mutex_enter(&gid_info->gl_mutex); 1455 gid_info->gl_pending_cmds = 0; 1456 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 1457 mutex_exit(&gid_info->gl_mutex); 1458 ibdm_delete_glhca_list(gid_info); 1459 mutex_enter(&ibdm.ibdm_mutex); 1460 --ibdm.ibdm_ngid_probes_in_progress; 1461 ibdm_wakeup_probe_gid_cv(); 1462 mutex_exit(&ibdm.ibdm_mutex); 1463 return; 1464 } 1465 ibdm_add_to_gl_gid(gid_info, gid_info); 1466 mutex_exit(&ibdm.ibdm_mutex); 1467 1468 /* 1469 * New or reinserted GID : Enable notification to IBnex 1470 */ 1471 mutex_enter(&gid_info->gl_mutex); 1472 gid_info->gl_reprobe_flag = 1; 1473 mutex_exit(&gid_info->gl_mutex); 1474 1475 /* 1476 * Send ClassPortInfo request to the GID asynchronously. 1477 */ 1478 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 1479 mutex_enter(&gid_info->gl_mutex); 1480 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1481 gid_info->gl_pending_cmds = 0; 1482 mutex_exit(&gid_info->gl_mutex); 1483 ibdm_delete_glhca_list(gid_info); 1484 mutex_enter(&ibdm.ibdm_mutex); 1485 --ibdm.ibdm_ngid_probes_in_progress; 1486 ibdm_wakeup_probe_gid_cv(); 1487 mutex_exit(&ibdm.ibdm_mutex); 1488 return; 1489 } 1490 } 1491 1492 1493 /* 1494 * ibdm_check_dest_nodeguid 1495 * Searches for the NodeGuid in the GID list 1496 * Returns matching gid_info if found and otherwise NULL 1497 * 1498 * This function is called to handle new GIDs discovered 1499 * during device sweep / probe or for GID_AVAILABLE event. 1500 * 1501 * Parameter : 1502 * gid_info GID to check 1503 */ 1504 static ibdm_dp_gidinfo_t * 1505 ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info) 1506 { 1507 ibdm_dp_gidinfo_t *gid_list; 1508 ibdm_gid_t *tmp; 1509 1510 IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid"); 1511 1512 gid_list = ibdm.ibdm_dp_gidlist_head; 1513 while (gid_list) { 1514 if ((gid_list != gid_info) && 1515 (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) { 1516 IBTF_DPRINTF_L4("ibdm", 1517 "\tcheck_dest_nodeguid: NodeGuid is present"); 1518 1519 /* Add to gid_list */ 1520 tmp = kmem_zalloc(sizeof (ibdm_gid_t), 1521 KM_SLEEP); 1522 tmp->gid_dgid_hi = gid_info->gl_dgid_hi; 1523 tmp->gid_dgid_lo = gid_info->gl_dgid_lo; 1524 tmp->gid_next = gid_list->gl_gid; 1525 gid_list->gl_gid = tmp; 1526 gid_list->gl_ngids++; 1527 return (gid_list); 1528 } 1529 1530 gid_list = gid_list->gl_next; 1531 } 1532 1533 return (NULL); 1534 } 1535 1536 1537 /* 1538 * ibdm_is_dev_mgt_supported 1539 * Get the PortInfo attribute (SA Query) 1540 * Check "CompatabilityMask" field in the Portinfo. 
 *	Return IBDM_SUCCESS if DM MADs are supported (bit 19 of the
 *	CapabilityMask is set) by the port, otherwise IBDM_FAILURE
 */
static int
ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info)
{
	int			ret;
	size_t			length = 0;
	sa_portinfo_record_t	req, *resp = NULL;
	ibmf_saa_access_args_t	qargs;

	bzero(&req, sizeof (sa_portinfo_record_t));
	req.EndportLID = gid_info->gl_dlid;

	qargs.sq_attr_id	= SA_PORTINFORECORD_ATTRID;
	qargs.sq_access_type	= IBMF_SAA_RETRIEVE;
	qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID;
	qargs.sq_template	= &req;
	qargs.sq_callback	= NULL;
	qargs.sq_callback_arg	= NULL;

	ret = ibmf_sa_access(gid_info->gl_sa_hdl,
	    &qargs, 0, &length, (void **)&resp);

	if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
		IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:"
		    "failed to get PORTINFO attribute %d", ret);
		return (IBDM_FAILURE);
	}

	if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) {
		IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!");
		ret = IBDM_SUCCESS;
	} else {
		IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: "
		    "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask);
		ret = IBDM_FAILURE;
	}
	kmem_free(resp, length);
	return (ret);
}


/*
 * ibdm_get_node_port_guids()
 *	Get the NodeInfoRecord of the port
 *	Save NodeGuid and PortGUID values in the GID list structure.
 *	Return IBDM_SUCCESS/IBDM_FAILURE
 */
static int
ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid,
    ib_guid_t *node_guid, ib_guid_t *port_guid)
{
	int			ret;
	size_t			length = 0;
	sa_node_record_t	req, *resp = NULL;
	ibmf_saa_access_args_t	qargs;

	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids");

	bzero(&req, sizeof (sa_node_record_t));
	req.LID = dlid;

	qargs.sq_attr_id	= SA_NODERECORD_ATTRID;
	qargs.sq_access_type	= IBMF_SAA_RETRIEVE;
	qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID;
	qargs.sq_template	= &req;
	qargs.sq_callback	= NULL;
	qargs.sq_callback_arg	= NULL;

	ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp);
	if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
		IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:"
		    " SA Retrieve Failed: %d", ret);
		return (IBDM_FAILURE);
	}
	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port"
	    "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID);

	*node_guid = resp->NodeInfo.NodeGUID;
	*port_guid = resp->NodeInfo.PortGUID;
	kmem_free(resp, length);
	return (IBDM_SUCCESS);
}


/*
 * ibdm_get_reachable_ports()
 *	Get the list of destination GIDs (and their path records) by
 *	querying the SA access.
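 *	(The lookup below uses ibmf_saa_paths_from_gid() with
 *	IBMF_SAA_PKEY_WC, i.e. a wildcarded P_Key, requesting reversible
 *	paths only; each returned path record either matches an existing
 *	gidinfo or causes a new one to be created.)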
1631 * 1632 * Returns Number paths 1633 */ 1634 static int 1635 ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca) 1636 { 1637 uint_t ii, jj, nrecs; 1638 uint_t npaths = 0; 1639 size_t length; 1640 ib_gid_t sgid; 1641 ibdm_pkey_tbl_t *pkey_tbl; 1642 sa_path_record_t *result; 1643 sa_path_record_t *precp; 1644 ibdm_dp_gidinfo_t *gid_info; 1645 1646 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 1647 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo); 1648 1649 sgid.gid_prefix = portinfo->pa_sn_prefix; 1650 sgid.gid_guid = portinfo->pa_port_guid; 1651 1652 /* get reversible paths */ 1653 if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl, 1654 sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result) 1655 != IBMF_SUCCESS) { 1656 IBTF_DPRINTF_L2("ibdm", 1657 "\tget_reachable_ports: Getting path records failed"); 1658 return (0); 1659 } 1660 1661 for (ii = 0; ii < nrecs; ii++) { 1662 precp = &result[ii]; 1663 if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid, 1664 precp->DGID.gid_prefix)) != NULL) { 1665 IBTF_DPRINTF_L2("ibdm", "\tget_reachable_ports: " 1666 "Already exists nrecs %d, ii %d", nrecs, ii); 1667 ibdm_addto_glhcalist(gid_info, hca); 1668 continue; 1669 } 1670 /* 1671 * This is a new GID. Allocate a GID structure and 1672 * initialize the structure 1673 * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0) 1674 * by kmem_zalloc call 1675 */ 1676 gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 1677 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 1678 gid_info->gl_dgid_hi = precp->DGID.gid_prefix; 1679 gid_info->gl_dgid_lo = precp->DGID.gid_guid; 1680 gid_info->gl_sgid_hi = precp->SGID.gid_prefix; 1681 gid_info->gl_sgid_lo = precp->SGID.gid_guid; 1682 gid_info->gl_p_key = precp->P_Key; 1683 gid_info->gl_sa_hdl = portinfo->pa_sa_hdl; 1684 gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl; 1685 gid_info->gl_slid = precp->SLID; 1686 gid_info->gl_dlid = precp->DLID; 1687 gid_info->gl_transactionID = ++ibdm.ibdm_transactionID; 1688 ibdm_addto_glhcalist(gid_info, hca); 1689 1690 ibdm_dump_path_info(precp); 1691 1692 gid_info->gl_qp_hdl = NULL; 1693 if (portinfo->pa_pkey_tbl == NULL) 1694 break; 1695 1696 for (jj = 0; jj < portinfo->pa_npkeys; jj++) { 1697 pkey_tbl = &portinfo->pa_pkey_tbl[jj]; 1698 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 1699 (pkey_tbl->pt_qp_hdl != NULL)) { 1700 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 1701 break; 1702 } 1703 } 1704 1705 if (gid_info->gl_qp_hdl == NULL) { 1706 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 1707 ibdm_delete_glhca_list(gid_info); 1708 continue; 1709 } 1710 if (ibdm.ibdm_dp_gidlist_head == NULL) { 1711 ibdm.ibdm_dp_gidlist_head = gid_info; 1712 ibdm.ibdm_dp_gidlist_tail = gid_info; 1713 } else { 1714 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 1715 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 1716 ibdm.ibdm_dp_gidlist_tail = gid_info; 1717 } 1718 npaths++; 1719 } 1720 kmem_free(result, length); 1721 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths); 1722 return (npaths); 1723 } 1724 1725 1726 /* 1727 * ibdm_check_dgid() 1728 * Look in the global list to check whether we know this DGID already 1729 * Return IBDM_GID_PRESENT/IBDM_GID_NOT_PRESENT 1730 */ 1731 static ibdm_dp_gidinfo_t * 1732 ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix) 1733 { 1734 ibdm_dp_gidinfo_t *gid_list; 1735 1736 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 1737 gid_list = gid_list->gl_next) { 1738 if ((guid == gid_list->gl_dgid_lo) 
&& 1739 (prefix == gid_list->gl_dgid_hi)) { 1740 break; 1741 } 1742 } 1743 return (gid_list); 1744 } 1745 1746 1747 /* 1748 * ibdm_find_gid() 1749 * Look in the global list to find a GID entry with matching 1750 * port & node GUID. 1751 * Return pointer to gidinfo if found, else return NULL 1752 */ 1753 static ibdm_dp_gidinfo_t * 1754 ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid) 1755 { 1756 ibdm_dp_gidinfo_t *gid_list; 1757 1758 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n", 1759 nodeguid, portguid); 1760 1761 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 1762 gid_list = gid_list->gl_next) { 1763 if ((portguid == gid_list->gl_portguid) && 1764 (nodeguid == gid_list->gl_nodeguid)) { 1765 break; 1766 } 1767 } 1768 1769 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n", 1770 gid_list); 1771 return (gid_list); 1772 } 1773 1774 1775 /* 1776 * ibdm_send_classportinfo() 1777 * Send classportinfo request. When the request is completed 1778 * IBMF calls ibdm_classportinfo_cb routine to inform about 1779 * the completion. 1780 * Returns IBDM_SUCCESS/IBDM_FAILURE 1781 */ 1782 static int 1783 ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info) 1784 { 1785 ibmf_msg_t *msg; 1786 ib_mad_hdr_t *hdr; 1787 ibdm_timeout_cb_args_t *cb_args; 1788 1789 IBTF_DPRINTF_L4("ibdm", 1790 "\tsend_classportinfo: gid info 0x%p", gid_info); 1791 1792 /* 1793 * Send command to get classportinfo attribute. Allocate a IBMF 1794 * packet and initialize the packet. 1795 */ 1796 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 1797 &msg) != IBMF_SUCCESS) { 1798 IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail"); 1799 return (IBDM_FAILURE); 1800 } 1801 1802 ibdm_alloc_send_buffers(msg); 1803 1804 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 1805 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 1806 msg->im_local_addr.ia_remote_qno = 1; 1807 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 1808 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 1809 1810 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 1811 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 1812 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 1813 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 1814 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 1815 hdr->Status = 0; 1816 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 1817 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 1818 hdr->AttributeModifier = 0; 1819 1820 cb_args = &gid_info->gl_cpi_cb_args; 1821 cb_args->cb_gid_info = gid_info; 1822 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 1823 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO; 1824 1825 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 1826 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 1827 1828 IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: " 1829 "timeout id %x", gid_info->gl_timeout_id); 1830 1831 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 1832 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 1833 IBTF_DPRINTF_L2("ibdm", 1834 "\tsend_classportinfo: ibmf send failed"); 1835 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 1836 } 1837 1838 return (IBDM_SUCCESS); 1839 } 1840 1841 1842 /* 1843 * ibdm_handle_classportinfo() 1844 * Invoked by the IBMF when the classportinfo request is completed. 
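 *	The handler caches the RespTimeValue and any redirection
 *	information carried in the response, and then reuses the same
 *	IBMF message to issue the follow-up IOUnitInfo request.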
1845 */ 1846 static void 1847 ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl, 1848 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 1849 { 1850 void *data; 1851 timeout_id_t timeout_id; 1852 ib_mad_hdr_t *hdr; 1853 ibdm_mad_classportinfo_t *cpi; 1854 1855 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl " 1856 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info); 1857 1858 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) { 1859 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: " 1860 "Not a ClassPortInfo resp"); 1861 *flag |= IBDM_IBMF_PKT_UNEXP_RESP; 1862 return; 1863 } 1864 1865 /* 1866 * Verify whether timeout handler is created/active. 1867 * If created/ active, cancel the timeout handler 1868 */ 1869 mutex_enter(&gid_info->gl_mutex); 1870 if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) { 1871 IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp"); 1872 *flag |= IBDM_IBMF_PKT_DUP_RESP; 1873 mutex_exit(&gid_info->gl_mutex); 1874 return; 1875 } 1876 gid_info->gl_iou_cb_args.cb_req_type = 0; 1877 if (gid_info->gl_timeout_id) { 1878 timeout_id = gid_info->gl_timeout_id; 1879 mutex_exit(&gid_info->gl_mutex); 1880 IBTF_DPRINTF_L5("ibdm", "handle_ioclassportinfo: " 1881 "gl_timeout_id = 0x%x", timeout_id); 1882 if (untimeout(timeout_id) == -1) { 1883 IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: " 1884 "untimeout gl_timeout_id failed"); 1885 } 1886 mutex_enter(&gid_info->gl_mutex); 1887 gid_info->gl_timeout_id = 0; 1888 } 1889 gid_info->gl_state = IBDM_GET_IOUNITINFO; 1890 gid_info->gl_pending_cmds++; 1891 mutex_exit(&gid_info->gl_mutex); 1892 1893 data = msg->im_msgbufs_recv.im_bufs_cl_data; 1894 cpi = (ibdm_mad_classportinfo_t *)data; 1895 1896 /* 1897 * Cache the "RespTimeValue" and redirection information in the 1898 * global gid list data structure. This cached information will 1899 * be used to send any further requests to the GID. 1900 */ 1901 gid_info->gl_resp_timeout = 1902 (b2h32(cpi->RespTimeValue) & 0x1F); 1903 1904 gid_info->gl_redirected = ((IBDM_IN_IBMFMSG_STATUS(msg) & 1905 MAD_STATUS_REDIRECT_REQUIRED) ? B_TRUE: B_FALSE); 1906 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 1907 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 1908 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 1909 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 1910 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 1911 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 1912 1913 ibdm_dump_classportinfo(cpi); 1914 1915 /* 1916 * Send IOUnitInfo request 1917 * Reuse previously allocated IBMF packet for sending ClassPortInfo 1918 * Check whether DM agent on the remote node requested redirection 1919 * If so, send the request to the redirect DGID/DLID/PKEY/QP. 
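 * (Addressing note: without redirection the request goes to the GSI,
 * i.e. remote QP 1 with IB_GSI_QKEY; with redirection it goes to the
 * LID/QP/P_Key/Q_Key cached from the ClassPortInfo above.)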
1920 */ 1921 ibdm_alloc_send_buffers(msg); 1922 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 1923 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 1924 1925 if (gid_info->gl_redirected == B_TRUE) { 1926 if (gid_info->gl_redirect_dlid != 0) { 1927 msg->im_local_addr.ia_remote_lid = 1928 gid_info->gl_redirect_dlid; 1929 } 1930 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 1931 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 1932 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 1933 } else { 1934 msg->im_local_addr.ia_remote_qno = 1; 1935 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 1936 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 1937 } 1938 1939 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 1940 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 1941 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 1942 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 1943 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 1944 hdr->Status = 0; 1945 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 1946 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 1947 hdr->AttributeModifier = 0; 1948 1949 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 1950 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 1951 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 1952 1953 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 1954 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 1955 1956 IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:" 1957 "timeout %x", gid_info->gl_timeout_id); 1958 1959 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL, 1960 ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) { 1961 IBTF_DPRINTF_L2("ibdm", 1962 "\thandle_classportinfo: msg transport failed"); 1963 ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args); 1964 } 1965 (*flag) |= IBDM_IBMF_PKT_REUSED; 1966 } 1967 1968 1969 /* 1970 * ibdm_send_iounitinfo: 1971 * Sends a DM request to get IOU unitinfo. 1972 */ 1973 static int 1974 ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info) 1975 { 1976 ibmf_msg_t *msg; 1977 ib_mad_hdr_t *hdr; 1978 1979 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info); 1980 1981 /* 1982 * Send command to get iounitinfo attribute. Allocate a IBMF 1983 * packet and initialize the packet. 
1984 */ 1985 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) != 1986 IBMF_SUCCESS) { 1987 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail"); 1988 return (IBDM_FAILURE); 1989 } 1990 1991 ibdm_alloc_send_buffers(msg); 1992 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 1993 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 1994 msg->im_local_addr.ia_remote_qno = 1; 1995 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 1996 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 1997 1998 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 1999 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2000 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2001 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2002 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2003 hdr->Status = 0; 2004 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2005 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 2006 hdr->AttributeModifier = 0; 2007 2008 gid_info->gl_iou_cb_args.cb_gid_info = gid_info; 2009 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt; 2010 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO; 2011 2012 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2013 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2014 2015 IBTF_DPRINTF_L5("ibdm", "send_iouunitinfo:" 2016 "timeout %x", gid_info->gl_timeout_id); 2017 2018 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 2019 NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != 2020 IBMF_SUCCESS) { 2021 IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed"); 2022 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, 2023 msg, &gid_info->gl_iou_cb_args); 2024 } 2025 return (IBDM_SUCCESS); 2026 } 2027 2028 /* 2029 * ibdm_handle_iounitinfo() 2030 * Invoked by the IBMF when IO Unitinfo request is completed. 
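 * The reported iou_changeid is compared with the cached copy; if it is
 * unchanged the probe is complete. Otherwise the cached IOU info is
 * reallocated, an IOU DiagCode request is issued and an
 * IOCControllerProfile request is sent for every slot reporting an IOC.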
2031 */ 2032 static void 2033 ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl, 2034 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2035 { 2036 int ii, first = B_TRUE; 2037 int num_iocs; 2038 size_t size; 2039 uchar_t slot_info; 2040 timeout_id_t timeout_id; 2041 ib_mad_hdr_t *hdr; 2042 ibdm_ioc_info_t *ioc_info; 2043 ib_dm_io_unitinfo_t *iou_info; 2044 ib_dm_io_unitinfo_t *giou_info; 2045 ibdm_timeout_cb_args_t *cb_args; 2046 2047 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo:" 2048 " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info); 2049 2050 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) { 2051 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: " 2052 "Unexpected response"); 2053 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2054 return; 2055 } 2056 2057 mutex_enter(&gid_info->gl_mutex); 2058 if (gid_info->gl_state != IBDM_GET_IOUNITINFO) { 2059 IBTF_DPRINTF_L4("ibdm", 2060 "\thandle_iounitinfo: DUP resp"); 2061 mutex_exit(&gid_info->gl_mutex); 2062 (*flag) = IBDM_IBMF_PKT_DUP_RESP; 2063 return; 2064 } 2065 gid_info->gl_iou_cb_args.cb_req_type = 0; 2066 if (gid_info->gl_timeout_id) { 2067 timeout_id = gid_info->gl_timeout_id; 2068 mutex_exit(&gid_info->gl_mutex); 2069 IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: " 2070 "gl_timeout_id = 0x%x", timeout_id); 2071 if (untimeout(timeout_id) == -1) { 2072 IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: " 2073 "untimeout gl_timeout_id failed"); 2074 } 2075 mutex_enter(&gid_info->gl_mutex); 2076 gid_info->gl_timeout_id = 0; 2077 } 2078 gid_info->gl_state = IBDM_GET_IOC_DETAILS; 2079 2080 iou_info = IBDM_IN_IBMFMSG2IOU(msg); 2081 ibdm_dump_iounitinfo(iou_info); 2082 num_iocs = iou_info->iou_num_ctrl_slots; 2083 /* 2084 * check if number of IOCs reported is zero? if yes, return. 2085 * when num_iocs are reported zero internal IOC database needs 2086 * to be updated. To ensure that save the number of IOCs in 2087 * the new field "gl_num_iocs". Use a new field instead of 2088 * "giou_info->iou_num_ctrl_slots" as that would prevent 2089 * an unnecessary kmem_alloc/kmem_free when num_iocs is 0. 2090 */ 2091 if (num_iocs == 0 && gid_info->gl_num_iocs == 0) { 2092 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's"); 2093 mutex_exit(&gid_info->gl_mutex); 2094 return; 2095 } 2096 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs); 2097 2098 /* 2099 * if there is an existing gl_iou (IOU has been probed before) 2100 * check if the "iou_changeid" is same as saved entry in 2101 * "giou_info->iou_changeid". 2102 * (note: this logic can prevent IOC enumeration if a given 2103 * vendor doesn't support setting iou_changeid field for its IOU) 2104 * 2105 * if there is an existing gl_iou and iou_changeid has changed : 2106 * free up existing gl_iou info and its related structures. 2107 * reallocate gl_iou info all over again. 
2108 * if we donot free this up; then this leads to memory leaks 2109 */ 2110 if (gid_info->gl_iou) { 2111 giou_info = &gid_info->gl_iou->iou_info; 2112 if (iou_info->iou_changeid == giou_info->iou_changeid) { 2113 IBTF_DPRINTF_L3("ibdm", 2114 "\thandle_iounitinfo: no IOCs changed"); 2115 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 2116 mutex_exit(&gid_info->gl_mutex); 2117 return; 2118 } 2119 if (ibdm_free_iou_info(gid_info)) { 2120 IBTF_DPRINTF_L3("ibdm", 2121 "\thandle_iounitinfo: failed to cleanup resources"); 2122 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 2123 mutex_exit(&gid_info->gl_mutex); 2124 return; 2125 } 2126 } 2127 2128 size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t); 2129 gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP); 2130 giou_info = &gid_info->gl_iou->iou_info; 2131 gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *) 2132 ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t)); 2133 2134 giou_info->iou_num_ctrl_slots = gid_info->gl_num_iocs = num_iocs; 2135 giou_info->iou_flag = iou_info->iou_flag; 2136 bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128); 2137 giou_info->iou_changeid = b2h16(iou_info->iou_changeid); 2138 gid_info->gl_pending_cmds += num_iocs; 2139 gid_info->gl_pending_cmds += 1; /* for diag code */ 2140 mutex_exit(&gid_info->gl_mutex); 2141 2142 if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) { 2143 mutex_enter(&gid_info->gl_mutex); 2144 gid_info->gl_pending_cmds--; 2145 mutex_exit(&gid_info->gl_mutex); 2146 } 2147 /* 2148 * Parallelize getting IOC controller profiles from here. 2149 * Allocate IBMF packets and send commands to get IOC profile for 2150 * each IOC present on the IOU. 2151 */ 2152 for (ii = 0; ii < num_iocs; ii++) { 2153 /* 2154 * Check whether IOC is present in the slot 2155 * Series of nibbles (in the field iou_ctrl_list) represents 2156 * a slot in the IOU. 2157 * Byte format: 76543210 2158 * Bits 0-3 of first byte represent Slot 2 2159 * bits 4-7 of first byte represent slot 1, 2160 * bits 0-3 of second byte represent slot 4 and so on 2161 * Each 4-bit nibble has the following meaning 2162 * 0x0 : IOC not installed 2163 * 0x1 : IOC is present 2164 * 0xf : Slot does not exist 2165 * and all other values are reserved. 2166 */ 2167 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii); 2168 slot_info = giou_info->iou_ctrl_list[(ii/2)]; 2169 if ((ii % 2) == 0) 2170 slot_info = (slot_info >> 4); 2171 2172 if ((slot_info & 0xf) != 1) { 2173 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2174 "No IOC is present in the slot = %d", ii); 2175 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 2176 mutex_enter(&gid_info->gl_mutex); 2177 gid_info->gl_pending_cmds--; 2178 mutex_exit(&gid_info->gl_mutex); 2179 continue; 2180 } 2181 2182 /* 2183 * Re use the already allocated packet (for IOUnitinfo) to 2184 * send the first IOC controller attribute. 
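 * (Slot decode sketch, illustrative values only: the nibble for 0-based
 * slot index ii is (iou_ctrl_list[ii / 2] >> ((ii % 2) ? 0 : 4)) & 0xf,
 * so iou_ctrl_list[0] == 0x1f means slot 1 is populated (0x1), slot 2
 * does not exist (0xf), and only slot 1 is probed.)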
Allocate new 2185 * IBMF packets for the rest of the IOC's 2186 */ 2187 if (first != B_TRUE) { 2188 msg = NULL; 2189 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2190 &msg) != IBMF_SUCCESS) { 2191 IBTF_DPRINTF_L4("ibdm", "\thandle_iouintinfo: " 2192 "IBMF packet allocation failed"); 2193 mutex_enter(&gid_info->gl_mutex); 2194 gid_info->gl_pending_cmds--; 2195 mutex_exit(&gid_info->gl_mutex); 2196 continue; 2197 } 2198 2199 } 2200 2201 /* allocate send buffers for all messages */ 2202 ibdm_alloc_send_buffers(msg); 2203 2204 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2205 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2206 if (gid_info->gl_redirected == B_TRUE) { 2207 if (gid_info->gl_redirect_dlid != 0) { 2208 msg->im_local_addr.ia_remote_lid = 2209 gid_info->gl_redirect_dlid; 2210 } 2211 msg->im_local_addr.ia_remote_qno = 2212 gid_info->gl_redirect_QP; 2213 msg->im_local_addr.ia_p_key = 2214 gid_info->gl_redirect_pkey; 2215 msg->im_local_addr.ia_q_key = 2216 gid_info->gl_redirect_qkey; 2217 } else { 2218 msg->im_local_addr.ia_remote_qno = 1; 2219 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2220 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2221 } 2222 2223 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2224 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2225 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2226 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2227 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2228 hdr->Status = 0; 2229 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2230 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 2231 hdr->AttributeModifier = h2b32(ii + 1); 2232 2233 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_INVALID; 2234 cb_args = &ioc_info->ioc_cb_args; 2235 cb_args->cb_gid_info = gid_info; 2236 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2237 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 2238 cb_args->cb_ioc_num = ii; 2239 2240 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2241 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2242 2243 IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:" 2244 "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii); 2245 2246 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2247 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2248 IBTF_DPRINTF_L2("ibdm", 2249 "\thandle_iounitinfo: msg transport failed"); 2250 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2251 } 2252 (*flag) |= IBDM_IBMF_PKT_REUSED; 2253 first = B_FALSE; 2254 gid_info->gl_iou->iou_niocs_probe_in_progress++; 2255 } 2256 } 2257 2258 2259 /* 2260 * ibdm_handle_ioc_profile() 2261 * Invoked by the IBMF when the IOCControllerProfile request 2262 * gets completed 2263 */ 2264 static void 2265 ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl, 2266 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag) 2267 { 2268 int first = B_TRUE, reprobe = 0; 2269 uint_t ii, ioc_no, srv_start; 2270 uint_t nserv_entries; 2271 timeout_id_t timeout_id; 2272 ib_mad_hdr_t *hdr; 2273 ibdm_ioc_info_t *ioc_info; 2274 ibdm_timeout_cb_args_t *cb_args; 2275 ib_dm_ioc_ctrl_profile_t *ioc, *gioc; 2276 2277 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2278 " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info); 2279 2280 ioc = IBDM_IN_IBMFMSG2IOC(msg); 2281 /* 2282 * Check whether we know this IOC already 2283 * This will return NULL if reprobe is in progress 2284 * IBDM_IOC_STATE_REPROBE_PROGRESS will be set. 2285 * Do not hold mutexes here. 
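 * (ibdm_is_ioc_present() walks the whole gid list and acquires
 * ibdm_mutex as well as each gid's gl_mutex, which is why the caller
 * must not hold any of these mutexes here.)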
2286 */ 2287 if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) { 2288 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:" 2289 "IOC guid %llx is present", ioc->ioc_guid); 2290 return; 2291 } 2292 ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2293 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1); 2294 2295 /* Make sure that the IOC index is within the valid range */ 2296 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 2297 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: " 2298 "IOC index Out of range, index %d", ioc_no); 2299 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2300 return; 2301 } 2302 ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1]; 2303 ioc_info->ioc_iou_info = gid_info->gl_iou; 2304 2305 mutex_enter(&gid_info->gl_mutex); 2306 if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) { 2307 reprobe = 1; 2308 ioc_info->ioc_prev_serv = ioc_info->ioc_serv; 2309 ioc_info->ioc_serv = NULL; 2310 ioc_info->ioc_prev_serv_cnt = 2311 ioc_info->ioc_profile.ioc_service_entries; 2312 } else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) { 2313 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response" 2314 "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state); 2315 mutex_exit(&gid_info->gl_mutex); 2316 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2317 return; 2318 } 2319 ioc_info->ioc_cb_args.cb_req_type = 0; 2320 if (ioc_info->ioc_timeout_id) { 2321 timeout_id = ioc_info->ioc_timeout_id; 2322 mutex_exit(&gid_info->gl_mutex); 2323 IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: " 2324 "ioc_timeout_id = 0x%x", timeout_id); 2325 if (untimeout(timeout_id) == -1) { 2326 IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: " 2327 "untimeout ioc_timeout_id failed"); 2328 } 2329 mutex_enter(&gid_info->gl_mutex); 2330 ioc_info->ioc_timeout_id = 0; 2331 } 2332 2333 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS; 2334 if (reprobe == 0) { 2335 ioc_info->ioc_iou_guid = gid_info->gl_nodeguid; 2336 ioc_info->ioc_nodeguid = gid_info->gl_nodeguid; 2337 } 2338 2339 /* 2340 * Save all the IOC information in the global structures. 2341 * Note that the wire format is big endian and SPARC is also 2342 * big endian, so no conversion is actually needed for these fields. 2343 * The conversion routines used below are no-ops on SPARC 2344 * machines, whereas they do the required byte swapping on little 2345 * endian machines such as Intel processors.
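 * (For example, b2h16() leaves its argument unchanged on a big endian
 * host and byte swaps it on a little endian one.)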
2346 */ 2347 gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile; 2348 2349 /* 2350 * Restrict updates to only port GIDs and service entries during reprobe 2351 */ 2352 if (reprobe == 0) { 2353 gioc->ioc_guid = b2h64(ioc->ioc_guid); 2354 gioc->ioc_vendorid = 2355 (b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK); 2356 gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid); 2357 gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver); 2358 gioc->ioc_subsys_vendorid = 2359 (b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK); 2360 gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id); 2361 gioc->ioc_io_class = b2h16(ioc->ioc_io_class); 2362 gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass); 2363 gioc->ioc_protocol = b2h16(ioc->ioc_protocol); 2364 gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver); 2365 gioc->ioc_send_msg_qdepth = 2366 b2h16(ioc->ioc_send_msg_qdepth); 2367 gioc->ioc_rdma_read_qdepth = 2368 b2h16(ioc->ioc_rdma_read_qdepth); 2369 gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz); 2370 gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz); 2371 gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask; 2372 bcopy(ioc->ioc_id_string, gioc->ioc_id_string, 2373 IB_DM_IOC_ID_STRING_LEN); 2374 2375 ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode; 2376 ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid; 2377 ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK & 2378 gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE; 2379 2380 if (ioc_info->ioc_diagdeviceid == B_TRUE) 2381 gid_info->gl_pending_cmds++; 2382 } 2383 gioc->ioc_service_entries = ioc->ioc_service_entries; 2384 gid_info->gl_pending_cmds += (gioc->ioc_service_entries/4); 2385 if (gioc->ioc_service_entries % 4) 2386 gid_info->gl_pending_cmds++; 2387 2388 mutex_exit(&gid_info->gl_mutex); 2389 2390 ibdm_dump_ioc_profile(gioc); 2391 2392 if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) { 2393 if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) { 2394 mutex_enter(&gid_info->gl_mutex); 2395 gid_info->gl_pending_cmds--; 2396 mutex_exit(&gid_info->gl_mutex); 2397 } 2398 } 2399 ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc( 2400 (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)), 2401 KM_SLEEP); 2402 2403 /* 2404 * A single ServiceEntries request can return at most four service 2405 * entries. If an IOC has more than four service entries, calculate 2406 * the number of requests needed and send them in parallel.
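 * (Illustrative example: an IOC reporting 6 service entries needs two
 * requests, one for entries 0-3 and one for entries 4-5; for IOC slot 1
 * the first request carries AttributeModifier (1 << 16) | (0 << 8) | 3,
 * matching the encoding built below.)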
2407 */ 2408 nserv_entries = ioc->ioc_service_entries; 2409 ii = 0; 2410 while (nserv_entries) { 2411 if (first != B_TRUE) { 2412 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP, 2413 &msg) != IBMF_SUCCESS) { 2414 continue; 2415 } 2416 2417 } 2418 ibdm_alloc_send_buffers(msg); 2419 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2420 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2421 if (gid_info->gl_redirected == B_TRUE) { 2422 if (gid_info->gl_redirect_dlid != 0) { 2423 msg->im_local_addr.ia_remote_lid = 2424 gid_info->gl_redirect_dlid; 2425 } 2426 msg->im_local_addr.ia_remote_qno = 2427 gid_info->gl_redirect_QP; 2428 msg->im_local_addr.ia_p_key = 2429 gid_info->gl_redirect_pkey; 2430 msg->im_local_addr.ia_q_key = 2431 gid_info->gl_redirect_qkey; 2432 } else { 2433 msg->im_local_addr.ia_remote_qno = 1; 2434 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2435 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2436 } 2437 2438 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2439 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2440 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2441 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2442 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2443 hdr->Status = 0; 2444 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2445 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 2446 2447 srv_start = ii * 4; 2448 cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args; 2449 cb_args->cb_gid_info = gid_info; 2450 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2451 cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS; 2452 cb_args->cb_srvents_start = srv_start; 2453 cb_args->cb_ioc_num = ioc_no - 1; 2454 2455 if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) { 2456 hdr->AttributeModifier = h2b32(((ioc_no << 16) | 2457 (srv_start << 8) | (srv_start + 3))); 2458 nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ; 2459 cb_args->cb_srvents_end = (cb_args->cb_srvents_start + 2460 IBDM_MAX_SERV_ENTRIES_PER_REQ - 1); 2461 } else { 2462 hdr->AttributeModifier = h2b32(((ioc_no << 16) | 2463 (srv_start << 8) | (srv_start + 2464 (nserv_entries -1)))); 2465 cb_args->cb_srvents_end = 2466 (cb_args->cb_srvents_start + nserv_entries - 1); 2467 nserv_entries = 0; 2468 } 2469 2470 ioc_info->ioc_serv[srv_start].se_timeout_id = timeout( 2471 ibdm_pkt_timeout_hdlr, cb_args, 2472 IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2473 2474 IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:" 2475 "timeout %x, ioc %d srv %d", 2476 ioc_info->ioc_serv[srv_start].se_timeout_id, 2477 ioc_no - 1, srv_start); 2478 2479 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, 2480 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2481 IBTF_DPRINTF_L2("ibdm", 2482 "\thandle_ioc_profile: msg send failed"); 2483 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args); 2484 } 2485 (*flag) |= IBDM_IBMF_PKT_REUSED; 2486 first = B_FALSE; 2487 ii++; 2488 } 2489 } 2490 2491 2492 /* 2493 * ibdm_handle_srventry_mad() 2494 */ 2495 static void 2496 ibdm_handle_srventry_mad(ibmf_msg_t *msg, 2497 ibdm_dp_gidinfo_t *gid_info, int *flag) 2498 { 2499 uint_t ii, ioc_no, attrmod; 2500 uint_t nentries, start, end; 2501 timeout_id_t timeout_id; 2502 ib_dm_srv_t *srv_ents; 2503 ibdm_ioc_info_t *ioc_info; 2504 ibdm_srvents_info_t *gsrv_ents; 2505 2506 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:" 2507 " IBMF msg %p gid info %p", msg, gid_info); 2508 2509 srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg); 2510 /* 2511 * Get the start and end index of the service entries 2512 * Upper 16 bits identify the IOC 2513 * Lower 16 bits specify the range of service entries 2514 * LSB 
specifies (Big endian) end of the range 2515 * MSB specifies (Big endian) start of the range 2516 */ 2517 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 2518 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 2519 start = ((attrmod >> 8) & IBDM_8_BIT_MASK); 2520 end = (attrmod & IBDM_8_BIT_MASK); 2521 2522 /* Make sure that IOC index is with the valid range */ 2523 if ((ioc_no < 1) | 2524 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) { 2525 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2526 "IOC index Out of range, index %d", ioc_no); 2527 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2528 return; 2529 } 2530 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 2531 2532 /* 2533 * Make sure that the "start" and "end" service indexes are 2534 * with in the valid range 2535 */ 2536 nentries = ioc_info->ioc_profile.ioc_service_entries; 2537 if ((start > end) | (start >= nentries) | (end >= nentries)) { 2538 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2539 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries); 2540 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 2541 return; 2542 } 2543 gsrv_ents = &ioc_info->ioc_serv[start]; 2544 mutex_enter(&gid_info->gl_mutex); 2545 if (gsrv_ents->se_state != IBDM_SE_INVALID) { 2546 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: " 2547 "already known, ioc %d, srv %d, se_state %x", 2548 ioc_no - 1, start, gsrv_ents->se_state); 2549 mutex_exit(&gid_info->gl_mutex); 2550 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2551 return; 2552 } 2553 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0; 2554 if (ioc_info->ioc_serv[start].se_timeout_id) { 2555 IBTF_DPRINTF_L2("ibdm", 2556 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start); 2557 timeout_id = ioc_info->ioc_serv[start].se_timeout_id; 2558 mutex_exit(&gid_info->gl_mutex); 2559 IBTF_DPRINTF_L5("ibdm", "handle_srverntry_mad: " 2560 "se_timeout_id = 0x%x", timeout_id); 2561 if (untimeout(timeout_id) == -1) { 2562 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: " 2563 "untimeout se_timeout_id failed"); 2564 } 2565 mutex_enter(&gid_info->gl_mutex); 2566 ioc_info->ioc_serv[start].se_timeout_id = 0; 2567 } 2568 2569 gsrv_ents->se_state = IBDM_SE_VALID; 2570 mutex_exit(&gid_info->gl_mutex); 2571 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) { 2572 gsrv_ents->se_attr.srv_id = srv_ents->srv_id; 2573 bcopy(srv_ents->srv_name, 2574 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN); 2575 ibdm_dump_service_entries(&gsrv_ents->se_attr); 2576 } 2577 } 2578 2579 2580 /* 2581 * ibdm_get_diagcode: 2582 * Send request to get IOU/IOC diag code 2583 * Returns IBDM_SUCCESS/IBDM_FAILURE 2584 */ 2585 static int 2586 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr) 2587 { 2588 ibmf_msg_t *msg; 2589 ib_mad_hdr_t *hdr; 2590 ibdm_ioc_info_t *ioc; 2591 ibdm_timeout_cb_args_t *cb_args; 2592 timeout_id_t *timeout_id; 2593 2594 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d", 2595 gid_info, attr); 2596 2597 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 2598 &msg) != IBMF_SUCCESS) { 2599 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail"); 2600 return (IBDM_FAILURE); 2601 } 2602 2603 ibdm_alloc_send_buffers(msg); 2604 2605 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 2606 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 2607 if (gid_info->gl_redirected == B_TRUE) { 2608 if (gid_info->gl_redirect_dlid != 0) { 2609 msg->im_local_addr.ia_remote_lid = 2610 gid_info->gl_redirect_dlid; 2611 } 2612 2613 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 2614 
msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 2615 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 2616 } else { 2617 msg->im_local_addr.ia_remote_qno = 1; 2618 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 2619 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 2620 } 2621 2622 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 2623 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 2624 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 2625 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 2626 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 2627 hdr->Status = 0; 2628 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 2629 2630 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 2631 hdr->AttributeModifier = h2b32(attr); 2632 2633 if (attr == 0) { 2634 cb_args = &gid_info->gl_iou_cb_args; 2635 gid_info->gl_iou->iou_dc_valid = B_FALSE; 2636 cb_args->cb_ioc_num = 0; 2637 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE; 2638 timeout_id = &gid_info->gl_timeout_id; 2639 } else { 2640 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1)); 2641 ioc->ioc_dc_valid = B_FALSE; 2642 cb_args = &ioc->ioc_dc_cb_args; 2643 cb_args->cb_ioc_num = attr - 1; 2644 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE; 2645 timeout_id = &ioc->ioc_dc_timeout_id; 2646 } 2647 cb_args->cb_gid_info = gid_info; 2648 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 2649 cb_args->cb_srvents_start = 0; 2650 2651 2652 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 2653 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 2654 2655 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:" 2656 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num); 2657 2658 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 2659 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 2660 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed"); 2661 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 2662 } 2663 return (IBDM_SUCCESS); 2664 } 2665 2666 /* 2667 * ibdm_handle_diagcode: 2668 * Process the DiagCode MAD response and update local DM 2669 * data structure. 
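 * An AttributeModifier of 0 carries the IOU DiagCode; a non-zero
 * modifier N carries the DiagCode of the IOC in slot N.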
2670 */ 2671 static void 2672 ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg, 2673 ibdm_dp_gidinfo_t *gid_info, int *flag) 2674 { 2675 uint16_t attrmod, *diagcode; 2676 ibdm_iou_info_t *iou; 2677 ibdm_ioc_info_t *ioc; 2678 timeout_id_t timeout_id; 2679 ibdm_timeout_cb_args_t *cb_args; 2680 2681 diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data; 2682 2683 mutex_enter(&gid_info->gl_mutex); 2684 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg); 2685 iou = gid_info->gl_iou; 2686 if (attrmod == 0) { 2687 if (iou->iou_dc_valid != B_FALSE) { 2688 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2689 IBTF_DPRINTF_L4("ibdm", 2690 "\thandle_diagcode: Duplicate IOU DiagCode"); 2691 mutex_exit(&gid_info->gl_mutex); 2692 return; 2693 } 2694 cb_args = &gid_info->gl_iou_cb_args; 2695 cb_args->cb_req_type = 0; 2696 iou->iou_diagcode = b2h16(*diagcode); 2697 iou->iou_dc_valid = B_TRUE; 2698 if (gid_info->gl_timeout_id) { 2699 timeout_id = gid_info->gl_timeout_id; 2700 mutex_exit(&gid_info->gl_mutex); 2701 IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: " 2702 "gl_timeout_id = 0x%x", timeout_id); 2703 if (untimeout(timeout_id) == -1) { 2704 IBTF_DPRINTF_L2("ibdm", "handle_diagcode: " 2705 "untimeout gl_timeout_id failed"); 2706 } 2707 mutex_enter(&gid_info->gl_mutex); 2708 gid_info->gl_timeout_id = 0; 2709 } 2710 } else { 2711 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1)); 2712 if (ioc->ioc_dc_valid != B_FALSE) { 2713 (*flag) |= IBDM_IBMF_PKT_DUP_RESP; 2714 IBTF_DPRINTF_L4("ibdm", 2715 "\thandle_diagcode: Duplicate IOC DiagCode"); 2716 mutex_exit(&gid_info->gl_mutex); 2717 return; 2718 } 2719 cb_args = &ioc->ioc_dc_cb_args; 2720 cb_args->cb_req_type = 0; 2721 ioc->ioc_diagcode = b2h16(*diagcode); 2722 ioc->ioc_dc_valid = B_TRUE; 2723 timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id; 2724 if (timeout_id) { 2725 mutex_exit(&gid_info->gl_mutex); 2726 IBTF_DPRINTF_L5("ibdm", "handle_diagcode: " 2727 "timeout_id = 0x%x", timeout_id); 2728 if (untimeout(timeout_id) == -1) { 2729 IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: " 2730 "untimeout ioc_dc_timeout_id failed"); 2731 } 2732 mutex_enter(&gid_info->gl_mutex); 2733 iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0; 2734 } 2735 } 2736 mutex_exit(&gid_info->gl_mutex); 2737 2738 IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x" 2739 "attrmod : 0x%x", b2h16(*diagcode), attrmod); 2740 } 2741 2742 2743 /* 2744 * ibdm_is_ioc_present() 2745 * Return ibdm_ioc_info_t if IOC guid is found in the global gid list 2746 */ 2747 static ibdm_ioc_info_t * 2748 ibdm_is_ioc_present(ib_guid_t ioc_guid, 2749 ibdm_dp_gidinfo_t *gid_info, int *flag) 2750 { 2751 int ii; 2752 ibdm_ioc_info_t *ioc; 2753 ibdm_dp_gidinfo_t *head; 2754 ib_dm_io_unitinfo_t *iou; 2755 2756 mutex_enter(&ibdm.ibdm_mutex); 2757 head = ibdm.ibdm_dp_gidlist_head; 2758 while (head) { 2759 mutex_enter(&head->gl_mutex); 2760 if (head->gl_iou == NULL) { 2761 mutex_exit(&head->gl_mutex); 2762 head = head->gl_next; 2763 continue; 2764 } 2765 iou = &head->gl_iou->iou_info; 2766 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 2767 ioc = IBDM_GIDINFO2IOCINFO(head, ii); 2768 if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) && 2769 (ioc->ioc_profile.ioc_guid == ioc_guid)) { 2770 if (gid_info == head) { 2771 *flag |= IBDM_IBMF_PKT_DUP_RESP; 2772 } else if (ibdm_check_dgid(head->gl_dgid_lo, 2773 head->gl_dgid_hi) != NULL) { 2774 IBTF_DPRINTF_L4("ibdm", "\tis_ioc_" 2775 "present: gid not present"); 2776 ibdm_add_to_gl_gid(gid_info, head); 2777 } 2778 mutex_exit(&head->gl_mutex); 2779 
mutex_exit(&ibdm.ibdm_mutex); 2780 return (ioc); 2781 } 2782 } 2783 mutex_exit(&head->gl_mutex); 2784 head = head->gl_next; 2785 } 2786 mutex_exit(&ibdm.ibdm_mutex); 2787 return (NULL); 2788 } 2789 2790 2791 /* 2792 * ibdm_ibmf_send_cb() 2793 * IBMF invokes this callback routine after posting the DM MAD to 2794 * the HCA. 2795 */ 2796 /*ARGSUSED*/ 2797 static void 2798 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg) 2799 { 2800 ibdm_dump_ibmf_msg(ibmf_msg, 1); 2801 ibdm_free_send_buffers(ibmf_msg); 2802 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) { 2803 IBTF_DPRINTF_L4("ibdm", 2804 "\tibmf_send_cb: IBMF free msg failed"); 2805 } 2806 } 2807 2808 2809 /* 2810 * ibdm_ibmf_recv_cb() 2811 * Invoked by the IBMF when a response to the one of the DM requests 2812 * is received. 2813 */ 2814 /*ARGSUSED*/ 2815 static void 2816 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 2817 { 2818 ibdm_taskq_args_t *taskq_args; 2819 2820 /* 2821 * If the taskq enable is set then dispatch a taskq to process 2822 * the MAD, otherwise just process it on this thread 2823 */ 2824 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) { 2825 ibdm_process_incoming_mad(ibmf_hdl, msg, arg); 2826 return; 2827 } 2828 2829 /* 2830 * create a taskq and dispatch it to process the incoming MAD 2831 */ 2832 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP); 2833 if (taskq_args == NULL) { 2834 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for" 2835 "taskq_args"); 2836 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2837 IBTF_DPRINTF_L4("ibmf_recv_cb", 2838 "\tibmf_recv_cb: IBMF free msg failed"); 2839 } 2840 return; 2841 } 2842 taskq_args->tq_ibmf_handle = ibmf_hdl; 2843 taskq_args->tq_ibmf_msg = msg; 2844 taskq_args->tq_args = arg; 2845 2846 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args, 2847 TQ_NOSLEEP) == 0) { 2848 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed"); 2849 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2850 IBTF_DPRINTF_L4("ibmf_recv_cb", 2851 "\tibmf_recv_cb: IBMF free msg failed"); 2852 } 2853 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 2854 return; 2855 } 2856 2857 /* taskq_args are deleted in ibdm_recv_incoming_mad() */ 2858 } 2859 2860 2861 void 2862 ibdm_recv_incoming_mad(void *args) 2863 { 2864 ibdm_taskq_args_t *taskq_args; 2865 2866 taskq_args = (ibdm_taskq_args_t *)args; 2867 2868 IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: " 2869 "Processing incoming MAD via taskq"); 2870 2871 ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle, 2872 taskq_args->tq_ibmf_msg, taskq_args->tq_args); 2873 2874 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t)); 2875 } 2876 2877 2878 /* 2879 * Calls ibdm_process_incoming_mad with all function arguments extracted 2880 * from args 2881 */ 2882 /*ARGSUSED*/ 2883 static void 2884 ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg) 2885 { 2886 int flag = 0; 2887 int ret; 2888 uint64_t transaction_id; 2889 ib_mad_hdr_t *hdr; 2890 ibdm_dp_gidinfo_t *gid_info = NULL; 2891 2892 IBTF_DPRINTF_L4("ibdm", 2893 "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg); 2894 ibdm_dump_ibmf_msg(msg, 0); 2895 2896 /* 2897 * IBMF calls this routine for every DM MAD that arrives at this port. 2898 * But we handle only the responses for requests we sent. We drop all 2899 * the DM packets that does not have response bit set in the MAD 2900 * header(this eliminates all the requests sent to this port). 
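 * Each response is matched to its originating gid_info by TransactionID;
 * responses that do not match any outstanding transaction are dropped.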
2901 * We handle only DM class version 1 MAD's 2902 */ 2903 hdr = IBDM_IN_IBMFMSG_MADHDR(msg); 2904 if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) { 2905 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2906 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 2907 "IBMF free msg failed DM request drop it"); 2908 } 2909 return; 2910 } 2911 2912 transaction_id = b2h64(hdr->TransactionID); 2913 2914 mutex_enter(&ibdm.ibdm_mutex); 2915 gid_info = ibdm.ibdm_dp_gidlist_head; 2916 while (gid_info) { 2917 if (gid_info->gl_transactionID == transaction_id) 2918 break; 2919 gid_info = gid_info->gl_next; 2920 } 2921 mutex_exit(&ibdm.ibdm_mutex); 2922 2923 if (gid_info == NULL) { 2924 /* Drop the packet */ 2925 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID" 2926 " does not match: 0x%llx", transaction_id); 2927 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2928 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 2929 "IBMF free msg failed DM request drop it"); 2930 } 2931 return; 2932 } 2933 2934 /* Handle redirection for all the MAD's, except ClassPortInfo */ 2935 if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) && 2936 (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) { 2937 ret = ibdm_handle_redirection(msg, gid_info, &flag); 2938 if (ret == IBDM_SUCCESS) { 2939 return; 2940 } 2941 } else { 2942 uint_t gl_state; 2943 2944 mutex_enter(&gid_info->gl_mutex); 2945 gl_state = gid_info->gl_state; 2946 mutex_exit(&gid_info->gl_mutex); 2947 2948 switch (gl_state) { 2949 case IBDM_GET_CLASSPORTINFO: 2950 ibdm_handle_classportinfo( 2951 ibmf_hdl, msg, gid_info, &flag); 2952 break; 2953 2954 case IBDM_GET_IOUNITINFO: 2955 ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag); 2956 break; 2957 2958 case IBDM_GET_IOC_DETAILS: 2959 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 2960 2961 case IB_DM_ATTR_SERVICE_ENTRIES: 2962 ibdm_handle_srventry_mad(msg, gid_info, &flag); 2963 break; 2964 2965 case IB_DM_ATTR_IOC_CTRL_PROFILE: 2966 ibdm_handle_ioc_profile( 2967 ibmf_hdl, msg, gid_info, &flag); 2968 break; 2969 2970 case IB_DM_ATTR_DIAG_CODE: 2971 ibdm_handle_diagcode(msg, gid_info, &flag); 2972 break; 2973 2974 default: 2975 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 2976 "Error state, wrong attribute :-("); 2977 (void) ibmf_free_msg(ibmf_hdl, &msg); 2978 return; 2979 } 2980 break; 2981 default: 2982 IBTF_DPRINTF_L2("ibdm", 2983 "process_incoming_mad: Dropping the packet" 2984 " gl_state %x", gl_state); 2985 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2986 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 2987 "IBMF free msg failed DM request drop it"); 2988 } 2989 return; 2990 } 2991 } 2992 2993 if ((flag & IBDM_IBMF_PKT_DUP_RESP) || 2994 (flag & IBDM_IBMF_PKT_UNEXP_RESP)) { 2995 IBTF_DPRINTF_L2("ibdm", 2996 "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag); 2997 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 2998 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: " 2999 "IBMF free msg failed DM request drop it"); 3000 } 3001 return; 3002 } 3003 3004 mutex_enter(&gid_info->gl_mutex); 3005 if (gid_info->gl_pending_cmds < 1) { 3006 IBTF_DPRINTF_L2("ibdm", 3007 "\tprocess_incoming_mad: pending commands negative"); 3008 } 3009 if (--gid_info->gl_pending_cmds) { 3010 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: " 3011 "gid_info %p pending cmds %d", 3012 gid_info, gid_info->gl_pending_cmds); 3013 mutex_exit(&gid_info->gl_mutex); 3014 } else { 3015 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: Probing DONE"); 3016 gid_info->gl_state = 
IBDM_GID_PROBING_COMPLETE; 3017 mutex_exit(&gid_info->gl_mutex); 3018 ibdm_notify_newgid_iocs(gid_info); 3019 mutex_enter(&ibdm.ibdm_mutex); 3020 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3021 IBTF_DPRINTF_L4("ibdm", 3022 "\tprocess_incoming_mad: Wakeup"); 3023 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3024 cv_broadcast(&ibdm.ibdm_probe_cv); 3025 } 3026 mutex_exit(&ibdm.ibdm_mutex); 3027 } 3028 3029 /* 3030 * Do not deallocate the IBMF packet if atleast one request 3031 * is posted. IBMF packet is reused. 3032 */ 3033 if (!(flag & IBDM_IBMF_PKT_REUSED)) { 3034 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) { 3035 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: " 3036 "IBMF free msg failed DM request drop it"); 3037 } 3038 } 3039 } 3040 3041 3042 /* 3043 * ibdm_verify_mad_status() 3044 * Verifies the MAD status 3045 * Returns IBDM_SUCCESS if status is correct 3046 * Returns IBDM_FAILURE for bogus MAD status 3047 */ 3048 static int 3049 ibdm_verify_mad_status(ib_mad_hdr_t *hdr) 3050 { 3051 int ret = 0; 3052 3053 if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) || 3054 (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) { 3055 return (IBDM_FAILURE); 3056 } 3057 3058 if (b2h16(hdr->Status) == 0) 3059 ret = IBDM_SUCCESS; 3060 else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED) 3061 ret = IBDM_SUCCESS; 3062 else { 3063 IBTF_DPRINTF_L4("ibdm", 3064 "\tverify_mad_status: Stauts : 0x%x", b2h16(hdr->Status)); 3065 ret = IBDM_FAILURE; 3066 } 3067 return (ret); 3068 } 3069 3070 3071 3072 /* 3073 * ibdm_handle_redirection() 3074 * Returns IBDM_SUCCESS/IBDM_FAILURE 3075 */ 3076 static int 3077 ibdm_handle_redirection(ibmf_msg_t *msg, 3078 ibdm_dp_gidinfo_t *gid_info, int *flag) 3079 { 3080 int attrmod, ioc_no, start; 3081 void *data; 3082 timeout_id_t *timeout_id; 3083 ib_mad_hdr_t *hdr; 3084 ibdm_ioc_info_t *ioc = NULL; 3085 ibdm_timeout_cb_args_t *cb_args; 3086 ibdm_mad_classportinfo_t *cpi; 3087 3088 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter"); 3089 mutex_enter(&gid_info->gl_mutex); 3090 switch (gid_info->gl_state) { 3091 case IBDM_GET_IOUNITINFO: 3092 cb_args = &gid_info->gl_iou_cb_args; 3093 timeout_id = &gid_info->gl_timeout_id; 3094 break; 3095 3096 case IBDM_GET_IOC_DETAILS: 3097 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg); 3098 switch (IBDM_IN_IBMFMSG_ATTR(msg)) { 3099 3100 case IB_DM_ATTR_DIAG_CODE: 3101 if (attrmod == 0) { 3102 cb_args = &gid_info->gl_iou_cb_args; 3103 timeout_id = &gid_info->gl_timeout_id; 3104 break; 3105 } 3106 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3107 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3108 "IOC# Out of range %d", attrmod); 3109 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3110 mutex_exit(&gid_info->gl_mutex); 3111 return (IBDM_FAILURE); 3112 } 3113 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3114 cb_args = &ioc->ioc_dc_cb_args; 3115 timeout_id = &ioc->ioc_dc_timeout_id; 3116 break; 3117 3118 case IB_DM_ATTR_IOC_CTRL_PROFILE: 3119 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) { 3120 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3121 "IOC# Out of range %d", attrmod); 3122 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3123 mutex_exit(&gid_info->gl_mutex); 3124 return (IBDM_FAILURE); 3125 } 3126 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1)); 3127 cb_args = &ioc->ioc_cb_args; 3128 timeout_id = &ioc->ioc_timeout_id; 3129 break; 3130 3131 case IB_DM_ATTR_SERVICE_ENTRIES: 3132 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK); 3133 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) { 3134 IBTF_DPRINTF_L2("ibdm", 
"\thandle_redirction:" 3135 "IOC# Out of range %d", ioc_no); 3136 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3137 mutex_exit(&gid_info->gl_mutex); 3138 return (IBDM_FAILURE); 3139 } 3140 start = ((attrmod >> 8) & IBDM_8_BIT_MASK); 3141 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1)); 3142 if (start > ioc->ioc_profile.ioc_service_entries) { 3143 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:" 3144 " SE index Out of range %d", start); 3145 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3146 mutex_exit(&gid_info->gl_mutex); 3147 return (IBDM_FAILURE); 3148 } 3149 cb_args = &ioc->ioc_serv[start].se_cb_args; 3150 timeout_id = &ioc->ioc_serv[start].se_timeout_id; 3151 break; 3152 3153 default: 3154 /* ERROR State */ 3155 IBTF_DPRINTF_L2("ibdm", 3156 "\thandle_redirection: wrong attribute :-("); 3157 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3158 mutex_exit(&gid_info->gl_mutex); 3159 return (IBDM_FAILURE); 3160 } 3161 break; 3162 default: 3163 /* ERROR State */ 3164 IBTF_DPRINTF_L2("ibdm", 3165 "\thandle_redirection: Error state :-("); 3166 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP; 3167 mutex_exit(&gid_info->gl_mutex); 3168 return (IBDM_FAILURE); 3169 } 3170 if ((*timeout_id) != 0) { 3171 mutex_exit(&gid_info->gl_mutex); 3172 if (untimeout(*timeout_id) == -1) { 3173 IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: " 3174 "untimeout failed %x", *timeout_id); 3175 } else { 3176 IBTF_DPRINTF_L5("ibdm", 3177 "\thandle_redirection: timeout %x", *timeout_id); 3178 } 3179 mutex_enter(&gid_info->gl_mutex); 3180 *timeout_id = 0; 3181 } 3182 3183 data = msg->im_msgbufs_recv.im_bufs_cl_data; 3184 cpi = (ibdm_mad_classportinfo_t *)data; 3185 3186 gid_info->gl_resp_timeout = 3187 (b2h32(cpi->RespTimeValue) & 0x1F); 3188 3189 gid_info->gl_redirected = B_TRUE; 3190 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID); 3191 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff); 3192 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key); 3193 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key); 3194 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi); 3195 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo); 3196 3197 if (gid_info->gl_redirect_dlid != 0) { 3198 msg->im_local_addr.ia_remote_lid = 3199 gid_info->gl_redirect_dlid; 3200 } 3201 mutex_exit(&gid_info->gl_mutex); 3202 3203 ibdm_alloc_send_buffers(msg); 3204 3205 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3206 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3207 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3208 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3209 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3210 hdr->Status = 0; 3211 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3212 hdr->AttributeID = 3213 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID; 3214 hdr->AttributeModifier = 3215 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier; 3216 3217 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3218 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3219 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3220 3221 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3222 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3223 3224 IBTF_DPRINTF_L5("ibdm", "\thandle_redirect:" 3225 "timeout %x", *timeout_id); 3226 3227 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, 3228 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 3229 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:" 3230 "message transport failed"); 3231 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3232 } 3233 (*flag) |= IBDM_IBMF_PKT_REUSED; 3234 
IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit"); 3235 return (IBDM_SUCCESS); 3236 } 3237 3238 3239 /* 3240 * ibdm_pkt_timeout_hdlr 3241 * This timeout handler is registed for every IBMF packet that is 3242 * sent through the IBMF. It gets called when no response is received 3243 * within the specified time for the packet. No retries for the failed 3244 * commands currently. Drops the failed IBMF packet and update the 3245 * pending list commands. 3246 */ 3247 static void 3248 ibdm_pkt_timeout_hdlr(void *arg) 3249 { 3250 int probe_done = B_FALSE; 3251 ibdm_iou_info_t *iou; 3252 ibdm_ioc_info_t *ioc; 3253 ibdm_timeout_cb_args_t *cb_args = arg; 3254 ibdm_dp_gidinfo_t *gid_info; 3255 int srv_ent; 3256 3257 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p " 3258 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3259 cb_args->cb_req_type, cb_args->cb_ioc_num, 3260 cb_args->cb_srvents_start); 3261 3262 gid_info = cb_args->cb_gid_info; 3263 mutex_enter(&gid_info->gl_mutex); 3264 3265 if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) || 3266 (cb_args->cb_req_type == 0)) { 3267 3268 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed" 3269 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type, 3270 cb_args->cb_ioc_num, cb_args->cb_srvents_start); 3271 3272 if (gid_info->gl_timeout_id) 3273 gid_info->gl_timeout_id = 0; 3274 mutex_exit(&gid_info->gl_mutex); 3275 return; 3276 } 3277 if (cb_args->cb_retry_count) { 3278 cb_args->cb_retry_count--; 3279 if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) { 3280 if (gid_info->gl_timeout_id) 3281 gid_info->gl_timeout_id = 0; 3282 mutex_exit(&gid_info->gl_mutex); 3283 return; 3284 } 3285 cb_args->cb_retry_count = 0; 3286 } 3287 3288 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p" 3289 " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3290 cb_args->cb_req_type, cb_args->cb_ioc_num, 3291 cb_args->cb_srvents_start); 3292 3293 switch (cb_args->cb_req_type) { 3294 3295 case IBDM_REQ_TYPE_CLASSPORTINFO: 3296 case IBDM_REQ_TYPE_IOUINFO: 3297 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 3298 if (--gid_info->gl_pending_cmds == 0) 3299 probe_done = B_TRUE; 3300 if (gid_info->gl_timeout_id) 3301 gid_info->gl_timeout_id = 0; 3302 mutex_exit(&gid_info->gl_mutex); 3303 ibdm_delete_glhca_list(gid_info); 3304 mutex_enter(&gid_info->gl_mutex); 3305 break; 3306 case IBDM_REQ_TYPE_IOCINFO: 3307 iou = gid_info->gl_iou; 3308 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3309 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3310 if (--gid_info->gl_pending_cmds == 0) 3311 probe_done = B_TRUE; 3312 #ifndef __lock_lint 3313 if (ioc->ioc_timeout_id) 3314 ioc->ioc_timeout_id = 0; 3315 #endif 3316 break; 3317 case IBDM_REQ_TYPE_SRVENTS: 3318 iou = gid_info->gl_iou; 3319 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3320 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED; 3321 if (--gid_info->gl_pending_cmds == 0) 3322 probe_done = B_TRUE; 3323 srv_ent = cb_args->cb_srvents_start; 3324 #ifndef __lock_lint 3325 if (ioc->ioc_serv[srv_ent].se_timeout_id) 3326 ioc->ioc_serv[srv_ent].se_timeout_id = 0; 3327 #endif 3328 break; 3329 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3330 iou = gid_info->gl_iou; 3331 iou->iou_dc_valid = B_FALSE; 3332 if (--gid_info->gl_pending_cmds == 0) 3333 probe_done = B_TRUE; 3334 if (gid_info->gl_timeout_id) 3335 gid_info->gl_timeout_id = 0; 3336 break; 3337 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3338 iou = gid_info->gl_iou; 3339 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num]; 3340 ioc->ioc_dc_valid = B_FALSE; 3341 if 
(--gid_info->gl_pending_cmds == 0) 3342 probe_done = B_TRUE; 3343 #ifndef __lock_lint 3344 if (ioc->ioc_dc_timeout_id) 3345 ioc->ioc_dc_timeout_id = 0; 3346 #endif 3347 break; 3348 } 3349 if (probe_done == B_TRUE) { 3350 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE; 3351 mutex_exit(&gid_info->gl_mutex); 3352 ibdm_notify_newgid_iocs(gid_info); 3353 mutex_enter(&ibdm.ibdm_mutex); 3354 if (--ibdm.ibdm_ngid_probes_in_progress == 0) { 3355 IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup"); 3356 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS; 3357 cv_broadcast(&ibdm.ibdm_probe_cv); 3358 } 3359 mutex_exit(&ibdm.ibdm_mutex); 3360 } else 3361 mutex_exit(&gid_info->gl_mutex); 3362 } 3363 3364 3365 /* 3366 * ibdm_retry_command() 3367 * Retries the failed command. 3368 * Returns IBDM_FAILURE/IBDM_SUCCESS 3369 */ 3370 static int 3371 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args) 3372 { 3373 int ret, rval = IBDM_SUCCESS; 3374 ibmf_msg_t *msg; 3375 ib_mad_hdr_t *hdr; 3376 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info; 3377 timeout_id_t *timeout_id; 3378 ibdm_ioc_info_t *ioc; 3379 int ioc_no; 3380 3381 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p " 3382 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3383 cb_args->cb_req_type, cb_args->cb_ioc_num, 3384 cb_args->cb_srvents_start); 3385 3386 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg); 3387 3388 3389 /* 3390 * Reset the gid if alloc_msg failed with BAD_HANDLE 3391 * ibdm_reset_gidinfo reinits the gid_info 3392 */ 3393 if (ret == IBMF_BAD_HANDLE) { 3394 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad", 3395 gid_info); 3396 3397 mutex_exit(&gid_info->gl_mutex); 3398 ibdm_reset_gidinfo(gid_info); 3399 mutex_enter(&gid_info->gl_mutex); 3400 3401 /* Retry alloc */ 3402 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, 3403 &msg); 3404 } 3405 3406 if (ret != IBDM_SUCCESS) { 3407 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p " 3408 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3409 cb_args->cb_req_type, cb_args->cb_ioc_num, 3410 cb_args->cb_srvents_start); 3411 return (IBDM_FAILURE); 3412 } 3413 3414 ibdm_alloc_send_buffers(msg); 3415 3416 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 3417 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 3418 if (gid_info->gl_redirected == B_TRUE) { 3419 if (gid_info->gl_redirect_dlid != 0) { 3420 msg->im_local_addr.ia_remote_lid = 3421 gid_info->gl_redirect_dlid; 3422 } 3423 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 3424 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 3425 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 3426 } else { 3427 msg->im_local_addr.ia_remote_qno = 1; 3428 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 3429 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 3430 } 3431 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 3432 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 3433 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 3434 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 3435 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 3436 hdr->Status = 0; 3437 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 3438 3439 switch (cb_args->cb_req_type) { 3440 case IBDM_REQ_TYPE_CLASSPORTINFO: 3441 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO); 3442 hdr->AttributeModifier = 0; 3443 timeout_id = &gid_info->gl_timeout_id; 3444 break; 3445 case IBDM_REQ_TYPE_IOUINFO: 3446 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO); 3447 hdr->AttributeModifier = 0; 3448 timeout_id = 
&gid_info->gl_timeout_id; 3449 break; 3450 case IBDM_REQ_TYPE_IOCINFO: 3451 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 3452 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3453 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3454 timeout_id = &ioc->ioc_timeout_id; 3455 break; 3456 case IBDM_REQ_TYPE_SRVENTS: 3457 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES); 3458 hdr->AttributeModifier = 3459 h2b32((((cb_args->cb_ioc_num+1) << 16) | 3460 ((cb_args->cb_srvents_start << 8) | 3461 (cb_args->cb_srvents_end)))); 3462 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num); 3463 timeout_id = 3464 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id; 3465 break; 3466 case IBDM_REQ_TYPE_IOU_DIAGCODE: 3467 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3468 hdr->AttributeModifier = 0; 3469 timeout_id = &gid_info->gl_timeout_id; 3470 break; 3471 case IBDM_REQ_TYPE_IOC_DIAGCODE: 3472 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE); 3473 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1); 3474 ioc_no = cb_args->cb_ioc_num; 3475 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no]; 3476 timeout_id = &ioc->ioc_dc_timeout_id; 3477 break; 3478 } 3479 3480 mutex_exit(&gid_info->gl_mutex); 3481 3482 *timeout_id = timeout(ibdm_pkt_timeout_hdlr, 3483 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 3484 3485 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:" 3486 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num, 3487 cb_args->cb_srvents_start, *timeout_id); 3488 3489 if ((rval = ibmf_msg_transport(gid_info->gl_ibmf_hdl, 3490 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb, 3491 cb_args, 0)) != IBMF_SUCCESS) { 3492 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p " 3493 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info, 3494 cb_args->cb_req_type, cb_args->cb_ioc_num, 3495 cb_args->cb_srvents_start); 3496 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 3497 rval = IBDM_FAILURE; 3498 } 3499 mutex_enter(&gid_info->gl_mutex); 3500 return (rval); 3501 } 3502 3503 3504 /* 3505 * ibdm_update_ioc_port_gidlist() 3506 */ 3507 static void 3508 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest, 3509 ibdm_dp_gidinfo_t *gid_info) 3510 { 3511 int ii, ngid_ents; 3512 ibdm_gid_t *tmp; 3513 ibdm_hca_list_t *gid_hca_head, *temp; 3514 ibdm_hca_list_t *ioc_head = NULL; 3515 3516 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter"); 3517 3518 ngid_ents = gid_info->gl_ngids; 3519 dest->ioc_nportgids = ngid_ents; 3520 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) * 3521 ngid_ents, KM_SLEEP); 3522 tmp = gid_info->gl_gid; 3523 for (ii = 0; (ii < ngid_ents) && (tmp); ii++) { 3524 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi; 3525 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo; 3526 tmp = tmp->gid_next; 3527 } 3528 3529 gid_hca_head = gid_info->gl_hca_list; 3530 while (gid_hca_head) { 3531 temp = ibdm_dup_hca_attr(gid_hca_head); 3532 temp->hl_next = ioc_head; 3533 ioc_head = temp; 3534 gid_hca_head = gid_hca_head->hl_next; 3535 } 3536 dest->ioc_hca_list = ioc_head; 3537 } 3538 3539 3540 /* 3541 * ibdm_alloc_send_buffers() 3542 * Allocates memory for the IBMF send buffer 3543 */ 3544 static void 3545 ibdm_alloc_send_buffers(ibmf_msg_t *msgp) 3546 { 3547 msgp->im_msgbufs_send.im_bufs_mad_hdr = 3548 kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP); 3549 msgp->im_msgbufs_send.im_bufs_cl_data = (uchar_t *) 3550 msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t); 3551 msgp->im_msgbufs_send.im_bufs_cl_data_len = 3552 IBDM_MAD_SIZE - 
sizeof (ib_mad_hdr_t);
3553 }
3554
3555
3556 /*
3557 * ibdm_free_send_buffers()
3558 * De-allocates memory for the IBMF send buffer
3559 */
3560 static void
3561 ibdm_free_send_buffers(ibmf_msg_t *msgp)
3562 {
3563 if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL)
3564 kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE);
3565 }
3566
3567 /*
3568 * ibdm_probe_ioc()
3569 * 1. Gets the node records for the node GUID. This detects all the ports
3570 * of the IOU.
3571 * 2. Selectively probes all the IOCs, given its node GUID
3572 * 3. In case of reprobe, only the IOC to be reprobed is sent the IOC
3573 * Controller Profile asynchronously
3574 */
3575 /*ARGSUSED*/
3576 static void
3577 ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag)
3578 {
3579 int ii, nrecords;
3580 size_t nr_len = 0, pi_len = 0;
3581 ib_gid_t sgid, dgid;
3582 ibdm_hca_list_t *hca_list = NULL;
3583 sa_node_record_t *nr, *tmp;
3584 ibdm_port_attr_t *port = NULL;
3585 ibdm_dp_gidinfo_t *reprobe_gid, *new_gid, *node_gid;
3586 ibdm_dp_gidinfo_t *temp_gidinfo;
3587 ibdm_gid_t *temp_gid;
3588 sa_portinfo_record_t *pi;
3589
3590 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%x, %x, %x): Begin",
3591 nodeguid, ioc_guid, reprobe_flag);
3592
3593 /* Rescan the GID list for any removed GIDs for reprobe */
3594 if (reprobe_flag)
3595 ibdm_rescan_gidlist(&ioc_guid);
3596
3597 mutex_enter(&ibdm.ibdm_hl_mutex);
3598 for (ibdm_get_next_port(&hca_list, &port, 1); port;
3599 ibdm_get_next_port(&hca_list, &port, 1)) {
3600 reprobe_gid = new_gid = node_gid = NULL;
3601
3602 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid);
3603 if (nr == NULL) {
3604 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records");
3605 continue;
3606 }
3607 nrecords = (nr_len / sizeof (sa_node_record_t));
3608 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) {
3609 pi = ibdm_get_portinfo(
3610 port->pa_sa_hdl, &pi_len, tmp->LID);
3611
3612 if ((pi) && (pi->PortInfo.CapabilityMask &
3613 SM_CAP_MASK_IS_DM_SUPPD)) {
3614 /*
3615 * For reprobes: Check if the GID is already in
3616 * the list.
If so, set the state to SKIPPED 3617 */ 3618 if (((temp_gidinfo = ibdm_find_gid(nodeguid, 3619 tmp->NodeInfo.PortGUID)) != NULL) && 3620 temp_gidinfo->gl_state == 3621 IBDM_GID_PROBING_COMPLETE) { 3622 ASSERT(reprobe_gid == NULL); 3623 ibdm_addto_glhcalist(temp_gidinfo, 3624 hca_list); 3625 reprobe_gid = temp_gidinfo; 3626 kmem_free(pi, pi_len); 3627 continue; 3628 } else if (temp_gidinfo != NULL) { 3629 kmem_free(pi, pi_len); 3630 ibdm_addto_glhcalist(temp_gidinfo, 3631 hca_list); 3632 continue; 3633 } 3634 3635 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : " 3636 "create_gid : prefix %llx, guid %llx\n", 3637 pi->PortInfo.GidPrefix, 3638 tmp->NodeInfo.PortGUID); 3639 3640 sgid.gid_prefix = port->pa_sn_prefix; 3641 sgid.gid_guid = port->pa_port_guid; 3642 dgid.gid_prefix = pi->PortInfo.GidPrefix; 3643 dgid.gid_guid = tmp->NodeInfo.PortGUID; 3644 new_gid = ibdm_create_gid_info(port, sgid, 3645 dgid); 3646 if (new_gid == NULL) { 3647 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 3648 "create_gid_info failed\n"); 3649 kmem_free(pi, pi_len); 3650 continue; 3651 } 3652 if (node_gid == NULL) { 3653 node_gid = new_gid; 3654 } else { 3655 IBTF_DPRINTF_L4("ibdm", 3656 "\tprobe_ioc: new gid"); 3657 temp_gid = kmem_zalloc( 3658 sizeof (ibdm_gid_t), KM_SLEEP); 3659 temp_gid->gid_dgid_hi = 3660 new_gid->gl_dgid_hi; 3661 temp_gid->gid_dgid_lo = 3662 new_gid->gl_dgid_lo; 3663 temp_gid->gid_next = node_gid->gl_gid; 3664 node_gid->gl_gid = temp_gid; 3665 node_gid->gl_ngids++; 3666 } 3667 new_gid->gl_nodeguid = nodeguid; 3668 new_gid->gl_portguid = dgid.gid_guid; 3669 ibdm_addto_glhcalist(new_gid, hca_list); 3670 3671 /* 3672 * Set the state to skipped as all these 3673 * gids point to the same node. 3674 * We (re)probe only one GID below and reset 3675 * state appropriately 3676 */ 3677 new_gid->gl_state = IBDM_GID_PROBING_SKIPPED; 3678 kmem_free(pi, pi_len); 3679 } 3680 } 3681 kmem_free(nr, nr_len); 3682 3683 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d " 3684 "reprobe_gid %p new_gid %p node_gid %p", 3685 reprobe_flag, reprobe_gid, new_gid, node_gid); 3686 3687 if (reprobe_flag != 0 && reprobe_gid != NULL) { 3688 int niocs, jj; 3689 ibdm_ioc_info_t *tmp_ioc; 3690 int ioc_matched = 0; 3691 3692 mutex_exit(&ibdm.ibdm_hl_mutex); 3693 mutex_enter(&reprobe_gid->gl_mutex); 3694 reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS; 3695 niocs = 3696 reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots; 3697 reprobe_gid->gl_pending_cmds++; 3698 mutex_exit(&reprobe_gid->gl_mutex); 3699 3700 for (jj = 0; jj < niocs; jj++) { 3701 tmp_ioc = 3702 IBDM_GIDINFO2IOCINFO(reprobe_gid, jj); 3703 if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid) 3704 continue; 3705 3706 ioc_matched = 1; 3707 3708 /* 3709 * Explicitly set gl_reprobe_flag to 0 so that 3710 * IBnex is not notified on completion 3711 */ 3712 mutex_enter(&reprobe_gid->gl_mutex); 3713 reprobe_gid->gl_reprobe_flag = 0; 3714 mutex_exit(&reprobe_gid->gl_mutex); 3715 3716 mutex_enter(&ibdm.ibdm_mutex); 3717 ibdm.ibdm_ngid_probes_in_progress++; 3718 mutex_exit(&ibdm.ibdm_mutex); 3719 if (ibdm_send_ioc_profile(reprobe_gid, jj) != 3720 IBDM_SUCCESS) { 3721 IBTF_DPRINTF_L4("ibdm", 3722 "\tprobe_ioc: " 3723 "send_ioc_profile failed " 3724 "for ioc %d", jj); 3725 ibdm_gid_decr_pending(reprobe_gid); 3726 break; 3727 } 3728 mutex_enter(&ibdm.ibdm_mutex); 3729 ibdm_wait_probe_completion(); 3730 mutex_exit(&ibdm.ibdm_mutex); 3731 break; 3732 } 3733 if (ioc_matched == 0) 3734 ibdm_gid_decr_pending(reprobe_gid); 3735 else { 3736 mutex_enter(&ibdm.ibdm_hl_mutex); 3737 break; 3738 } 3739 } else if 
(new_gid != NULL) { 3740 mutex_exit(&ibdm.ibdm_hl_mutex); 3741 node_gid = node_gid ? node_gid : new_gid; 3742 3743 /* 3744 * New or reinserted GID : Enable notification 3745 * to IBnex 3746 */ 3747 mutex_enter(&node_gid->gl_mutex); 3748 node_gid->gl_reprobe_flag = 1; 3749 mutex_exit(&node_gid->gl_mutex); 3750 3751 ibdm_probe_gid(node_gid); 3752 3753 mutex_enter(&ibdm.ibdm_hl_mutex); 3754 } else { 3755 IBTF_DPRINTF_L2("ibdm", "\tibdm_probe_ioc " 3756 "Invalid state!"); 3757 } 3758 3759 } 3760 mutex_exit(&ibdm.ibdm_hl_mutex); 3761 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n"); 3762 } 3763 3764 3765 /* 3766 * ibdm_probe_gid() 3767 * Selectively probes the GID 3768 */ 3769 static void 3770 ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info) 3771 { 3772 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:"); 3773 mutex_enter(&gid_info->gl_mutex); 3774 gid_info->gl_pending_cmds++; 3775 gid_info->gl_state = IBDM_GET_CLASSPORTINFO; 3776 mutex_exit(&gid_info->gl_mutex); 3777 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) { 3778 mutex_enter(&gid_info->gl_mutex); 3779 gid_info->gl_state = IBDM_GID_PROBING_FAILED; 3780 --gid_info->gl_pending_cmds; 3781 mutex_exit(&gid_info->gl_mutex); 3782 ibdm_delete_glhca_list(gid_info); 3783 gid_info = gid_info->gl_next; 3784 return; 3785 } 3786 mutex_enter(&ibdm.ibdm_mutex); 3787 ibdm.ibdm_ngid_probes_in_progress++; 3788 gid_info = gid_info->gl_next; 3789 3790 ibdm_wait_probe_completion(); 3791 mutex_exit(&ibdm.ibdm_mutex); 3792 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received"); 3793 } 3794 3795 3796 /* 3797 * ibdm_create_gid_info() 3798 * Allocates a gid_info structure and initializes 3799 * Returns pointer to the structure on success 3800 * and NULL on failure 3801 */ 3802 static ibdm_dp_gidinfo_t * 3803 ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid) 3804 { 3805 uint8_t ii, npaths; 3806 sa_path_record_t *path; 3807 size_t len; 3808 ibdm_pkey_tbl_t *pkey_tbl; 3809 ibdm_dp_gidinfo_t *gid_info = NULL; 3810 int ret; 3811 3812 IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin"); 3813 npaths = 1; 3814 3815 /* query for reversible paths */ 3816 if (port->pa_sa_hdl) 3817 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, 3818 sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, 3819 &len, &path); 3820 else 3821 return (NULL); 3822 3823 if (ret == IBMF_SUCCESS && path) { 3824 ibdm_dump_path_info(path); 3825 3826 gid_info = kmem_zalloc( 3827 sizeof (ibdm_dp_gidinfo_t), KM_SLEEP); 3828 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL); 3829 gid_info->gl_dgid_hi = path->DGID.gid_prefix; 3830 gid_info->gl_dgid_lo = path->DGID.gid_guid; 3831 gid_info->gl_sgid_hi = path->SGID.gid_prefix; 3832 gid_info->gl_sgid_lo = path->SGID.gid_guid; 3833 gid_info->gl_p_key = path->P_Key; 3834 gid_info->gl_sa_hdl = port->pa_sa_hdl; 3835 gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl; 3836 gid_info->gl_slid = path->SLID; 3837 gid_info->gl_dlid = path->DLID; 3838 gid_info->gl_transactionID = ++ibdm.ibdm_transactionID; 3839 3840 gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT; 3841 for (ii = 0; ii < port->pa_npkeys; ii++) { 3842 if (port->pa_pkey_tbl == NULL) 3843 break; 3844 3845 pkey_tbl = &port->pa_pkey_tbl[ii]; 3846 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) && 3847 (pkey_tbl->pt_qp_hdl != NULL)) { 3848 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl; 3849 break; 3850 } 3851 } 3852 kmem_free(path, len); 3853 ibdm.ibdm_ngids++; 3854 if (ibdm.ibdm_dp_gidlist_head == NULL) { 3855 ibdm.ibdm_dp_gidlist_head = gid_info; 3856 ibdm.ibdm_dp_gidlist_tail = gid_info; 3857 } 
else { 3858 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info; 3859 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail; 3860 ibdm.ibdm_dp_gidlist_tail = gid_info; 3861 } 3862 } 3863 3864 return (gid_info); 3865 } 3866 3867 3868 /* 3869 * ibdm_get_node_records 3870 * Sends a SA query to get the NODE record 3871 * Returns pointer to the sa_node_record_t on success 3872 * and NULL on failure 3873 */ 3874 static sa_node_record_t * 3875 ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid) 3876 { 3877 sa_node_record_t req, *resp = NULL; 3878 ibmf_saa_access_args_t args; 3879 int ret; 3880 3881 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin"); 3882 3883 bzero(&req, sizeof (sa_node_record_t)); 3884 req.NodeInfo.NodeGUID = guid; 3885 3886 args.sq_attr_id = SA_NODERECORD_ATTRID; 3887 args.sq_access_type = IBMF_SAA_RETRIEVE; 3888 args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID; 3889 args.sq_template = &req; 3890 args.sq_callback = NULL; 3891 args.sq_callback_arg = NULL; 3892 3893 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 3894 if (ret != IBMF_SUCCESS) { 3895 IBTF_DPRINTF_L2("ibdm", "\tget_node_records:" 3896 " SA Retrieve Failed: %d", ret); 3897 return (NULL); 3898 } 3899 if ((resp == NULL) || (*length == 0)) { 3900 IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records"); 3901 return (NULL); 3902 } 3903 3904 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx " 3905 "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID); 3906 3907 return (resp); 3908 } 3909 3910 3911 /* 3912 * ibdm_get_portinfo() 3913 * Sends a SA query to get the PortInfo record 3914 * Returns pointer to the sa_portinfo_record_t on success 3915 * and NULL on failure 3916 */ 3917 static sa_portinfo_record_t * 3918 ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid) 3919 { 3920 sa_portinfo_record_t req, *resp = NULL; 3921 ibmf_saa_access_args_t args; 3922 int ret; 3923 3924 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin"); 3925 3926 bzero(&req, sizeof (sa_portinfo_record_t)); 3927 req.EndportLID = lid; 3928 3929 args.sq_attr_id = SA_PORTINFORECORD_ATTRID; 3930 args.sq_access_type = IBMF_SAA_RETRIEVE; 3931 args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID; 3932 args.sq_template = &req; 3933 args.sq_callback = NULL; 3934 args.sq_callback_arg = NULL; 3935 3936 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp); 3937 if (ret != IBMF_SUCCESS) { 3938 IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:" 3939 " SA Retrieve Failed: 0x%X", ret); 3940 return (NULL); 3941 } 3942 if ((*length == 0) || (resp == NULL)) 3943 return (NULL); 3944 3945 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x", 3946 resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask); 3947 return (resp); 3948 } 3949 3950 3951 /* 3952 * ibdm_ibnex_register_callback 3953 * IB nexus callback routine for HCA attach and detach notification 3954 */ 3955 void 3956 ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback) 3957 { 3958 IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks"); 3959 mutex_enter(&ibdm.ibdm_ibnex_mutex); 3960 ibdm.ibdm_ibnex_callback = ibnex_dm_callback; 3961 mutex_exit(&ibdm.ibdm_ibnex_mutex); 3962 } 3963 3964 3965 /* 3966 * ibdm_ibnex_unregister_callbacks 3967 */ 3968 void 3969 ibdm_ibnex_unregister_callback() 3970 { 3971 IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks"); 3972 mutex_enter(&ibdm.ibdm_ibnex_mutex); 3973 ibdm.ibdm_ibnex_callback = NULL; 3974 mutex_exit(&ibdm.ibdm_ibnex_mutex); 3975 } 3976 3977 3978 /* 3979 * 
ibdm_ibnex_get_waittime() 3980 * Calculates the wait time based on the last HCA attach time 3981 */ 3982 time_t 3983 ibdm_ibnex_get_waittime(ib_guid_t hca_guid, int *dft_wait) 3984 { 3985 int ii; 3986 time_t temp, wait_time = 0; 3987 ibdm_hca_list_t *hca; 3988 3989 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime hcaguid:%llx" 3990 "\tport settling time %d", hca_guid, *dft_wait); 3991 3992 mutex_enter(&ibdm.ibdm_hl_mutex); 3993 hca = ibdm.ibdm_hca_list_head; 3994 3995 if (hca_guid) { 3996 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 3997 if ((hca_guid == hca->hl_hca_guid) && 3998 (hca->hl_nports != hca->hl_nports_active)) { 3999 wait_time = 4000 ddi_get_time() - hca->hl_attach_time; 4001 wait_time = ((wait_time >= *dft_wait) ? 4002 0 : (*dft_wait - wait_time)); 4003 break; 4004 } 4005 hca = hca->hl_next; 4006 } 4007 mutex_exit(&ibdm.ibdm_hl_mutex); 4008 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime %llx", wait_time); 4009 return (wait_time); 4010 } 4011 4012 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4013 if (hca->hl_nports != hca->hl_nports_active) { 4014 temp = ddi_get_time() - hca->hl_attach_time; 4015 temp = ((temp >= *dft_wait) ? 0 : (*dft_wait - temp)); 4016 wait_time = (temp > wait_time) ? temp : wait_time; 4017 } 4018 } 4019 mutex_exit(&ibdm.ibdm_hl_mutex); 4020 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_waittime %llx", wait_time); 4021 return (wait_time); 4022 } 4023 4024 4025 /* 4026 * ibdm_ibnex_probe_hcaport 4027 * Probes the presence of HCA port (with HCA dip and port number) 4028 * Returns port attributes structure on SUCCESS 4029 */ 4030 ibdm_port_attr_t * 4031 ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num) 4032 { 4033 int ii, jj; 4034 ibdm_hca_list_t *hca_list; 4035 ibdm_port_attr_t *port_attr; 4036 4037 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:"); 4038 4039 mutex_enter(&ibdm.ibdm_hl_mutex); 4040 hca_list = ibdm.ibdm_hca_list_head; 4041 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) { 4042 if (hca_list->hl_hca_guid == hca_guid) { 4043 for (jj = 0; jj < hca_list->hl_nports; jj++) { 4044 if (hca_list->hl_port_attr[jj].pa_port_num == 4045 port_num) { 4046 break; 4047 } 4048 } 4049 if (jj != hca_list->hl_nports) 4050 break; 4051 } 4052 hca_list = hca_list->hl_next; 4053 } 4054 if (ii == ibdm.ibdm_hca_count) { 4055 IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found"); 4056 mutex_exit(&ibdm.ibdm_hl_mutex); 4057 return (NULL); 4058 } 4059 port_attr = (ibdm_port_attr_t *)kmem_zalloc( 4060 sizeof (ibdm_port_attr_t), KM_SLEEP); 4061 bcopy((char *)&hca_list->hl_port_attr[jj], 4062 port_attr, sizeof (ibdm_port_attr_t)); 4063 ibdm_update_port_attr(port_attr); 4064 4065 mutex_exit(&ibdm.ibdm_hl_mutex); 4066 return (port_attr); 4067 } 4068 4069 4070 /* 4071 * ibdm_ibnex_get_port_attrs 4072 * Scan all HCAs for a matching port_guid. 4073 * Returns "port attributes" structure on success. 
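 *
 * Illustrative usage sketch (the caller shown is hypothetical; the
 * interfaces are the real ones in this file).  The returned structure
 * is a private copy and is released with ibdm_ibnex_free_port_attr():
 *
 *	ibdm_port_attr_t *pa;
 *
 *	pa = ibdm_ibnex_get_port_attrs(port_guid);
 *	if (pa != NULL) {
 *		... use pa->pa_port_num, pa->pa_pkey_tbl ...
 *		ibdm_ibnex_free_port_attr(pa);
 *	}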
4074 */
4075 ibdm_port_attr_t *
4076 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid)
4077 {
4078 int ii, jj;
4079 ibdm_hca_list_t *hca_list;
4080 ibdm_port_attr_t *port_attr;
4081
4082 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:");
4083
4084 mutex_enter(&ibdm.ibdm_hl_mutex);
4085 hca_list = ibdm.ibdm_hca_list_head;
4086
4087 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
4088 for (jj = 0; jj < hca_list->hl_nports; jj++) {
4089 if (hca_list->hl_port_attr[jj].pa_port_guid ==
4090 port_guid) {
4091 break;
4092 }
4093 }
4094 if (jj != hca_list->hl_nports)
4095 break;
4096 hca_list = hca_list->hl_next;
4097 }
4098
4099 if (ii == ibdm.ibdm_hca_count) {
4100 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found");
4101 mutex_exit(&ibdm.ibdm_hl_mutex);
4102 return (NULL);
4103 }
4104
4105 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t),
4106 KM_SLEEP);
4107 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr,
4108 sizeof (ibdm_port_attr_t));
4109 ibdm_update_port_attr(port_attr);
4110
4111 mutex_exit(&ibdm.ibdm_hl_mutex);
4112 return (port_attr);
4113 }
4114
4115
4116 /*
4117 * ibdm_ibnex_free_port_attr()
4118 */
4119 void
4120 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr)
4121 {
4122 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:");
4123 if (port_attr) {
4124 if (port_attr->pa_pkey_tbl != NULL) {
4125 kmem_free(port_attr->pa_pkey_tbl,
4126 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t)));
4127 }
4128 kmem_free(port_attr, sizeof (ibdm_port_attr_t));
4129 }
4130 }
4131
4132
4133 /*
4134 * ibdm_ibnex_get_hca_list()
4135 * Returns port info for all the ports of all the HCAs
4136 */
4137 void
4138 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count)
4139 {
4140 ibdm_hca_list_t *head = NULL, *temp, *temp1;
4141 int ii;
4142
4143 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:");
4144
4145 mutex_enter(&ibdm.ibdm_hl_mutex);
4146 temp = ibdm.ibdm_hca_list_head;
4147 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
4148 temp1 = ibdm_dup_hca_attr(temp);
4149 temp1->hl_next = head;
4150 head = temp1;
4151 temp = temp->hl_next;
4152 }
4153 *count = ibdm.ibdm_hca_count;
4154 *hca = head;
4155 mutex_exit(&ibdm.ibdm_hl_mutex);
4156 }
4157
4158
4159 /*
4160 * ibdm_ibnex_get_hca_info_by_guid()
4161 */
4162 ibdm_hca_list_t *
4163 ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid)
4164 {
4165 ibdm_hca_list_t *head = NULL, *hca = NULL;
4166
4167 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_guid");
4168
4169 mutex_enter(&ibdm.ibdm_hl_mutex);
4170 head = ibdm.ibdm_hca_list_head;
4171 while (head) {
4172 if (head->hl_hca_guid == hca_guid) {
4173 hca = ibdm_dup_hca_attr(head);
4174 hca->hl_next = NULL;
4175 break;
4176 }
4177 head = head->hl_next;
4178 }
4179 mutex_exit(&ibdm.ibdm_hl_mutex);
4180 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_guid %p", hca);
4181 return (hca);
4182 }
4183
4184
4185 /*
4186 * ibdm_dup_hca_attr()
4187 * Allocate a new HCA attribute structure and initialize it
4188 * with the incoming HCA attributes; return the
4189 * allocated copy.
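 *
 * Layout of the duplicate (a single kmem_alloc'ed block, which is
 * also what ibdm_ibnex_free_hca_list() below assumes when it
 * computes the length to free):
 *
 *	+------------------+--------------------------------------+
 *	| ibdm_hca_list_t  | hl_nports x ibdm_port_attr_t         |
 *	+------------------+--------------------------------------+
 *	^                  ^
 *	out_hca            out_hca->hl_port_attr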
4190 */ 4191 static ibdm_hca_list_t * 4192 ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca) 4193 { 4194 int len; 4195 ibdm_hca_list_t *out_hca; 4196 4197 len = sizeof (ibdm_hca_list_t) + 4198 (in_hca->hl_nports * sizeof (ibdm_port_attr_t)); 4199 IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len); 4200 out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP); 4201 bcopy((char *)in_hca, 4202 (char *)out_hca, sizeof (ibdm_hca_list_t)); 4203 if (in_hca->hl_nports) { 4204 out_hca->hl_port_attr = (ibdm_port_attr_t *) 4205 ((char *)out_hca + sizeof (ibdm_hca_list_t)); 4206 bcopy((char *)in_hca->hl_port_attr, 4207 (char *)out_hca->hl_port_attr, 4208 (in_hca->hl_nports * sizeof (ibdm_port_attr_t))); 4209 for (len = 0; len < out_hca->hl_nports; len++) 4210 ibdm_update_port_attr(&out_hca->hl_port_attr[len]); 4211 } 4212 return (out_hca); 4213 } 4214 4215 4216 /* 4217 * ibdm_ibnex_free_hca_list() 4218 * Free one/more HCA lists 4219 */ 4220 void 4221 ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list) 4222 { 4223 int ii, len; 4224 ibdm_hca_list_t *temp; 4225 ibdm_port_attr_t *port; 4226 4227 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:"); 4228 ASSERT(hca_list); 4229 while (hca_list) { 4230 temp = hca_list; 4231 hca_list = hca_list->hl_next; 4232 for (ii = 0; ii < temp->hl_nports; ii++) { 4233 port = &temp->hl_port_attr[ii]; 4234 len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t)); 4235 if (len != 0) 4236 kmem_free(port->pa_pkey_tbl, len); 4237 } 4238 len = sizeof (ibdm_hca_list_t) + (temp->hl_nports * 4239 sizeof (ibdm_port_attr_t)); 4240 kmem_free(temp, len); 4241 } 4242 } 4243 4244 4245 /* 4246 * ibdm_ibnex_probe_iocguid() 4247 * Probes the IOC on the fabric and returns the IOC information 4248 * if present. Otherwise, NULL is returned 4249 */ 4250 /* ARGSUSED */ 4251 ibdm_ioc_info_t * 4252 ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag) 4253 { 4254 int k; 4255 ibdm_ioc_info_t *ioc_info; 4256 ibdm_dp_gidinfo_t *gid_info; 4257 4258 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin", 4259 iou, ioc_guid, reprobe_flag); 4260 /* Check whether we know this already */ 4261 ioc_info = ibdm_ibnex_get_ioc_info(ioc_guid); 4262 if (ioc_info == NULL) { 4263 mutex_enter(&ibdm.ibdm_mutex); 4264 while (ibdm.ibdm_busy & IBDM_BUSY) 4265 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4266 ibdm.ibdm_busy |= IBDM_BUSY; 4267 mutex_exit(&ibdm.ibdm_mutex); 4268 ibdm_probe_ioc(iou, ioc_guid, 0); 4269 mutex_enter(&ibdm.ibdm_mutex); 4270 ibdm.ibdm_busy &= ~IBDM_BUSY; 4271 cv_broadcast(&ibdm.ibdm_busy_cv); 4272 mutex_exit(&ibdm.ibdm_mutex); 4273 ioc_info = ibdm_ibnex_get_ioc_info(ioc_guid); 4274 } else if (reprobe_flag) { /* Handle Reprobe for the IOC */ 4275 /* Free the ioc_list before reprobe; and cancel any timers */ 4276 mutex_enter(&ibdm.ibdm_mutex); 4277 if (ioc_info->ioc_timeout_id) { 4278 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4279 "ioc_timeout_id = 0x%x", 4280 ioc_info->ioc_timeout_id); 4281 if (untimeout(ioc_info->ioc_timeout_id) == -1) { 4282 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4283 "untimeout ioc_timeout_id failed"); 4284 } 4285 ioc_info->ioc_timeout_id = 0; 4286 } 4287 if (ioc_info->ioc_dc_timeout_id) { 4288 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4289 "ioc_dc_timeout_id = 0x%x", 4290 ioc_info->ioc_dc_timeout_id); 4291 if (untimeout(ioc_info->ioc_dc_timeout_id) == -1) { 4292 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4293 "untimeout ioc_dc_timeout_id failed"); 4294 } 4295 ioc_info->ioc_dc_timeout_id = 0; 4296 } 4297 for (k = 0; k < ioc_info->ioc_profile.ioc_service_entries; 
k++) 4298 if (ioc_info->ioc_serv[k].se_timeout_id) { 4299 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: " 4300 "ioc_info->ioc_serv[k].se_timeout_id = %x", 4301 k, ioc_info->ioc_serv[k].se_timeout_id); 4302 if (untimeout(ioc_info->ioc_serv[k]. 4303 se_timeout_id) == -1) { 4304 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: " 4305 "untimeout se_timeout_id %d " 4306 "failed", k); 4307 } 4308 ioc_info->ioc_serv[k].se_timeout_id = 0; 4309 } 4310 mutex_exit(&ibdm.ibdm_mutex); 4311 ibdm_ibnex_free_ioc_list(ioc_info); 4312 4313 mutex_enter(&ibdm.ibdm_mutex); 4314 while (ibdm.ibdm_busy & IBDM_BUSY) 4315 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4316 ibdm.ibdm_busy |= IBDM_BUSY; 4317 mutex_exit(&ibdm.ibdm_mutex); 4318 4319 ibdm_probe_ioc(iou, ioc_guid, 1); 4320 4321 /* 4322 * Skip if gl_reprobe_flag is set, this will be 4323 * a re-inserted / new GID, for which notifications 4324 * have already been send. 4325 */ 4326 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; 4327 gid_info = gid_info->gl_next) { 4328 uint8_t ii, niocs; 4329 ibdm_ioc_info_t *ioc; 4330 4331 if (gid_info->gl_iou == NULL) 4332 continue; 4333 4334 if (gid_info->gl_reprobe_flag) { 4335 gid_info->gl_reprobe_flag = 0; 4336 continue; 4337 } 4338 4339 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 4340 for (ii = 0; ii < niocs; ii++) { 4341 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii); 4342 if (ioc->ioc_profile.ioc_guid == ioc_guid) { 4343 mutex_enter(&ibdm.ibdm_mutex); 4344 ibdm_reprobe_update_port_srv(ioc, 4345 gid_info); 4346 mutex_exit(&ibdm.ibdm_mutex); 4347 } 4348 } 4349 } 4350 mutex_enter(&ibdm.ibdm_mutex); 4351 ibdm.ibdm_busy &= ~IBDM_BUSY; 4352 cv_broadcast(&ibdm.ibdm_busy_cv); 4353 mutex_exit(&ibdm.ibdm_mutex); 4354 4355 ioc_info = ibdm_ibnex_get_ioc_info(ioc_guid); 4356 } 4357 return (ioc_info); 4358 } 4359 4360 4361 /* 4362 * ibdm_ibnex_get_ioc_info() 4363 * Returns pointer to ibdm_ioc_info_t if it finds 4364 * matching record for the ioc_guid, otherwise NULL 4365 * is returned 4366 */ 4367 ibdm_ioc_info_t * 4368 ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid) 4369 { 4370 int ii; 4371 ibdm_ioc_info_t *ioc = NULL, *tmp = NULL; 4372 ibdm_dp_gidinfo_t *gid_list; 4373 ib_dm_io_unitinfo_t *iou; 4374 4375 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid); 4376 4377 mutex_enter(&ibdm.ibdm_mutex); 4378 while (ibdm.ibdm_busy & IBDM_BUSY) 4379 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4380 ibdm.ibdm_busy |= IBDM_BUSY; 4381 4382 gid_list = ibdm.ibdm_dp_gidlist_head; 4383 while (gid_list) { 4384 mutex_enter(&gid_list->gl_mutex); 4385 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 4386 mutex_exit(&gid_list->gl_mutex); 4387 gid_list = gid_list->gl_next; 4388 continue; 4389 } 4390 if (gid_list->gl_iou == NULL) { 4391 IBTF_DPRINTF_L2("ibdm", 4392 "\tget_ioc_info: No IOU info"); 4393 mutex_exit(&gid_list->gl_mutex); 4394 gid_list = gid_list->gl_next; 4395 continue; 4396 } 4397 iou = &gid_list->gl_iou->iou_info; 4398 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 4399 tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii); 4400 if ((tmp->ioc_profile.ioc_guid == ioc_guid) && 4401 (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) { 4402 ioc = ibdm_dup_ioc_info(tmp, gid_list); 4403 mutex_exit(&gid_list->gl_mutex); 4404 ibdm.ibdm_busy &= ~IBDM_BUSY; 4405 cv_broadcast(&ibdm.ibdm_busy_cv); 4406 mutex_exit(&ibdm.ibdm_mutex); 4407 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End"); 4408 return (ioc); 4409 } 4410 } 4411 if (ii == iou->iou_num_ctrl_slots) 4412 ioc = NULL; 4413 4414 mutex_exit(&gid_list->gl_mutex); 4415 gid_list = 
gid_list->gl_next; 4416 } 4417 4418 ibdm.ibdm_busy &= ~IBDM_BUSY; 4419 cv_broadcast(&ibdm.ibdm_busy_cv); 4420 mutex_exit(&ibdm.ibdm_mutex); 4421 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End"); 4422 return (ioc); 4423 } 4424 4425 4426 /* 4427 * ibdm_ibnex_get_ioc_count() 4428 * Returns number of ibdm_ioc_info_t it finds 4429 */ 4430 int 4431 ibdm_ibnex_get_ioc_count(void) 4432 { 4433 int count = 0, k; 4434 ibdm_ioc_info_t *ioc; 4435 ibdm_dp_gidinfo_t *gid_list; 4436 4437 mutex_enter(&ibdm.ibdm_mutex); 4438 ibdm_sweep_fabric(0); 4439 4440 while (ibdm.ibdm_busy & IBDM_BUSY) 4441 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4442 ibdm.ibdm_busy |= IBDM_BUSY; 4443 4444 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 4445 gid_list = gid_list->gl_next) { 4446 mutex_enter(&gid_list->gl_mutex); 4447 if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) || 4448 (gid_list->gl_iou == NULL)) { 4449 mutex_exit(&gid_list->gl_mutex); 4450 continue; 4451 } 4452 for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots; 4453 k++) { 4454 ioc = IBDM_GIDINFO2IOCINFO(gid_list, k); 4455 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) 4456 ++count; 4457 } 4458 mutex_exit(&gid_list->gl_mutex); 4459 } 4460 ibdm.ibdm_busy &= ~IBDM_BUSY; 4461 cv_broadcast(&ibdm.ibdm_busy_cv); 4462 mutex_exit(&ibdm.ibdm_mutex); 4463 4464 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count); 4465 return (count); 4466 } 4467 4468 4469 /* 4470 * ibdm_ibnex_get_ioc_list() 4471 * Returns information about all the IOCs present on the fabric. 4472 * Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL. 4473 * Does not sweep fabric if DONOT_PROBE is set 4474 */ 4475 ibdm_ioc_info_t * 4476 ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag) 4477 { 4478 int ii; 4479 ibdm_ioc_info_t *ioc_list = NULL, *tmp, *ioc; 4480 ibdm_dp_gidinfo_t *gid_list; 4481 ib_dm_io_unitinfo_t *iou; 4482 4483 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter"); 4484 4485 mutex_enter(&ibdm.ibdm_mutex); 4486 if (list_flag != IBDM_IBNEX_DONOT_PROBE) 4487 ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL); 4488 4489 while (ibdm.ibdm_busy & IBDM_BUSY) 4490 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4491 ibdm.ibdm_busy |= IBDM_BUSY; 4492 4493 gid_list = ibdm.ibdm_dp_gidlist_head; 4494 while (gid_list) { 4495 mutex_enter(&gid_list->gl_mutex); 4496 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) { 4497 mutex_exit(&gid_list->gl_mutex); 4498 gid_list = gid_list->gl_next; 4499 continue; 4500 } 4501 if (gid_list->gl_iou == NULL) { 4502 IBTF_DPRINTF_L2("ibdm", 4503 "\tget_ioc_list: No IOU info"); 4504 mutex_exit(&gid_list->gl_mutex); 4505 gid_list = gid_list->gl_next; 4506 continue; 4507 } 4508 iou = &gid_list->gl_iou->iou_info; 4509 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) { 4510 ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii); 4511 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 4512 tmp = ibdm_dup_ioc_info(ioc, gid_list); 4513 tmp->ioc_next = ioc_list; 4514 ioc_list = tmp; 4515 } 4516 } 4517 mutex_exit(&gid_list->gl_mutex); 4518 gid_list = gid_list->gl_next; 4519 } 4520 ibdm.ibdm_busy &= ~IBDM_BUSY; 4521 cv_broadcast(&ibdm.ibdm_busy_cv); 4522 mutex_exit(&ibdm.ibdm_mutex); 4523 4524 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End"); 4525 return (ioc_list); 4526 } 4527 4528 /* 4529 * ibdm_dup_ioc_info() 4530 * Duplicate the IOC information and return the IOC 4531 * information. 
4532 */ 4533 static ibdm_ioc_info_t * 4534 ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list) 4535 { 4536 ibdm_ioc_info_t *out_ioc; 4537 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc)); 4538 4539 out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP); 4540 bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t)); 4541 ibdm_update_ioc_port_gidlist(out_ioc, gid_list); 4542 out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid; 4543 out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode; 4544 4545 return (out_ioc); 4546 } 4547 4548 4549 /* 4550 * ibdm_free_ioc_list() 4551 * Deallocate memory for IOC list structure 4552 */ 4553 void 4554 ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc) 4555 { 4556 ibdm_ioc_info_t *temp; 4557 4558 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:"); 4559 while (ioc) { 4560 temp = ioc; 4561 ioc = ioc->ioc_next; 4562 kmem_free(temp->ioc_gid_list, 4563 (sizeof (ibdm_gid_t) * temp->ioc_nportgids)); 4564 if (temp->ioc_hca_list) 4565 ibdm_ibnex_free_hca_list(temp->ioc_hca_list); 4566 kmem_free(temp, sizeof (ibdm_ioc_info_t)); 4567 } 4568 } 4569 4570 4571 /* 4572 * ibdm_ibnex_update_pkey_tbls 4573 * Updates the DM P_Key database. 4574 * NOTE: Two cases are handled here: P_Key being added or removed. 4575 * 4576 * Arguments : NONE 4577 * Return Values : NONE 4578 */ 4579 void 4580 ibdm_ibnex_update_pkey_tbls(void) 4581 { 4582 int h, pp, pidx; 4583 uint_t nports; 4584 uint_t size; 4585 ib_pkey_t new_pkey; 4586 ib_pkey_t *orig_pkey; 4587 ibdm_hca_list_t *hca_list; 4588 ibdm_port_attr_t *port; 4589 ibt_hca_portinfo_t *pinfop; 4590 4591 IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:"); 4592 4593 mutex_enter(&ibdm.ibdm_hl_mutex); 4594 hca_list = ibdm.ibdm_hca_list_head; 4595 4596 for (h = 0; h < ibdm.ibdm_hca_count; h++) { 4597 4598 /* This updates P_Key Tables for all ports of this HCA */ 4599 (void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop, 4600 &nports, &size); 4601 4602 /* number of ports shouldn't have changed */ 4603 ASSERT(nports == hca_list->hl_nports); 4604 4605 for (pp = 0; pp < hca_list->hl_nports; pp++) { 4606 port = &hca_list->hl_port_attr[pp]; 4607 4608 /* 4609 * First figure out the P_Keys from IBTL. 4610 * Three things could have happened: 4611 * New P_Keys added 4612 * Existing P_Keys removed 4613 * Both of the above two 4614 * 4615 * Loop through the P_Key Indices and check if a 4616 * give P_Key_Ix matches that of the one seen by 4617 * IBDM. If they match no action is needed. 4618 * 4619 * If they don't match: 4620 * 1. if orig_pkey is invalid and new_pkey is valid 4621 * ---> add new_pkey to DM database 4622 * 2. if orig_pkey is valid and new_pkey is invalid 4623 * ---> remove orig_pkey from DM database 4624 * 3. if orig_pkey and new_pkey are both valid: 4625 * ---> remov orig_pkey from DM database 4626 * ---> add new_pkey to DM database 4627 * 4. if orig_pkey and new_pkey are both invalid: 4628 * ---> do nothing. Updated DM database. 
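 *
 * Summarized as a decision table (the same four cases as above,
 * restated here only as a reading aid):
 *
 *	orig_pkey	new_pkey	action
 *	---------	--------	------------------------------
 *	invalid		valid		record new_pkey, ibmf init
 *	valid		invalid		ibmf fini, record new_pkey
 *	valid		valid		ibmf fini + init, record new
 *	invalid		invalid		record new_pkey only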
4629 */ 4630 4631 for (pidx = 0; pidx < port->pa_npkeys; pidx++) { 4632 new_pkey = pinfop[pp].p_pkey_tbl[pidx]; 4633 orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey; 4634 4635 /* keys match - do nothing */ 4636 if (*orig_pkey == new_pkey) 4637 continue; 4638 4639 if (IBDM_INVALID_PKEY(*orig_pkey) && 4640 !IBDM_INVALID_PKEY(new_pkey)) { 4641 /* P_Key was added */ 4642 IBTF_DPRINTF_L5("ibdm", 4643 "\tibnex_update_pkey_tbls: new " 4644 "P_Key added = 0x%x", new_pkey); 4645 *orig_pkey = new_pkey; 4646 ibdm_port_attr_ibmf_init(port, 4647 new_pkey, pp); 4648 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 4649 IBDM_INVALID_PKEY(new_pkey)) { 4650 /* P_Key was removed */ 4651 IBTF_DPRINTF_L5("ibdm", 4652 "\tibnex_update_pkey_tbls: P_Key " 4653 "removed = 0x%x", *orig_pkey); 4654 *orig_pkey = new_pkey; 4655 (void) ibdm_port_attr_ibmf_fini(port, 4656 pidx); 4657 } else if (!IBDM_INVALID_PKEY(*orig_pkey) && 4658 !IBDM_INVALID_PKEY(new_pkey)) { 4659 /* P_Key were replaced */ 4660 IBTF_DPRINTF_L5("ibdm", 4661 "\tibnex_update_pkey_tbls: P_Key " 4662 "replaced 0x%x with 0x%x", 4663 *orig_pkey, new_pkey); 4664 (void) ibdm_port_attr_ibmf_fini(port, 4665 pidx); 4666 *orig_pkey = new_pkey; 4667 ibdm_port_attr_ibmf_init(port, 4668 new_pkey, pp); 4669 } else { 4670 /* 4671 * P_Keys are invalid 4672 * set anyway to reflect if 4673 * INVALID_FULL was changed to 4674 * INVALID_LIMITED or vice-versa. 4675 */ 4676 *orig_pkey = new_pkey; 4677 } /* end of else */ 4678 4679 } /* loop of p_key index */ 4680 4681 } /* loop of #ports of HCA */ 4682 4683 ibt_free_portinfo(pinfop, size); 4684 hca_list = hca_list->hl_next; 4685 4686 } /* loop for all HCAs in the system */ 4687 4688 mutex_exit(&ibdm.ibdm_hl_mutex); 4689 } 4690 4691 4692 /* 4693 * ibdm_send_ioc_profile() 4694 * Send IOC Controller Profile request. When the request is completed 4695 * IBMF calls ibdm_process_incoming_mad routine to inform about 4696 * the completion. 4697 */ 4698 static int 4699 ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no) 4700 { 4701 ibmf_msg_t *msg; 4702 ib_mad_hdr_t *hdr; 4703 ibdm_ioc_info_t *ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]); 4704 ibdm_timeout_cb_args_t *cb_args; 4705 4706 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: " 4707 "gid info 0x%p, ioc_no = %d", gid_info, ioc_no); 4708 4709 /* 4710 * Send command to get IOC profile. 4711 * Allocate a IBMF packet and initialize the packet. 
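 *
 * For reference, the DM AttributeModifier conventions used by this
 * function and by ibdm_retry_command() above are:
 *
 *	ClassPortInfo / IOUnitInfo / IOU DiagCode	0
 *	IOC Controller Profile / IOC DiagCode		ioc slot + 1
 *	Service Entries		((slot + 1) << 16) | (first << 8) | last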
4712 */ 4713 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, 4714 &msg) != IBMF_SUCCESS) { 4715 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: pkt alloc fail"); 4716 return (IBDM_FAILURE); 4717 } 4718 4719 ibdm_alloc_send_buffers(msg); 4720 4721 msg->im_local_addr.ia_local_lid = gid_info->gl_slid; 4722 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid; 4723 if (gid_info->gl_redirected == B_TRUE) { 4724 if (gid_info->gl_redirect_dlid != 0) { 4725 msg->im_local_addr.ia_remote_lid = 4726 gid_info->gl_redirect_dlid; 4727 } 4728 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP; 4729 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey; 4730 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey; 4731 } else { 4732 msg->im_local_addr.ia_remote_qno = 1; 4733 msg->im_local_addr.ia_p_key = gid_info->gl_p_key; 4734 msg->im_local_addr.ia_q_key = IB_GSI_QKEY; 4735 } 4736 4737 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg); 4738 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1; 4739 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT; 4740 hdr->ClassVersion = IB_DM_CLASS_VERSION_1; 4741 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET; 4742 hdr->Status = 0; 4743 hdr->TransactionID = h2b64(gid_info->gl_transactionID); 4744 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE); 4745 hdr->AttributeModifier = h2b32(ioc_no + 1); 4746 4747 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 4748 cb_args = &ioc_info->ioc_cb_args; 4749 cb_args->cb_gid_info = gid_info; 4750 cb_args->cb_retry_count = ibdm_dft_retry_cnt; 4751 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO; 4752 cb_args->cb_ioc_num = ioc_no; 4753 4754 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr, 4755 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout)); 4756 4757 IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:" 4758 "timeout %x", ioc_info->ioc_timeout_id); 4759 4760 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg, 4761 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) { 4762 IBTF_DPRINTF_L2("ibdm", 4763 "\tsend_ioc_profile: msg transport failed"); 4764 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args); 4765 } 4766 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS; 4767 return (IBDM_SUCCESS); 4768 } 4769 4770 4771 /* 4772 * ibdm_port_reachable 4773 * Sends a SA query to get the NODE record for port GUID 4774 * Returns IBDM_SUCCESS if the port GID is reachable 4775 */ 4776 static int 4777 ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid, 4778 ib_guid_t *node_guid) 4779 { 4780 sa_node_record_t req, *resp = NULL; 4781 ibmf_saa_access_args_t args; 4782 int ret; 4783 size_t length; 4784 4785 IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx", 4786 guid); 4787 4788 bzero(&req, sizeof (sa_node_record_t)); 4789 req.NodeInfo.PortGUID = guid; 4790 4791 args.sq_attr_id = SA_NODERECORD_ATTRID; 4792 args.sq_access_type = IBMF_SAA_RETRIEVE; 4793 args.sq_component_mask = SA_NODEINFO_COMPMASK_PORTGUID; 4794 args.sq_template = &req; 4795 args.sq_callback = NULL; 4796 args.sq_callback_arg = NULL; 4797 4798 ret = ibmf_sa_access(sa_hdl, &args, 0, &length, (void **) &resp); 4799 if (ret != IBMF_SUCCESS) { 4800 IBTF_DPRINTF_L2("ibdm", "\tport_reachable:" 4801 " SA Retrieve Failed: %d", ret); 4802 return (IBDM_FAILURE); 4803 } 4804 4805 if ((resp == NULL) || (length == 0)) { 4806 IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records"); 4807 return (IBDM_FAILURE); 4808 } 4809 4810 if (node_guid != NULL) 4811 *node_guid = resp->NodeInfo.NodeGUID; 4812 4813 kmem_free(resp, length); 4814 4815 return (IBDM_SUCCESS); 4816 } 
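/*
 * Illustrative sketch only, guarded out of the build by the
 * hypothetical IBDM_EXAMPLE_SKETCH define: shows how an IB nexus
 * style consumer is expected to use the IOC list interfaces above.
 * The function name and the flag choice are examples, not part of
 * the driver; the real entry points are ibdm_ibnex_get_ioc_list()
 * and ibdm_ibnex_free_ioc_list().
 */
#ifdef IBDM_EXAMPLE_SKETCH
static void
ibdm_example_walk_iocs(void)
{
	ibdm_ioc_info_t	*ioc_list, *ioc;

	/* Snapshot the currently known IOCs without forcing a sweep */
	ioc_list = ibdm_ibnex_get_ioc_list(IBDM_IBNEX_DONOT_PROBE);

	for (ioc = ioc_list; ioc != NULL; ioc = ioc->ioc_next)
		IBTF_DPRINTF_L4("ibdm", "\texample: IOC GUID %llX",
		    ioc->ioc_profile.ioc_guid);

	/* The list is a private copy and must be freed by the caller */
	if (ioc_list != NULL)
		ibdm_ibnex_free_ioc_list(ioc_list);
}
#endif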
4817 4818 /* 4819 * Update the gidlist for all affected IOCs when GID becomes 4820 * available/unavailable. 4821 * 4822 * Parameters : 4823 * gidinfo - Incoming / Outgoing GID. 4824 * add_flag - 1 for GID added, 0 for GID removed. 4825 * - (-1) : IOC gid list updated, ioc_list required. 4826 * 4827 * This function gets the GID for the node GUID corresponding to the 4828 * port GID. Gets the IOU info 4829 */ 4830 static ibdm_ioc_info_t * 4831 ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag) 4832 { 4833 ibdm_dp_gidinfo_t *node_gid = NULL; 4834 uint8_t niocs, ii; 4835 ibdm_ioc_info_t *ioc, *ioc_list = NULL, *tmp; 4836 4837 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist"); 4838 4839 switch (avail_flag) { 4840 case 1 : 4841 node_gid = ibdm_check_dest_nodeguid(gid_info); 4842 break; 4843 case 0 : 4844 node_gid = ibdm_handle_gid_rm(gid_info); 4845 break; 4846 case -1 : 4847 node_gid = gid_info; 4848 break; 4849 default : 4850 break; 4851 } 4852 4853 if (node_gid == NULL) { 4854 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: " 4855 "No node GID found, port gid 0x%p, avail_flag %d", 4856 gid_info, avail_flag); 4857 return (NULL); 4858 } 4859 4860 mutex_enter(&node_gid->gl_mutex); 4861 if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE && 4862 node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) || 4863 node_gid->gl_iou == NULL) { 4864 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist " 4865 "gl_state %x, gl_iou %p", node_gid->gl_state, 4866 node_gid->gl_iou); 4867 mutex_exit(&node_gid->gl_mutex); 4868 return (NULL); 4869 } 4870 4871 niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots; 4872 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x", 4873 niocs); 4874 for (ii = 0; ii < niocs; ii++) { 4875 ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii); 4876 /* 4877 * Skip IOCs for which probe is not complete or 4878 * reprobe is progress 4879 */ 4880 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) { 4881 tmp = ibdm_dup_ioc_info(ioc, node_gid); 4882 tmp->ioc_info_updated.ib_gid_prop_updated = 1; 4883 tmp->ioc_next = ioc_list; 4884 ioc_list = tmp; 4885 } 4886 } 4887 mutex_exit(&node_gid->gl_mutex); 4888 4889 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p", 4890 ioc_list); 4891 return (ioc_list); 4892 } 4893 4894 /* 4895 * ibdm_saa_event_cb : 4896 * Event handling which does *not* require ibdm_hl_mutex to be 4897 * held are executed in the same thread. This is to prevent 4898 * deadlocks with HCA port down notifications which hold the 4899 * ibdm_hl_mutex. 4900 * 4901 * GID_AVAILABLE event is handled here. A taskq is spawned to 4902 * handle GID_UNAVAILABLE. 4903 * 4904 * A new mutex ibdm_ibnex_mutex has been introduced to protect 4905 * ibnex_callback. This has been done to prevent any possible 4906 * deadlock (described above) while handling GID_AVAILABLE. 4907 * 4908 * IBMF calls the event callback for a HCA port. The SA handle 4909 * for this port would be valid, till the callback returns. 4910 * IBDM calling IBDM using the above SA handle should be valid. 4911 * 4912 * IBDM will additionally check (SA handle != NULL), before 4913 * calling IBMF. 
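 *
 * Dispatch summary (derived from the code below; listed here only
 * as a reading aid):
 *
 *	IBMF_SAA_EVENT_GID_AVAILABLE	handled inline; a brand new IOU
 *					is probed via the
 *					ibdm_saa_handle_new_gid() taskq
 *	IBMF_SAA_EVENT_GID_UNAVAILABLE	dispatched to the
 *					ibdm_saa_event_taskq() taskq
 *	all other events		ignored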
4914 */ 4915 /*ARGSUSED*/ 4916 static void 4917 ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle, 4918 ibmf_saa_subnet_event_t ibmf_saa_event, 4919 ibmf_saa_event_details_t *event_details, void *callback_arg) 4920 { 4921 ibdm_saa_event_arg_t *event_arg; 4922 ib_gid_t sgid, dgid; 4923 ibdm_port_attr_t *hca_port; 4924 ibdm_dp_gidinfo_t *gid_info, *node_gid_info = NULL; 4925 ib_guid_t nodeguid; 4926 4927 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 4928 4929 hca_port = (ibdm_port_attr_t *)callback_arg; 4930 4931 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n", 4932 ibmf_saa_handle, ibmf_saa_event, event_details, 4933 callback_arg); 4934 #ifdef DEBUG 4935 if (ibdm_ignore_saa_event) 4936 return; 4937 #endif 4938 4939 if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) { 4940 /* 4941 * Ensure no other probe / sweep fabric is in 4942 * progress. 4943 */ 4944 mutex_enter(&ibdm.ibdm_mutex); 4945 while (ibdm.ibdm_busy & IBDM_BUSY) 4946 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 4947 ibdm.ibdm_busy |= IBDM_BUSY; 4948 mutex_exit(&ibdm.ibdm_mutex); 4949 4950 /* 4951 * If we already know about this GID, return. 4952 * GID_AVAILABLE may be reported for multiple HCA 4953 * ports. 4954 */ 4955 if ((ibdm_check_dgid(event_details->ie_gid.gid_guid, 4956 event_details->ie_gid.gid_prefix)) != NULL) { 4957 mutex_enter(&ibdm.ibdm_mutex); 4958 ibdm.ibdm_busy &= ~IBDM_BUSY; 4959 cv_broadcast(&ibdm.ibdm_busy_cv); 4960 mutex_exit(&ibdm.ibdm_mutex); 4961 return; 4962 } 4963 4964 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 4965 "Insertion notified", 4966 event_details->ie_gid.gid_prefix, 4967 event_details->ie_gid.gid_guid); 4968 4969 /* This is a new gid, insert it to GID list */ 4970 sgid.gid_prefix = hca_port->pa_sn_prefix; 4971 sgid.gid_guid = hca_port->pa_port_guid; 4972 dgid.gid_prefix = event_details->ie_gid.gid_prefix; 4973 dgid.gid_guid = event_details->ie_gid.gid_guid; 4974 gid_info = ibdm_create_gid_info(hca_port, sgid, dgid); 4975 if (gid_info == NULL) { 4976 IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: " 4977 "create_gid_info returned NULL"); 4978 mutex_enter(&ibdm.ibdm_mutex); 4979 ibdm.ibdm_busy &= ~IBDM_BUSY; 4980 cv_broadcast(&ibdm.ibdm_busy_cv); 4981 mutex_exit(&ibdm.ibdm_mutex); 4982 return; 4983 } 4984 mutex_enter(&gid_info->gl_mutex); 4985 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED; 4986 mutex_exit(&gid_info->gl_mutex); 4987 4988 /* Get the node GUID */ 4989 if (ibdm_port_reachable(ibmf_saa_handle, dgid.gid_guid, 4990 &nodeguid) != IBDM_SUCCESS) { 4991 /* 4992 * Set the state to PROBE_NOT_DONE for the 4993 * next sweep to probe it 4994 */ 4995 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: " 4996 "Skipping GID : port GUID not found"); 4997 mutex_enter(&gid_info->gl_mutex); 4998 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 4999 mutex_exit(&gid_info->gl_mutex); 5000 mutex_enter(&ibdm.ibdm_mutex); 5001 ibdm.ibdm_busy &= ~IBDM_BUSY; 5002 cv_broadcast(&ibdm.ibdm_busy_cv); 5003 mutex_exit(&ibdm.ibdm_mutex); 5004 return; 5005 } 5006 5007 gid_info->gl_nodeguid = nodeguid; 5008 gid_info->gl_portguid = dgid.gid_guid; 5009 5010 /* 5011 * Get the gid info with the same node GUID. 
5012 */ 5013 mutex_enter(&ibdm.ibdm_mutex); 5014 node_gid_info = ibdm.ibdm_dp_gidlist_head; 5015 while (node_gid_info) { 5016 if (node_gid_info->gl_nodeguid == 5017 gid_info->gl_nodeguid && 5018 node_gid_info->gl_iou != NULL) { 5019 break; 5020 } 5021 node_gid_info = node_gid_info->gl_next; 5022 } 5023 mutex_exit(&ibdm.ibdm_mutex); 5024 5025 /* 5026 * Handling a new GID requires filling of gl_hca_list. 5027 * This require ibdm hca_list to be parsed and hence 5028 * holding the ibdm_hl_mutex. Spawning a new thread to 5029 * handle this. 5030 */ 5031 if (node_gid_info == NULL) { 5032 if (taskq_dispatch(system_taskq, 5033 ibdm_saa_handle_new_gid, (void *)gid_info, 5034 TQ_NOSLEEP) == NULL) { 5035 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5036 "new_gid taskq_dispatch failed"); 5037 return; 5038 } 5039 } 5040 5041 mutex_enter(&ibdm.ibdm_mutex); 5042 ibdm.ibdm_busy &= ~IBDM_BUSY; 5043 cv_broadcast(&ibdm.ibdm_busy_cv); 5044 mutex_exit(&ibdm.ibdm_mutex); 5045 return; 5046 } 5047 5048 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE) 5049 return; 5050 5051 event_arg = (ibdm_saa_event_arg_t *)kmem_alloc( 5052 sizeof (ibdm_saa_event_arg_t), KM_SLEEP); 5053 event_arg->ibmf_saa_handle = ibmf_saa_handle; 5054 event_arg->ibmf_saa_event = ibmf_saa_event; 5055 bcopy(event_details, &event_arg->event_details, 5056 sizeof (ibmf_saa_event_details_t)); 5057 event_arg->callback_arg = callback_arg; 5058 5059 if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq, 5060 (void *)event_arg, TQ_NOSLEEP) == NULL) { 5061 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: " 5062 "taskq_dispatch failed"); 5063 ibdm_free_saa_event_arg(event_arg); 5064 return; 5065 } 5066 } 5067 5068 /* 5069 * Handle a new GID discovered by GID_AVAILABLE saa event. 5070 */ 5071 void 5072 ibdm_saa_handle_new_gid(void *arg) 5073 { 5074 ibdm_dp_gidinfo_t *gid_info; 5075 ibdm_hca_list_t *hca_list = NULL; 5076 ibdm_port_attr_t *port = NULL; 5077 ibdm_ioc_info_t *ioc_list = NULL; 5078 5079 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg); 5080 5081 gid_info = (ibdm_dp_gidinfo_t *)arg; 5082 5083 /* 5084 * Ensure that no other sweep / probe has completed 5085 * probing this gid. 5086 */ 5087 mutex_enter(&gid_info->gl_mutex); 5088 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) { 5089 mutex_exit(&gid_info->gl_mutex); 5090 return; 5091 } 5092 mutex_exit(&gid_info->gl_mutex); 5093 5094 /* 5095 * Parse HCAs to fill gl_hca_list 5096 */ 5097 mutex_enter(&ibdm.ibdm_hl_mutex); 5098 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5099 ibdm_get_next_port(&hca_list, &port, 1)) { 5100 if (ibdm_port_reachable(port->pa_sa_hdl, 5101 gid_info->gl_portguid, NULL) == 5102 IBDM_SUCCESS) { 5103 ibdm_addto_glhcalist(gid_info, hca_list); 5104 } 5105 } 5106 mutex_exit(&ibdm.ibdm_hl_mutex); 5107 5108 /* 5109 * Ensure no other probe / sweep fabric is in 5110 * progress. 
5111 */ 5112 mutex_enter(&ibdm.ibdm_mutex); 5113 while (ibdm.ibdm_busy & IBDM_BUSY) 5114 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5115 ibdm.ibdm_busy |= IBDM_BUSY; 5116 mutex_exit(&ibdm.ibdm_mutex); 5117 5118 /* 5119 * New IOU probe it, to check if new IOCs 5120 */ 5121 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: " 5122 "new GID : probing"); 5123 mutex_enter(&ibdm.ibdm_mutex); 5124 ibdm.ibdm_ngid_probes_in_progress++; 5125 mutex_exit(&ibdm.ibdm_mutex); 5126 mutex_enter(&gid_info->gl_mutex); 5127 gid_info->gl_reprobe_flag = 0; 5128 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE; 5129 mutex_exit(&gid_info->gl_mutex); 5130 ibdm_probe_gid_thread((void *)gid_info); 5131 5132 mutex_enter(&ibdm.ibdm_mutex); 5133 ibdm_wait_probe_completion(); 5134 mutex_exit(&ibdm.ibdm_mutex); 5135 5136 if (gid_info->gl_iou == NULL) { 5137 mutex_enter(&ibdm.ibdm_mutex); 5138 ibdm.ibdm_busy &= ~IBDM_BUSY; 5139 cv_broadcast(&ibdm.ibdm_busy_cv); 5140 mutex_exit(&ibdm.ibdm_mutex); 5141 return; 5142 } 5143 5144 /* 5145 * Update GID list in all IOCs affected by this 5146 */ 5147 ioc_list = ibdm_update_ioc_gidlist(gid_info, 1); 5148 5149 /* 5150 * Pass on the IOCs with updated GIDs to IBnexus 5151 */ 5152 if (ioc_list) { 5153 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5154 if (ibdm.ibdm_ibnex_callback != NULL) { 5155 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 5156 IBDM_EVENT_IOC_PROP_UPDATE); 5157 } 5158 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5159 } 5160 5161 mutex_enter(&ibdm.ibdm_mutex); 5162 ibdm.ibdm_busy &= ~IBDM_BUSY; 5163 cv_broadcast(&ibdm.ibdm_busy_cv); 5164 mutex_exit(&ibdm.ibdm_mutex); 5165 } 5166 5167 /* 5168 * ibdm_saa_event_taskq : 5169 * GID_UNAVAILABLE Event handling requires ibdm_hl_mutex to be 5170 * held. The GID_UNAVAILABLE handling is done in a taskq to 5171 * prevent deadlocks with HCA port down notifications which hold 5172 * ibdm_hl_mutex. 
5173 */ 5174 void 5175 ibdm_saa_event_taskq(void *arg) 5176 { 5177 ibdm_saa_event_arg_t *event_arg; 5178 ibmf_saa_handle_t ibmf_saa_handle; 5179 ibmf_saa_subnet_event_t ibmf_saa_event; 5180 ibmf_saa_event_details_t *event_details; 5181 void *callback_arg; 5182 5183 ibdm_dp_gidinfo_t *gid_info; 5184 ibdm_port_attr_t *hca_port, *port = NULL; 5185 ibdm_hca_list_t *hca_list = NULL; 5186 int sa_handle_valid = 0; 5187 ibdm_ioc_info_t *ioc_list = NULL; 5188 5189 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg)); 5190 5191 event_arg = (ibdm_saa_event_arg_t *)arg; 5192 ibmf_saa_handle = event_arg->ibmf_saa_handle; 5193 ibmf_saa_event = event_arg->ibmf_saa_event; 5194 event_details = &event_arg->event_details; 5195 callback_arg = event_arg->callback_arg; 5196 5197 ASSERT(callback_arg != NULL); 5198 ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE); 5199 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)", 5200 ibmf_saa_handle, ibmf_saa_event, event_details, 5201 callback_arg); 5202 5203 hca_port = (ibdm_port_attr_t *)callback_arg; 5204 5205 /* Check if the port_attr is still valid */ 5206 mutex_enter(&ibdm.ibdm_hl_mutex); 5207 for (ibdm_get_next_port(&hca_list, &port, 0); port; 5208 ibdm_get_next_port(&hca_list, &port, 0)) { 5209 if (port == hca_port && port->pa_port_guid == 5210 hca_port->pa_port_guid) { 5211 if (ibmf_saa_handle == hca_port->pa_sa_hdl) 5212 sa_handle_valid = 1; 5213 break; 5214 } 5215 } 5216 mutex_exit(&ibdm.ibdm_hl_mutex); 5217 if (sa_handle_valid == 0) { 5218 ibdm_free_saa_event_arg(event_arg); 5219 return; 5220 } 5221 5222 if (hca_port && (hca_port->pa_sa_hdl == NULL || 5223 ibmf_saa_handle != hca_port->pa_sa_hdl)) { 5224 ibdm_free_saa_event_arg(event_arg); 5225 return; 5226 } 5227 hca_list = NULL; 5228 port = NULL; 5229 5230 /* 5231 * Check if the GID is visible to other HCA ports. 5232 * Return if so. 5233 */ 5234 mutex_enter(&ibdm.ibdm_hl_mutex); 5235 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5236 ibdm_get_next_port(&hca_list, &port, 1)) { 5237 if (ibdm_port_reachable(port->pa_sa_hdl, 5238 event_details->ie_gid.gid_guid, NULL) == 5239 IBDM_SUCCESS) { 5240 mutex_exit(&ibdm.ibdm_hl_mutex); 5241 ibdm_free_saa_event_arg(event_arg); 5242 return; 5243 } 5244 } 5245 mutex_exit(&ibdm.ibdm_hl_mutex); 5246 5247 /* 5248 * Ensure no other probe / sweep fabric is in 5249 * progress. 5250 */ 5251 mutex_enter(&ibdm.ibdm_mutex); 5252 while (ibdm.ibdm_busy & IBDM_BUSY) 5253 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex); 5254 ibdm.ibdm_busy |= IBDM_BUSY; 5255 mutex_exit(&ibdm.ibdm_mutex); 5256 5257 /* 5258 * If this GID is no longer in GID list, return 5259 * GID_UNAVAILABLE may be reported for multiple HCA 5260 * ports. 
5261 */ 5262 mutex_enter(&ibdm.ibdm_mutex); 5263 gid_info = ibdm.ibdm_dp_gidlist_head; 5264 while (gid_info) { 5265 if (gid_info->gl_portguid == 5266 event_details->ie_gid.gid_guid) { 5267 break; 5268 } 5269 gid_info = gid_info->gl_next; 5270 } 5271 mutex_exit(&ibdm.ibdm_mutex); 5272 if (gid_info == NULL) { 5273 mutex_enter(&ibdm.ibdm_mutex); 5274 ibdm.ibdm_busy &= ~IBDM_BUSY; 5275 cv_broadcast(&ibdm.ibdm_busy_cv); 5276 mutex_exit(&ibdm.ibdm_mutex); 5277 ibdm_free_saa_event_arg(event_arg); 5278 return; 5279 } 5280 5281 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) " 5282 "Unavailable notification", 5283 event_details->ie_gid.gid_prefix, 5284 event_details->ie_gid.gid_guid); 5285 5286 /* 5287 * Update GID list in all IOCs affected by this 5288 */ 5289 if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED || 5290 gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) 5291 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 5292 5293 /* 5294 * Remove GID from the global GID list 5295 * Handle the case where all port GIDs for an 5296 * IOU have been hot-removed. Check both gid_info 5297 * & ioc_info for checking ngids. 5298 */ 5299 mutex_enter(&ibdm.ibdm_mutex); 5300 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 5301 mutex_enter(&gid_info->gl_mutex); 5302 (void) ibdm_free_iou_info(gid_info); 5303 mutex_exit(&gid_info->gl_mutex); 5304 } 5305 if (gid_info->gl_prev != NULL) 5306 gid_info->gl_prev->gl_next = gid_info->gl_next; 5307 if (gid_info->gl_next != NULL) 5308 gid_info->gl_next->gl_prev = gid_info->gl_prev; 5309 5310 if (gid_info == ibdm.ibdm_dp_gidlist_head) 5311 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 5312 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 5313 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 5314 ibdm.ibdm_ngids--; 5315 5316 ibdm.ibdm_busy &= ~IBDM_BUSY; 5317 cv_broadcast(&ibdm.ibdm_busy_cv); 5318 mutex_exit(&ibdm.ibdm_mutex); 5319 5320 mutex_destroy(&gid_info->gl_mutex); 5321 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 5322 5323 /* 5324 * Pass on the IOCs with updated GIDs to IBnexus 5325 */ 5326 if (ioc_list) { 5327 IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE " 5328 "IOC_PROP_UPDATE for %p\n", ioc_list); 5329 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5330 if (ibdm.ibdm_ibnex_callback != NULL) { 5331 (*ibdm.ibdm_ibnex_callback)((void *) 5332 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 5333 } 5334 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5335 } 5336 5337 ibdm_free_saa_event_arg(event_arg); 5338 } 5339 5340 5341 static int 5342 ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev) 5343 { 5344 ibdm_gid_t *scan_new, *scan_prev; 5345 int cmp_failed = 0; 5346 5347 ASSERT(new != NULL); 5348 ASSERT(prev != NULL); 5349 5350 /* 5351 * Search for each new gid anywhere in the prev GID list. 5352 * Note that the gid list could have been re-ordered. 5353 */ 5354 for (scan_new = new; scan_new; scan_new = scan_new->gid_next) { 5355 for (scan_prev = prev, cmp_failed = 1; scan_prev; 5356 scan_prev = scan_prev->gid_next) { 5357 if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi && 5358 scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) { 5359 cmp_failed = 0; 5360 break; 5361 } 5362 } 5363 5364 if (cmp_failed) 5365 return (1); 5366 } 5367 return (0); 5368 } 5369 5370 /* 5371 * This is always called in a single thread 5372 * This function updates the gid_list and serv_list of IOC 5373 * The current gid_list is in ioc_info_t(contains only port 5374 * guids for which probe is done) & gidinfo_t(other port gids) 5375 * The gids in both locations are used for comparision. 
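 *
 * Example of the comparison semantics (illustrative GIDs A and B):
 * a previous list {A, B} and a current list {B, A} compare equal,
 * since ibdm_cmp_gid_list() ignores ordering; a previous list {A}
 * against a current list {A, B} differs in count and therefore sets
 * ib_gid_prop_updated.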
5376 */ 5377 static void 5378 ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo) 5379 { 5380 ibdm_gid_t *cur_gid_list; 5381 uint_t cur_nportgids; 5382 5383 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex)); 5384 5385 ioc->ioc_info_updated.ib_prop_updated = 0; 5386 5387 5388 /* Current GID list in gid_info only */ 5389 cur_gid_list = gidinfo->gl_gid; 5390 cur_nportgids = gidinfo->gl_ngids; 5391 5392 /* 5393 * Service entry names and IDs are not compared currently. 5394 * This may require change. 5395 */ 5396 if (ioc->ioc_prev_serv_cnt != ioc->ioc_profile.ioc_service_entries) 5397 ioc->ioc_info_updated.ib_srv_prop_updated = 1; 5398 5399 if (ioc->ioc_prev_nportgids != cur_nportgids || 5400 ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) { 5401 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 5402 } else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) { 5403 ioc->ioc_info_updated.ib_gid_prop_updated = 1; 5404 } 5405 5406 /* Zero out previous entries */ 5407 ibdm_free_gid_list(ioc->ioc_prev_gid_list); 5408 if (ioc->ioc_prev_serv) 5409 kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt * 5410 sizeof (ibdm_srvents_info_t)); 5411 ioc->ioc_prev_serv_cnt = 0; 5412 ioc->ioc_prev_nportgids = 0; 5413 ioc->ioc_prev_serv = NULL; 5414 ioc->ioc_prev_gid_list = NULL; 5415 } 5416 5417 /* 5418 * Handle GID removal. This returns gid_info of an GID for the same 5419 * node GUID, if found. For an GID with IOU information, the same 5420 * gid_info is returned if no gid_info with same node_guid is found. 5421 */ 5422 static ibdm_dp_gidinfo_t * 5423 ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid) 5424 { 5425 ibdm_dp_gidinfo_t *gid_list; 5426 5427 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid); 5428 5429 if (rm_gid->gl_iou == NULL) { 5430 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou"); 5431 /* 5432 * Search for a GID with same node_guid and 5433 * gl_iou != NULL 5434 */ 5435 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 5436 gid_list = gid_list->gl_next) { 5437 if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid 5438 == rm_gid->gl_nodeguid)) 5439 break; 5440 } 5441 5442 if (gid_list) 5443 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 5444 5445 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 5446 return (gid_list); 5447 } else { 5448 /* 5449 * Search for a GID with same node_guid and 5450 * gl_iou == NULL 5451 */ 5452 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou"); 5453 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list; 5454 gid_list = gid_list->gl_next) { 5455 if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid 5456 == rm_gid->gl_nodeguid)) 5457 break; 5458 } 5459 5460 if (gid_list) { 5461 /* 5462 * Copy the following fields from rm_gid : 5463 * 1. gl_state 5464 * 2. gl_iou 5465 * 3. gl_gid & gl_ngids 5466 * 5467 * Note : Function is synchronized by 5468 * ibdm_busy flag. 5469 * 5470 * Note : Redirect info is initialized if 5471 * any MADs for the GID fail 5472 */ 5473 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm " 5474 "copying info to GID with gl_iou != NULl"); 5475 gid_list->gl_state = rm_gid->gl_state; 5476 gid_list->gl_iou = rm_gid->gl_iou; 5477 gid_list->gl_gid = rm_gid->gl_gid; 5478 gid_list->gl_ngids = rm_gid->gl_ngids; 5479 5480 /* Remove the GID from gl_gid list */ 5481 ibdm_rmfrom_glgid_list(gid_list, rm_gid); 5482 } else { 5483 /* 5484 * Handle a case where all GIDs to the IOU have 5485 * been removed. 
5486 */ 5487 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID " 5488 "to IOU"); 5489 5490 ibdm_rmfrom_glgid_list(rm_gid, rm_gid); 5491 return (rm_gid); 5492 } 5493 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list); 5494 return (gid_list); 5495 } 5496 } 5497 5498 static void 5499 ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info, 5500 ibdm_dp_gidinfo_t *rm_gid) 5501 { 5502 ibdm_gid_t *tmp, *prev; 5503 5504 IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)", 5505 gid_info, rm_gid); 5506 5507 for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) { 5508 if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi && 5509 tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) { 5510 if (prev == NULL) 5511 gid_info->gl_gid = tmp->gid_next; 5512 else 5513 prev->gid_next = tmp->gid_next; 5514 5515 kmem_free(tmp, sizeof (ibdm_gid_t)); 5516 gid_info->gl_ngids--; 5517 break; 5518 } else { 5519 prev = tmp; 5520 tmp = tmp->gid_next; 5521 } 5522 } 5523 } 5524 5525 static void 5526 ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest) 5527 { 5528 ibdm_gid_t *head = NULL, *new, *tail; 5529 5530 /* First copy the destination */ 5531 for (; dest; dest = dest->gid_next) { 5532 new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 5533 new->gid_dgid_hi = dest->gid_dgid_hi; 5534 new->gid_dgid_lo = dest->gid_dgid_lo; 5535 new->gid_next = head; 5536 head = new; 5537 } 5538 5539 /* Insert this to the source */ 5540 if (*src_ptr == NULL) 5541 *src_ptr = head; 5542 else { 5543 for (tail = *src_ptr; tail->gid_next != NULL; 5544 tail = tail->gid_next) 5545 ; 5546 5547 tail->gid_next = head; 5548 } 5549 } 5550 5551 static void 5552 ibdm_free_gid_list(ibdm_gid_t *head) 5553 { 5554 ibdm_gid_t *delete; 5555 5556 for (delete = head; delete; ) { 5557 head = delete->gid_next; 5558 kmem_free(delete, sizeof (ibdm_gid_t)); 5559 delete = head; 5560 } 5561 } 5562 5563 /* 5564 * This function rescans the DM capable GIDs (gl_state is 5565 * GID_PROBE_COMPLETE or IBDM_GID_PROBING_SKIPPED.This 5566 * basically checks if the DM capable GID is reachable. If 5567 * not this is handled the same way as GID_UNAVAILABLE, 5568 * except that notifications are not send to IBnexus. 5569 * 5570 * This function also initializes the ioc_prev_list for 5571 * a particular IOC (when called from probe_ioc, with 5572 * ioc_guidp != NULL) or all IOCs for the gid (called from 5573 * sweep_fabric, ioc_guidp == NULL). 5574 */ 5575 static void 5576 ibdm_rescan_gidlist(ib_guid_t *ioc_guidp) 5577 { 5578 ibdm_dp_gidinfo_t *gid_info, *tmp; 5579 int ii, niocs, found; 5580 ibdm_hca_list_t *hca_list = NULL; 5581 ibdm_port_attr_t *port = NULL; 5582 ibdm_ioc_info_t *ioc_list; 5583 5584 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 5585 found = 0; 5586 if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED && 5587 gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) { 5588 gid_info = gid_info->gl_next; 5589 continue; 5590 } 5591 5592 /* 5593 * Check if the GID is visible to any HCA ports. 5594 * Return if so. 
5595 */ 5596 mutex_enter(&ibdm.ibdm_hl_mutex); 5597 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5598 ibdm_get_next_port(&hca_list, &port, 1)) { 5599 if (ibdm_port_reachable(port->pa_sa_hdl, 5600 gid_info->gl_dgid_lo, NULL) == IBDM_SUCCESS) { 5601 found = 1; 5602 break; 5603 } 5604 } 5605 mutex_exit(&ibdm.ibdm_hl_mutex); 5606 5607 if (found) { 5608 if (gid_info->gl_iou == NULL) { 5609 gid_info = gid_info->gl_next; 5610 continue; 5611 } 5612 5613 /* Intialize the ioc_prev_gid_list */ 5614 niocs = 5615 gid_info->gl_iou->iou_info.iou_num_ctrl_slots; 5616 for (ii = 0; ii < niocs; ii++) { 5617 ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii); 5618 5619 if (ioc_guidp == NULL || (*ioc_guidp == 5620 ioc_list->ioc_profile.ioc_guid)) { 5621 /* Add info of GIDs in gid_info also */ 5622 ibdm_addto_gidlist( 5623 &ioc_list->ioc_prev_gid_list, 5624 gid_info->gl_gid); 5625 ioc_list->ioc_prev_nportgids = 5626 gid_info->gl_ngids; 5627 } 5628 } 5629 gid_info = gid_info->gl_next; 5630 continue; 5631 } 5632 5633 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 5634 "deleted port GUID %llx", 5635 gid_info->gl_dgid_lo); 5636 5637 /* 5638 * Update GID list in all IOCs affected by this 5639 */ 5640 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0); 5641 5642 /* 5643 * Remove GID from the global GID list 5644 * Handle the case where all port GIDs for an 5645 * IOU have been hot-removed. 5646 */ 5647 mutex_enter(&ibdm.ibdm_mutex); 5648 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) { 5649 mutex_enter(&gid_info->gl_mutex); 5650 (void) ibdm_free_iou_info(gid_info); 5651 mutex_exit(&gid_info->gl_mutex); 5652 } 5653 tmp = gid_info->gl_next; 5654 if (gid_info->gl_prev != NULL) 5655 gid_info->gl_prev->gl_next = gid_info->gl_next; 5656 if (gid_info->gl_next != NULL) 5657 gid_info->gl_next->gl_prev = gid_info->gl_prev; 5658 5659 if (gid_info == ibdm.ibdm_dp_gidlist_head) 5660 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next; 5661 if (gid_info == ibdm.ibdm_dp_gidlist_tail) 5662 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev; 5663 ibdm.ibdm_ngids--; 5664 5665 mutex_destroy(&gid_info->gl_mutex); 5666 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t)); 5667 gid_info = tmp; 5668 5669 mutex_exit(&ibdm.ibdm_mutex); 5670 5671 /* 5672 * Pass on the IOCs with updated GIDs to IBnexus 5673 */ 5674 if (ioc_list) { 5675 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist " 5676 "IOC_PROP_UPDATE for %p\n", ioc_list); 5677 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5678 if (ibdm.ibdm_ibnex_callback != NULL) { 5679 (*ibdm.ibdm_ibnex_callback)((void *) 5680 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 5681 } 5682 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5683 } 5684 } 5685 } 5686 5687 /* 5688 * This function notifies IBnex of IOCs on this GID. 5689 * Notification is for GIDs with gl_reprobe_flag set. 5690 * The flag is set when IOC probe / fabric sweep 5691 * probes a GID starting from CLASS port info. 5692 * 5693 * IBnexus will have information of a reconnected IOC 5694 * if it had probed it before. If this is a new IOC, 5695 * IBnexus ignores the notification. 5696 * 5697 * This function should be called with no locks held. 
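 *
 * A minimal sketch of a consumer of this notification, assuming a
 * hypothetical callback "my_ibnex_callback" registered with IBDM
 * (the real consumer is the IB nexus driver and the exact callback
 * typedef lives in the ibdm headers; the names below are
 * illustrative only):
 *
 *	static void
 *	my_ibnex_callback(void *arg, int event)
 *	{
 *		ibdm_ioc_info_t	*ioc = (ibdm_ioc_info_t *)arg;
 *
 *		if (event != IBDM_EVENT_IOC_PROP_UPDATE)
 *			return;
 *		if (ioc->ioc_info_updated.ib_gid_prop_updated)
 *			cmn_err(CE_CONT, "IOC port GID list changed\n");
 *		if (ioc->ioc_info_updated.ib_srv_prop_updated)
 *			cmn_err(CE_CONT, "IOC service entries changed\n");
 *	}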
5698 */ 5699 static void 5700 ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info) 5701 { 5702 ibdm_ioc_info_t *ioc_list; 5703 5704 if (gid_info->gl_reprobe_flag == 0 || 5705 gid_info->gl_iou == NULL) 5706 return; 5707 5708 ioc_list = ibdm_update_ioc_gidlist(gid_info, -1); 5709 5710 /* 5711 * Pass on the IOCs with updated GIDs to IBnexus 5712 */ 5713 if (ioc_list) { 5714 mutex_enter(&ibdm.ibdm_ibnex_mutex); 5715 if (ibdm.ibdm_ibnex_callback != NULL) { 5716 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list, 5717 IBDM_EVENT_IOC_PROP_UPDATE); 5718 } 5719 mutex_exit(&ibdm.ibdm_ibnex_mutex); 5720 } 5721 } 5722 5723 5724 static void 5725 ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg) 5726 { 5727 if (arg != NULL) 5728 kmem_free(arg, sizeof (ibdm_saa_event_arg_t)); 5729 } 5730 5731 /* 5732 * This function parses the list of HCAs and HCA ports 5733 * to return the port_attr of the next HCA port. A port 5734 * connected to IB fabric (port_state active) is returned, 5735 * if connected_flag is set. 5736 */ 5737 static void 5738 ibdm_get_next_port(ibdm_hca_list_t **inp_hcap, 5739 ibdm_port_attr_t **inp_portp, int connect_flag) 5740 { 5741 int ii; 5742 ibdm_port_attr_t *port, *next_port = NULL; 5743 ibdm_port_attr_t *inp_port; 5744 ibdm_hca_list_t *hca_list; 5745 int found = 0; 5746 5747 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 5748 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)", 5749 inp_hcap, inp_portp, connect_flag); 5750 5751 hca_list = *inp_hcap; 5752 inp_port = *inp_portp; 5753 5754 if (hca_list == NULL) 5755 hca_list = ibdm.ibdm_hca_list_head; 5756 5757 for (; hca_list; hca_list = hca_list->hl_next) { 5758 for (ii = 0; ii < hca_list->hl_nports; ii++) { 5759 port = &hca_list->hl_port_attr[ii]; 5760 5761 /* 5762 * inp_port != NULL; 5763 * Skip till we find the matching port 5764 */ 5765 if (inp_port && !found) { 5766 if (inp_port == port) 5767 found = 1; 5768 continue; 5769 } 5770 5771 if (!connect_flag) { 5772 next_port = port; 5773 break; 5774 } 5775 5776 if (port->pa_sa_hdl == NULL) 5777 ibdm_initialize_port(port); 5778 if (port->pa_sa_hdl == NULL) 5779 (void) ibdm_fini_port(port); 5780 else if (next_port == NULL && 5781 port->pa_sa_hdl != NULL && 5782 port->pa_state == IBT_PORT_ACTIVE) { 5783 next_port = port; 5784 break; 5785 } 5786 } 5787 5788 if (next_port) 5789 break; 5790 } 5791 5792 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : " 5793 "returns hca_list %p port %p", hca_list, next_port); 5794 *inp_hcap = hca_list; 5795 *inp_portp = next_port; 5796 } 5797 5798 static void 5799 ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid) 5800 { 5801 ibdm_gid_t *tmp; 5802 5803 tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP); 5804 tmp->gid_dgid_hi = addgid->gl_dgid_hi; 5805 tmp->gid_dgid_lo = addgid->gl_dgid_lo; 5806 5807 mutex_enter(&nodegid->gl_mutex); 5808 tmp->gid_next = nodegid->gl_gid; 5809 nodegid->gl_gid = tmp; 5810 nodegid->gl_ngids++; 5811 mutex_exit(&nodegid->gl_mutex); 5812 } 5813 5814 static void 5815 ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info, 5816 ibdm_hca_list_t *hca) 5817 { 5818 ibdm_hca_list_t *head, *prev = NULL, *temp; 5819 5820 IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) " 5821 ": gl_hca_list %p", gid_info, hca, gid_info->gl_hca_list); 5822 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 5823 mutex_enter(&gid_info->gl_mutex); 5824 head = gid_info->gl_hca_list; 5825 if (head == NULL) { 5826 head = ibdm_dup_hca_attr(hca); 5827 head->hl_next = NULL; 5828 gid_info->gl_hca_list = head; 5829 mutex_exit(&gid_info->gl_mutex); 5830 
IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 5831 "gid %p, gl_hca_list %p", gid_info, 5832 gid_info->gl_hca_list); 5833 return; 5834 } 5835 5836 /* Check if already in the list */ 5837 while (head) { 5838 if (head->hl_hca_guid == hca->hl_hca_guid) { 5839 mutex_exit(&gid_info->gl_mutex); 5840 IBTF_DPRINTF_L4(ibdm_string, 5841 "\taddto_glhcalist : gid %x hca %x dup", 5842 gid_info, hca); 5843 return; 5844 } 5845 prev = head; 5846 head = head->hl_next; 5847 } 5848 5849 /* Add this HCA to gl_hca_list */ 5850 temp = ibdm_dup_hca_attr(hca); 5851 temp->hl_next = NULL; 5852 prev->hl_next = temp; 5853 5854 mutex_exit(&gid_info->gl_mutex); 5855 5856 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: " 5857 "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list); 5858 } 5859 5860 static void 5861 ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info) 5862 { 5863 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex)); 5864 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 5865 5866 mutex_enter(&gid_info->gl_mutex); 5867 if (gid_info->gl_hca_list) 5868 ibdm_ibnex_free_hca_list(gid_info->gl_hca_list); 5869 gid_info->gl_hca_list = NULL; 5870 mutex_exit(&gid_info->gl_mutex); 5871 } 5872 5873 5874 static void 5875 ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl) 5876 { 5877 IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)", 5878 port_sa_hdl); 5879 5880 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex)); 5881 ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex)); 5882 5883 /* Check : Not busy in another probe / sweep */ 5884 mutex_enter(&ibdm.ibdm_mutex); 5885 if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) { 5886 ibdm_dp_gidinfo_t *gid_info; 5887 5888 ibdm.ibdm_busy |= IBDM_BUSY; 5889 mutex_exit(&ibdm.ibdm_mutex); 5890 5891 /* 5892 * Check if any GID is using the SA & IBMF handle 5893 * of HCA port going down. Reset ibdm_dp_gidinfo_t 5894 * using another HCA port which can reach the GID. 5895 * This is for DM capable GIDs only, no need to do 5896 * this for others 5897 * 5898 * Delete the GID if no alternate HCA port to reach 5899 * it is found. 
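 *
 * The work below is serialized against fabric probes and sweeps by
 * the IBDM busy flag; a rough sketch of that protocol (as used
 * throughout this file) is:
 *
 *	mutex_enter(&ibdm.ibdm_mutex);
 *	if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) {
 *		ibdm.ibdm_busy |= IBDM_BUSY;
 *		mutex_exit(&ibdm.ibdm_mutex);
 *		(reset or delete the affected gidinfo entries)
 *		mutex_enter(&ibdm.ibdm_mutex);
 *		ibdm.ibdm_busy &= ~IBDM_BUSY;
 *		cv_signal(&ibdm.ibdm_busy_cv);
 *	}
 *	mutex_exit(&ibdm.ibdm_mutex);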
5900 */ 5901 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) { 5902 ibdm_dp_gidinfo_t *tmp; 5903 5904 IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr " 5905 "checking gidinfo %p", gid_info); 5906 5907 if (gid_info->gl_nodeguid != 0 && 5908 gid_info->gl_sa_hdl == port_sa_hdl) { 5909 IBTF_DPRINTF_L3(ibdm_string, 5910 "\tevent_hdlr: down HCA port hdl " 5911 "matches gid %p", gid_info); 5912 5913 ibdm_reset_gidinfo(gid_info); 5914 if (gid_info->gl_disconnected) { 5915 IBTF_DPRINTF_L3(ibdm_string, 5916 "\tevent_hdlr: deleting" 5917 " gid %p", gid_info); 5918 tmp = gid_info; 5919 gid_info = gid_info->gl_next; 5920 ibdm_delete_gidinfo(tmp); 5921 } else 5922 gid_info = gid_info->gl_next; 5923 } else 5924 gid_info = gid_info->gl_next; 5925 } 5926 5927 mutex_enter(&ibdm.ibdm_mutex); 5928 ibdm.ibdm_busy &= ~IBDM_BUSY; 5929 cv_signal(&ibdm.ibdm_busy_cv); 5930 } 5931 mutex_exit(&ibdm.ibdm_mutex); 5932 } 5933 5934 static void 5935 ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 5936 { 5937 ibdm_hca_list_t *hca_list = NULL; 5938 ibdm_port_attr_t *port = NULL; 5939 int gid_reinited = 0; 5940 sa_node_record_t *nr, *tmp; 5941 sa_portinfo_record_t *pi; 5942 size_t nr_len = 0, pi_len = 0; 5943 size_t path_len; 5944 ib_gid_t sgid, dgid; 5945 int ret, ii, nrecords; 5946 sa_path_record_t *path; 5947 uint8_t npaths = 1; 5948 5949 IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo); 5950 5951 /* 5952 * Walk the active ports of the locally known HCAs and find one 5953 * from which this GID is reachable 5954 */ 5955 mutex_enter(&ibdm.ibdm_hl_mutex); 5956 for (ibdm_get_next_port(&hca_list, &port, 1); port; 5957 ibdm_get_next_port(&hca_list, &port, 1)) { 5958 5959 5960 /* 5961 * Get the path and re-populate the gidinfo. 5962 * Getting the path is the same as in probe_ioc. 5963 * Init the gid info as in ibdm_create_gid_info(). 5964 */ 5965 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, 5966 gidinfo->gl_nodeguid); 5967 if (nr == NULL) { 5968 IBTF_DPRINTF_L4(ibdm_string, 5969 "\treset_gidinfo : no records"); 5970 continue; 5971 } 5972 5973 nrecords = (nr_len / sizeof (sa_node_record_t)); 5974 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) { 5975 if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid) 5976 break; 5977 } 5978 5979 if (ii == nrecords) { 5980 IBTF_DPRINTF_L4(ibdm_string, 5981 "\treset_gidinfo : no record for portguid"); 5982 kmem_free(nr, nr_len); 5983 continue; 5984 } 5985 5986 pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID); 5987 if (pi == NULL) { 5988 IBTF_DPRINTF_L4(ibdm_string, 5989 "\treset_gidinfo : no portinfo"); 5990 kmem_free(nr, nr_len); 5991 continue; 5992 } 5993 5994 sgid.gid_prefix = port->pa_sn_prefix; 5995 sgid.gid_guid = port->pa_port_guid; 5996 dgid.gid_prefix = pi->PortInfo.GidPrefix; 5997 dgid.gid_guid = tmp->NodeInfo.PortGUID; 5998 5999 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid, 6000 IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path); 6001 6002 if ((ret != IBMF_SUCCESS) || (path == NULL)) { 6003 IBTF_DPRINTF_L4(ibdm_string, 6004 "\treset_gidinfo : no paths"); 6005 kmem_free(pi, pi_len); 6006 kmem_free(nr, nr_len); 6007 continue; 6008 } 6009 6010 gidinfo->gl_dgid_hi = path->DGID.gid_prefix; 6011 gidinfo->gl_dgid_lo = path->DGID.gid_guid; 6012 gidinfo->gl_sgid_hi = path->SGID.gid_prefix; 6013 gidinfo->gl_sgid_lo = path->SGID.gid_guid; 6014 gidinfo->gl_p_key = path->P_Key; 6015 gidinfo->gl_sa_hdl = port->pa_sa_hdl; 6016 gidinfo->gl_ibmf_hdl = port->pa_ibmf_hdl; 6017 gidinfo->gl_slid = path->SLID; 6018 gidinfo->gl_dlid = path->DLID; 6019 6020 /* Reset redirect 
info, next MAD will set if redirected */ 6021 gidinfo->gl_redirected = 0; 6022 6023 gid_reinited = 1; 6024 6025 kmem_free(path, path_len); 6026 kmem_free(pi, pi_len); 6027 kmem_free(nr, nr_len); 6028 break; 6029 } 6030 mutex_exit(&ibdm.ibdm_hl_mutex); 6031 6032 if (!gid_reinited) 6033 gidinfo->gl_disconnected = 1; 6034 } 6035 6036 static void 6037 ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo) 6038 { 6039 ibdm_ioc_info_t *ioc_list; 6040 6041 ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0); 6042 6043 /* 6044 * Remove GID from the global GID list 6045 * Handle the case where all port GIDs for an 6046 * IOU have been hot-removed. 6047 */ 6048 mutex_enter(&ibdm.ibdm_mutex); 6049 if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) { 6050 mutex_enter(&gidinfo->gl_mutex); 6051 (void) ibdm_free_iou_info(gidinfo); 6052 mutex_exit(&gidinfo->gl_mutex); 6053 } 6054 if (gidinfo->gl_prev != NULL) 6055 gidinfo->gl_prev->gl_next = gidinfo->gl_next; 6056 if (gidinfo->gl_next != NULL) 6057 gidinfo->gl_next->gl_prev = gidinfo->gl_prev; 6058 6059 if (gidinfo == ibdm.ibdm_dp_gidlist_head) 6060 ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next; 6061 if (gidinfo == ibdm.ibdm_dp_gidlist_tail) 6062 ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev; 6063 ibdm.ibdm_ngids--; 6064 mutex_exit(&ibdm.ibdm_mutex); 6065 6066 mutex_destroy(&gidinfo->gl_mutex); 6067 kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t)); 6068 6069 /* 6070 * Pass on the IOCs with updated GIDs to IBnexus 6071 */ 6072 if (ioc_list) { 6073 IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo " 6074 "IOC_PROP_UPDATE for %p\n", ioc_list); 6075 mutex_enter(&ibdm.ibdm_ibnex_mutex); 6076 if (ibdm.ibdm_ibnex_callback != NULL) { 6077 (*ibdm.ibdm_ibnex_callback)((void *) 6078 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE); 6079 } 6080 mutex_exit(&ibdm.ibdm_ibnex_mutex); 6081 } 6082 } 6083 6084 /* For debugging purpose only */ 6085 #ifdef DEBUG 6086 void 6087 ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag) 6088 { 6089 ib_mad_hdr_t *mad_hdr; 6090 6091 IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info"); 6092 IBTF_DPRINTF_L4("ibdm", "\t\t ------------------"); 6093 6094 IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x" 6095 " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid, 6096 ibmf_msg->im_local_addr.ia_remote_lid, 6097 ibmf_msg->im_local_addr.ia_remote_qno); 6098 IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x", 6099 ibmf_msg->im_local_addr.ia_p_key, ibmf_msg->im_local_addr.ia_q_key); 6100 6101 if (flag) 6102 mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg); 6103 else 6104 mad_hdr = IBDM_IN_IBMFMSG_MADHDR(ibmf_msg); 6105 6106 IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info"); 6107 IBTF_DPRINTF_L4("ibdm", "\t\t ---------------"); 6108 6109 IBTF_DPRINTF_L4("ibdm", "\tBase version : 0x%x" 6110 "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass); 6111 IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x" 6112 "\tR Method : 0x%x", 6113 mad_hdr->ClassVersion, mad_hdr->R_Method); 6114 IBTF_DPRINTF_L4("ibdm", "\tMAD Status : 0x%x" 6115 "\tTransaction ID : 0x%llx", 6116 mad_hdr->Status, mad_hdr->TransactionID); 6117 IBTF_DPRINTF_L4("ibdm", "\t Attribute ID : 0x%x" 6118 "\tAttribute Modified : 0x%x", 6119 mad_hdr->AttributeID, mad_hdr->AttributeModifier); 6120 } 6121 6122 void 6123 ibdm_dump_path_info(sa_path_record_t *path) 6124 { 6125 IBTF_DPRINTF_L4("ibdm", "\t\t Path information"); 6126 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------"); 6127 6128 IBTF_DPRINTF_L4("ibdm", "\t DGID hi : %llx\tDGID lo : %llx", 6129 path->DGID.gid_prefix, 
path->DGID.gid_guid); 6130 IBTF_DPRINTF_L4("ibdm", "\t SGID hi : %llx\tSGID lo : %llx", 6131 path->SGID.gid_prefix, path->SGID.gid_guid); 6132 IBTF_DPRINTF_L4("ibdm", "\t SLID : %x\tDLID : %x", 6133 path->SLID, path->DLID); 6134 IBTF_DPRINTF_L4("ibdm", "\t P Key : %x", path->P_Key); 6135 } 6136 6137 6138 void 6139 ibdm_dump_classportinfo(ibdm_mad_classportinfo_t *classportinfo) 6140 { 6141 IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO"); 6142 IBTF_DPRINTF_L4("ibdm", "\t\t --------------"); 6143 6144 IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x", 6145 ((b2h32(classportinfo->RespTimeValue)) & 0x1F)); 6146 6147 IBTF_DPRINTF_L4("ibdm", "\t Redirected QP : 0x%x", 6148 (b2h32(classportinfo->RedirectQP))); 6149 IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY : 0x%x", 6150 b2h16(classportinfo->RedirectP_Key)); 6151 IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY : 0x%x", 6152 b2h16(classportinfo->RedirectQ_Key)); 6153 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi : 0x%llx", 6154 b2h64(classportinfo->RedirectGID_hi)); 6155 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo : 0x%llx", 6156 b2h64(classportinfo->RedirectGID_lo)); 6157 } 6158 6159 6160 void 6161 ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info) 6162 { 6163 IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo"); 6164 IBTF_DPRINTF_L4("ibdm", "\t\t ------------"); 6165 6166 IBTF_DPRINTF_L4("ibdm", "\tChange ID : 0x%x", 6167 b2h16(iou_info->iou_changeid)); 6168 IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots : %d", 6169 iou_info->iou_num_ctrl_slots); 6170 IBTF_DPRINTF_L4("ibdm", "\tIOU flag : 0x%x", 6171 iou_info->iou_flag); 6172 IBTF_DPRINTF_L4("ibdm", "\tControl list byte 0 : 0x%x", 6173 iou_info->iou_ctrl_list[0]); 6174 IBTF_DPRINTF_L4("ibdm", "\tControl list byte 1 : 0x%x", 6175 iou_info->iou_ctrl_list[1]); 6176 IBTF_DPRINTF_L4("ibdm", "\tControl list byte 2 : 0x%x", 6177 iou_info->iou_ctrl_list[2]); 6178 } 6179 6180 6181 void 6182 ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc) 6183 { 6184 IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile"); 6185 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------------"); 6186 6187 IBTF_DPRINTF_L4("ibdm", "\tIOC Guid : %llx", ioc->ioc_guid); 6188 IBTF_DPRINTF_L4("ibdm", "\tVendorID : 0x%x", ioc->ioc_vendorid); 6189 IBTF_DPRINTF_L4("ibdm", "\tDevice Id : 0x%x", ioc->ioc_deviceid); 6190 IBTF_DPRINTF_L4("ibdm", "\tDevice Ver : 0x%x", ioc->ioc_device_ver); 6191 IBTF_DPRINTF_L4("ibdm", "\tSubsys ID : 0x%x", ioc->ioc_subsys_id); 6192 IBTF_DPRINTF_L4("ibdm", "\tIO class : 0x%x", ioc->ioc_io_class); 6193 IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass); 6194 IBTF_DPRINTF_L4("ibdm", "\tProtocol : 0x%x", ioc->ioc_protocol); 6195 IBTF_DPRINTF_L4("ibdm", "\tProtocolV : 0x%x", ioc->ioc_protocol_ver); 6196 IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth : %d", ioc->ioc_send_msg_qdepth); 6197 IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d", 6198 ioc->ioc_rdma_read_qdepth); 6199 IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz : %d", ioc->ioc_send_msg_sz); 6200 IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz); 6201 IBTF_DPRINTF_L4("ibdm", "\topcap mask : 0x%x", 6202 ioc->ioc_ctrl_opcap_mask); 6203 IBTF_DPRINTF_L4("ibdm", "\tsrventries : %x", ioc->ioc_service_entries); 6204 } 6205 6206 6207 void 6208 ibdm_dump_service_entries(ib_dm_srv_t *srv_ents) 6209 { 6210 IBTF_DPRINTF_L4("ibdm", 6211 "\thandle_srventry_mad: service id : %llx", srv_ents->srv_id); 6212 6213 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad: " 6214 "Service Name : %s", srv_ents->srv_name); 6215 } 6216 6217 int ibdm_allow_sweep_fabric_timestamp = 1; 6218 
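/*
 * Usage sketch for the DEBUG-only sweep timestamp helper below. The
 * pairing shown is inferred from the helper itself (a call with flag 0
 * records the start time, a later call with flag 1 prints the elapsed
 * time); the actual call sites are expected to be in the fabric sweep
 * path of this file:
 *
 *	ibdm_dump_sweep_fabric_timestamp(0);
 *	(sweep the fabric)
 *	ibdm_dump_sweep_fabric_timestamp(1);
 *
 * Setting the tunable ibdm_allow_sweep_fabric_timestamp to 0 suppresses
 * the elapsed-time message while still clearing the saved timestamp.
 */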
6219 void 6220 ibdm_dump_sweep_fabric_timestamp(int flag) 6221 { 6222 static hrtime_t x; 6223 if (flag) { 6224 if (ibdm_allow_sweep_fabric_timestamp) { 6225 IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete " 6226 "sweep %lld ms", ((gethrtime() - x)/ 1000000)); 6227 } 6228 x = 0; 6229 } else 6230 x = gethrtime(); 6231 } 6232 #endif 6233