1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24 /*
25 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
26 */
27
28 /*
29 * ibdm.c
30 *
31 * This file contains the InfiniBand Device Manager (IBDM) support functions.
32 * The IB nexus driver is the only client of the IBDM module.
33 *
34 * IBDM registers with IBTF for HCA arrival/removal notification.
35 * IBDM registers with SA access to send DM MADs to discover the IOCs behind
36 * the IOUs.
37 *
38 * The IB nexus driver registers with IBDM to find information about the
39 * HCAs and IOCs (behind the IOUs) present on the IB fabric.
40 */
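
/*
 * Illustrative call-flow summary (a rough sketch for orientation; the
 * per-function comments below are authoritative):
 *
 *	_init() -> ibdm_init() -> ibt_attach(); for each HCA already
 *	present, ibdm_handle_hca_attach() -> ibdm_initialize_port() for
 *	every ACTIVE port (IBMF and SA access registration).
 *
 *	Fabric discovery is driven by ibdm_sweep_fabric(), which gathers
 *	the reachable GIDs via ibdm_get_reachable_ports() and dispatches
 *	ibdm_probe_gid_thread() per GID on the system taskq to fetch
 *	ClassPortInfo, IOUnitInfo, IOC profiles and service entries.
 */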
41
42 #include <sys/sysmacros.h>
43 #include <sys/systm.h>
44 #include <sys/taskq.h>
45 #include <sys/ib/mgt/ibdm/ibdm_impl.h>
46 #include <sys/ib/mgt/ibmf/ibmf_impl.h>
47 #include <sys/ib/ibtl/impl/ibtl_ibnex.h>
48 #include <sys/modctl.h>
49
50 /* Function Prototype declarations */
51 static int ibdm_free_iou_info(ibdm_dp_gidinfo_t *, ibdm_iou_info_t **);
52 static int ibdm_fini(void);
53 static int ibdm_init(void);
54 static int ibdm_get_reachable_ports(ibdm_port_attr_t *,
55 ibdm_hca_list_t *);
56 static ibdm_dp_gidinfo_t *ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t);
57 static ibdm_dp_gidinfo_t *ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *);
58 static boolean_t ibdm_is_cisco(ib_guid_t);
59 static boolean_t ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *);
60 static void ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *);
61 static int ibdm_set_classportinfo(ibdm_dp_gidinfo_t *);
62 static int ibdm_send_classportinfo(ibdm_dp_gidinfo_t *);
63 static int ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *);
64 static int ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *);
65 static int ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t,
66 ib_guid_t *, ib_guid_t *);
67 static int ibdm_retry_command(ibdm_timeout_cb_args_t *);
68 static int ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int);
69 static int ibdm_verify_mad_status(ib_mad_hdr_t *);
70 static int ibdm_handle_redirection(ibmf_msg_t *,
71 ibdm_dp_gidinfo_t *, int *);
72 static void ibdm_wait_probe_completion(void);
73 static void ibdm_sweep_fabric(int);
74 static void ibdm_probe_gid_thread(void *);
75 static void ibdm_wakeup_probe_gid_cv(void);
76 static void ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int);
77 static int ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int);
78 static void ibdm_update_port_attr(ibdm_port_attr_t *);
79 static void ibdm_handle_hca_attach(ib_guid_t);
80 static void ibdm_handle_srventry_mad(ibmf_msg_t *,
81 ibdm_dp_gidinfo_t *, int *);
82 static void ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *);
83 static void ibdm_recv_incoming_mad(void *);
84 static void ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *, void *);
85 static void ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *);
86 static void ibdm_pkt_timeout_hdlr(void *arg);
87 static void ibdm_initialize_port(ibdm_port_attr_t *);
88 static void ibdm_update_port_pkeys(ibdm_port_attr_t *port);
89 static void ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
90 static void ibdm_probe_gid(ibdm_dp_gidinfo_t *);
91 static void ibdm_alloc_send_buffers(ibmf_msg_t *);
92 static void ibdm_free_send_buffers(ibmf_msg_t *);
93 static void ibdm_handle_hca_detach(ib_guid_t);
94 static void ibdm_handle_port_change_event(ibt_async_event_t *);
95 static int ibdm_fini_port(ibdm_port_attr_t *);
96 static int ibdm_uninit_hca(ibdm_hca_list_t *);
97 static void ibdm_handle_setclassportinfo(ibmf_handle_t, ibmf_msg_t *,
98 ibdm_dp_gidinfo_t *, int *);
99 static void ibdm_handle_iounitinfo(ibmf_handle_t,
100 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
101 static void ibdm_handle_ioc_profile(ibmf_handle_t,
102 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
103 static void ibdm_event_hdlr(void *, ibt_hca_hdl_t,
104 ibt_async_code_t, ibt_async_event_t *);
105 static void ibdm_handle_classportinfo(ibmf_handle_t,
106 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
107 static void ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *,
108 ibdm_dp_gidinfo_t *);
109
110 static ibdm_hca_list_t *ibdm_dup_hca_attr(ibdm_hca_list_t *);
111 static ibdm_ioc_info_t *ibdm_dup_ioc_info(ibdm_ioc_info_t *,
112 ibdm_dp_gidinfo_t *gid_list);
113 static void ibdm_probe_ioc(ib_guid_t, ib_guid_t, int);
114 static ibdm_ioc_info_t *ibdm_is_ioc_present(ib_guid_t,
115 ibdm_dp_gidinfo_t *, int *);
116 static ibdm_port_attr_t *ibdm_get_port_attr(ibt_async_event_t *,
117 ibdm_hca_list_t **);
118 static sa_node_record_t *ibdm_get_node_records(ibmf_saa_handle_t,
119 size_t *, ib_guid_t);
120 static int ibdm_get_node_record_by_port(ibmf_saa_handle_t,
121 ib_guid_t, sa_node_record_t **, size_t *);
122 static sa_portinfo_record_t *ibdm_get_portinfo(ibmf_saa_handle_t, size_t *,
123 ib_lid_t);
124 static ibdm_dp_gidinfo_t *ibdm_create_gid_info(ibdm_port_attr_t *,
125 ib_gid_t, ib_gid_t);
126 static ibdm_dp_gidinfo_t *ibdm_find_gid(ib_guid_t, ib_guid_t);
127 static int ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t);
128 static ibdm_ioc_info_t *ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int);
129 static void ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t,
130 ibmf_saa_event_details_t *, void *);
131 static void ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *,
132 ibdm_dp_gidinfo_t *);
133 static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *);
134 static void ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *,
135 ibdm_dp_gidinfo_t *);
136 static void ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *);
137 static void ibdm_free_gid_list(ibdm_gid_t *);
138 static void ibdm_rescan_gidlist(ib_guid_t *ioc_guid);
139 static void ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *);
140 static void ibdm_saa_event_taskq(void *);
141 static void ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *);
142 static void ibdm_get_next_port(ibdm_hca_list_t **,
143 ibdm_port_attr_t **, int);
144 static void ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *,
145 ibdm_dp_gidinfo_t *);
146 static void ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *,
147 ibdm_hca_list_t *);
148 static void ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *);
149 static void ibdm_saa_handle_new_gid(void *);
150 static void ibdm_reset_all_dgids(ibmf_saa_handle_t);
151 static void ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *);
152 static void ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *);
153 static void ibdm_fill_srv_attr_mod(ib_mad_hdr_t *, ibdm_timeout_cb_args_t *);
154 static void ibdm_bump_transactionID(ibdm_dp_gidinfo_t *);
155 static ibdm_ioc_info_t *ibdm_handle_prev_iou();
156 static int ibdm_serv_cmp(ibdm_srvents_info_t *, ibdm_srvents_info_t *,
157 int);
158 static ibdm_ioc_info_t *ibdm_get_ioc_info_with_gid(ib_guid_t,
159 ibdm_dp_gidinfo_t **);
160
161 int ibdm_dft_timeout = IBDM_DFT_TIMEOUT;
162 int ibdm_dft_retry_cnt = IBDM_DFT_NRETRIES;
163 #ifdef DEBUG
164 int ibdm_ignore_saa_event = 0;
165 #endif
166 int ibdm_enumerate_iocs = 0;
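
/*
 * These globals act as patchable tunables.  On illumos they can typically
 * be overridden at boot time from /etc/system; an illustrative (unverified
 * for any particular build) example:
 *
 *	set ibdm:ibdm_enumerate_iocs = 1
 *
 * ibdm_enumerate_iocs is 0 by default, so IOC enumeration is disabled
 * unless explicitly enabled.
 */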
167
168 /* Modload support */
169 static struct modlmisc ibdm_modlmisc = {
170 &mod_miscops,
171 "InfiniBand Device Manager"
172 };
173
174 struct modlinkage ibdm_modlinkage = {
175 MODREV_1,
176 (void *)&ibdm_modlmisc,
177 NULL
178 };
179
180 static ibt_clnt_modinfo_t ibdm_ibt_modinfo = {
181 IBTI_V_CURR,
182 IBT_DM,
183 ibdm_event_hdlr,
184 NULL,
185 "ibdm"
186 };
187
188 /* Global variables */
189 ibdm_t ibdm;
190 int ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING;
191 char *ibdm_string = "ibdm";
192
193 _NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv",
194 ibdm.ibdm_dp_gidlist_head))
195
196 /*
197 * _init
198 * Loadable module init, called before any other entry point in this module.
199 * Initialize mutex
200 * Register with IBTF
201 */
202 int
203 _init(void)
204 {
205 int err;
206
207 IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm);
208
209 if ((err = ibdm_init()) != IBDM_SUCCESS) {
210 IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err);
211 (void) ibdm_fini();
212 return (DDI_FAILURE);
213 }
214
215 if ((err = mod_install(&ibdm_modlinkage)) != 0) {
216 IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err);
217 (void) ibdm_fini();
218 }
219 return (err);
220 }
221
222
223 int
224 _fini(void)
225 {
226 int err;
227
228 if ((err = ibdm_fini()) != IBDM_SUCCESS) {
229 IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err);
230 (void) ibdm_init();
231 return (EBUSY);
232 }
233
234 if ((err = mod_remove(&ibdm_modlinkage)) != 0) {
235 IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err);
236 (void) ibdm_init();
237 }
238 return (err);
239 }
240
241
242 int
243 _info(struct modinfo *modinfop)
244 {
245 return (mod_info(&ibdm_modlinkage, modinfop));
246 }
247
248
249 /*
250 * ibdm_init():
251 * Register with IBTF
252 * Allocate memory for the HCAs
253 * Allocate minor-nodes for the HCAs
254 */
255 static int
256 ibdm_init(void)
257 {
258 int i, hca_count;
259 ib_guid_t *hca_guids;
260 ibt_status_t status;
261
262 IBTF_DPRINTF_L4("ibdm", "\tibdm_init:");
263 if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) {
264 mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL);
265 mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL);
266 mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL);
267 cv_init(&ibdm.ibdm_port_settle_cv, NULL, CV_DRIVER, NULL);
268 mutex_enter(&ibdm.ibdm_mutex);
269 ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED;
270 }
271
272 if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) {
273 if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL,
274 (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) {
275 IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach "
276 "failed %x", status);
277 mutex_exit(&ibdm.ibdm_mutex);
278 return (IBDM_FAILURE);
279 }
280
281 ibdm.ibdm_state |= IBDM_IBT_ATTACHED;
282 mutex_exit(&ibdm.ibdm_mutex);
283 }
284
285
286 if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) {
287 hca_count = ibt_get_hca_list(&hca_guids);
288 IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count);
289 for (i = 0; i < hca_count; i++)
290 (void) ibdm_handle_hca_attach(hca_guids[i]);
291 if (hca_count)
292 ibt_free_hca_list(hca_guids, hca_count);
293
294 mutex_enter(&ibdm.ibdm_mutex);
295 ibdm.ibdm_state |= IBDM_HCA_ATTACHED;
296 mutex_exit(&ibdm.ibdm_mutex);
297 }
298
299 if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) {
300 cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL);
301 cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL);
302 mutex_enter(&ibdm.ibdm_mutex);
303 ibdm.ibdm_state |= IBDM_CVS_ALLOCED;
304 mutex_exit(&ibdm.ibdm_mutex);
305 }
306 return (IBDM_SUCCESS);
307 }
308
309
310 static int
311 ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info, ibdm_iou_info_t **ioup)
312 {
313 int ii, k, niocs;
314 size_t size;
315 ibdm_gid_t *delete, *head;
316 timeout_id_t timeout_id;
317 ibdm_ioc_info_t *ioc;
318 ibdm_iou_info_t *gl_iou = *ioup;
319
320 ASSERT(mutex_owned(&gid_info->gl_mutex));
321 if (gl_iou == NULL) {
322 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU");
323 return (0);
324 }
325
326 niocs = gl_iou->iou_info.iou_num_ctrl_slots;
327 IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d",
328 gid_info, niocs);
329
330 for (ii = 0; ii < niocs; ii++) {
331 ioc = (ibdm_ioc_info_t *)&gl_iou->iou_ioc_info[ii];
332
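		/*
		 * Note on locking: gl_mutex is dropped around the
		 * untimeout() calls below.  Holding a lock across
		 * untimeout() can deadlock if the timeout handler
		 * acquires the same lock, so the drop/re-acquire
		 * ordering here follows the usual safe pattern.
		 */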
333 /* handle the case where an ioc_timeout_id is scheduled */
334 if (ioc->ioc_timeout_id) {
335 timeout_id = ioc->ioc_timeout_id;
336 ioc->ioc_timeout_id = 0;
337 mutex_exit(&gid_info->gl_mutex);
338 IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
339 "ioc_timeout_id = 0x%x", timeout_id);
340 if (untimeout(timeout_id) == -1) {
341 IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
342 "untimeout ioc_timeout_id failed");
343 mutex_enter(&gid_info->gl_mutex);
344 return (-1);
345 }
346 mutex_enter(&gid_info->gl_mutex);
347 }
348
349 /* handle the case where an ioc_dc_timeout_id is scheduled */
350 if (ioc->ioc_dc_timeout_id) {
351 timeout_id = ioc->ioc_dc_timeout_id;
352 ioc->ioc_dc_timeout_id = 0;
353 mutex_exit(&gid_info->gl_mutex);
354 IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
355 "ioc_dc_timeout_id = 0x%x", timeout_id);
356 if (untimeout(timeout_id) == -1) {
357 IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
358 "untimeout ioc_dc_timeout_id failed");
359 mutex_enter(&gid_info->gl_mutex);
360 return (-1);
361 }
362 mutex_enter(&gid_info->gl_mutex);
363 }
364
365 /* handle the case where serv[k].se_timeout_id is scheduled */
366 for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) {
367 if (ioc->ioc_serv[k].se_timeout_id) {
368 timeout_id = ioc->ioc_serv[k].se_timeout_id;
369 ioc->ioc_serv[k].se_timeout_id = 0;
370 mutex_exit(&gid_info->gl_mutex);
371 IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
372 "ioc->ioc_serv[%d].se_timeout_id = 0x%x",
373 k, timeout_id);
374 if (untimeout(timeout_id) == -1) {
375 IBTF_DPRINTF_L2("ibdm", "free_iou_info:"
376 " untimeout se_timeout_id failed");
377 mutex_enter(&gid_info->gl_mutex);
378 return (-1);
379 }
380 mutex_enter(&gid_info->gl_mutex);
381 }
382 }
383
384 /* delete GID list in IOC */
385 head = ioc->ioc_gid_list;
386 while (head) {
387 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: "
388 "Deleting gid_list struct %p", head);
389 delete = head;
390 head = head->gid_next;
391 kmem_free(delete, sizeof (ibdm_gid_t));
392 }
393 ioc->ioc_gid_list = NULL;
394
395 /* delete ioc_serv */
396 size = ioc->ioc_profile.ioc_service_entries *
397 sizeof (ibdm_srvents_info_t);
398 if (ioc->ioc_serv && size) {
399 kmem_free(ioc->ioc_serv, size);
400 ioc->ioc_serv = NULL;
401 }
402 }
403 /*
404 * Clear the IBDM_CISCO_PROBE_DONE flag to get the IO Unit information
405 * via the switch during the probe process.
406 */
407 gid_info->gl_flag &= ~IBDM_CISCO_PROBE_DONE;
408
409 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC");
410 size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t);
411 kmem_free(gl_iou, size);
412 *ioup = NULL;
413 return (0);
414 }
415
416
417 /*
418 * ibdm_fini():
419 * Un-register with IBTF
420 * Deallocate memory for the GID info
421 */
422 static int
423 ibdm_fini()
424 {
425 int ii;
426 ibdm_hca_list_t *hca_list, *temp;
427 ibdm_dp_gidinfo_t *gid_info, *tmp;
428 ibdm_gid_t *head, *delete;
429
430 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini");
431
432 mutex_enter(&ibdm.ibdm_hl_mutex);
433 if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) {
434 if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) {
435 IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed");
436 mutex_exit(&ibdm.ibdm_hl_mutex);
437 return (IBDM_FAILURE);
438 }
439 ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED;
440 ibdm.ibdm_ibt_clnt_hdl = NULL;
441 }
442
443 hca_list = ibdm.ibdm_hca_list_head;
444 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count);
445 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
446 temp = hca_list;
447 hca_list = hca_list->hl_next;
448 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp);
449 if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) {
450 IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: "
451 "uninit_hca %p failed", temp);
452 mutex_exit(&ibdm.ibdm_hl_mutex);
453 return (IBDM_FAILURE);
454 }
455 }
456 mutex_exit(&ibdm.ibdm_hl_mutex);
457
458 mutex_enter(&ibdm.ibdm_mutex);
459 if (ibdm.ibdm_state & IBDM_HCA_ATTACHED)
460 ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED;
461
462 gid_info = ibdm.ibdm_dp_gidlist_head;
463 while (gid_info) {
464 mutex_enter(&gid_info->gl_mutex);
465 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
466 mutex_exit(&gid_info->gl_mutex);
467 ibdm_delete_glhca_list(gid_info);
468
469 tmp = gid_info;
470 gid_info = gid_info->gl_next;
471 mutex_destroy(&tmp->gl_mutex);
472 head = tmp->gl_gid;
473 while (head) {
474 IBTF_DPRINTF_L4("ibdm",
475 "\tibdm_fini: Deleting gid structs");
476 delete = head;
477 head = head->gid_next;
478 kmem_free(delete, sizeof (ibdm_gid_t));
479 }
480 kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t));
481 }
482 mutex_exit(&ibdm.ibdm_mutex);
483
484 if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) {
485 ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED;
486 mutex_destroy(&ibdm.ibdm_mutex);
487 mutex_destroy(&ibdm.ibdm_hl_mutex);
488 mutex_destroy(&ibdm.ibdm_ibnex_mutex);
489 cv_destroy(&ibdm.ibdm_port_settle_cv);
490 }
491 if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) {
492 ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED;
493 cv_destroy(&ibdm.ibdm_probe_cv);
494 cv_destroy(&ibdm.ibdm_busy_cv);
495 }
496 return (IBDM_SUCCESS);
497 }
498
499
500 /*
501 * ibdm_event_hdlr()
502 *
503 * IBDM registers this asynchronous event handler at the time of
504 * ibt_attach. IBDM supports the following async events; all other
505 * events are simply ignored.
506 * IBT_HCA_ATTACH_EVENT:
507 * Retrieves the information about all the ports that are
508 * present on this HCA, allocates the port attributes
509 * structure and calls IB nexus callback routine with
510 * the port attributes structure as an input argument.
511 * IBT_HCA_DETACH_EVENT:
512 * Retrieves the information about all the ports that are
513 * present on this HCA and calls IB nexus callback with
514 * port guid as an argument
515 * IBT_EVENT_PORT_UP:
516 * Register with IBMF and SA access
517 * Setup IBMF receive callback routine
518 * IBT_ERROR_PORT_DOWN:
519 * Unregister with IBMF and SA access
520 * Teardown IBMF receive callback routine
521 */
522 /*ARGSUSED*/
523 static void
524 ibdm_event_hdlr(void *clnt_hdl,
525 ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event)
526 {
527 ibdm_hca_list_t *hca_list;
528 ibdm_port_attr_t *port;
529 ibmf_saa_handle_t port_sa_hdl;
530
531 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code);
532
533 switch (code) {
534 case IBT_HCA_ATTACH_EVENT: /* New HCA registered with IBTF */
535 ibdm_handle_hca_attach(event->ev_hca_guid);
536 break;
537
538 case IBT_HCA_DETACH_EVENT: /* HCA unregistered with IBTF */
539 ibdm_handle_hca_detach(event->ev_hca_guid);
540 mutex_enter(&ibdm.ibdm_ibnex_mutex);
541 if (ibdm.ibdm_ibnex_callback != NULL) {
542 (*ibdm.ibdm_ibnex_callback)((void *)
543 &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED);
544 }
545 mutex_exit(&ibdm.ibdm_ibnex_mutex);
546 break;
547
548 case IBT_EVENT_PORT_UP:
549 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP");
550 mutex_enter(&ibdm.ibdm_hl_mutex);
551 port = ibdm_get_port_attr(event, &hca_list);
552 if (port == NULL) {
553 IBTF_DPRINTF_L2("ibdm",
554 "\tevent_hdlr: HCA not present");
555 mutex_exit(&ibdm.ibdm_hl_mutex);
556 break;
557 }
558 ibdm_initialize_port(port);
559 hca_list->hl_nports_active++;
560 cv_broadcast(&ibdm.ibdm_port_settle_cv);
561 mutex_exit(&ibdm.ibdm_hl_mutex);
562
563 /* Inform IB nexus driver */
564 mutex_enter(&ibdm.ibdm_ibnex_mutex);
565 if (ibdm.ibdm_ibnex_callback != NULL) {
566 (*ibdm.ibdm_ibnex_callback)((void *)
567 &event->ev_hca_guid, IBDM_EVENT_PORT_UP);
568 }
569 mutex_exit(&ibdm.ibdm_ibnex_mutex);
570 break;
571
572 case IBT_ERROR_PORT_DOWN:
573 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN");
574 mutex_enter(&ibdm.ibdm_hl_mutex);
575 port = ibdm_get_port_attr(event, &hca_list);
576 if (port == NULL) {
577 IBTF_DPRINTF_L2("ibdm",
578 "\tevent_hdlr: HCA not present");
579 mutex_exit(&ibdm.ibdm_hl_mutex);
580 break;
581 }
582 hca_list->hl_nports_active--;
583 port_sa_hdl = port->pa_sa_hdl;
584 (void) ibdm_fini_port(port);
585 port->pa_state = IBT_PORT_DOWN;
586 cv_broadcast(&ibdm.ibdm_port_settle_cv);
587 mutex_exit(&ibdm.ibdm_hl_mutex);
588 ibdm_reset_all_dgids(port_sa_hdl);
589 break;
590
591 case IBT_PORT_CHANGE_EVENT:
592 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_CHANGE");
593 if (event->ev_port_flags & IBT_PORT_CHANGE_PKEY)
594 ibdm_handle_port_change_event(event);
595 break;
596
597 default: /* Ignore all other events/errors */
598 break;
599 }
600 }
601
602 static void
603 ibdm_handle_port_change_event(ibt_async_event_t *event)
604 {
605 ibdm_port_attr_t *port;
606 ibdm_hca_list_t *hca_list;
607
608 IBTF_DPRINTF_L2("ibdm", "\tibdm_handle_port_change_event:"
609 " HCA guid %llx", event->ev_hca_guid);
610 mutex_enter(&ibdm.ibdm_hl_mutex);
611 port = ibdm_get_port_attr(event, &hca_list);
612 if (port == NULL) {
613 IBTF_DPRINTF_L2("ibdm", "\tevent_hdlr: HCA not present");
614 mutex_exit(&ibdm.ibdm_hl_mutex);
615 return;
616 }
617 ibdm_update_port_pkeys(port);
618 cv_broadcast(&ibdm.ibdm_port_settle_cv);
619 mutex_exit(&ibdm.ibdm_hl_mutex);
620
621 /* Inform IB nexus driver */
622 mutex_enter(&ibdm.ibdm_ibnex_mutex);
623 if (ibdm.ibdm_ibnex_callback != NULL) {
624 (*ibdm.ibdm_ibnex_callback)((void *)
625 &event->ev_hca_guid, IBDM_EVENT_PORT_PKEY_CHANGE);
626 }
627 mutex_exit(&ibdm.ibdm_ibnex_mutex);
628 }
629
630 /*
631 * ibdm_update_port_pkeys()
632 * Update the pkey table
633 * Update the port attributes
634 */
635 static void
636 ibdm_update_port_pkeys(ibdm_port_attr_t *port)
637 {
638 uint_t nports, size;
639 uint_t pkey_idx, opkey_idx;
640 uint16_t npkeys;
641 ibt_hca_portinfo_t *pinfop;
642 ib_pkey_t pkey;
643 ibdm_pkey_tbl_t *pkey_tbl;
644 ibdm_port_attr_t newport;
645
646 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_pkeys:");
647 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
648
649 /* Check whether the port is active */
650 if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
651 NULL) != IBT_SUCCESS)
652 return;
653
654 if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
655 &pinfop, &nports, &size) != IBT_SUCCESS) {
656 /* This should not occur */
657 port->pa_npkeys = 0;
658 port->pa_pkey_tbl = NULL;
659 return;
660 }
661
662 npkeys = pinfop->p_pkey_tbl_sz;
663 pkey_tbl = kmem_zalloc(npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);
664 newport.pa_pkey_tbl = pkey_tbl;
665 newport.pa_ibmf_hdl = port->pa_ibmf_hdl;
666
667 for (pkey_idx = 0; pkey_idx < npkeys; pkey_idx++) {
668 pkey = pkey_tbl[pkey_idx].pt_pkey =
669 pinfop->p_pkey_tbl[pkey_idx];
670 /*
671 * Is this pkey present in the current table ?
672 */
673 for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) {
674 if (pkey == port->pa_pkey_tbl[opkey_idx].pt_pkey) {
675 pkey_tbl[pkey_idx].pt_qp_hdl =
676 port->pa_pkey_tbl[opkey_idx].pt_qp_hdl;
677 port->pa_pkey_tbl[opkey_idx].pt_qp_hdl = NULL;
678 break;
679 }
680 }
681
682 if (opkey_idx == port->pa_npkeys) {
683 pkey = pkey_tbl[pkey_idx].pt_pkey;
684 if (IBDM_INVALID_PKEY(pkey)) {
685 pkey_tbl[pkey_idx].pt_qp_hdl = NULL;
686 continue;
687 }
688 ibdm_port_attr_ibmf_init(&newport, pkey, pkey_idx);
689 }
690 }
691
692 for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) {
693 if (port->pa_pkey_tbl[opkey_idx].pt_qp_hdl != NULL) {
694 if (ibdm_port_attr_ibmf_fini(port, opkey_idx) !=
695 IBDM_SUCCESS) {
696 IBTF_DPRINTF_L2("ibdm", "\tupdate_port_pkeys: "
697 "ibdm_port_attr_ibmf_fini failed for "
698 "port pkey 0x%x",
699 port->pa_pkey_tbl[opkey_idx].pt_pkey);
700 }
701 }
702 }
703
704 if (port->pa_pkey_tbl != NULL) {
705 kmem_free(port->pa_pkey_tbl,
706 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
707 }
708
709 port->pa_npkeys = npkeys;
710 port->pa_pkey_tbl = pkey_tbl;
711 port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;
712 port->pa_state = pinfop->p_linkstate;
713 ibt_free_portinfo(pinfop, size);
714 }
715
716 /*
717 * ibdm_initialize_port()
718 * Register with IBMF
719 * Register with SA access
720 * Register a receive callback routine with IBMF. IBMF invokes
721 * this routine whenever a MAD arrives at this port.
722 * Update the port attributes
723 */
724 static void
725 ibdm_initialize_port(ibdm_port_attr_t *port)
726 {
727 int ii;
728 uint_t nports, size;
729 uint_t pkey_idx;
730 ib_pkey_t pkey;
731 ibt_hca_portinfo_t *pinfop;
732 ibmf_register_info_t ibmf_reg;
733 ibmf_saa_subnet_event_args_t event_args;
734
735 IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:");
736 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
737
738 /* Check whether the port is active */
739 if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
740 NULL) != IBT_SUCCESS)
741 return;
742
743 if (port->pa_sa_hdl != NULL || port->pa_pkey_tbl != NULL)
744 return;
745
746 if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
747 &pinfop, &nports, &size) != IBT_SUCCESS) {
748 /* This should not occur */
749 port->pa_npkeys = 0;
750 port->pa_pkey_tbl = NULL;
751 return;
752 }
753 port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;
754
755 port->pa_state = pinfop->p_linkstate;
756 port->pa_npkeys = pinfop->p_pkey_tbl_sz;
757 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
758 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);
759
760 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++)
761 port->pa_pkey_tbl[pkey_idx].pt_pkey =
762 pinfop->p_pkey_tbl[pkey_idx];
763
764 ibt_free_portinfo(pinfop, size);
765
766 if (ibdm_enumerate_iocs) {
767 event_args.is_event_callback = ibdm_saa_event_cb;
768 event_args.is_event_callback_arg = port;
769 if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args,
770 IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) {
771 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
772 "sa access registration failed");
773 (void) ibdm_fini_port(port);
774 return;
775 }
776
777 ibmf_reg.ir_ci_guid = port->pa_hca_guid;
778 ibmf_reg.ir_port_num = port->pa_port_num;
779 ibmf_reg.ir_client_class = DEV_MGT_MANAGER;
780
781 if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL,
782 &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) {
783 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
784 "IBMF registration failed");
785 (void) ibdm_fini_port(port);
786 return;
787 }
788
789 if (ibmf_setup_async_cb(port->pa_ibmf_hdl,
790 IBMF_QP_HANDLE_DEFAULT,
791 ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) {
792 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
793 "IBMF setup recv cb failed");
794 (void) ibdm_fini_port(port);
795 return;
796 }
797 } else {
798 port->pa_sa_hdl = NULL;
799 port->pa_ibmf_hdl = NULL;
800 }
801
802 for (ii = 0; ii < port->pa_npkeys; ii++) {
803 pkey = port->pa_pkey_tbl[ii].pt_pkey;
804 if (IBDM_INVALID_PKEY(pkey)) {
805 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
806 continue;
807 }
808 ibdm_port_attr_ibmf_init(port, pkey, ii);
809 }
810 }
811
812
813 /*
814 * ibdm_port_attr_ibmf_init:
815 * With IBMF - Alloc QP Handle and Setup Async callback
816 */
817 static void
818 ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii)
819 {
820 int ret;
821
822 if (ibdm_enumerate_iocs == 0) {
823 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
824 return;
825 }
826
827 if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY,
828 IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) !=
829 IBMF_SUCCESS) {
830 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
831 "IBMF failed to alloc qp %d", ret);
832 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
833 return;
834 }
835
836 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p",
837 port->pa_ibmf_hdl);
838
839 if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl,
840 port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) !=
841 IBMF_SUCCESS) {
842 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
843 "IBMF setup recv cb failed %d", ret);
844 (void) ibmf_free_qp(port->pa_ibmf_hdl,
845 &port->pa_pkey_tbl[ii].pt_qp_hdl, 0);
846 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
847 }
848 }
849
850
851 /*
852 * ibdm_get_port_attr()
853 * Get port attributes from HCA guid and port number
854 * Return pointer to ibdm_port_attr_t on Success
855 * and NULL on failure
856 */
857 static ibdm_port_attr_t *
858 ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval)
859 {
860 ibdm_hca_list_t *hca_list;
861 ibdm_port_attr_t *port_attr;
862 int ii;
863
864 IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port);
865 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
866 hca_list = ibdm.ibdm_hca_list_head;
867 while (hca_list) {
868 if (hca_list->hl_hca_guid == event->ev_hca_guid) {
869 for (ii = 0; ii < hca_list->hl_nports; ii++) {
870 port_attr = &hca_list->hl_port_attr[ii];
871 if (port_attr->pa_port_num == event->ev_port) {
872 *retval = hca_list;
873 return (port_attr);
874 }
875 }
876 }
877 hca_list = hca_list->hl_next;
878 }
879 return (NULL);
880 }
881
882
883 /*
884 * ibdm_update_port_attr()
885 * Update the port attributes
886 */
887 static void
888 ibdm_update_port_attr(ibdm_port_attr_t *port)
889 {
890 uint_t nports, size;
891 uint_t pkey_idx;
892 ibt_hca_portinfo_t *portinfop;
893
894 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin");
895 if (ibt_query_hca_ports(port->pa_hca_hdl,
896 port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) {
897 /* This should not occur */
898 port->pa_npkeys = 0;
899 port->pa_pkey_tbl = NULL;
900 return;
901 }
902 port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix;
903
904 port->pa_state = portinfop->p_linkstate;
905
906 /*
907 * PKey information in portinfo valid only if port is
908 * ACTIVE. Bail out if not.
909 */
910 if (port->pa_state != IBT_PORT_ACTIVE) {
911 port->pa_npkeys = 0;
912 port->pa_pkey_tbl = NULL;
913 ibt_free_portinfo(portinfop, size);
914 return;
915 }
916
917 port->pa_npkeys = portinfop->p_pkey_tbl_sz;
918 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
919 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);
920
921 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) {
922 port->pa_pkey_tbl[pkey_idx].pt_pkey =
923 portinfop->p_pkey_tbl[pkey_idx];
924 }
925 ibt_free_portinfo(portinfop, size);
926 }
927
928
929 /*
930 * ibdm_handle_hca_attach()
931 */
932 static void
933 ibdm_handle_hca_attach(ib_guid_t hca_guid)
934 {
935 uint_t size;
936 uint_t ii, nports;
937 ibt_status_t status;
938 ibt_hca_hdl_t hca_hdl;
939 ibt_hca_attr_t *hca_attr;
940 ibdm_hca_list_t *hca_list, *temp;
941 ibdm_port_attr_t *port_attr;
942 ibt_hca_portinfo_t *portinfop;
943
944 IBTF_DPRINTF_L4("ibdm",
945 "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid);
946
947 /* open the HCA first */
948 if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid,
949 &hca_hdl)) != IBT_SUCCESS) {
950 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
951 "open_hca failed, status 0x%x", status);
952 return;
953 }
954
955 hca_attr = (ibt_hca_attr_t *)
956 kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP);
957 /* ibt_query_hca always returns IBT_SUCCESS */
958 (void) ibt_query_hca(hca_hdl, hca_attr);
959
960 IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x,"
961 " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id,
962 hca_attr->hca_version_id, hca_attr->hca_nports);
963
964 if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports,
965 &size)) != IBT_SUCCESS) {
966 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
967 "ibt_query_hca_ports failed, status 0x%x", status);
968 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
969 (void) ibt_close_hca(hca_hdl);
970 return;
971 }
972 hca_list = (ibdm_hca_list_t *)
973 kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP);
974 hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
975 (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP);
976 hca_list->hl_hca_guid = hca_attr->hca_node_guid;
977 hca_list->hl_nports = hca_attr->hca_nports;
978 hca_list->hl_attach_time = gethrtime();
979 hca_list->hl_hca_hdl = hca_hdl;
980
981 /*
982 * Init a dummy port attribute for the HCA node
983 * This is for Per-HCA Node. Initialize port_attr :
984 * hca_guid & port_guid -> hca_guid
985 * npkeys, pkey_tbl is NULL
986 * port_num, sn_prefix is 0
987 * vendorid, product_id, dev_version from HCA
988 * pa_state is IBT_PORT_ACTIVE
989 */
990 hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
991 sizeof (ibdm_port_attr_t), KM_SLEEP);
992 port_attr = hca_list->hl_hca_port_attr;
993 port_attr->pa_vendorid = hca_attr->hca_vendor_id;
994 port_attr->pa_productid = hca_attr->hca_device_id;
995 port_attr->pa_dev_version = hca_attr->hca_version_id;
996 port_attr->pa_hca_guid = hca_attr->hca_node_guid;
997 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
998 port_attr->pa_port_guid = hca_attr->hca_node_guid;
999 port_attr->pa_state = IBT_PORT_ACTIVE;
1000
1001
1002 for (ii = 0; ii < nports; ii++) {
1003 port_attr = &hca_list->hl_port_attr[ii];
1004 port_attr->pa_vendorid = hca_attr->hca_vendor_id;
1005 port_attr->pa_productid = hca_attr->hca_device_id;
1006 port_attr->pa_dev_version = hca_attr->hca_version_id;
1007 port_attr->pa_hca_guid = hca_attr->hca_node_guid;
1008 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
1009 port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid;
1010 port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix;
1011 port_attr->pa_port_num = portinfop[ii].p_port_num;
1012 port_attr->pa_state = portinfop[ii].p_linkstate;
1013
1014 /*
1015 * Register with IBMF, SA access when the port is in
1016 * ACTIVE state. Also register a callback routine
1017 * with IBMF to receive incoming DM MADs.
1018 * The IBDM event handler takes care of registration of
1019 * ports which are not active.
1020 */
1021 IBTF_DPRINTF_L4("ibdm",
1022 "\thandle_hca_attach: port guid %llx Port state 0x%x",
1023 port_attr->pa_port_guid, portinfop[ii].p_linkstate);
1024
1025 if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) {
1026 mutex_enter(&ibdm.ibdm_hl_mutex);
1027 hca_list->hl_nports_active++;
1028 ibdm_initialize_port(port_attr);
1029 cv_broadcast(&ibdm.ibdm_port_settle_cv);
1030 mutex_exit(&ibdm.ibdm_hl_mutex);
1031 }
1032 }
1033 mutex_enter(&ibdm.ibdm_hl_mutex);
1034 for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) {
1035 if (temp->hl_hca_guid == hca_guid) {
1036 IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX "
1037 "already seen by IBDM", hca_guid);
1038 mutex_exit(&ibdm.ibdm_hl_mutex);
1039 (void) ibdm_uninit_hca(hca_list);
1040 return;
1041 }
1042 }
1043 ibdm.ibdm_hca_count++;
1044 if (ibdm.ibdm_hca_list_head == NULL) {
1045 ibdm.ibdm_hca_list_head = hca_list;
1046 ibdm.ibdm_hca_list_tail = hca_list;
1047 } else {
1048 ibdm.ibdm_hca_list_tail->hl_next = hca_list;
1049 ibdm.ibdm_hca_list_tail = hca_list;
1050 }
1051 mutex_exit(&ibdm.ibdm_hl_mutex);
1052 mutex_enter(&ibdm.ibdm_ibnex_mutex);
1053 if (ibdm.ibdm_ibnex_callback != NULL) {
1054 (*ibdm.ibdm_ibnex_callback)((void *)
1055 &hca_guid, IBDM_EVENT_HCA_ADDED);
1056 }
1057 mutex_exit(&ibdm.ibdm_ibnex_mutex);
1058
1059 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
1060 ibt_free_portinfo(portinfop, size);
1061 }
1062
1063
1064 /*
1065 * ibdm_handle_hca_detach()
1066 */
1067 static void
1068 ibdm_handle_hca_detach(ib_guid_t hca_guid)
1069 {
1070 ibdm_hca_list_t *head, *prev = NULL;
1071 size_t len;
1072 ibdm_dp_gidinfo_t *gidinfo;
1073 ibdm_port_attr_t *port_attr;
1074 int i;
1075
1076 IBTF_DPRINTF_L4("ibdm",
1077 "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid);
1078
1079 /* Make sure no probes are running */
1080 mutex_enter(&ibdm.ibdm_mutex);
1081 while (ibdm.ibdm_busy & IBDM_BUSY)
1082 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
1083 ibdm.ibdm_busy |= IBDM_BUSY;
1084 mutex_exit(&ibdm.ibdm_mutex);
1085
1086 mutex_enter(&ibdm.ibdm_hl_mutex);
1087 head = ibdm.ibdm_hca_list_head;
1088 while (head) {
1089 if (head->hl_hca_guid == hca_guid) {
1090 if (prev == NULL)
1091 ibdm.ibdm_hca_list_head = head->hl_next;
1092 else
1093 prev->hl_next = head->hl_next;
1094 if (ibdm.ibdm_hca_list_tail == head)
1095 ibdm.ibdm_hca_list_tail = prev;
1096 ibdm.ibdm_hca_count--;
1097 break;
1098 }
1099 prev = head;
1100 head = head->hl_next;
1101 }
1102 mutex_exit(&ibdm.ibdm_hl_mutex);
1103 if (ibdm_uninit_hca(head) != IBDM_SUCCESS)
1104 (void) ibdm_handle_hca_attach(hca_guid);
1105
1106 #ifdef DEBUG
1107 if (ibdm_enumerate_iocs == 0) {
1108 ASSERT(ibdm.ibdm_dp_gidlist_head == NULL);
1109 }
1110 #endif
1111
1112 /*
1113 * Now clean up the HCA lists in the gidlist.
1114 */
1115 for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo =
1116 gidinfo->gl_next) {
1117 prev = NULL;
1118 head = gidinfo->gl_hca_list;
1119 while (head) {
1120 if (head->hl_hca_guid == hca_guid) {
1121 if (prev == NULL)
1122 gidinfo->gl_hca_list =
1123 head->hl_next;
1124 else
1125 prev->hl_next = head->hl_next;
1126 for (i = 0; i < head->hl_nports; i++) {
1127 port_attr = &head->hl_port_attr[i];
1128 if (port_attr->pa_pkey_tbl != NULL)
1129 kmem_free(
1130 port_attr->pa_pkey_tbl,
1131 port_attr->pa_npkeys *
1132 sizeof (ibdm_pkey_tbl_t));
1133 }
1134 len = sizeof (ibdm_hca_list_t) +
1135 (head->hl_nports *
1136 sizeof (ibdm_port_attr_t));
1137 kmem_free(head, len);
1138
1139 break;
1140 }
1141 prev = head;
1142 head = head->hl_next;
1143 }
1144 }
1145
1146 mutex_enter(&ibdm.ibdm_mutex);
1147 ibdm.ibdm_busy &= ~IBDM_BUSY;
1148 cv_broadcast(&ibdm.ibdm_busy_cv);
1149 mutex_exit(&ibdm.ibdm_mutex);
1150 }
1151
1152
1153 static int
1154 ibdm_uninit_hca(ibdm_hca_list_t *head)
1155 {
1156 int ii;
1157 ibdm_port_attr_t *port_attr;
1158
1159 for (ii = 0; ii < head->hl_nports; ii++) {
1160 port_attr = &head->hl_port_attr[ii];
1161 if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) {
1162 IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x "
1163 "ibdm_fini_port() failed", head, ii);
1164 return (IBDM_FAILURE);
1165 }
1166 }
1167 if (head->hl_hca_hdl)
1168 if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS) {
1169 IBTF_DPRINTF_L2("ibdm", "uninit_hca: "
1170 "ibt_close_hca() failed");
1171 return (IBDM_FAILURE);
1172 }
1173 kmem_free(head->hl_port_attr,
1174 head->hl_nports * sizeof (ibdm_port_attr_t));
1175 kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t));
1176 kmem_free(head, sizeof (ibdm_hca_list_t));
1177 return (IBDM_SUCCESS);
1178 }
1179
1180
1181 /*
1182 * For each port on the HCA,
1183 * 1) Teardown IBMF receive callback function
1184 * 2) Unregister with IBMF
1185 * 3) Unregister with SA access
1186 */
1187 static int
1188 ibdm_fini_port(ibdm_port_attr_t *port_attr)
1189 {
1190 int ii, ibmf_status;
1191
1192 for (ii = 0; ii < port_attr->pa_npkeys; ii++) {
1193 if (port_attr->pa_pkey_tbl == NULL)
1194 break;
1195 if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl)
1196 continue;
1197 if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) {
1198 IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
1199 "ibdm_port_attr_ibmf_fini failed for "
1200 "port pkey 0x%x", ii);
1201 return (IBDM_FAILURE);
1202 }
1203 }
1204
1205 if (port_attr->pa_ibmf_hdl) {
1206 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
1207 IBMF_QP_HANDLE_DEFAULT, 0);
1208 if (ibmf_status != IBMF_SUCCESS) {
1209 IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
1210 "ibmf_tear_down_async_cb failed %d", ibmf_status);
1211 return (IBDM_FAILURE);
1212 }
1213
1214 ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0);
1215 if (ibmf_status != IBMF_SUCCESS) {
1216 IBTF_DPRINTF_L2("ibdm", "\tfini_port: "
1217 "ibmf_unregister failed %d", ibmf_status);
1218 return (IBDM_FAILURE);
1219 }
1220
1221 port_attr->pa_ibmf_hdl = NULL;
1222 }
1223
1224 if (port_attr->pa_sa_hdl) {
1225 ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0);
1226 if (ibmf_status != IBMF_SUCCESS) {
1227 IBTF_DPRINTF_L2("ibdm", "\tfini_port: "
1228 "ibmf_sa_session_close failed %d", ibmf_status);
1229 return (IBDM_FAILURE);
1230 }
1231 port_attr->pa_sa_hdl = NULL;
1232 }
1233
1234 if (port_attr->pa_pkey_tbl != NULL) {
1235 kmem_free(port_attr->pa_pkey_tbl,
1236 port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
1237 port_attr->pa_pkey_tbl = NULL;
1238 port_attr->pa_npkeys = 0;
1239 }
1240
1241 return (IBDM_SUCCESS);
1242 }
1243
1244
1245 /*
1246 * ibdm_port_attr_ibmf_fini:
1247 * With IBMF - Tear down Async callback and free QP Handle
1248 */
1249 static int
1250 ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii)
1251 {
1252 int ibmf_status;
1253
1254 IBTF_DPRINTF_L5("ibdm", "\tport_attr_ibmf_fini:");
1255
1256 if (ibdm_enumerate_iocs == 0) {
1257 ASSERT(port_attr->pa_pkey_tbl[ii].pt_qp_hdl == NULL);
1258 return (IBDM_SUCCESS);
1259 }
1260
1261 if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) {
1262 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
1263 port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
1264 if (ibmf_status != IBMF_SUCCESS) {
1265 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
1266 "ibmf_tear_down_async_cb failed %d", ibmf_status);
1267 return (IBDM_FAILURE);
1268 }
1269 ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl,
1270 &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
1271 if (ibmf_status != IBMF_SUCCESS) {
1272 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
1273 "ibmf_free_qp failed %d", ibmf_status);
1274 return (IBDM_FAILURE);
1275 }
1276 port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
1277 }
1278 return (IBDM_SUCCESS);
1279 }
1280
1281
1282 /*
1283 * ibdm_gid_decr_pending:
1284 * Decrement gl_pending_cmds; if it reaches zero, wake up sleeping threads
1285 */
1286 static void
1287 ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo)
1288 {
1289 mutex_enter(&ibdm.ibdm_mutex);
1290 mutex_enter(&gidinfo->gl_mutex);
1291 if (--gidinfo->gl_pending_cmds == 0) {
1292 /*
1293 * Handle DGID getting removed.
1294 */
1295 if (gidinfo->gl_disconnected) {
1296 mutex_exit(&gidinfo->gl_mutex);
1297 mutex_exit(&ibdm.ibdm_mutex);
1298
1299 IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: "
1300 "gidinfo %p hot removal", gidinfo);
1301 ibdm_delete_gidinfo(gidinfo);
1302
1303 mutex_enter(&ibdm.ibdm_mutex);
1304 ibdm.ibdm_ngid_probes_in_progress--;
1305 ibdm_wait_probe_completion();
1306 mutex_exit(&ibdm.ibdm_mutex);
1307 return;
1308 }
1309 mutex_exit(&gidinfo->gl_mutex);
1310 mutex_exit(&ibdm.ibdm_mutex);
1311 ibdm_notify_newgid_iocs(gidinfo);
1312 mutex_enter(&ibdm.ibdm_mutex);
1313 mutex_enter(&gidinfo->gl_mutex);
1314
1315 ibdm.ibdm_ngid_probes_in_progress--;
1316 ibdm_wait_probe_completion();
1317 }
1318 mutex_exit(&gidinfo->gl_mutex);
1319 mutex_exit(&ibdm.ibdm_mutex);
1320 }
1321
1322
1323 /*
1324 * ibdm_wait_probe_completion:
1325 * wait for probing to complete
1326 */
1327 static void
1328 ibdm_wait_probe_completion(void)
1329 {
1330 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
1331 if (ibdm.ibdm_ngid_probes_in_progress) {
1332 IBTF_DPRINTF_L4("ibdm", "\twait for probe complete");
1333 ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS;
1334 while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS)
1335 cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex);
1336 }
1337 }
1338
1339
1340 /*
1341 * ibdm_wait_cisco_probe_completion:
1342 * wait for the reply from the Cisco FC GW switch after a SetClassPortInfo
1343 * request is sent. This wait is performed per GID.
1344 */
1345 static void
1346 ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *gidinfo)
1347 {
1348 ASSERT(MUTEX_HELD(&gidinfo->gl_mutex));
1349 IBTF_DPRINTF_L4("ibdm", "\twait for cisco probe complete");
1350 gidinfo->gl_flag |= IBDM_CISCO_PROBE;
1351 while (gidinfo->gl_flag & IBDM_CISCO_PROBE)
1352 cv_wait(&gidinfo->gl_probe_cv, &gidinfo->gl_mutex);
1353 }
1354
1355
1356 /*
1357 * ibdm_wakeup_probe_gid_cv:
1358 * wakeup waiting threads (based on ibdm_ngid_probes_in_progress)
1359 */
1360 static void
1361 ibdm_wakeup_probe_gid_cv(void)
1362 {
1363 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
1364 if (!ibdm.ibdm_ngid_probes_in_progress) {
1365 IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup");
1366 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
1367 cv_broadcast(&ibdm.ibdm_probe_cv);
1368 }
1369
1370 }
1371
1372
1373 /*
1374 * ibdm_sweep_fabric(reprobe_flag)
1375 * Find all possible managed IOUs and their IOCs that are visible
1376 * to the host. The algorithm used is as follows:
1377 *
1378 * Send a "bus walk" request for each port on the host HCA to SA access
1379 * SA returns the complete set of GIDs that are reachable from the
1380 * source port. This is done in parallel.
1381 *
1382 * Initialize GID state to IBDM_GID_PROBE_NOT_DONE
1383 *
1384 * Sort the GID list and eliminate duplicate GIDs
1385 * 1) Use DGID for sorting
1386 * 2) Use PortGuid for sorting
1387 * Send SA query to retrieve NodeRecord and
1388 * extract PortGuid from that.
1389 *
1390 * Set GID state to IBDM_GID_PROBE_FAILED for all the ports that don't
1391 * support DM MADs
1392 * Send a "Portinfo" query to get the port capabilities and
1393 * then check for DM MAD support
1394 *
1395 * Send "ClassPortInfo" request for all the GIDs in parallel,
1396 * set the GID state to IBDM_GET_CLASSPORTINFO and wait on the
1397 * cv_signal to complete.
1398 *
1399 * When the DM agent on the remote GID sends back the response, IBMF
1400 * invokes the DM callback routine.
1401 *
1402 * If the response is proper, send "IOUnitInfo" request and set
1403 * GID state to IBDM_GET_IOUNITINFO.
1404 *
1405 * If the response is proper, send "IocProfileInfo" request to
1406 * all the IOCs simultaneously and set GID state to IBDM_GET_IOC_DETAILS.
1407 *
1408 * Send request to get Service entries simultaneously
1409 *
1410 * Signal the waiting thread when responses for all the commands arrive.
1411 *
1412 * Set the GID state to IBDM_GID_PROBE_FAILED when an error
1413 * response is received during the probing period.
1414 *
1415 * Note:
1416 * ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds
1417 * keep track of the number of commands in progress at any point in time.
1418 * MAD transaction ID is used to identify a particular GID
1419 * TBD: Consider registering the IBMF receive callback on demand
1420 *
1421 * Note: This routine must be called with ibdm.ibdm_mutex held
1422 * TBD: Re-probe the failed GIDs (for certain failures) when the fabric
1423 * is swept next time
1424 *
1425 * Parameters : If reprobe_flag is set, all IOCs will be reprobed.
1426 */
1427 static void
1428 ibdm_sweep_fabric(int reprobe_flag)
1429 {
1430 int ii;
1431 int new_paths = 0;
1432 uint8_t niocs;
1433 taskqid_t tid;
1434 ibdm_ioc_info_t *ioc;
1435 ibdm_hca_list_t *hca_list = NULL;
1436 ibdm_port_attr_t *port = NULL;
1437 ibdm_dp_gidinfo_t *gid_info;
1438
1439 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter");
1440 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
1441
1442 /*
1443 * Check whether a sweep already in progress. If so, just
1444 * wait for the fabric sweep to complete
1445 */
1446 while (ibdm.ibdm_busy & IBDM_BUSY)
1447 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
1448 ibdm.ibdm_busy |= IBDM_BUSY;
1449 mutex_exit(&ibdm.ibdm_mutex);
1450
1451 ibdm_dump_sweep_fabric_timestamp(0);
1452
1453 /* Rescan the GID list for any removed GIDs for reprobe */
1454 if (reprobe_flag)
1455 ibdm_rescan_gidlist(NULL);
1456
1457 /*
1458 * Get list of all the ports reachable from the local known HCA
1459 * ports which are active
1460 */
1461 mutex_enter(&ibdm.ibdm_hl_mutex);
1462 for (ibdm_get_next_port(&hca_list, &port, 1); port;
1463 ibdm_get_next_port(&hca_list, &port, 1)) {
1464 /*
1465 * Get PATHS to all the reachable ports from
1466 * SGID and update the global ibdm structure.
1467 */
1468 new_paths = ibdm_get_reachable_ports(port, hca_list);
1469 ibdm.ibdm_ngids += new_paths;
1470 }
1471 mutex_exit(&ibdm.ibdm_hl_mutex);
1472
1473 mutex_enter(&ibdm.ibdm_mutex);
1474 ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids;
1475 mutex_exit(&ibdm.ibdm_mutex);
1476
1477 /* Send a request to probe GIDs asynchronously. */
1478 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
1479 gid_info = gid_info->gl_next) {
1480 mutex_enter(&gid_info->gl_mutex);
1481 gid_info->gl_reprobe_flag = reprobe_flag;
1482 mutex_exit(&gid_info->gl_mutex);
1483
1484 /* process newly encountered GIDs */
1485 tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread,
1486 (void *)gid_info, TQ_NOSLEEP);
1487 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p"
1488 " taskq_id = %x", gid_info, tid);
1489 /* taskq failed to dispatch call it directly */
1490 if (tid == TASKQID_INVALID)
1491 ibdm_probe_gid_thread((void *)gid_info);
1492 }
1493
1494 mutex_enter(&ibdm.ibdm_mutex);
1495 ibdm_wait_probe_completion();
1496
1497 /*
1498 * Update the properties, if reprobe_flag is set
1499 * Skip if gl_reprobe_flag is set, this will be
1500 * a re-inserted / new GID, for which notifications
1501 * have already been send.
1502 */
1503 if (reprobe_flag) {
1504 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
1505 gid_info = gid_info->gl_next) {
1506 if (gid_info->gl_iou == NULL)
1507 continue;
1508 if (gid_info->gl_reprobe_flag) {
1509 gid_info->gl_reprobe_flag = 0;
1510 continue;
1511 }
1512
1513 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
1514 for (ii = 0; ii < niocs; ii++) {
1515 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
1516 if (ioc)
1517 ibdm_reprobe_update_port_srv(ioc,
1518 gid_info);
1519 }
1520 }
1521 } else if (ibdm.ibdm_prev_iou) {
1522 ibdm_ioc_info_t *ioc_list;
1523
1524 /*
1525 * Get the list of IOCs which have changed.
1526 * If any IOCs have changed, Notify IBNexus
1527 */
1528 ibdm.ibdm_prev_iou = 0;
1529 ioc_list = ibdm_handle_prev_iou();
1530 if (ioc_list) {
1531 if (ibdm.ibdm_ibnex_callback != NULL) {
1532 (*ibdm.ibdm_ibnex_callback)(
1533 (void *)ioc_list,
1534 IBDM_EVENT_IOC_PROP_UPDATE);
1535 }
1536 }
1537 }
1538
1539 ibdm_dump_sweep_fabric_timestamp(1);
1540
1541 ibdm.ibdm_busy &= ~IBDM_BUSY;
1542 cv_broadcast(&ibdm.ibdm_busy_cv);
1543 IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT");
1544 }
1545
1546
1547 /*
1548 * ibdm_is_cisco:
1549 * Check if this is a Cisco device or not.
1550 */
1551 static boolean_t
1552 ibdm_is_cisco(ib_guid_t guid)
1553 {
1554 if ((guid >> IBDM_OUI_GUID_SHIFT) == IBDM_CISCO_COMPANY_ID)
1555 return (B_TRUE);
1556 return (B_FALSE);
1557 }
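
/*
 * For reference: the OUI (company id) is the most-significant 24 bits of
 * the 64-bit GUID, so the test above is roughly equivalent to the sketch
 * below, assuming IBDM_OUI_GUID_SHIFT is 40 (an assumption, not verified
 * here):
 *
 *	uint32_t oui = (uint32_t)(guid >> 40);
 *	return (oui == IBDM_CISCO_COMPANY_ID ? B_TRUE : B_FALSE);
 */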
1558
1559
1560 /*
1561 * ibdm_is_cisco_switch:
1562 * Check if this switch is a CISCO switch or not.
1563 * Note that if this switch is already activated, ibdm_is_cisco_switch()
1564 * returns B_FALSE so that it is not re-activated again.
1565 */
1566 static boolean_t
1567 ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *gid_info)
1568 {
1569 int company_id, device_id;
1570 ASSERT(gid_info != 0);
1571 ASSERT(MUTEX_HELD(&gid_info->gl_mutex));
1572
1573 /*
1574 * If this switch is already activated, don't re-activate it.
1575 */
1576 if (gid_info->gl_flag & IBDM_CISCO_PROBE_DONE)
1577 return (B_FALSE);
1578
1579 /*
1580 * Check if this switch is a Cisco FC GW or not.
1581 * Use the node guid (the OUI part) instead of the vendor id
1582 * since the vendor id is zero in practice.
1583 */
1584 company_id = gid_info->gl_nodeguid >> IBDM_OUI_GUID_SHIFT;
1585 device_id = gid_info->gl_devid;
1586
1587 if (company_id == IBDM_CISCO_COMPANY_ID &&
1588 device_id == IBDM_CISCO_DEVICE_ID)
1589 return (B_TRUE);
1590 return (B_FALSE);
1591 }
1592
1593
1594 /*
1595 * ibdm_probe_gid_thread:
1596 * thread that does the actual work for sweeping the fabric
1597 * for a given GID
1598 */
1599 static void
1600 ibdm_probe_gid_thread(void *args)
1601 {
1602 int reprobe_flag;
1603 ib_guid_t node_guid;
1604 ib_guid_t port_guid;
1605 ibdm_dp_gidinfo_t *gid_info;
1606
1607 gid_info = (ibdm_dp_gidinfo_t *)args;
1608 reprobe_flag = gid_info->gl_reprobe_flag;
1609 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d",
1610 gid_info, reprobe_flag);
1611 ASSERT(gid_info != NULL);
1612 ASSERT(gid_info->gl_pending_cmds == 0);
1613
1614 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE &&
1615 reprobe_flag == 0) {
1616 /*
1617 * This GID may have already been probed. Send
1618 * a ClassPortInfo request to check whether IOUnitInfo has changed.
1619 * Explicitly set gl_reprobe_flag to 0 so that
1620 * IBnex is not notified on completion
1621 */
1622 if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) {
1623 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: "
1624 "get new IOCs information");
1625 mutex_enter(&gid_info->gl_mutex);
1626 gid_info->gl_pending_cmds++;
1627 gid_info->gl_state = IBDM_GET_IOUNITINFO;
1628 gid_info->gl_reprobe_flag = 0;
1629 mutex_exit(&gid_info->gl_mutex);
1630 if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) {
1631 mutex_enter(&gid_info->gl_mutex);
1632 --gid_info->gl_pending_cmds;
1633 mutex_exit(&gid_info->gl_mutex);
1634 mutex_enter(&ibdm.ibdm_mutex);
1635 --ibdm.ibdm_ngid_probes_in_progress;
1636 ibdm_wakeup_probe_gid_cv();
1637 mutex_exit(&ibdm.ibdm_mutex);
1638 }
1639 } else {
1640 mutex_enter(&ibdm.ibdm_mutex);
1641 --ibdm.ibdm_ngid_probes_in_progress;
1642 ibdm_wakeup_probe_gid_cv();
1643 mutex_exit(&ibdm.ibdm_mutex);
1644 }
1645 return;
1646 } else if (reprobe_flag && gid_info->gl_state ==
1647 IBDM_GID_PROBING_COMPLETE) {
1648 /*
1649 * Reprobe all IOCs for the GID which has completed
1650 * probe. Skip other port GIDs to same IOU.
1651 * Explicitly set gl_reprobe_flag to 0 so that
1652 * IBnex is not notified on completion
1653 */
1654 ibdm_ioc_info_t *ioc_info;
1655 uint8_t niocs, ii;
1656
1657 ASSERT(gid_info->gl_iou);
1658 mutex_enter(&gid_info->gl_mutex);
1659 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
1660 gid_info->gl_state = IBDM_GET_IOC_DETAILS;
1661 gid_info->gl_pending_cmds += niocs;
1662 gid_info->gl_reprobe_flag = 0;
1663 mutex_exit(&gid_info->gl_mutex);
1664 for (ii = 0; ii < niocs; ii++) {
1665 uchar_t slot_info;
1666 ib_dm_io_unitinfo_t *giou_info;
1667
1668 /*
1669 * Check whether an IOC is present in the slot.
1670 * Each nibble (in the field
1671 * iou_ctrl_list) represents a slot in the
1672 * IOU.
1673 * Byte format: 76543210
1674 * Bits 0-3 of first byte represent Slot 2
1675 * bits 4-7 of first byte represent slot 1,
1676 * bits 0-3 of second byte represent slot 4
1677 * and so on
1678 * Each 4-bit nibble has the following meaning
1679 * 0x0 : IOC not installed
1680 * 0x1 : IOC is present
1681 * 0xf : Slot does not exist
1682 * and all other values are reserved.
1683 */
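			/*
			 * Worked example (illustrative): if iou_ctrl_list[0]
			 * is 0x1f, the high nibble (0x1) means slot 1 has an
			 * IOC installed and the low nibble (0xf) means slot 2
			 * does not exist; for ii == 0 the shift below yields
			 * 0x1, so the IOC profile request is sent.
			 */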
1684 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
1685 giou_info = &gid_info->gl_iou->iou_info;
1686 slot_info = giou_info->iou_ctrl_list[(ii/2)];
1687 if ((ii % 2) == 0)
1688 slot_info = (slot_info >> 4);
1689
1690 if ((slot_info & 0xf) != 1) {
1691 ioc_info->ioc_state =
1692 IBDM_IOC_STATE_PROBE_FAILED;
1693 ibdm_gid_decr_pending(gid_info);
1694 continue;
1695 }
1696
1697 if (ibdm_send_ioc_profile(gid_info, ii) !=
1698 IBDM_SUCCESS) {
1699 ibdm_gid_decr_pending(gid_info);
1700 }
1701 }
1702
1703 return;
1704 } else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) {
1705 mutex_enter(&ibdm.ibdm_mutex);
1706 --ibdm.ibdm_ngid_probes_in_progress;
1707 ibdm_wakeup_probe_gid_cv();
1708 mutex_exit(&ibdm.ibdm_mutex);
1709 return;
1710 }
1711
1712 /*
1713 * Check whether the destination GID supports DM agents. If
1714 * not, stop probing the GID and continue with the next GID
1715 * in the list.
1716 */
1717 if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) {
1718 mutex_enter(&gid_info->gl_mutex);
1719 gid_info->gl_state = IBDM_GID_PROBING_FAILED;
1720 gid_info->gl_is_dm_capable = B_FALSE;
1721 mutex_exit(&gid_info->gl_mutex);
1722 ibdm_delete_glhca_list(gid_info);
1723 mutex_enter(&ibdm.ibdm_mutex);
1724 --ibdm.ibdm_ngid_probes_in_progress;
1725 ibdm_wakeup_probe_gid_cv();
1726 mutex_exit(&ibdm.ibdm_mutex);
1727 return;
1728 }
1729
1730 /*
1731 * This GID is Device management capable
1732 */
1733 mutex_enter(&gid_info->gl_mutex);
1734 gid_info->gl_is_dm_capable = B_TRUE;
1735 mutex_exit(&gid_info->gl_mutex);
1736
1737 /* Get the nodeguid and portguid of the port */
1738 if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid,
1739 &node_guid, &port_guid) != IBDM_SUCCESS) {
1740 mutex_enter(&gid_info->gl_mutex);
1741 gid_info->gl_state = IBDM_GID_PROBING_FAILED;
1742 mutex_exit(&gid_info->gl_mutex);
1743 ibdm_delete_glhca_list(gid_info);
1744 mutex_enter(&ibdm.ibdm_mutex);
1745 --ibdm.ibdm_ngid_probes_in_progress;
1746 ibdm_wakeup_probe_gid_cv();
1747 mutex_exit(&ibdm.ibdm_mutex);
1748 return;
1749 }
1750
1751 /*
1752 * Check whether we already know about this NodeGuid.
1753 * If so, do not probe the GID and continue with the
1754 * next GID in the gid list. Set the GID state to
1755 * probing skipped.
1756 */
1757 mutex_enter(&ibdm.ibdm_mutex);
1758 gid_info->gl_nodeguid = node_guid;
1759 gid_info->gl_portguid = port_guid;
1760 if (ibdm_check_dest_nodeguid(gid_info) != NULL) {
1761 mutex_exit(&ibdm.ibdm_mutex);
1762 mutex_enter(&gid_info->gl_mutex);
1763 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED;
1764 mutex_exit(&gid_info->gl_mutex);
1765 ibdm_delete_glhca_list(gid_info);
1766 mutex_enter(&ibdm.ibdm_mutex);
1767 --ibdm.ibdm_ngid_probes_in_progress;
1768 ibdm_wakeup_probe_gid_cv();
1769 mutex_exit(&ibdm.ibdm_mutex);
1770 return;
1771 }
1772 ibdm_add_to_gl_gid(gid_info, gid_info);
1773 mutex_exit(&ibdm.ibdm_mutex);
1774
1775 /*
1776 * New or reinserted GID : Enable notification to IBnex
1777 */
1778 mutex_enter(&gid_info->gl_mutex);
1779 gid_info->gl_reprobe_flag = 1;
1780
1781 /*
1782 * A Cisco FC GW needs the special handling to get IOUnitInfo.
1783 */
1784 if (ibdm_is_cisco_switch(gid_info)) {
1785 gid_info->gl_pending_cmds++;
1786 gid_info->gl_state = IBDM_SET_CLASSPORTINFO;
1787 mutex_exit(&gid_info->gl_mutex);
1788
1789 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) {
1790 mutex_enter(&gid_info->gl_mutex);
1791 gid_info->gl_state = IBDM_GID_PROBING_FAILED;
1792 --gid_info->gl_pending_cmds;
1793 mutex_exit(&gid_info->gl_mutex);
1794
1795 /* free the hca_list on this gid_info */
1796 ibdm_delete_glhca_list(gid_info);
1797
1798 mutex_enter(&ibdm.ibdm_mutex);
1799 --ibdm.ibdm_ngid_probes_in_progress;
1800 ibdm_wakeup_probe_gid_cv();
1801 mutex_exit(&ibdm.ibdm_mutex);
1802
1803 return;
1804 }
1805
1806 mutex_enter(&gid_info->gl_mutex);
1807 ibdm_wait_cisco_probe_completion(gid_info);
1808
1809 IBTF_DPRINTF_L4("ibdm", "\tibdm_probe_gid_thread: "
1810 "CISCO Wakeup signal received");
1811 }
1812
1813 /* move on to the 'GET_CLASSPORTINFO' stage */
1814 gid_info->gl_pending_cmds++;
1815 gid_info->gl_state = IBDM_GET_CLASSPORTINFO;
1816 mutex_exit(&gid_info->gl_mutex);
1817
1818 IBTF_DPRINTF_L3(ibdm_string, "\tibdm_probe_gid_thread: "
1819 "%d: gid_info %p gl_state %d pending_cmds %d",
1820 __LINE__, gid_info, gid_info->gl_state,
1821 gid_info->gl_pending_cmds);
1822
1823 /*
1824 * Send ClassPortInfo request to the GID asynchronously.
1825 */
1826 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) {
1827
1828 mutex_enter(&gid_info->gl_mutex);
1829 gid_info->gl_state = IBDM_GID_PROBING_FAILED;
1830 --gid_info->gl_pending_cmds;
1831 mutex_exit(&gid_info->gl_mutex);
1832
1833 /* free the hca_list on this gid_info */
1834 ibdm_delete_glhca_list(gid_info);
1835
1836 mutex_enter(&ibdm.ibdm_mutex);
1837 --ibdm.ibdm_ngid_probes_in_progress;
1838 ibdm_wakeup_probe_gid_cv();
1839 mutex_exit(&ibdm.ibdm_mutex);
1840
1841 return;
1842 }
1843 }
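
/*
 * The slot-nibble decoding open-coded above (and again in
 * ibdm_handle_iounitinfo) can be read in isolation as the minimal sketch
 * below. The helper name and the stand-alone form are illustrative only
 * and are not part of this driver.
 */

/* Return the 4-bit slot code for 0-based slot index 'idx'. */
static unsigned char
example_slot_nibble(const unsigned char *ctrl_list, unsigned int idx)
{
	unsigned char byte = ctrl_list[idx / 2];

	/* Even index: high nibble (slot 1, 3, ...); odd index: low nibble. */
	/* 0x0: IOC not installed, 0x1: IOC present, 0xf: slot absent. */
	return (((idx % 2) == 0) ? ((byte >> 4) & 0xf) : (byte & 0xf));
}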
1844
1845
1846 /*
1847 * ibdm_check_dest_nodeguid
1848 * Searches for the NodeGuid in the GID list
1849 * Returns the matching gid_info if found, otherwise NULL
1850 *
1851 * This function is called to handle new GIDs discovered
1852 * during device sweep / probe or for GID_AVAILABLE event.
1853 *
1854 * Parameter :
1855 * gid_info GID to check
1856 */
1857 static ibdm_dp_gidinfo_t *
1858 ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info)
1859 {
1860 ibdm_dp_gidinfo_t *gid_list;
1861 ibdm_gid_t *tmp;
1862
1863 IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid");
1864
1865 gid_list = ibdm.ibdm_dp_gidlist_head;
1866 while (gid_list) {
1867 if ((gid_list != gid_info) &&
1868 (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) {
1869 IBTF_DPRINTF_L4("ibdm",
1870 "\tcheck_dest_nodeguid: NodeGuid is present");
1871
1872 /* Add to gid_list */
1873 tmp = kmem_zalloc(sizeof (ibdm_gid_t),
1874 KM_SLEEP);
1875 tmp->gid_dgid_hi = gid_info->gl_dgid_hi;
1876 tmp->gid_dgid_lo = gid_info->gl_dgid_lo;
1877 tmp->gid_next = gid_list->gl_gid;
1878 gid_list->gl_gid = tmp;
1879 gid_list->gl_ngids++;
1880 return (gid_list);
1881 }
1882
1883 gid_list = gid_list->gl_next;
1884 }
1885
1886 return (NULL);
1887 }
1888
1889
1890 /*
1891 * ibdm_is_dev_mgt_supported
1892 * Get the PortInfo attribute (SA Query)
1893 * Check the "CapabilityMask" field in the PortInfo.
1894 * Return IBDM_SUCCESS if DM MADs are supported by the port (the
1895 * IsDeviceManagementSupported bit, bit 19, is set), otherwise IBDM_FAILURE
1896 */
1897 static int
1898 ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info)
1899 {
1900 int ret;
1901 size_t length = 0;
1902 sa_portinfo_record_t req, *resp = NULL;
1903 ibmf_saa_access_args_t qargs;
1904
1905 bzero(&req, sizeof (sa_portinfo_record_t));
1906 req.EndportLID = gid_info->gl_dlid;
1907
1908 qargs.sq_attr_id = SA_PORTINFORECORD_ATTRID;
1909 qargs.sq_access_type = IBMF_SAA_RETRIEVE;
1910 qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID;
1911 qargs.sq_template = &req;
1912 qargs.sq_callback = NULL;
1913 qargs.sq_callback_arg = NULL;
1914
1915 ret = ibmf_sa_access(gid_info->gl_sa_hdl,
1916 &qargs, 0, &length, (void **)&resp);
1917
1918 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
1919 IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:"
1920 "failed to get PORTINFO attribute %d", ret);
1921 return (IBDM_FAILURE);
1922 }
1923
1924 if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) {
1925 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!");
1926 ret = IBDM_SUCCESS;
1927 } else {
1928 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: "
1929 "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask);
1930 ret = IBDM_FAILURE;
1931 }
1932 kmem_free(resp, length);
1933 return (ret);
1934 }
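
/*
 * Stand-alone form of the capability test performed above. The bit
 * position (19, the IsDeviceManagementSupported bit mentioned in the
 * comment) is assumed here; the driver itself relies on the
 * SM_CAP_MASK_IS_DM_SUPPD definition. Illustrative only.
 */
#define	EXAMPLE_CAP_IS_DM_SUPPORTED	(1u << 19)	/* assumed bit position */

static int
example_port_supports_dm(unsigned int capability_mask)
{
	return ((capability_mask & EXAMPLE_CAP_IS_DM_SUPPORTED) != 0);
}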
1935
1936
1937 /*
1938 * ibdm_get_node_port_guids()
1939 * Get the NodeInfoRecord of the port
1940 * Save NodeGuid and PortGUID values in the GID list structure.
1941 * Return IBDM_SUCCESS/IBDM_FAILURE
1942 */
1943 static int
1944 ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid,
1945 ib_guid_t *node_guid, ib_guid_t *port_guid)
1946 {
1947 int ret;
1948 size_t length = 0;
1949 sa_node_record_t req, *resp = NULL;
1950 ibmf_saa_access_args_t qargs;
1951
1952 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids");
1953
1954 bzero(&req, sizeof (sa_node_record_t));
1955 req.LID = dlid;
1956
1957 qargs.sq_attr_id = SA_NODERECORD_ATTRID;
1958 qargs.sq_access_type = IBMF_SAA_RETRIEVE;
1959 qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID;
1960 qargs.sq_template = &req;
1961 qargs.sq_callback = NULL;
1962 qargs.sq_callback_arg = NULL;
1963
1964 ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp);
1965 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
1966 IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:"
1967 " SA Retrieve Failed: %d", ret);
1968 return (IBDM_FAILURE);
1969 }
1970 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port"
1971 "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID);
1972
1973 *node_guid = resp->NodeInfo.NodeGUID;
1974 *port_guid = resp->NodeInfo.PortGUID;
1975 kmem_free(resp, length);
1976 return (IBDM_SUCCESS);
1977 }
1978
1979
1980 /*
1981 * ibdm_get_reachable_ports()
1982 * Get the list of reachable destination GIDs (and their path records)
1983 * by querying the SA.
1984 *
1985 * Returns the number of paths found
1986 */
1987 static int
1988 ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca)
1989 {
1990 uint_t ii, jj, nrecs;
1991 uint_t npaths = 0;
1992 size_t length;
1993 ib_gid_t sgid;
1994 ibdm_pkey_tbl_t *pkey_tbl;
1995 sa_path_record_t *result;
1996 sa_path_record_t *precp;
1997 ibdm_dp_gidinfo_t *gid_info;
1998
1999 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
2000 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo);
2001
2002 sgid.gid_prefix = portinfo->pa_sn_prefix;
2003 sgid.gid_guid = portinfo->pa_port_guid;
2004
2005 /* get reversible paths */
2006 if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl,
2007 sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result)
2008 != IBMF_SUCCESS) {
2009 IBTF_DPRINTF_L2("ibdm",
2010 "\tget_reachable_ports: Getting path records failed");
2011 return (0);
2012 }
2013
2014 for (ii = 0; ii < nrecs; ii++) {
2015 sa_node_record_t *nrec;
2016 size_t length;
2017
2018 precp = &result[ii];
2019 if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid,
2020 precp->DGID.gid_prefix)) != NULL) {
2021 IBTF_DPRINTF_L5("ibdm", "\tget_reachable_ports: "
2022 "Already exists nrecs %d, ii %d", nrecs, ii);
2023 ibdm_addto_glhcalist(gid_info, hca);
2024 continue;
2025 }
2026 /*
2027 * This is a new GID. Allocate a GID structure and
2028 * initialize the structure
2029 * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0)
2030 * by kmem_zalloc call
2031 */
2032 gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP);
2033 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL);
2034 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL);
2035 gid_info->gl_dgid_hi = precp->DGID.gid_prefix;
2036 gid_info->gl_dgid_lo = precp->DGID.gid_guid;
2037 gid_info->gl_sgid_hi = precp->SGID.gid_prefix;
2038 gid_info->gl_sgid_lo = precp->SGID.gid_guid;
2039 gid_info->gl_p_key = precp->P_Key;
2040 gid_info->gl_sa_hdl = portinfo->pa_sa_hdl;
2041 gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl;
2042 gid_info->gl_slid = precp->SLID;
2043 gid_info->gl_dlid = precp->DLID;
2044 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID)
2045 << IBDM_GID_TRANSACTIONID_SHIFT;
2046 gid_info->gl_min_transactionID = gid_info->gl_transactionID;
2047 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1)
2048 << IBDM_GID_TRANSACTIONID_SHIFT;
2049 gid_info->gl_SL = precp->SL;
2050
2051 /*
2052 * get the node record with this guid if the destination
2053 * device is a Cisco one.
2054 */
2055 if (ibdm_is_cisco(precp->DGID.gid_guid) &&
2056 (gid_info->gl_nodeguid == 0 || gid_info->gl_devid == 0) &&
2057 ibdm_get_node_record_by_port(portinfo->pa_sa_hdl,
2058 precp->DGID.gid_guid, &nrec, &length) == IBDM_SUCCESS) {
2059 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID;
2060 gid_info->gl_devid = nrec->NodeInfo.DeviceID;
2061 kmem_free(nrec, length);
2062 }
2063
2064 ibdm_addto_glhcalist(gid_info, hca);
2065
2066 ibdm_dump_path_info(precp);
2067
2068 gid_info->gl_qp_hdl = NULL;
2069 ASSERT(portinfo->pa_pkey_tbl != NULL &&
2070 portinfo->pa_npkeys != 0);
2071
2072 for (jj = 0; jj < portinfo->pa_npkeys; jj++) {
2073 pkey_tbl = &portinfo->pa_pkey_tbl[jj];
2074 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) &&
2075 (pkey_tbl->pt_qp_hdl != NULL)) {
2076 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
2077 break;
2078 }
2079 }
2080
2081 /*
2082 * QP handle for GID not initialized. No matching Pkey
2083 * was found!! ibdm should *not* hit this case. Flag an
2084 * error and drop the GID if ibdm does encounter this.
2085 */
2086 if (gid_info->gl_qp_hdl == NULL) {
2087 IBTF_DPRINTF_L2(ibdm_string,
2088 "\tget_reachable_ports: No matching Pkey");
2089 ibdm_delete_gidinfo(gid_info);
2090 continue;
2091 }
2092 if (ibdm.ibdm_dp_gidlist_head == NULL) {
2093 ibdm.ibdm_dp_gidlist_head = gid_info;
2094 ibdm.ibdm_dp_gidlist_tail = gid_info;
2095 } else {
2096 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info;
2097 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail;
2098 ibdm.ibdm_dp_gidlist_tail = gid_info;
2099 }
2100 npaths++;
2101 }
2102 kmem_free(result, length);
2103 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths);
2104 return (npaths);
2105 }
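
/*
 * Sketch of the per-GID TransactionID window set up above: each new GID
 * advances a global counter and claims the range
 * [counter << SHIFT, (counter + 1) << SHIFT). The shift value used here
 * is hypothetical; the driver's value comes from
 * IBDM_GID_TRANSACTIONID_SHIFT. Illustrative only.
 */
#define	EXAMPLE_TID_SHIFT	24	/* assumed width of the per-GID window */

static void
example_tid_window(unsigned long long *global_tid,
    unsigned long long *min_tid, unsigned long long *max_tid)
{
	*min_tid = (++(*global_tid)) << EXAMPLE_TID_SHIFT;
	*max_tid = (*global_tid + 1) << EXAMPLE_TID_SHIFT;
	/* e.g. first GID: window [0x1000000, 0x2000000) with SHIFT 24 */
}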
2106
2107
2108 /*
2109 * ibdm_check_dgid()
2110 * Look in the global list to check whether we know this DGID already.
2111 * Returns the matching gid_info if present, otherwise NULL
2112 */
2113 static ibdm_dp_gidinfo_t *
2114 ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix)
2115 {
2116 ibdm_dp_gidinfo_t *gid_list;
2117
2118 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
2119 gid_list = gid_list->gl_next) {
2120 if ((guid == gid_list->gl_dgid_lo) &&
2121 (prefix == gid_list->gl_dgid_hi)) {
2122 break;
2123 }
2124 }
2125 return (gid_list);
2126 }
2127
2128
2129 /*
2130 * ibdm_find_gid()
2131 * Look in the global list to find a GID entry with matching
2132 * port & node GUID.
2133 * Return pointer to gidinfo if found, else return NULL
2134 */
2135 static ibdm_dp_gidinfo_t *
2136 ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid)
2137 {
2138 ibdm_dp_gidinfo_t *gid_list;
2139
2140 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n",
2141 nodeguid, portguid);
2142
2143 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
2144 gid_list = gid_list->gl_next) {
2145 if ((portguid == gid_list->gl_portguid) &&
2146 (nodeguid == gid_list->gl_nodeguid)) {
2147 break;
2148 }
2149 }
2150
2151 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n",
2152 gid_list);
2153 return (gid_list);
2154 }
2155
2156
2157 /*
2158 * ibdm_set_classportinfo()
2159 * Activates a Cisco FC gateway by sending a SetClassPortInfo request
2160 * carrying the trap LID, trap GID and related fields, since the gateway
2161 * does not provide the IO Unit Information otherwise. This behavior is
2162 * Cisco specific, so this function is called only for a Cisco FC GW.
2164 * Returns IBDM_SUCCESS/IBDM_FAILURE
2165 */
2166 static int
2167 ibdm_set_classportinfo(ibdm_dp_gidinfo_t *gid_info)
2168 {
2169 ibmf_msg_t *msg;
2170 ib_mad_hdr_t *hdr;
2171 ibdm_timeout_cb_args_t *cb_args;
2172 void *data;
2173 ib_mad_classportinfo_t *cpi;
2174
2175 IBTF_DPRINTF_L4("ibdm",
2176 "\tset_classportinfo: gid info 0x%p", gid_info);
2177
2178 /*
2179 * Send command to set the ClassPortInfo attribute. Allocate an IBMF
2180 * packet and initialize the packet.
2181 */
2182 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
2183 &msg) != IBMF_SUCCESS) {
2184 IBTF_DPRINTF_L4("ibdm", "\tset_classportinfo: pkt alloc fail");
2185 return (IBDM_FAILURE);
2186 }
2187
2188 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2189 ibdm_alloc_send_buffers(msg);
2190 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2191
2192 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
2193 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
2194 msg->im_local_addr.ia_remote_qno = 1;
2195 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
2196 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
2197 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
2198
2199 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
2200 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
2201 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
2202 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
2203 hdr->R_Method = IB_DM_DEVMGT_METHOD_SET;
2204 hdr->Status = 0;
2205 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
2206 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO);
2207 hdr->AttributeModifier = 0;
2208
2209 data = msg->im_msgbufs_send.im_bufs_cl_data;
2210 cpi = (ib_mad_classportinfo_t *)data;
2211
2212 /*
2213 * Set the classportinfo values to activate this Cisco FC GW.
2214 */
2215 cpi->TrapGID_hi = h2b64(gid_info->gl_sgid_hi);
2216 cpi->TrapGID_lo = h2b64(gid_info->gl_sgid_lo);
2217 cpi->TrapLID = h2b16(gid_info->gl_slid);
2218 cpi->TrapSL = gid_info->gl_SL;
2219 cpi->TrapP_Key = h2b16(gid_info->gl_p_key);
2220 cpi->TrapQP = h2b32((((ibmf_alt_qp_t *)gid_info->gl_qp_hdl)->isq_qpn));
2221 cpi->TrapQ_Key = h2b32((((ibmf_alt_qp_t *)
2222 gid_info->gl_qp_hdl)->isq_qkey));
2223
2224 cb_args = &gid_info->gl_cpi_cb_args;
2225 cb_args->cb_gid_info = gid_info;
2226 cb_args->cb_retry_count = ibdm_dft_retry_cnt;
2227 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO;
2228
2229 mutex_enter(&gid_info->gl_mutex);
2230 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2231 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2232 mutex_exit(&gid_info->gl_mutex);
2233
2234 IBTF_DPRINTF_L5("ibdm", "\tset_classportinfo: "
2235 "timeout id %x", gid_info->gl_timeout_id);
2236
2237 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
2238 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
2239 IBTF_DPRINTF_L2("ibdm",
2240 "\tset_classportinfo: ibmf send failed");
2241 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
2242 }
2243
2244 return (IBDM_SUCCESS);
2245 }
2246
2247
2248 /*
2249 * ibdm_send_classportinfo()
2250 * Send classportinfo request. When the request is completed
2251 * IBMF calls ibdm_classportinfo_cb routine to inform about
2252 * the completion.
2253 * Returns IBDM_SUCCESS/IBDM_FAILURE
2254 */
2255 static int
2256 ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info)
2257 {
2258 ibmf_msg_t *msg;
2259 ib_mad_hdr_t *hdr;
2260 ibdm_timeout_cb_args_t *cb_args;
2261
2262 IBTF_DPRINTF_L4("ibdm",
2263 "\tsend_classportinfo: gid info 0x%p", gid_info);
2264
2265 /*
2266 * Send command to get the ClassPortInfo attribute. Allocate an IBMF
2267 * packet and initialize the packet.
2268 */
2269 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
2270 &msg) != IBMF_SUCCESS) {
2271 IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail");
2272 return (IBDM_FAILURE);
2273 }
2274
2275 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2276 ibdm_alloc_send_buffers(msg);
2277 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2278
2279 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
2280 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
2281 msg->im_local_addr.ia_remote_qno = 1;
2282 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
2283 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
2284 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
2285
2286 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
2287 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
2288 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
2289 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
2290 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
2291 hdr->Status = 0;
2292 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
2293 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO);
2294 hdr->AttributeModifier = 0;
2295
2296 cb_args = &gid_info->gl_cpi_cb_args;
2297 cb_args->cb_gid_info = gid_info;
2298 cb_args->cb_retry_count = ibdm_dft_retry_cnt;
2299 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO;
2300
2301 mutex_enter(&gid_info->gl_mutex);
2302 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2303 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2304 mutex_exit(&gid_info->gl_mutex);
2305
2306 IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: "
2307 "timeout id %x", gid_info->gl_timeout_id);
2308
2309 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
2310 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
2311 IBTF_DPRINTF_L2("ibdm",
2312 "\tsend_classportinfo: ibmf send failed");
2313 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
2314 }
2315
2316 return (IBDM_SUCCESS);
2317 }
2318
2319
2320 /*
2321 * ibdm_handle_setclassportinfo()
2322 * Invoked by the IBMF when setClassPortInfo request is completed.
2323 */
2324 static void
2325 ibdm_handle_setclassportinfo(ibmf_handle_t ibmf_hdl,
2326 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2327 {
2328 void *data;
2329 timeout_id_t timeout_id;
2330 ib_mad_classportinfo_t *cpi;
2331
2332 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo:ibmf hdl "
2333 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info);
2334
2335 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) {
2336 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo: "
2337 "Not a ClassPortInfo resp");
2338 *flag |= IBDM_IBMF_PKT_UNEXP_RESP;
2339 return;
2340 }
2341
2342 /*
2343 * Verify whether timeout handler is created/active.
2344 * If created/ active, cancel the timeout handler
2345 */
2346 mutex_enter(&gid_info->gl_mutex);
2347 if (gid_info->gl_state != IBDM_SET_CLASSPORTINFO) {
2348 IBTF_DPRINTF_L2("ibdm", "\thandle_setclassportinfo:DUP resp");
2349 *flag |= IBDM_IBMF_PKT_DUP_RESP;
2350 mutex_exit(&gid_info->gl_mutex);
2351 return;
2352 }
2353 ibdm_bump_transactionID(gid_info);
2354
2355 gid_info->gl_iou_cb_args.cb_req_type = 0;
2356 if (gid_info->gl_timeout_id) {
2357 timeout_id = gid_info->gl_timeout_id;
2358 mutex_exit(&gid_info->gl_mutex);
2359 IBTF_DPRINTF_L5("ibdm", "handle_setclassportinfo: "
2360 "gl_timeout_id = 0x%x", timeout_id);
2361 if (untimeout(timeout_id) == -1) {
2362 IBTF_DPRINTF_L2("ibdm", "handle_setclassportinfo: "
2363 "untimeout gl_timeout_id failed");
2364 }
2365 mutex_enter(&gid_info->gl_mutex);
2366 gid_info->gl_timeout_id = 0;
2367 }
2368 mutex_exit(&gid_info->gl_mutex);
2369
2370 data = msg->im_msgbufs_recv.im_bufs_cl_data;
2371 cpi = (ib_mad_classportinfo_t *)data;
2372
2373 ibdm_dump_classportinfo(cpi);
2374 }
2375
2376
2377 /*
2378 * ibdm_handle_classportinfo()
2379 * Invoked by the IBMF when the classportinfo request is completed.
2380 */
2381 static void
2382 ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl,
2383 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2384 {
2385 void *data;
2386 timeout_id_t timeout_id;
2387 ib_mad_hdr_t *hdr;
2388 ib_mad_classportinfo_t *cpi;
2389
2390 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl "
2391 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info);
2392
2393 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) {
2394 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: "
2395 "Not a ClassPortInfo resp");
2396 *flag |= IBDM_IBMF_PKT_UNEXP_RESP;
2397 return;
2398 }
2399
2400 /*
2401 * Verify whether timeout handler is created/active.
2402 * If created/ active, cancel the timeout handler
2403 */
2404 mutex_enter(&gid_info->gl_mutex);
2405 ibdm_bump_transactionID(gid_info);
2406 if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) {
2407 IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp");
2408 *flag |= IBDM_IBMF_PKT_DUP_RESP;
2409 mutex_exit(&gid_info->gl_mutex);
2410 return;
2411 }
2412 gid_info->gl_iou_cb_args.cb_req_type = 0;
2413 if (gid_info->gl_timeout_id) {
2414 timeout_id = gid_info->gl_timeout_id;
2415 mutex_exit(&gid_info->gl_mutex);
2416 IBTF_DPRINTF_L5("ibdm", "handle_classportinfo: "
2417 "gl_timeout_id = 0x%x", timeout_id);
2418 if (untimeout(timeout_id) == -1) {
2419 IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: "
2420 "untimeout gl_timeout_id failed");
2421 }
2422 mutex_enter(&gid_info->gl_mutex);
2423 gid_info->gl_timeout_id = 0;
2424 }
2425 gid_info->gl_state = IBDM_GET_IOUNITINFO;
2426 gid_info->gl_pending_cmds++;
2427 mutex_exit(&gid_info->gl_mutex);
2428
2429 data = msg->im_msgbufs_recv.im_bufs_cl_data;
2430 cpi = (ib_mad_classportinfo_t *)data;
2431
2432 /*
2433 * Cache the "RespTimeValue" and redirection information in the
2434 * global gid list data structure. This cached information will
2435 * be used to send any further requests to the GID.
2436 */
2437 gid_info->gl_resp_timeout =
2438 (b2h32(cpi->RespTimeValue) & 0x1F);
2439
2440 gid_info->gl_redirected = ((IBDM_IN_IBMFMSG_STATUS(msg) &
2441 MAD_STATUS_REDIRECT_REQUIRED) ? B_TRUE: B_FALSE);
2442 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID);
2443 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff);
2444 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key);
2445 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key);
2446 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi);
2447 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo);
2448 gid_info->gl_redirectSL = cpi->RedirectSL;
2449
2450 ibdm_dump_classportinfo(cpi);
2451
2452 /*
2453 * Send IOUnitInfo request
2454 * Reuse previously allocated IBMF packet for sending ClassPortInfo
2455 * Check whether DM agent on the remote node requested redirection
2456 * If so, send the request to the redirect DGID/DLID/PKEY/QP.
2457 */
2458 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2459 ibdm_alloc_send_buffers(msg);
2460 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2461 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
2462 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
2463
2464 if (gid_info->gl_redirected == B_TRUE) {
2465 if (gid_info->gl_redirect_dlid != 0) {
2466 msg->im_local_addr.ia_remote_lid =
2467 gid_info->gl_redirect_dlid;
2468 }
2469 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
2470 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
2471 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
2472 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
2473 } else {
2474 msg->im_local_addr.ia_remote_qno = 1;
2475 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
2476 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
2477 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
2478 }
2479
2480 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
2481 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
2482 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
2483 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
2484 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
2485 hdr->Status = 0;
2486 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
2487 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO);
2488 hdr->AttributeModifier = 0;
2489
2490 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO;
2491 gid_info->gl_iou_cb_args.cb_gid_info = gid_info;
2492 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt;
2493
2494 mutex_enter(&gid_info->gl_mutex);
2495 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2496 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2497 mutex_exit(&gid_info->gl_mutex);
2498
2499 IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:"
2500 "timeout %x", gid_info->gl_timeout_id);
2501
2502 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL,
2503 ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) {
2504 IBTF_DPRINTF_L2("ibdm",
2505 "\thandle_classportinfo: msg transport failed");
2506 ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args);
2507 }
2508 (*flag) |= IBDM_IBMF_PKT_REUSED;
2509 }
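
/*
 * The 5-bit RespTimeValue cached in ibdm_handle_classportinfo() above is
 * an exponent rather than a time. Under the usual InfiniBand
 * interpretation the response time is 4.096 us * 2^RespTimeValue; the
 * conversion below is a minimal sketch assuming that formula, with an
 * illustrative helper name that is not part of this driver.
 */
static unsigned long long
example_resp_time_usec(unsigned char resp_time_value)
{
	/* 4.096 us = 4096 ns; shift by the 5-bit exponent, then to us. */
	return ((4096ULL << (resp_time_value & 0x1F)) / 1000);
}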
2510
2511
2512 /*
2513 * ibdm_send_iounitinfo:
2514 * Sends a DM request to get IOU unitinfo.
2515 */
2516 static int
2517 ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info)
2518 {
2519 ibmf_msg_t *msg;
2520 ib_mad_hdr_t *hdr;
2521
2522 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info);
2523
2524 /*
2525 * Send command to get the IOUnitInfo attribute. Allocate an IBMF
2526 * packet and initialize the packet.
2527 */
2528 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) !=
2529 IBMF_SUCCESS) {
2530 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail");
2531 return (IBDM_FAILURE);
2532 }
2533
2534 mutex_enter(&gid_info->gl_mutex);
2535 ibdm_bump_transactionID(gid_info);
2536 mutex_exit(&gid_info->gl_mutex);
2537
2538
2539 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2540 ibdm_alloc_send_buffers(msg);
2541 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2542 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
2543 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
2544 msg->im_local_addr.ia_remote_qno = 1;
2545 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
2546 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
2547 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
2548
2549 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
2550 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
2551 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
2552 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
2553 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
2554 hdr->Status = 0;
2555 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
2556 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO);
2557 hdr->AttributeModifier = 0;
2558
2559 gid_info->gl_iou_cb_args.cb_gid_info = gid_info;
2560 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt;
2561 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO;
2562
2563 mutex_enter(&gid_info->gl_mutex);
2564 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2565 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2566 mutex_exit(&gid_info->gl_mutex);
2567
2568 IBTF_DPRINTF_L5("ibdm", "send_iounitinfo:"
2569 "timeout %x", gid_info->gl_timeout_id);
2570
2571 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg,
2572 NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) !=
2573 IBMF_SUCCESS) {
2574 IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed");
2575 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl,
2576 msg, &gid_info->gl_iou_cb_args);
2577 }
2578 return (IBDM_SUCCESS);
2579 }
2580
2581 /*
2582 * ibdm_handle_iounitinfo()
2583 * Invoked by the IBMF when IO Unitinfo request is completed.
2584 */
2585 static void
2586 ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl,
2587 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2588 {
2589 int ii, first = B_TRUE;
2590 int num_iocs;
2591 size_t size;
2592 uchar_t slot_info;
2593 timeout_id_t timeout_id;
2594 ib_mad_hdr_t *hdr;
2595 ibdm_ioc_info_t *ioc_info;
2596 ib_dm_io_unitinfo_t *iou_info;
2597 ib_dm_io_unitinfo_t *giou_info;
2598 ibdm_timeout_cb_args_t *cb_args;
2599
2600 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo:"
2601 " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info);
2602
2603 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) {
2604 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: "
2605 "Unexpected response");
2606 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
2607 return;
2608 }
2609
2610 mutex_enter(&gid_info->gl_mutex);
2611 if (gid_info->gl_state != IBDM_GET_IOUNITINFO) {
2612 IBTF_DPRINTF_L4("ibdm",
2613 "\thandle_iounitinfo: DUP resp");
2614 mutex_exit(&gid_info->gl_mutex);
2615 (*flag) |= IBDM_IBMF_PKT_DUP_RESP;
2616 return;
2617 }
2618 gid_info->gl_iou_cb_args.cb_req_type = 0;
2619 if (gid_info->gl_timeout_id) {
2620 timeout_id = gid_info->gl_timeout_id;
2621 mutex_exit(&gid_info->gl_mutex);
2622 IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: "
2623 "gl_timeout_id = 0x%x", timeout_id);
2624 if (untimeout(timeout_id) == -1) {
2625 IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: "
2626 "untimeout gl_timeout_id failed");
2627 }
2628 mutex_enter(&gid_info->gl_mutex);
2629 gid_info->gl_timeout_id = 0;
2630 }
2631 gid_info->gl_state = IBDM_GET_IOC_DETAILS;
2632
2633 iou_info = IBDM_IN_IBMFMSG2IOU(msg);
2634 ibdm_dump_iounitinfo(iou_info);
2635 num_iocs = iou_info->iou_num_ctrl_slots;
2636 /*
2637 * If the reported number of IOCs is zero and none were known before,
2638 * return. When num_iocs drops to zero the internal IOC database still
2639 * needs to be updated, so the previously seen count is kept in the
2640 * field "gl_num_iocs". A separate field is used instead of
2641 * "giou_info->iou_num_ctrl_slots" as that also avoids an
2642 * unnecessary kmem_alloc/kmem_free when num_iocs is 0.
2643 */
2644 if (num_iocs == 0 && gid_info->gl_num_iocs == 0) {
2645 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's");
2646 mutex_exit(&gid_info->gl_mutex);
2647 return;
2648 }
2649 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs);
2650
2651 /*
2652 * if there is an existing gl_iou (IOU has been probed before)
2653 * check whether its "iou_changeid" is the same as the value saved in
2654 * "giou_info->iou_changeid".
2655 * (note: this logic can prevent IOC enumeration if a given
2656 * vendor doesn't support setting the iou_changeid field for its IOU)
2657 *
2658 * if there is an existing gl_iou and iou_changeid has changed:
2659 * free the existing gl_iou info and its related structures and
2660 * reallocate the gl_iou info from scratch.
2661 * Not freeing it here would leak memory.
2662 */
2663 if (gid_info->gl_iou) {
2664 giou_info = &gid_info->gl_iou->iou_info;
2665 if (b2h16(iou_info->iou_changeid) ==
2666 giou_info->iou_changeid) {
2667 IBTF_DPRINTF_L3("ibdm",
2668 "\thandle_iounitinfo: no IOCs changed");
2669 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE;
2670 mutex_exit(&gid_info->gl_mutex);
2671 return;
2672 }
2673
2674 /*
2675 * Store the iou info as prev_iou to be used after
2676 * sweep is done.
2677 */
2678 ASSERT(gid_info->gl_prev_iou == NULL);
2679 IBTF_DPRINTF_L4(ibdm_string,
2680 "\thandle_iounitinfo: setting gl_prev_iou %p",
2681 gid_info->gl_prev_iou);
2682 gid_info->gl_prev_iou = gid_info->gl_iou;
2683 ibdm.ibdm_prev_iou = 1;
2684 gid_info->gl_iou = NULL;
2685 }
2686
2687 size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t);
2688 gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP);
2689 giou_info = &gid_info->gl_iou->iou_info;
2690 gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *)
2691 ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t));
2692
2693 giou_info->iou_num_ctrl_slots = gid_info->gl_num_iocs = num_iocs;
2694 giou_info->iou_flag = iou_info->iou_flag;
2695 bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128);
2696 giou_info->iou_changeid = b2h16(iou_info->iou_changeid);
2697 gid_info->gl_pending_cmds++; /* for diag code */
2698 mutex_exit(&gid_info->gl_mutex);
2699
2700 if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) {
2701 mutex_enter(&gid_info->gl_mutex);
2702 gid_info->gl_pending_cmds--;
2703 mutex_exit(&gid_info->gl_mutex);
2704 }
2705 /*
2706 * Parallelize getting IOC controller profiles from here.
2707 * Allocate IBMF packets and send commands to get IOC profile for
2708 * each IOC present on the IOU.
2709 */
2710 for (ii = 0; ii < num_iocs; ii++) {
2711 /*
2712 * Check whether IOC is present in the slot
2713 * Series of nibbles (in the field iou_ctrl_list) represents
2714 * a slot in the IOU.
2715 * Byte format: 76543210
2716 * Bits 0-3 of first byte represent Slot 2
2717 * bits 4-7 of first byte represent slot 1,
2718 * bits 0-3 of second byte represent slot 4 and so on
2719 * Each 4-bit nibble has the following meaning
2720 * 0x0 : IOC not installed
2721 * 0x1 : IOC is present
2722 * 0xf : Slot does not exist
2723 * and all other values are reserved.
2724 */
2725 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
2726 slot_info = giou_info->iou_ctrl_list[(ii/2)];
2727 if ((ii % 2) == 0)
2728 slot_info = (slot_info >> 4);
2729
2730 if ((slot_info & 0xf) != 1) {
2731 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: "
2732 "No IOC is present in the slot = %d", ii);
2733 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
2734 continue;
2735 }
2736
2737 mutex_enter(&gid_info->gl_mutex);
2738 ibdm_bump_transactionID(gid_info);
2739 mutex_exit(&gid_info->gl_mutex);
2740
2741 /*
2742 * Re use the already allocated packet (for IOUnitinfo) to
2743 * send the first IOC controller attribute. Allocate new
2744 * IBMF packets for the rest of the IOC's
2745 */
2746 if (first != B_TRUE) {
2747 msg = NULL;
2748 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP,
2749 &msg) != IBMF_SUCCESS) {
2750 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: "
2751 "IBMF packet allocation failed");
2752 continue;
2753 }
2754
2755 }
2756
2757 /* allocate send buffers for all messages */
2758 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2759 ibdm_alloc_send_buffers(msg);
2760 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2761
2762 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
2763 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
2764 if (gid_info->gl_redirected == B_TRUE) {
2765 if (gid_info->gl_redirect_dlid != 0) {
2766 msg->im_local_addr.ia_remote_lid =
2767 gid_info->gl_redirect_dlid;
2768 }
2769 msg->im_local_addr.ia_remote_qno =
2770 gid_info->gl_redirect_QP;
2771 msg->im_local_addr.ia_p_key =
2772 gid_info->gl_redirect_pkey;
2773 msg->im_local_addr.ia_q_key =
2774 gid_info->gl_redirect_qkey;
2775 msg->im_local_addr.ia_service_level =
2776 gid_info->gl_redirectSL;
2777 } else {
2778 msg->im_local_addr.ia_remote_qno = 1;
2779 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
2780 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
2781 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
2782 }
2783
2784 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
2785 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
2786 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
2787 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
2788 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
2789 hdr->Status = 0;
2790 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
2791 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE);
2792 hdr->AttributeModifier = h2b32(ii + 1);
2793
2794 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_INVALID;
2795 cb_args = &ioc_info->ioc_cb_args;
2796 cb_args->cb_gid_info = gid_info;
2797 cb_args->cb_retry_count = ibdm_dft_retry_cnt;
2798 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO;
2799 cb_args->cb_ioc_num = ii;
2800
2801 mutex_enter(&gid_info->gl_mutex);
2802 gid_info->gl_pending_cmds++; /* for diag code */
2803
2804 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2805 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2806 mutex_exit(&gid_info->gl_mutex);
2807
2808 IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:"
2809 "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii);
2810
2811 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg,
2812 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
2813 IBTF_DPRINTF_L2("ibdm",
2814 "\thandle_iounitinfo: msg transport failed");
2815 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args);
2816 }
2817 (*flag) |= IBDM_IBMF_PKT_REUSED;
2818 first = B_FALSE;
2819 gid_info->gl_iou->iou_niocs_probe_in_progress++;
2820 }
2821 }
2822
2823
2824 /*
2825 * ibdm_handle_ioc_profile()
2826 * Invoked by the IBMF when the IOCControllerProfile request
2827 * gets completed
2828 */
2829 static void
2830 ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl,
2831 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2832 {
2833 int first = B_TRUE, reprobe = 0;
2834 uint_t ii, ioc_no, srv_start;
2835 uint_t nserv_entries;
2836 timeout_id_t timeout_id;
2837 ib_mad_hdr_t *hdr;
2838 ibdm_ioc_info_t *ioc_info;
2839 ibdm_timeout_cb_args_t *cb_args;
2840 ib_dm_ioc_ctrl_profile_t *ioc, *gioc;
2841
2842 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:"
2843 " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info);
2844
2845 ioc = IBDM_IN_IBMFMSG2IOC(msg);
2846 /*
2847 * Check whether we know this IOC already
2848 * This will return NULL if a reprobe is in progress;
2849 * in that case IBDM_IOC_STATE_REPROBE_PROGRESS will be set.
2850 * Do not hold mutexes here.
2851 */
2852 if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) {
2853 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:"
2854 "IOC guid %llx is present", ioc->ioc_guid);
2855 return;
2856 }
2857 ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg);
2858 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1);
2859
2860 /* Make sure that the IOC index is within the valid range */
2861 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) {
2862 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: "
2863 "IOC index Out of range, index %d", ioc_no);
2864 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
2865 return;
2866 }
2867 ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1];
2868 ioc_info->ioc_iou_info = gid_info->gl_iou;
2869
2870 mutex_enter(&gid_info->gl_mutex);
2871 if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) {
2872 reprobe = 1;
2873 ioc_info->ioc_prev_serv = ioc_info->ioc_serv;
2874 ioc_info->ioc_serv = NULL;
2875 ioc_info->ioc_prev_serv_cnt =
2876 ioc_info->ioc_profile.ioc_service_entries;
2877 } else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) {
2878 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response"
2879 "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state);
2880 mutex_exit(&gid_info->gl_mutex);
2881 (*flag) |= IBDM_IBMF_PKT_DUP_RESP;
2882 return;
2883 }
2884 ioc_info->ioc_cb_args.cb_req_type = 0;
2885 if (ioc_info->ioc_timeout_id) {
2886 timeout_id = ioc_info->ioc_timeout_id;
2887 ioc_info->ioc_timeout_id = 0;
2888 mutex_exit(&gid_info->gl_mutex);
2889 IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: "
2890 "ioc_timeout_id = 0x%x", timeout_id);
2891 if (untimeout(timeout_id) == -1) {
2892 IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: "
2893 "untimeout ioc_timeout_id failed");
2894 }
2895 mutex_enter(&gid_info->gl_mutex);
2896 }
2897
2898 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS;
2899 if (reprobe == 0) {
2900 ioc_info->ioc_iou_guid = gid_info->gl_nodeguid;
2901 ioc_info->ioc_nodeguid = gid_info->gl_nodeguid;
2902 }
2903
2904 /*
2905 * Save all the IOC information in the global structures.
2906 * Note the wire format is big endian and the SPARC processor is also
2907 * big endian, so there is no need to convert the data fields there.
2908 * The conversion routines used below are no-ops on SPARC machines,
2909 * whereas they byte-swap on little endian machines such as Intel
2910 * processors.
2911 */
2912 gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile;
2913
2914 /*
2915 * Restrict updates to only port GIDs and service entries during reprobe
2916 */
2917 if (reprobe == 0) {
2918 gioc->ioc_guid = b2h64(ioc->ioc_guid);
2919 gioc->ioc_vendorid =
2920 ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK)
2921 >> IB_DM_VENDORID_SHIFT);
2922 gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid);
2923 gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver);
2924 gioc->ioc_subsys_vendorid =
2925 ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK)
2926 >> IB_DM_VENDORID_SHIFT);
2927 gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id);
2928 gioc->ioc_io_class = b2h16(ioc->ioc_io_class);
2929 gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass);
2930 gioc->ioc_protocol = b2h16(ioc->ioc_protocol);
2931 gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver);
2932 gioc->ioc_send_msg_qdepth =
2933 b2h16(ioc->ioc_send_msg_qdepth);
2934 gioc->ioc_rdma_read_qdepth =
2935 b2h16(ioc->ioc_rdma_read_qdepth);
2936 gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz);
2937 gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz);
2938 gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask;
2939 bcopy(ioc->ioc_id_string, gioc->ioc_id_string,
2940 IB_DM_IOC_ID_STRING_LEN);
2941
2942 ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode;
2943 ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid;
2944 ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK &
2945 gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE;
2946
2947 if (ioc_info->ioc_diagdeviceid == B_TRUE) {
2948 gid_info->gl_pending_cmds++;
2949 IBTF_DPRINTF_L3(ibdm_string,
2950 "\tibdm_handle_ioc_profile: "
2951 "%d: gid_info %p gl_state %d pending_cmds %d",
2952 __LINE__, gid_info, gid_info->gl_state,
2953 gid_info->gl_pending_cmds);
2954 }
2955 }
2956 gioc->ioc_service_entries = ioc->ioc_service_entries;
2957 mutex_exit(&gid_info->gl_mutex);
2958
2959 ibdm_dump_ioc_profile(gioc);
2960
2961 if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) {
2962 if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) {
2963 mutex_enter(&gid_info->gl_mutex);
2964 gid_info->gl_pending_cmds--;
2965 mutex_exit(&gid_info->gl_mutex);
2966 }
2967 }
2968 ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc(
2969 (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)),
2970 KM_SLEEP);
2971
2972 /*
2973 * A single request can fetch at most four service entries. If the
2974 * number of service entries is more than four, calculate the number
2975 * of requests needed and send them in parallel.
2976 */
2977 nserv_entries = ioc->ioc_service_entries;
2978 ii = 0;
2979 while (nserv_entries) {
2980 mutex_enter(&gid_info->gl_mutex);
2981 gid_info->gl_pending_cmds++;
2982 ibdm_bump_transactionID(gid_info);
2983 mutex_exit(&gid_info->gl_mutex);
2984
2985 if (first != B_TRUE) {
2986 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP,
2987 &msg) != IBMF_SUCCESS) {
2988 continue;
2989 }
2990
2991 }
2992 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2993 ibdm_alloc_send_buffers(msg);
2994 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2995 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
2996 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
2997 if (gid_info->gl_redirected == B_TRUE) {
2998 if (gid_info->gl_redirect_dlid != 0) {
2999 msg->im_local_addr.ia_remote_lid =
3000 gid_info->gl_redirect_dlid;
3001 }
3002 msg->im_local_addr.ia_remote_qno =
3003 gid_info->gl_redirect_QP;
3004 msg->im_local_addr.ia_p_key =
3005 gid_info->gl_redirect_pkey;
3006 msg->im_local_addr.ia_q_key =
3007 gid_info->gl_redirect_qkey;
3008 msg->im_local_addr.ia_service_level =
3009 gid_info->gl_redirectSL;
3010 } else {
3011 msg->im_local_addr.ia_remote_qno = 1;
3012 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
3013 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
3014 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
3015 }
3016
3017 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
3018 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
3019 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
3020 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
3021 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
3022 hdr->Status = 0;
3023 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
3024 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES);
3025
3026 srv_start = ii * 4;
3027 cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args;
3028 cb_args->cb_gid_info = gid_info;
3029 cb_args->cb_retry_count = ibdm_dft_retry_cnt;
3030 cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS;
3031 cb_args->cb_srvents_start = srv_start;
3032 cb_args->cb_ioc_num = ioc_no - 1;
3033
3034 if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) {
3035 nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ;
3036 cb_args->cb_srvents_end = (cb_args->cb_srvents_start +
3037 IBDM_MAX_SERV_ENTRIES_PER_REQ - 1);
3038 } else {
3039 cb_args->cb_srvents_end =
3040 (cb_args->cb_srvents_start + nserv_entries - 1);
3041 nserv_entries = 0;
3042 }
3043 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr))
3044 ibdm_fill_srv_attr_mod(hdr, cb_args);
3045 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hdr))
3046
3047 mutex_enter(&gid_info->gl_mutex);
3048 ioc_info->ioc_serv[srv_start].se_timeout_id = timeout(
3049 ibdm_pkt_timeout_hdlr, cb_args,
3050 IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
3051 mutex_exit(&gid_info->gl_mutex);
3052
3053 IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:"
3054 "timeout %x, ioc %d srv %d",
3055 ioc_info->ioc_serv[srv_start].se_timeout_id,
3056 ioc_no - 1, srv_start);
3057
3058 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg,
3059 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
3060 IBTF_DPRINTF_L2("ibdm",
3061 "\thandle_ioc_profile: msg send failed");
3062 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args);
3063 }
3064 (*flag) |= IBDM_IBMF_PKT_REUSED;
3065 first = B_FALSE;
3066 ii++;
3067 }
3068 }
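
/*
 * Sketch of the ServiceEntries batching done at the end of
 * ibdm_handle_ioc_profile() above: at most four entries are fetched per
 * request (IBDM_MAX_SERV_ENTRIES_PER_REQ), so an IOC reporting 10
 * entries needs three requests covering entries 0-3, 4-7 and 8-9.
 * The helper name is illustrative only.
 */
static unsigned int
example_srvent_nreqs(unsigned int nentries)
{
	/* ceil(nentries / 4) requests, the ii-th starting at entry ii * 4 */
	return ((nentries + 3) / 4);
}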
3069
3070
3071 /*
3072 * ibdm_handle_srventry_mad()
3073 */
3074 static void
3075 ibdm_handle_srventry_mad(ibmf_msg_t *msg,
3076 ibdm_dp_gidinfo_t *gid_info, int *flag)
3077 {
3078 uint_t ii, ioc_no, attrmod;
3079 uint_t nentries, start, end;
3080 timeout_id_t timeout_id;
3081 ib_dm_srv_t *srv_ents;
3082 ibdm_ioc_info_t *ioc_info;
3083 ibdm_srvents_info_t *gsrv_ents;
3084
3085 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:"
3086 " IBMF msg %p gid info %p", msg, gid_info);
3087
3088 srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg);
3089 /*
3090 * Get the start and end index of the service entries
3091 * Upper 16 bits identify the IOC
3092 * Lower 16 bits specify the range of service entries
3093 * bits 8-15 specify the end of the range
3094 * bits 0-7 specify the start of the range
3095 */
3096 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg);
3097 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK);
3098 end = ((attrmod >> 8) & IBDM_8_BIT_MASK);
3099 start = (attrmod & IBDM_8_BIT_MASK);
3100
3101 /* Make sure that the IOC index is within the valid range */
3102 if ((ioc_no < 1) |
3103 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) {
3104 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: "
3105 "IOC index Out of range, index %d", ioc_no);
3106 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3107 return;
3108 }
3109 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1));
3110
3111 /*
3112 * Make sure that the "start" and "end" service indexes are
3113 * within the valid range
3114 */
3115 nentries = ioc_info->ioc_profile.ioc_service_entries;
3116 if ((start > end) | (start >= nentries) | (end >= nentries)) {
3117 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: "
3118 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries);
3119 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3120 return;
3121 }
3122 gsrv_ents = &ioc_info->ioc_serv[start];
3123 mutex_enter(&gid_info->gl_mutex);
3124 if (gsrv_ents->se_state != IBDM_SE_INVALID) {
3125 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: "
3126 "already known, ioc %d, srv %d, se_state %x",
3127 ioc_no - 1, start, gsrv_ents->se_state);
3128 mutex_exit(&gid_info->gl_mutex);
3129 (*flag) |= IBDM_IBMF_PKT_DUP_RESP;
3130 return;
3131 }
3132 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0;
3133 if (ioc_info->ioc_serv[start].se_timeout_id) {
3134 IBTF_DPRINTF_L2("ibdm",
3135 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start);
3136 timeout_id = ioc_info->ioc_serv[start].se_timeout_id;
3137 ioc_info->ioc_serv[start].se_timeout_id = 0;
3138 mutex_exit(&gid_info->gl_mutex);
3139 IBTF_DPRINTF_L5("ibdm", "handle_srventry_mad: "
3140 "se_timeout_id = 0x%x", timeout_id);
3141 if (untimeout(timeout_id) == -1) {
3142 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: "
3143 "untimeout se_timeout_id failed");
3144 }
3145 mutex_enter(&gid_info->gl_mutex);
3146 }
3147
3148 gsrv_ents->se_state = IBDM_SE_VALID;
3149 mutex_exit(&gid_info->gl_mutex);
3150 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) {
3151 gsrv_ents->se_attr.srv_id = b2h64(srv_ents->srv_id);
3152 bcopy(srv_ents->srv_name,
3153 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN);
3154 ibdm_dump_service_entries(&gsrv_ents->se_attr);
3155 }
3156 }
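
/*
 * Minimal sketch of the ServiceEntries AttributeModifier layout decoded
 * above: the upper 16 bits carry the IOC number, bits 8-15 the last
 * service-entry index of the range and bits 0-7 the first. The helper
 * name is illustrative only.
 */
static void
example_decode_srvent_attrmod(unsigned int attrmod, unsigned int *ioc_no,
    unsigned int *start, unsigned int *end)
{
	*ioc_no = (attrmod >> 16) & 0xffff;
	*end = (attrmod >> 8) & 0xff;
	*start = attrmod & 0xff;
}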
3157
3158
3159 /*
3160 * ibdm_get_diagcode:
3161 * Send request to get IOU/IOC diag code
3162 * Returns IBDM_SUCCESS/IBDM_FAILURE
3163 */
3164 static int
3165 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr)
3166 {
3167 ibmf_msg_t *msg;
3168 ib_mad_hdr_t *hdr;
3169 ibdm_ioc_info_t *ioc;
3170 ibdm_timeout_cb_args_t *cb_args;
3171 timeout_id_t *timeout_id;
3172
3173 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d",
3174 gid_info, attr);
3175
3176 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
3177 &msg) != IBMF_SUCCESS) {
3178 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail");
3179 return (IBDM_FAILURE);
3180 }
3181
3182 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
3183 ibdm_alloc_send_buffers(msg);
3184 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
3185
3186 mutex_enter(&gid_info->gl_mutex);
3187 ibdm_bump_transactionID(gid_info);
3188 mutex_exit(&gid_info->gl_mutex);
3189
3190 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
3191 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
3192 if (gid_info->gl_redirected == B_TRUE) {
3193 if (gid_info->gl_redirect_dlid != 0) {
3194 msg->im_local_addr.ia_remote_lid =
3195 gid_info->gl_redirect_dlid;
3196 }
3197
3198 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
3199 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
3200 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
3201 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
3202 } else {
3203 msg->im_local_addr.ia_remote_qno = 1;
3204 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
3205 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
3206 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
3207 }
3208
3209 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
3210 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
3211 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
3212 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
3213 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
3214 hdr->Status = 0;
3215 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
3216
3217 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE);
3218 hdr->AttributeModifier = h2b32(attr);
3219
3220 if (attr == 0) {
3221 cb_args = &gid_info->gl_iou_cb_args;
3222 gid_info->gl_iou->iou_dc_valid = B_FALSE;
3223 cb_args->cb_ioc_num = 0;
3224 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE;
3225 timeout_id = &gid_info->gl_timeout_id;
3226 } else {
3227 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1));
3228 ioc->ioc_dc_valid = B_FALSE;
3229 cb_args = &ioc->ioc_dc_cb_args;
3230 cb_args->cb_ioc_num = attr - 1;
3231 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE;
3232 timeout_id = &ioc->ioc_dc_timeout_id;
3233 }
3234 cb_args->cb_gid_info = gid_info;
3235 cb_args->cb_retry_count = ibdm_dft_retry_cnt;
3236 cb_args->cb_srvents_start = 0;
3237
3238 mutex_enter(&gid_info->gl_mutex);
3239 *timeout_id = timeout(ibdm_pkt_timeout_hdlr,
3240 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
3241 mutex_exit(&gid_info->gl_mutex);
3242
3243 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:"
3244 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num);
3245
3246 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
3247 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
3248 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed");
3249 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
3250 }
3251 return (IBDM_SUCCESS);
3252 }
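
/*
 * Usage note, as the callers above show: an 'attr' value of 0 requests
 * the IOU DiagCode, while (IOC slot index + 1) requests the DiagCode of
 * that IOC. The wrapper below is an illustrative sketch only.
 */
static int
example_get_ioc_diagcode(ibdm_dp_gidinfo_t *gid_info, int ioc_slot)
{
	/* ioc_slot is 0-based; the DiagCode attribute modifier is 1-based */
	return (ibdm_get_diagcode(gid_info, ioc_slot + 1));
}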
3253
3254 /*
3255 * ibdm_handle_diagcode:
3256 * Process the DiagCode MAD response and update local DM
3257 * data structure.
3258 */
3259 static void
3260 ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg,
3261 ibdm_dp_gidinfo_t *gid_info, int *flag)
3262 {
3263 uint16_t attrmod, *diagcode;
3264 ibdm_iou_info_t *iou;
3265 ibdm_ioc_info_t *ioc;
3266 timeout_id_t timeout_id;
3267 ibdm_timeout_cb_args_t *cb_args;
3268
3269 diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data;
3270
3271 mutex_enter(&gid_info->gl_mutex);
3272 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg);
3273 iou = gid_info->gl_iou;
3274 if (attrmod == 0) {
3275 if (iou->iou_dc_valid != B_FALSE) {
3276 (*flag) |= IBDM_IBMF_PKT_DUP_RESP;
3277 IBTF_DPRINTF_L4("ibdm",
3278 "\thandle_diagcode: Duplicate IOU DiagCode");
3279 mutex_exit(&gid_info->gl_mutex);
3280 return;
3281 }
3282 cb_args = &gid_info->gl_iou_cb_args;
3283 cb_args->cb_req_type = 0;
3284 iou->iou_diagcode = b2h16(*diagcode);
3285 iou->iou_dc_valid = B_TRUE;
3286 if (gid_info->gl_timeout_id) {
3287 timeout_id = gid_info->gl_timeout_id;
3288 mutex_exit(&gid_info->gl_mutex);
3289 IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: "
3290 "gl_timeout_id = 0x%x", timeout_id);
3291 if (untimeout(timeout_id) == -1) {
3292 IBTF_DPRINTF_L2("ibdm", "handle_diagcode: "
3293 "untimeout gl_timeout_id failed");
3294 }
3295 mutex_enter(&gid_info->gl_mutex);
3296 gid_info->gl_timeout_id = 0;
3297 }
3298 } else {
3299 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1));
3300 if (ioc->ioc_dc_valid != B_FALSE) {
3301 (*flag) |= IBDM_IBMF_PKT_DUP_RESP;
3302 IBTF_DPRINTF_L4("ibdm",
3303 "\thandle_diagcode: Duplicate IOC DiagCode");
3304 mutex_exit(&gid_info->gl_mutex);
3305 return;
3306 }
3307 cb_args = &ioc->ioc_dc_cb_args;
3308 cb_args->cb_req_type = 0;
3309 ioc->ioc_diagcode = b2h16(*diagcode);
3310 ioc->ioc_dc_valid = B_TRUE;
3311 timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id;
3312 if (timeout_id) {
3313 iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0;
3314 mutex_exit(&gid_info->gl_mutex);
3315 IBTF_DPRINTF_L5("ibdm", "handle_diagcode: "
3316 "timeout_id = 0x%x", timeout_id);
3317 if (untimeout(timeout_id) == -1) {
3318 IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: "
3319 "untimeout ioc_dc_timeout_id failed");
3320 }
3321 mutex_enter(&gid_info->gl_mutex);
3322 }
3323 }
3324 mutex_exit(&gid_info->gl_mutex);
3325
3326 IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x"
3327 "attrmod : 0x%x", b2h16(*diagcode), attrmod);
3328 }
3329
3330
3331 /*
3332 * ibdm_is_ioc_present()
3333 * Return ibdm_ioc_info_t if IOC guid is found in the global gid list
3334 */
3335 static ibdm_ioc_info_t *
3336 ibdm_is_ioc_present(ib_guid_t ioc_guid,
3337 ibdm_dp_gidinfo_t *gid_info, int *flag)
3338 {
3339 int ii;
3340 ibdm_ioc_info_t *ioc;
3341 ibdm_dp_gidinfo_t *head;
3342 ib_dm_io_unitinfo_t *iou;
3343
3344 mutex_enter(&ibdm.ibdm_mutex);
3345 head = ibdm.ibdm_dp_gidlist_head;
3346 while (head) {
3347 mutex_enter(&head->gl_mutex);
3348 if (head->gl_iou == NULL) {
3349 mutex_exit(&head->gl_mutex);
3350 head = head->gl_next;
3351 continue;
3352 }
3353 iou = &head->gl_iou->iou_info;
3354 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) {
3355 ioc = IBDM_GIDINFO2IOCINFO(head, ii);
3356 if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) &&
3357 (ioc->ioc_profile.ioc_guid == ioc_guid)) {
3358 if (gid_info == head) {
3359 *flag |= IBDM_IBMF_PKT_DUP_RESP;
3360 } else if (ibdm_check_dgid(head->gl_dgid_lo,
3361 head->gl_dgid_hi) != NULL) {
3362 IBTF_DPRINTF_L4("ibdm", "\tis_ioc_"
3363 "present: gid not present");
3364 ibdm_add_to_gl_gid(gid_info, head);
3365 }
3366 mutex_exit(&head->gl_mutex);
3367 mutex_exit(&ibdm.ibdm_mutex);
3368 return (ioc);
3369 }
3370 }
3371 mutex_exit(&head->gl_mutex);
3372 head = head->gl_next;
3373 }
3374 mutex_exit(&ibdm.ibdm_mutex);
3375 return (NULL);
3376 }
3377
3378
3379 /*
3380 * ibdm_ibmf_send_cb()
3381 * IBMF invokes this callback routine after posting the DM MAD to
3382 * the HCA.
3383 */
3384 /*ARGSUSED*/
3385 static void
3386 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg)
3387 {
3388 ibdm_dump_ibmf_msg(ibmf_msg, 1);
3389 ibdm_free_send_buffers(ibmf_msg);
3390 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) {
3391 IBTF_DPRINTF_L4("ibdm",
3392 "\tibmf_send_cb: IBMF free msg failed");
3393 }
3394 }
3395
3396
3397 /*
3398 * ibdm_ibmf_recv_cb()
3399 * Invoked by the IBMF when a response to one of the DM requests
3400 * is received.
3401 */
3402 /*ARGSUSED*/
3403 static void
3404 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg)
3405 {
3406 ibdm_taskq_args_t *taskq_args;
3407
3408 /*
3409 * If taskq handling is enabled, dispatch a task to process
3410 * the MAD; otherwise process it on this thread
3411 */
3412 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) {
3413 ibdm_process_incoming_mad(ibmf_hdl, msg, arg);
3414 return;
3415 }
3416
3417 /*
3418 * Dispatch a task to the system taskq to process the incoming MAD
3419 */
3420 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP);
3421 if (taskq_args == NULL) {
3422 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for"
3423 "taskq_args");
3424 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3425 IBTF_DPRINTF_L4("ibmf_recv_cb",
3426 "\tibmf_recv_cb: IBMF free msg failed");
3427 }
3428 return;
3429 }
3430 taskq_args->tq_ibmf_handle = ibmf_hdl;
3431 taskq_args->tq_ibmf_msg = msg;
3432 taskq_args->tq_args = arg;
3433
3434 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args,
3435 TQ_NOSLEEP) == TASKQID_INVALID) {
3436 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed");
3437 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3438 IBTF_DPRINTF_L4("ibmf_recv_cb",
3439 "\tibmf_recv_cb: IBMF free msg failed");
3440 }
3441 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t));
3442 return;
3443 }
3444
3445 /* taskq_args are deleted in ibdm_recv_incoming_mad() */
3446 }
3447
3448
3449 void
3450 ibdm_recv_incoming_mad(void *args)
3451 {
3452 ibdm_taskq_args_t *taskq_args;
3453
3454 taskq_args = (ibdm_taskq_args_t *)args;
3455
3456 IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: "
3457 "Processing incoming MAD via taskq");
3458
3459 ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle,
3460 taskq_args->tq_ibmf_msg, taskq_args->tq_args);
3461
3462 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t));
3463 }
3464
3465
3466 /*
3467 * Calls ibdm_process_incoming_mad with all function arguments extracted
3468 * from args
3469 */
3470 /*ARGSUSED*/
3471 static void
3472 ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg)
3473 {
3474 int flag = 0;
3475 int ret;
3476 uint64_t transaction_id;
3477 ib_mad_hdr_t *hdr;
3478 ibdm_dp_gidinfo_t *gid_info = NULL;
3479
3480 IBTF_DPRINTF_L4("ibdm",
3481 "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg);
3482 ibdm_dump_ibmf_msg(msg, 0);
3483
3484 /*
3485 * IBMF calls this routine for every DM MAD that arrives at this port.
3486 * But we handle only the responses to requests we sent. We drop all
3487 * the DM packets that do not have the response bit set in the MAD
3488 * header (this eliminates all the requests sent to this port).
3489 * We handle only DM class version 1 MADs.
3490 */
3491 hdr = IBDM_IN_IBMFMSG_MADHDR(msg);
3492 if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) {
3493 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3494 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: "
3495 "IBMF free msg failed DM request drop it");
3496 }
3497 return;
3498 }
3499
3500 transaction_id = b2h64(hdr->TransactionID);
3501
3502 mutex_enter(&ibdm.ibdm_mutex);
3503 gid_info = ibdm.ibdm_dp_gidlist_head;
3504 while (gid_info) {
3505 if ((gid_info->gl_transactionID &
3506 IBDM_GID_TRANSACTIONID_MASK) ==
3507 (transaction_id & IBDM_GID_TRANSACTIONID_MASK))
3508 break;
3509 gid_info = gid_info->gl_next;
3510 }
3511 mutex_exit(&ibdm.ibdm_mutex);
3512
3513 if (gid_info == NULL) {
3514 /* Drop the packet */
3515 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID"
3516 " does not match: 0x%llx", transaction_id);
3517 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3518 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
3519 "IBMF free msg failed DM request drop it");
3520 }
3521 return;
3522 }
3523
3524 /* Handle redirection for all the MAD's, except ClassPortInfo */
3525 if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) &&
3526 (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) {
3527 ret = ibdm_handle_redirection(msg, gid_info, &flag);
3528 if (ret == IBDM_SUCCESS) {
3529 return;
3530 }
3531 } else {
3532 uint_t gl_state;
3533
3534 mutex_enter(&gid_info->gl_mutex);
3535 gl_state = gid_info->gl_state;
3536 mutex_exit(&gid_info->gl_mutex);
3537
3538 switch (gl_state) {
3539
3540 case IBDM_SET_CLASSPORTINFO:
3541 ibdm_handle_setclassportinfo(
3542 ibmf_hdl, msg, gid_info, &flag);
3543 break;
3544
3545 case IBDM_GET_CLASSPORTINFO:
3546 ibdm_handle_classportinfo(
3547 ibmf_hdl, msg, gid_info, &flag);
3548 break;
3549
3550 case IBDM_GET_IOUNITINFO:
3551 ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag);
3552 break;
3553
3554 case IBDM_GET_IOC_DETAILS:
3555 switch (IBDM_IN_IBMFMSG_ATTR(msg)) {
3556
3557 case IB_DM_ATTR_SERVICE_ENTRIES:
3558 ibdm_handle_srventry_mad(msg, gid_info, &flag);
3559 break;
3560
3561 case IB_DM_ATTR_IOC_CTRL_PROFILE:
3562 ibdm_handle_ioc_profile(
3563 ibmf_hdl, msg, gid_info, &flag);
3564 break;
3565
3566 case IB_DM_ATTR_DIAG_CODE:
3567 ibdm_handle_diagcode(msg, gid_info, &flag);
3568 break;
3569
3570 default:
3571 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
3572 "Error state, wrong attribute :-(");
3573 (void) ibmf_free_msg(ibmf_hdl, &msg);
3574 return;
3575 }
3576 break;
3577 default:
3578 IBTF_DPRINTF_L2("ibdm",
3579 "process_incoming_mad: Dropping the packet"
3580 " gl_state %x", gl_state);
3581 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3582 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
3583 "IBMF free msg failed DM request drop it");
3584 }
3585 return;
3586 }
3587 }
3588
3589 if ((flag & IBDM_IBMF_PKT_DUP_RESP) ||
3590 (flag & IBDM_IBMF_PKT_UNEXP_RESP)) {
3591 IBTF_DPRINTF_L2("ibdm",
3592 "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag);
3593 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3594 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
3595 "IBMF free msg failed DM request drop it");
3596 }
3597 return;
3598 }
3599
3600 mutex_enter(&gid_info->gl_mutex);
3601 if (gid_info->gl_pending_cmds < 1) {
3602 IBTF_DPRINTF_L2("ibdm",
3603 "\tprocess_incoming_mad: pending commands negative");
3604 }
3605 if (--gid_info->gl_pending_cmds) {
3606 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: "
3607 "gid_info %p pending cmds %d",
3608 gid_info, gid_info->gl_pending_cmds);
3609 mutex_exit(&gid_info->gl_mutex);
3610 } else {
3611 uint_t prev_state;
3612 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: Probing DONE");
3613 prev_state = gid_info->gl_state;
3614 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE;
3615 if (prev_state == IBDM_SET_CLASSPORTINFO) {
3616 IBTF_DPRINTF_L4("ibdm",
3617 "\tprocess_incoming_mad: "
3618 "Setclassportinfo for Cisco FC GW is done.");
3619 gid_info->gl_flag &= ~IBDM_CISCO_PROBE;
3620 gid_info->gl_flag |= IBDM_CISCO_PROBE_DONE;
3621 mutex_exit(&gid_info->gl_mutex);
3622 cv_broadcast(&gid_info->gl_probe_cv);
3623 } else {
3624 mutex_exit(&gid_info->gl_mutex);
3625 ibdm_notify_newgid_iocs(gid_info);
3626 mutex_enter(&ibdm.ibdm_mutex);
3627 if (--ibdm.ibdm_ngid_probes_in_progress == 0) {
3628 IBTF_DPRINTF_L4("ibdm",
3629 "\tprocess_incoming_mad: Wakeup");
3630 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
3631 cv_broadcast(&ibdm.ibdm_probe_cv);
3632 }
3633 mutex_exit(&ibdm.ibdm_mutex);
3634 }
3635 }
3636
3637 /*
3638 * Do not deallocate the IBMF packet if at least one request
3639 * is posted; the IBMF packet is reused.
3640 */
3641 if (!(flag & IBDM_IBMF_PKT_REUSED)) {
3642 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3643 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: "
3644 "IBMF free msg failed DM request drop it");
3645 }
3646 }
3647 }
3648
3649
3650 /*
3651 * ibdm_verify_mad_status()
3652 * Verifies the MAD status
3653 * Returns IBDM_SUCCESS if status is correct
3654 * Returns IBDM_FAILURE for bogus MAD status
3655 */
3656 static int
3657 ibdm_verify_mad_status(ib_mad_hdr_t *hdr)
3658 {
3659 int ret = 0;
3660
3661 if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) ||
3662 (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) {
3663 return (IBDM_FAILURE);
3664 }
3665
3666 if (b2h16(hdr->Status) == 0)
3667 ret = IBDM_SUCCESS;
3668 else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED)
3669 ret = IBDM_SUCCESS;
3670 else {
3671 IBTF_DPRINTF_L2("ibdm",
3672 "\tverify_mad_status: Status : 0x%x", b2h16(hdr->Status));
3673 ret = IBDM_FAILURE;
3674 }
3675 return (ret);
3676 }
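
/*
 * Added illustrative note: a MAD header that ibdm_verify_mad_status()
 * accepts looks like
 *
 *	hdr->R_Method     == IB_DM_DEVMGT_METHOD_GET_RESP
 *	hdr->ClassVersion == IB_DM_CLASS_VERSION_1
 *	hdr->Status       == 0, or its low 5 bits equal
 *			     MAD_STATUS_REDIRECT_REQUIRED
 *
 * Anything else (a request rather than a response, another class version,
 * or any other status) is rejected and the caller drops the packet.
 */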
3677
3678
3679
3680 /*
3681 * ibdm_handle_redirection()
3682 * Returns IBDM_SUCCESS/IBDM_FAILURE
3683 */
3684 static int
3685 ibdm_handle_redirection(ibmf_msg_t *msg,
3686 ibdm_dp_gidinfo_t *gid_info, int *flag)
3687 {
3688 int attrmod, ioc_no, start;
3689 void *data;
3690 timeout_id_t *timeout_id;
3691 ib_mad_hdr_t *hdr;
3692 ibdm_ioc_info_t *ioc = NULL;
3693 ibdm_timeout_cb_args_t *cb_args;
3694 ib_mad_classportinfo_t *cpi;
3695
3696 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter");
3697 mutex_enter(&gid_info->gl_mutex);
3698 switch (gid_info->gl_state) {
3699 case IBDM_GET_IOUNITINFO:
3700 cb_args = &gid_info->gl_iou_cb_args;
3701 timeout_id = &gid_info->gl_timeout_id;
3702 break;
3703
3704 case IBDM_GET_IOC_DETAILS:
3705 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg);
3706 switch (IBDM_IN_IBMFMSG_ATTR(msg)) {
3707
3708 case IB_DM_ATTR_DIAG_CODE:
3709 if (attrmod == 0) {
3710 cb_args = &gid_info->gl_iou_cb_args;
3711 timeout_id = &gid_info->gl_timeout_id;
3712 break;
3713 }
3714 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) {
3715 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
3716 "IOC# Out of range %d", attrmod);
3717 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3718 mutex_exit(&gid_info->gl_mutex);
3719 return (IBDM_FAILURE);
3720 }
3721 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1));
3722 cb_args = &ioc->ioc_dc_cb_args;
3723 timeout_id = &ioc->ioc_dc_timeout_id;
3724 break;
3725
3726 case IB_DM_ATTR_IOC_CTRL_PROFILE:
3727 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) {
3728 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
3729 "IOC# Out of range %d", attrmod);
3730 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3731 mutex_exit(&gid_info->gl_mutex);
3732 return (IBDM_FAILURE);
3733 }
3734 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1));
3735 cb_args = &ioc->ioc_cb_args;
3736 timeout_id = &ioc->ioc_timeout_id;
3737 break;
3738
3739 case IB_DM_ATTR_SERVICE_ENTRIES:
3740 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK);
3741 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) {
3742 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
3743 "IOC# Out of range %d", ioc_no);
3744 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3745 mutex_exit(&gid_info->gl_mutex);
3746 return (IBDM_FAILURE);
3747 }
3748 start = (attrmod & IBDM_8_BIT_MASK);
3749 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1));
3750 if (start > ioc->ioc_profile.ioc_service_entries) {
3751 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
3752 " SE index Out of range %d", start);
3753 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3754 mutex_exit(&gid_info->gl_mutex);
3755 return (IBDM_FAILURE);
3756 }
3757 cb_args = &ioc->ioc_serv[start].se_cb_args;
3758 timeout_id = &ioc->ioc_serv[start].se_timeout_id;
3759 break;
3760
3761 default:
3762 /* ERROR State */
3763 IBTF_DPRINTF_L2("ibdm",
3764 "\thandle_redirection: wrong attribute :-(");
3765 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3766 mutex_exit(&gid_info->gl_mutex);
3767 return (IBDM_FAILURE);
3768 }
3769 break;
3770 default:
3771 /* ERROR State */
3772 IBTF_DPRINTF_L2("ibdm",
3773 "\thandle_redirection: Error state :-(");
3774 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3775 mutex_exit(&gid_info->gl_mutex);
3776 return (IBDM_FAILURE);
3777 }
3778 if ((*timeout_id) != 0) {
3779 mutex_exit(&gid_info->gl_mutex);
3780 if (untimeout(*timeout_id) == -1) {
3781 IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: "
3782 "untimeout failed %x", *timeout_id);
3783 } else {
3784 IBTF_DPRINTF_L5("ibdm",
3785 "\thandle_redirection: timeout %x", *timeout_id);
3786 }
3787 mutex_enter(&gid_info->gl_mutex);
3788 *timeout_id = 0;
3789 }
3790
3791 data = msg->im_msgbufs_recv.im_bufs_cl_data;
3792 cpi = (ib_mad_classportinfo_t *)data;
3793
3794 gid_info->gl_resp_timeout =
3795 (b2h32(cpi->RespTimeValue) & 0x1F);
3796
3797 gid_info->gl_redirected = B_TRUE;
3798 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID);
3799 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff);
3800 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key);
3801 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key);
3802 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi);
3803 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo);
3804 gid_info->gl_redirectSL = cpi->RedirectSL;
3805
3806 if (gid_info->gl_redirect_dlid != 0) {
3807 msg->im_local_addr.ia_remote_lid =
3808 gid_info->gl_redirect_dlid;
3809 }
3810 ibdm_bump_transactionID(gid_info);
3811 mutex_exit(&gid_info->gl_mutex);
3812
3813 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg, *hdr))
3814 ibdm_alloc_send_buffers(msg);
3815
3816 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
3817 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
3818 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
3819 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
3820 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
3821 hdr->Status = 0;
3822 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
3823 hdr->AttributeID =
3824 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID;
3825 hdr->AttributeModifier =
3826 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier;
3827 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg, *hdr))
3828
3829 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
3830 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
3831 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
3832 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
3833
3834 mutex_enter(&gid_info->gl_mutex);
3835 *timeout_id = timeout(ibdm_pkt_timeout_hdlr,
3836 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
3837 mutex_exit(&gid_info->gl_mutex);
3838
3839 IBTF_DPRINTF_L5("ibdm", "\thandle_redirect:"
3840 "timeout %x", *timeout_id);
3841
3842 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
3843 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
3844 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:"
3845 "message transport failed");
3846 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
3847 }
3848 (*flag) |= IBDM_IBMF_PKT_REUSED;
3849 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit");
3850 return (IBDM_SUCCESS);
3851 }
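
/*
 * Added illustrative note: assume the target answers with a ClassPortInfo
 * whose RedirectLID is 0x24, RedirectQP is 0x40 and RedirectQ_Key is
 * IB_GSI_QKEY (made-up values).  The code above then rewrites the reused
 * IBMF message so that the retransmitted request reaches the redirected
 * agent:
 *
 *	msg->im_local_addr.ia_remote_lid = 0x24;
 *	msg->im_local_addr.ia_remote_qno = 0x40;
 *	msg->im_local_addr.ia_q_key	 = IB_GSI_QKEY;
 *
 * The redirect values are also cached in the gid_info (gl_redirect_*) so
 * that ibdm_retry_command() addresses later requests the same way.
 */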
3852
3853
3854 /*
3855 * ibdm_pkt_timeout_hdlr
3856 * This timeout handler is registered for every IBMF packet that is
3857 * sent through the IBMF. It gets called when no response is received
3858 * within the specified time for the packet. The failed command is
3859 * retried while its retry count lasts; once retries are exhausted the
3860 * IBMF packet is dropped and the pending commands count is updated.
3861 */
3862 static void
3863 ibdm_pkt_timeout_hdlr(void *arg)
3864 {
3865 ibdm_iou_info_t *iou;
3866 ibdm_ioc_info_t *ioc;
3867 ibdm_timeout_cb_args_t *cb_args = arg;
3868 ibdm_dp_gidinfo_t *gid_info;
3869 int srv_ent;
3870 uint_t new_gl_state;
3871
3872 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p "
3873 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
3874 cb_args->cb_req_type, cb_args->cb_ioc_num,
3875 cb_args->cb_srvents_start);
3876
3877 gid_info = cb_args->cb_gid_info;
3878 mutex_enter(&gid_info->gl_mutex);
3879
3880 if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) ||
3881 (cb_args->cb_req_type == 0)) {
3882
3883 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed"
3884 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type,
3885 cb_args->cb_ioc_num, cb_args->cb_srvents_start);
3886
3887 if (gid_info->gl_timeout_id)
3888 gid_info->gl_timeout_id = 0;
3889 mutex_exit(&gid_info->gl_mutex);
3890 return;
3891 }
3892 if (cb_args->cb_retry_count) {
3893 cb_args->cb_retry_count--;
3894 /*
3895 * A new timeout_id is set inside ibdm_retry_command().
3896 * When the function returns an error, the timeout_id
3897 * is reset (to zero) in the switch statement below.
3898 */
3899 if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) {
3900 mutex_exit(&gid_info->gl_mutex);
3901 return;
3902 }
3903 cb_args->cb_retry_count = 0;
3904 }
3905
3906 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p"
3907 " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
3908 cb_args->cb_req_type, cb_args->cb_ioc_num,
3909 cb_args->cb_srvents_start);
3910
3911 switch (cb_args->cb_req_type) {
3912
3913 case IBDM_REQ_TYPE_CLASSPORTINFO:
3914 case IBDM_REQ_TYPE_IOUINFO:
3915 new_gl_state = IBDM_GID_PROBING_FAILED;
3916 if (gid_info->gl_timeout_id)
3917 gid_info->gl_timeout_id = 0;
3918 break;
3919
3920 case IBDM_REQ_TYPE_IOCINFO:
3921 new_gl_state = IBDM_GID_PROBING_COMPLETE;
3922 iou = gid_info->gl_iou;
3923 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
3924 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
3925 if (ioc->ioc_timeout_id)
3926 ioc->ioc_timeout_id = 0;
3927 break;
3928
3929 case IBDM_REQ_TYPE_SRVENTS:
3930 new_gl_state = IBDM_GID_PROBING_COMPLETE;
3931 iou = gid_info->gl_iou;
3932 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
3933 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
3934 srv_ent = cb_args->cb_srvents_start;
3935 if (ioc->ioc_serv[srv_ent].se_timeout_id)
3936 ioc->ioc_serv[srv_ent].se_timeout_id = 0;
3937 break;
3938
3939 case IBDM_REQ_TYPE_IOU_DIAGCODE:
3940 new_gl_state = IBDM_GID_PROBING_COMPLETE;
3941 iou = gid_info->gl_iou;
3942 iou->iou_dc_valid = B_FALSE;
3943 if (gid_info->gl_timeout_id)
3944 gid_info->gl_timeout_id = 0;
3945 break;
3946
3947 case IBDM_REQ_TYPE_IOC_DIAGCODE:
3948 new_gl_state = IBDM_GID_PROBING_COMPLETE;
3949 iou = gid_info->gl_iou;
3950 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
3951 ioc->ioc_dc_valid = B_FALSE;
3952 if (ioc->ioc_dc_timeout_id)
3953 ioc->ioc_dc_timeout_id = 0;
3954 break;
3955
3956 default: /* ERROR State */
3957 new_gl_state = IBDM_GID_PROBING_FAILED;
3958 if (gid_info->gl_timeout_id)
3959 gid_info->gl_timeout_id = 0;
3960 IBTF_DPRINTF_L2("ibdm",
3961 "\tpkt_timeout_hdlr: wrong request type.");
3962 break;
3963 }
3964
3965 --gid_info->gl_pending_cmds; /* decrease the counter */
3966
3967 if (gid_info->gl_pending_cmds == 0) {
3968 gid_info->gl_state = new_gl_state;
3969 mutex_exit(&gid_info->gl_mutex);
3970 /*
3971 * Delete this gid_info if the gid probe fails.
3972 */
3973 if (new_gl_state == IBDM_GID_PROBING_FAILED) {
3974 ibdm_delete_glhca_list(gid_info);
3975 }
3976 ibdm_notify_newgid_iocs(gid_info);
3977 mutex_enter(&ibdm.ibdm_mutex);
3978 if (--ibdm.ibdm_ngid_probes_in_progress == 0) {
3979 IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup");
3980 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
3981 cv_broadcast(&ibdm.ibdm_probe_cv);
3982 }
3983 mutex_exit(&ibdm.ibdm_mutex);
3984 } else {
3985 /*
3986 * Reset gl_pending_cmd if the extra timeout happens since
3987 * gl_pending_cmd becomes negative as a result.
3988 */
3989 if (gid_info->gl_pending_cmds < 0) {
3990 gid_info->gl_pending_cmds = 0;
3991 IBTF_DPRINTF_L2("ibdm",
3992 "\tpkt_timeout_hdlr: extra timeout request."
3993 " reset gl_pending_cmds");
3994 }
3995 mutex_exit(&gid_info->gl_mutex);
3996 /*
3997 * Delete this gid_info if the gid probe fails.
3998 */
3999 if (new_gl_state == IBDM_GID_PROBING_FAILED) {
4000 ibdm_delete_glhca_list(gid_info);
4001 }
4002 }
4003 }
4004
4005
4006 /*
4007 * ibdm_retry_command()
4008 * Retries the failed command.
4009 * Returns IBDM_FAILURE/IBDM_SUCCESS
4010 */
4011 static int
4012 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args)
4013 {
4014 int ret;
4015 ibmf_msg_t *msg;
4016 ib_mad_hdr_t *hdr;
4017 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info;
4018 timeout_id_t *timeout_id;
4019 ibdm_ioc_info_t *ioc;
4020 int ioc_no;
4021 ASSERT(MUTEX_HELD(&gid_info->gl_mutex));
4022
4023 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p "
4024 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
4025 cb_args->cb_req_type, cb_args->cb_ioc_num,
4026 cb_args->cb_srvents_start);
4027
4028 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg);
4029
4030
4031 /*
4032 * Reset the GID if alloc_msg failed with BAD_HANDLE;
4033 * ibdm_reset_gidinfo() reinitializes the gid_info
4034 */
4035 if (ret == IBMF_BAD_HANDLE) {
4036 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad",
4037 gid_info);
4038
4039 mutex_exit(&gid_info->gl_mutex);
4040 ibdm_reset_gidinfo(gid_info);
4041 mutex_enter(&gid_info->gl_mutex);
4042
4043 /* Retry alloc */
4044 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP,
4045 &msg);
4046 }
4047
4048 if (ret != IBDM_SUCCESS) {
4049 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p "
4050 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
4051 cb_args->cb_req_type, cb_args->cb_ioc_num,
4052 cb_args->cb_srvents_start);
4053 return (IBDM_FAILURE);
4054 }
4055
4056 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
4057 ibdm_alloc_send_buffers(msg);
4058 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
4059
4060 ibdm_bump_transactionID(gid_info);
4061
4062 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
4063 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
4064 if (gid_info->gl_redirected == B_TRUE) {
4065 if (gid_info->gl_redirect_dlid != 0) {
4066 msg->im_local_addr.ia_remote_lid =
4067 gid_info->gl_redirect_dlid;
4068 }
4069 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
4070 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
4071 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
4072 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
4073 } else {
4074 msg->im_local_addr.ia_remote_qno = 1;
4075 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
4076 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
4077 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
4078 }
4079 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
4080 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr))
4081 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
4082 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
4083 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
4084 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
4085 hdr->Status = 0;
4086 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
4087
4088 switch (cb_args->cb_req_type) {
4089 case IBDM_REQ_TYPE_CLASSPORTINFO:
4090 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO);
4091 hdr->AttributeModifier = 0;
4092 timeout_id = &gid_info->gl_timeout_id;
4093 break;
4094 case IBDM_REQ_TYPE_IOUINFO:
4095 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO);
4096 hdr->AttributeModifier = 0;
4097 timeout_id = &gid_info->gl_timeout_id;
4098 break;
4099 case IBDM_REQ_TYPE_IOCINFO:
4100 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE);
4101 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1);
4102 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num);
4103 timeout_id = &ioc->ioc_timeout_id;
4104 break;
4105 case IBDM_REQ_TYPE_SRVENTS:
4106 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES);
4107 ibdm_fill_srv_attr_mod(hdr, cb_args);
4108 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num);
4109 timeout_id =
4110 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id;
4111 break;
4112 case IBDM_REQ_TYPE_IOU_DIAGCODE:
4113 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE);
4114 hdr->AttributeModifier = 0;
4115 timeout_id = &gid_info->gl_timeout_id;
4116 break;
4117 case IBDM_REQ_TYPE_IOC_DIAGCODE:
4118 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE);
4119 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1);
4120 ioc_no = cb_args->cb_ioc_num;
4121 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no];
4122 timeout_id = &ioc->ioc_dc_timeout_id;
4123 break;
4124 }
4125 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hdr))
4126
4127 *timeout_id = timeout(ibdm_pkt_timeout_hdlr,
4128 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
4129
4130 mutex_exit(&gid_info->gl_mutex);
4131
4132 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:"
4133 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num,
4134 cb_args->cb_srvents_start, *timeout_id);
4135
4136 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl,
4137 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb,
4138 cb_args, 0) != IBMF_SUCCESS) {
4139 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p "
4140 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
4141 cb_args->cb_req_type, cb_args->cb_ioc_num,
4142 cb_args->cb_srvents_start);
4143 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
4144 }
4145 mutex_enter(&gid_info->gl_mutex);
4146 return (IBDM_SUCCESS);
4147 }
4148
4149
4150 /*
4151 * ibdm_update_ioc_port_gidlist()
4152 */
4153 static void
4154 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest,
4155 ibdm_dp_gidinfo_t *gid_info)
4156 {
4157 int ii, ngid_ents;
4158 ibdm_gid_t *tmp;
4159 ibdm_hca_list_t *gid_hca_head, *temp;
4160 ibdm_hca_list_t *ioc_head = NULL;
4161 ASSERT(MUTEX_HELD(&gid_info->gl_mutex));
4162
4163 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter");
4164
4165 ngid_ents = gid_info->gl_ngids;
4166 dest->ioc_nportgids = ngid_ents;
4167 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) *
4168 ngid_ents, KM_SLEEP);
4169 tmp = gid_info->gl_gid;
4170 for (ii = 0; (ii < ngid_ents) && (tmp); ii++) {
4171 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi;
4172 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo;
4173 tmp = tmp->gid_next;
4174 }
4175
4176 gid_hca_head = gid_info->gl_hca_list;
4177 while (gid_hca_head) {
4178 temp = ibdm_dup_hca_attr(gid_hca_head);
4179 temp->hl_next = ioc_head;
4180 ioc_head = temp;
4181 gid_hca_head = gid_hca_head->hl_next;
4182 }
4183 dest->ioc_hca_list = ioc_head;
4184 }
4185
4186
4187 /*
4188 * ibdm_alloc_send_buffers()
4189 * Allocates memory for the IBMF send buffer to send and/or receive
4190 * the Device Management MAD packet.
4191 */
4192 static void
4193 ibdm_alloc_send_buffers(ibmf_msg_t *msgp)
4194 {
4195 msgp->im_msgbufs_send.im_bufs_mad_hdr =
4196 kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP);
4197
4198 msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *)
4199 msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t);
4200 msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDM_DM_MAD_HDR_SZ;
4201
4202 msgp->im_msgbufs_send.im_bufs_cl_data =
4203 ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr + IBDM_DM_MAD_HDR_SZ);
4204 msgp->im_msgbufs_send.im_bufs_cl_data_len =
4205 IBDM_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDM_DM_MAD_HDR_SZ;
4206 }
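
/*
 * Added illustrative note: the send buffer allocated above is a single
 * MAD-sized block carved into three regions.  Assuming the usual sizes
 * (IBDM_MAD_SIZE of 256 bytes, a 24-byte ib_mad_hdr_t and a 40-byte
 * IBDM_DM_MAD_HDR_SZ -- check ibdm_impl.h for the actual values), the
 * layout is:
 *
 *	bytes   0 ..  23	im_bufs_mad_hdr		common MAD header
 *	bytes  24 ..  63	im_bufs_cl_hdr		DM class header
 *	bytes  64 .. 255	im_bufs_cl_data		DM attribute data
 */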
4207
4208
4209 /*
4210 * ibdm_free_send_buffers()
4211 * De-allocates memory for the IBMF send buffer
4212 */
4213 static void
4214 ibdm_free_send_buffers(ibmf_msg_t *msgp)
4215 {
4216 if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL)
4217 kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE);
4218 }
4219
4220 /*
4221 * ibdm_probe_ioc()
4222 * 1. Gets the node records for the port GUID. This detects all the ports
4223 * to the IOU.
4224 * 2. Selectively probes all the IOCs, given the node GUID
4225 * 3. In case of reprobe, only the IOC to be reprobed is sent the IOC
4226 * Controller Profile asynchronously
4227 */
4228 /*ARGSUSED*/
4229 static void
4230 ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag)
4231 {
4232 int ii, nrecords;
4233 size_t nr_len = 0, pi_len = 0;
4234 ib_gid_t sgid, dgid;
4235 ibdm_hca_list_t *hca_list = NULL;
4236 sa_node_record_t *nr, *tmp;
4237 ibdm_port_attr_t *port = NULL;
4238 ibdm_dp_gidinfo_t *reprobe_gid, *new_gid, *node_gid;
4239 ibdm_dp_gidinfo_t *temp_gidinfo;
4240 ibdm_gid_t *temp_gid;
4241 sa_portinfo_record_t *pi;
4242
4243 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%llx, %llx, %x): Begin",
4244 nodeguid, ioc_guid, reprobe_flag);
4245
4246 /* Rescan the GID list for any removed GIDs for reprobe */
4247 if (reprobe_flag)
4248 ibdm_rescan_gidlist(&ioc_guid);
4249
4250 mutex_enter(&ibdm.ibdm_hl_mutex);
4251 for (ibdm_get_next_port(&hca_list, &port, 1); port;
4252 ibdm_get_next_port(&hca_list, &port, 1)) {
4253 reprobe_gid = new_gid = node_gid = NULL;
4254
4255 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid);
4256 if (nr == NULL) {
4257 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records");
4258 continue;
4259 }
4260 nrecords = (nr_len / sizeof (sa_node_record_t));
4261 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) {
4262 if ((pi = ibdm_get_portinfo(
4263 port->pa_sa_hdl, &pi_len, tmp->LID)) == NULL) {
4264 IBTF_DPRINTF_L4("ibdm",
4265 "\tibdm_get_portinfo: no portinfo recs");
4266 continue;
4267 }
4268
4269 /*
4270 * If Device Management is not supported on
4271 * this port, skip the rest.
4272 */
4273 if (!(pi->PortInfo.CapabilityMask &
4274 SM_CAP_MASK_IS_DM_SUPPD)) {
4275 kmem_free(pi, pi_len);
4276 continue;
4277 }
4278
4279 /*
4280 * For reprobes: Check if GID, already in
4281 * the list. If so, set the state to SKIPPED
4282 */
4283 if (((temp_gidinfo = ibdm_find_gid(nodeguid,
4284 tmp->NodeInfo.PortGUID)) != NULL) &&
4285 temp_gidinfo->gl_state ==
4286 IBDM_GID_PROBING_COMPLETE) {
4287 ASSERT(reprobe_gid == NULL);
4288 ibdm_addto_glhcalist(temp_gidinfo,
4289 hca_list);
4290 reprobe_gid = temp_gidinfo;
4291 kmem_free(pi, pi_len);
4292 continue;
4293 } else if (temp_gidinfo != NULL) {
4294 kmem_free(pi, pi_len);
4295 ibdm_addto_glhcalist(temp_gidinfo,
4296 hca_list);
4297 continue;
4298 }
4299
4300 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : "
4301 "create_gid : prefix %llx, guid %llx\n",
4302 pi->PortInfo.GidPrefix,
4303 tmp->NodeInfo.PortGUID);
4304
4305 sgid.gid_prefix = port->pa_sn_prefix;
4306 sgid.gid_guid = port->pa_port_guid;
4307 dgid.gid_prefix = pi->PortInfo.GidPrefix;
4308 dgid.gid_guid = tmp->NodeInfo.PortGUID;
4309 new_gid = ibdm_create_gid_info(port, sgid,
4310 dgid);
4311 if (new_gid == NULL) {
4312 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: "
4313 "create_gid_info failed\n");
4314 kmem_free(pi, pi_len);
4315 continue;
4316 }
4317 if (node_gid == NULL) {
4318 node_gid = new_gid;
4319 ibdm_add_to_gl_gid(node_gid, node_gid);
4320 } else {
4321 IBTF_DPRINTF_L4("ibdm",
4322 "\tprobe_ioc: new gid");
4323 temp_gid = kmem_zalloc(
4324 sizeof (ibdm_gid_t), KM_SLEEP);
4325 temp_gid->gid_dgid_hi =
4326 new_gid->gl_dgid_hi;
4327 temp_gid->gid_dgid_lo =
4328 new_gid->gl_dgid_lo;
4329 temp_gid->gid_next = node_gid->gl_gid;
4330 node_gid->gl_gid = temp_gid;
4331 node_gid->gl_ngids++;
4332 }
4333 new_gid->gl_is_dm_capable = B_TRUE;
4334 new_gid->gl_nodeguid = nodeguid;
4335 new_gid->gl_portguid = dgid.gid_guid;
4336 ibdm_addto_glhcalist(new_gid, hca_list);
4337
4338 /*
4339 * Set the state to skipped as all these
4340 * gids point to the same node.
4341 * We (re)probe only one GID below and reset
4342 * state appropriately
4343 */
4344 new_gid->gl_state = IBDM_GID_PROBING_SKIPPED;
4345 new_gid->gl_devid = (*tmp).NodeInfo.DeviceID;
4346 kmem_free(pi, pi_len);
4347 }
4348 kmem_free(nr, nr_len);
4349
4350 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d "
4351 "reprobe_gid %p new_gid %p node_gid %p",
4352 reprobe_flag, reprobe_gid, new_gid, node_gid);
4353
4354 if (reprobe_flag != 0 && reprobe_gid != NULL) {
4355 int niocs, jj;
4356 ibdm_ioc_info_t *tmp_ioc;
4357 int ioc_matched = 0;
4358
4359 mutex_exit(&ibdm.ibdm_hl_mutex);
4360 mutex_enter(&reprobe_gid->gl_mutex);
4361 reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS;
4362 niocs =
4363 reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots;
4364 reprobe_gid->gl_pending_cmds++;
4365 mutex_exit(&reprobe_gid->gl_mutex);
4366
4367 for (jj = 0; jj < niocs; jj++) {
4368 tmp_ioc =
4369 IBDM_GIDINFO2IOCINFO(reprobe_gid, jj);
4370 if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid)
4371 continue;
4372
4373 ioc_matched = 1;
4374
4375 /*
4376 * Explicitly set gl_reprobe_flag to 0 so that
4377 * IBnex is not notified on completion
4378 */
4379 mutex_enter(&reprobe_gid->gl_mutex);
4380 reprobe_gid->gl_reprobe_flag = 0;
4381 mutex_exit(&reprobe_gid->gl_mutex);
4382
4383 mutex_enter(&ibdm.ibdm_mutex);
4384 ibdm.ibdm_ngid_probes_in_progress++;
4385 mutex_exit(&ibdm.ibdm_mutex);
4386 if (ibdm_send_ioc_profile(reprobe_gid, jj) !=
4387 IBDM_SUCCESS) {
4388 IBTF_DPRINTF_L4("ibdm",
4389 "\tprobe_ioc: "
4390 "send_ioc_profile failed "
4391 "for ioc %d", jj);
4392 ibdm_gid_decr_pending(reprobe_gid);
4393 break;
4394 }
4395 mutex_enter(&ibdm.ibdm_mutex);
4396 ibdm_wait_probe_completion();
4397 mutex_exit(&ibdm.ibdm_mutex);
4398 break;
4399 }
4400 if (ioc_matched == 0)
4401 ibdm_gid_decr_pending(reprobe_gid);
4402 else {
4403 mutex_enter(&ibdm.ibdm_hl_mutex);
4404 break;
4405 }
4406 } else if (new_gid != NULL) {
4407 mutex_exit(&ibdm.ibdm_hl_mutex);
4408 node_gid = node_gid ? node_gid : new_gid;
4409
4410 /*
4411 * New or reinserted GID : Enable notification
4412 * to IBnex
4413 */
4414 mutex_enter(&node_gid->gl_mutex);
4415 node_gid->gl_reprobe_flag = 1;
4416 mutex_exit(&node_gid->gl_mutex);
4417
4418 ibdm_probe_gid(node_gid);
4419
4420 mutex_enter(&ibdm.ibdm_hl_mutex);
4421 }
4422 }
4423 mutex_exit(&ibdm.ibdm_hl_mutex);
4424 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n");
4425 }
4426
4427
4428 /*
4429 * ibdm_probe_gid()
4430 * Selectively probes the GID
4431 */
4432 static void
4433 ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info)
4434 {
4435 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:");
4436
4437 /*
4438 * A Cisco FC GW needs the special handling to get IOUnitInfo.
4439 */
4440 mutex_enter(&gid_info->gl_mutex);
4441 if (ibdm_is_cisco_switch(gid_info)) {
4442 gid_info->gl_pending_cmds++;
4443 gid_info->gl_state = IBDM_SET_CLASSPORTINFO;
4444 mutex_exit(&gid_info->gl_mutex);
4445
4446 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) {
4447
4448 mutex_enter(&gid_info->gl_mutex);
4449 gid_info->gl_state = IBDM_GID_PROBING_FAILED;
4450 --gid_info->gl_pending_cmds;
4451 mutex_exit(&gid_info->gl_mutex);
4452
4453 /* free the hca_list on this gid_info */
4454 ibdm_delete_glhca_list(gid_info);
4455 gid_info = gid_info->gl_next;
4456 return;
4457 }
4458
4459 mutex_enter(&gid_info->gl_mutex);
4460 ibdm_wait_cisco_probe_completion(gid_info);
4461
4462 IBTF_DPRINTF_L4("ibdm",
4463 "\tprobe_gid: CISCO Wakeup signal received");
4464 }
4465
4466 /* move on to the 'GET_CLASSPORTINFO' stage */
4467 gid_info->gl_pending_cmds++;
4468 gid_info->gl_state = IBDM_GET_CLASSPORTINFO;
4469 mutex_exit(&gid_info->gl_mutex);
4470
4471 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) {
4472
4473 mutex_enter(&gid_info->gl_mutex);
4474 gid_info->gl_state = IBDM_GID_PROBING_FAILED;
4475 --gid_info->gl_pending_cmds;
4476 mutex_exit(&gid_info->gl_mutex);
4477
4478 /* free the hca_list on this gid_info */
4479 ibdm_delete_glhca_list(gid_info);
4480 gid_info = gid_info->gl_next;
4481 return;
4482 }
4483
4484 mutex_enter(&ibdm.ibdm_mutex);
4485 ibdm.ibdm_ngid_probes_in_progress++;
4486 gid_info = gid_info->gl_next;
4487 ibdm_wait_probe_completion();
4488 mutex_exit(&ibdm.ibdm_mutex);
4489
4490 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received");
4491 }
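
/*
 * Added explanatory note: together with ibdm_process_incoming_mad(), the
 * probe of a single GID walks through the following gl_state values (the
 * intermediate transitions happen in the corresponding response handlers):
 *
 *	IBDM_SET_CLASSPORTINFO		(Cisco FC GW only)
 *	IBDM_GET_CLASSPORTINFO
 *	IBDM_GET_IOUNITINFO
 *	IBDM_GET_IOC_DETAILS		(IOC profiles, service entries,
 *					 diag codes)
 *	IBDM_GID_PROBING_COMPLETE	or IBDM_GID_PROBING_FAILED
 *
 * Every non-duplicate response decrements gl_pending_cmds; the probe of
 * the GID is finished once that count reaches zero.
 */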
4492
4493
4494 /*
4495 * ibdm_create_gid_info()
4496 * Allocates a gid_info structure and initializes it
4497 * Returns pointer to the structure on success
4498 * and NULL on failure
4499 */
4500 static ibdm_dp_gidinfo_t *
4501 ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid)
4502 {
4503 uint8_t ii, npaths;
4504 sa_path_record_t *path;
4505 size_t len;
4506 ibdm_pkey_tbl_t *pkey_tbl;
4507 ibdm_dp_gidinfo_t *gid_info = NULL;
4508 int ret;
4509
4510 IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin");
4511 npaths = 1;
4512
4513 /* query for reversible paths */
4514 if (port->pa_sa_hdl)
4515 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl,
4516 sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0,
4517 &len, &path);
4518 else
4519 return (NULL);
4520
4521 if (ret == IBMF_SUCCESS && path) {
4522 ibdm_dump_path_info(path);
4523
4524 gid_info = kmem_zalloc(
4525 sizeof (ibdm_dp_gidinfo_t), KM_SLEEP);
4526 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL);
4527 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL);
4528 gid_info->gl_dgid_hi = path->DGID.gid_prefix;
4529 gid_info->gl_dgid_lo = path->DGID.gid_guid;
4530 gid_info->gl_sgid_hi = path->SGID.gid_prefix;
4531 gid_info->gl_sgid_lo = path->SGID.gid_guid;
4532 gid_info->gl_p_key = path->P_Key;
4533 gid_info->gl_sa_hdl = port->pa_sa_hdl;
4534 gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl;
4535 gid_info->gl_slid = path->SLID;
4536 gid_info->gl_dlid = path->DLID;
4537 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID)
4538 << IBDM_GID_TRANSACTIONID_SHIFT;
4539 gid_info->gl_min_transactionID = gid_info->gl_transactionID;
4540 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1)
4541 << IBDM_GID_TRANSACTIONID_SHIFT;
4542 gid_info->gl_SL = path->SL;
4543
4544 gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT;
4545 for (ii = 0; ii < port->pa_npkeys; ii++) {
4546 if (port->pa_pkey_tbl == NULL)
4547 break;
4548
4549 pkey_tbl = &port->pa_pkey_tbl[ii];
4550 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) &&
4551 (pkey_tbl->pt_qp_hdl != NULL)) {
4552 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
4553 break;
4554 }
4555 }
4556 kmem_free(path, len);
4557
4558 /*
4559 * QP handle for GID not initialized. No matching Pkey
4560 * was found!! ibdm should *not* hit this case. Flag an
4561 * error and drop the GID if ibdm does encounter this.
4562 */
4563 if (gid_info->gl_qp_hdl == NULL) {
4564 IBTF_DPRINTF_L2(ibdm_string,
4565 "\tcreate_gid_info: No matching Pkey");
4566 ibdm_delete_gidinfo(gid_info);
4567 return (NULL);
4568 }
4569
4570 ibdm.ibdm_ngids++;
4571 if (ibdm.ibdm_dp_gidlist_head == NULL) {
4572 ibdm.ibdm_dp_gidlist_head = gid_info;
4573 ibdm.ibdm_dp_gidlist_tail = gid_info;
4574 } else {
4575 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info;
4576 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail;
4577 ibdm.ibdm_dp_gidlist_tail = gid_info;
4578 }
4579 }
4580
4581 return (gid_info);
4582 }
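
/*
 * Added illustrative note: each GID gets its own block of transaction IDs
 * so that ibdm_process_incoming_mad() can map a response back to the GID
 * that issued the request.  Purely for illustration, assume
 * IBDM_GID_TRANSACTIONID_SHIFT is 24; then the Nth GID created above gets
 *
 *	gl_min_transactionID = N << 24
 *	gl_max_transactionID = (N + 1) << 24
 *
 * ibdm_bump_transactionID() hands out IDs from that range, and the receive
 * path matches (TransactionID & IBDM_GID_TRANSACTIONID_MASK) against the
 * GID's gl_transactionID masked the same way.
 */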
4583
4584
4585 /*
4586 * ibdm_get_node_records
4587 * Sends a SA query to get the NODE record
4588 * Returns pointer to the sa_node_record_t on success
4589 * and NULL on failure
4590 */
4591 static sa_node_record_t *
4592 ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid)
4593 {
4594 sa_node_record_t req, *resp = NULL;
4595 ibmf_saa_access_args_t args;
4596 int ret;
4597
4598 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin");
4599
4600 bzero(&req, sizeof (sa_node_record_t));
4601 req.NodeInfo.NodeGUID = guid;
4602
4603 args.sq_attr_id = SA_NODERECORD_ATTRID;
4604 args.sq_access_type = IBMF_SAA_RETRIEVE;
4605 args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID;
4606 args.sq_template = &req;
4607 args.sq_callback = NULL;
4608 args.sq_callback_arg = NULL;
4609
4610 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp);
4611 if (ret != IBMF_SUCCESS) {
4612 IBTF_DPRINTF_L2("ibdm", "\tget_node_records:"
4613 " SA Retrieve Failed: %d", ret);
4614 return (NULL);
4615 }
4616 if ((resp == NULL) || (*length == 0)) {
4617 IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records");
4618 return (NULL);
4619 }
4620
4621 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx "
4622 "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID);
4623
4624 return (resp);
4625 }
4626
4627
4628 /*
4629 * ibdm_get_portinfo()
4630 * Sends a SA query to get the PortInfo record
4631 * Returns pointer to the sa_portinfo_record_t on success
4632 * and NULL on failure
4633 */
4634 static sa_portinfo_record_t *
4635 ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid)
4636 {
4637 sa_portinfo_record_t req, *resp = NULL;
4638 ibmf_saa_access_args_t args;
4639 int ret;
4640
4641 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin");
4642
4643 bzero(&req, sizeof (sa_portinfo_record_t));
4644 req.EndportLID = lid;
4645
4646 args.sq_attr_id = SA_PORTINFORECORD_ATTRID;
4647 args.sq_access_type = IBMF_SAA_RETRIEVE;
4648 args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID;
4649 args.sq_template = &req;
4650 args.sq_callback = NULL;
4651 args.sq_callback_arg = NULL;
4652
4653 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp);
4654 if (ret != IBMF_SUCCESS) {
4655 IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:"
4656 " SA Retrieve Failed: 0x%X", ret);
4657 return (NULL);
4658 }
4659 if ((*length == 0) || (resp == NULL))
4660 return (NULL);
4661
4662 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x",
4663 resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask);
4664 return (resp);
4665 }
4666
4667
4668 /*
4669 * ibdm_ibnex_register_callback
4670 * IB nexus callback routine for HCA attach and detach notification
4671 */
4672 void
4673 ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback)
4674 {
4675 IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks");
4676 mutex_enter(&ibdm.ibdm_ibnex_mutex);
4677 ibdm.ibdm_ibnex_callback = ibnex_dm_callback;
4678 mutex_exit(&ibdm.ibdm_ibnex_mutex);
4679 }
4680
4681
4682 /*
4683 * ibdm_ibnex_unregister_callbacks
4684 */
4685 void
4686 ibdm_ibnex_unregister_callback()
4687 {
4688 IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks");
4689 mutex_enter(&ibdm.ibdm_ibnex_mutex);
4690 ibdm.ibdm_ibnex_callback = NULL;
4691 mutex_exit(&ibdm.ibdm_ibnex_mutex);
4692 }
4693
4694 /*
4695 * ibdm_get_waittime()
4696 * Calculates the wait time based on the last HCA attach time
4697 */
4698 static clock_t
4699 ibdm_get_waittime(ib_guid_t hca_guid, int dft_wait_sec)
4700 {
4701 const hrtime_t dft_wait = dft_wait_sec * NANOSEC;
4702 hrtime_t temp, wait_time = 0;
4703 clock_t usecs;
4704 int i;
4705 ibdm_hca_list_t *hca;
4706
4707 IBTF_DPRINTF_L4("ibdm", "\tget_waittime hcaguid:%llx"
4708 "\tport settling time %d", hca_guid, dft_wait);
4709
4710 ASSERT(mutex_owned(&ibdm.ibdm_hl_mutex));
4711
4712 hca = ibdm.ibdm_hca_list_head;
4713
4714 for (i = 0; i < ibdm.ibdm_hca_count; i++, hca = hca->hl_next) {
4715 if (hca->hl_nports == hca->hl_nports_active)
4716 continue;
4717
4718 if (hca_guid && (hca_guid != hca->hl_hca_guid))
4719 continue;
4720
4721 temp = gethrtime() - hca->hl_attach_time;
4722 temp = MAX(0, (dft_wait - temp));
4723
4724 if (hca_guid) {
4725 wait_time = temp;
4726 break;
4727 }
4728
4729 wait_time = MAX(temp, wait_time);
4730 }
4731
4732 /* convert to microseconds */
4733 usecs = MIN(wait_time, dft_wait) / (NANOSEC / MICROSEC);
4734
4735 IBTF_DPRINTF_L2("ibdm", "\tget_waittime: wait_time = %ld usecs",
4736 (long)usecs);
4737
4738 return (drv_usectohz(usecs));
4739 }
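
/*
 * Added worked example: with dft_wait_sec = 8 and an HCA whose ports
 * attached 3 seconds ago,
 *
 *	temp	  = gethrtime() - hl_attach_time     ~= 3 * NANOSEC
 *	wait_time = MAX(0, 8 * NANOSEC - 3 * NANOSEC) = 5 * NANOSEC
 *	usecs	  = MIN(5 * NANOSEC, 8 * NANOSEC) / (NANOSEC / MICROSEC)
 *		  = 5,000,000
 *
 * so the caller is told to wait drv_usectohz(5000000) ticks, i.e. roughly
 * the five seconds of settling time still remaining.
 */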
4740
4741 void
4742 ibdm_ibnex_port_settle_wait(ib_guid_t hca_guid, int dft_wait)
4743 {
4744 clock_t wait_time;
4745
4746 mutex_enter(&ibdm.ibdm_hl_mutex);
4747
4748 while ((wait_time = ibdm_get_waittime(hca_guid, dft_wait)) > 0)
4749 (void) cv_reltimedwait(&ibdm.ibdm_port_settle_cv,
4750 &ibdm.ibdm_hl_mutex, wait_time, TR_CLOCK_TICK);
4751
4752 mutex_exit(&ibdm.ibdm_hl_mutex);
4753 }
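
/*
 * Added usage sketch.  The function below is hypothetical and is never
 * compiled (IBDM_USAGE_EXAMPLE is not defined anywhere); it only shows how
 * a client such as the IB nexus driver is expected to pair the settle wait
 * with the port-attribute calls.  The 8-second settling time is a made-up
 * value.
 */
#ifdef IBDM_USAGE_EXAMPLE
static void
ibdm_example_settle_and_probe(ib_guid_t hca_guid, uint8_t port_num)
{
	ibdm_port_attr_t *pa;

	/* Block until the port settling time for this HCA has elapsed */
	ibdm_ibnex_port_settle_wait(hca_guid, 8);

	/* Fetch a private copy of the port attributes and release it */
	if ((pa = ibdm_ibnex_probe_hcaport(hca_guid, port_num)) != NULL) {
		/* ... consume pa->pa_port_guid, pa->pa_pkey_tbl ... */
		ibdm_ibnex_free_port_attr(pa);
	}
}
#endif	/* IBDM_USAGE_EXAMPLE */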
4754
4755
4756 /*
4757 * ibdm_ibnex_probe_hcaport
4758 * Probes the presence of an HCA port (given the HCA GUID and port number)
4759 * Returns port attributes structure on SUCCESS
4760 */
4761 ibdm_port_attr_t *
4762 ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num)
4763 {
4764 int ii, jj;
4765 ibdm_hca_list_t *hca_list;
4766 ibdm_port_attr_t *port_attr;
4767
4768 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:");
4769
4770 mutex_enter(&ibdm.ibdm_hl_mutex);
4771 hca_list = ibdm.ibdm_hca_list_head;
4772 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
4773 if (hca_list->hl_hca_guid == hca_guid) {
4774 for (jj = 0; jj < hca_list->hl_nports; jj++) {
4775 if (hca_list->hl_port_attr[jj].pa_port_num ==
4776 port_num) {
4777 break;
4778 }
4779 }
4780 if (jj != hca_list->hl_nports)
4781 break;
4782 }
4783 hca_list = hca_list->hl_next;
4784 }
4785 if (ii == ibdm.ibdm_hca_count) {
4786 IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found");
4787 mutex_exit(&ibdm.ibdm_hl_mutex);
4788 return (NULL);
4789 }
4790 port_attr = (ibdm_port_attr_t *)kmem_zalloc(
4791 sizeof (ibdm_port_attr_t), KM_SLEEP);
4792 bcopy((char *)&hca_list->hl_port_attr[jj],
4793 port_attr, sizeof (ibdm_port_attr_t));
4794 ibdm_update_port_attr(port_attr);
4795
4796 mutex_exit(&ibdm.ibdm_hl_mutex);
4797 return (port_attr);
4798 }
4799
4800
4801 /*
4802 * ibdm_ibnex_get_port_attrs
4803 * Scan all HCAs for a matching port_guid.
4804 * Returns "port attributes" structure on success.
4805 */
4806 ibdm_port_attr_t *
4807 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid)
4808 {
4809 int ii, jj;
4810 ibdm_hca_list_t *hca_list;
4811 ibdm_port_attr_t *port_attr;
4812
4813 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:");
4814
4815 mutex_enter(&ibdm.ibdm_hl_mutex);
4816 hca_list = ibdm.ibdm_hca_list_head;
4817
4818 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
4819 for (jj = 0; jj < hca_list->hl_nports; jj++) {
4820 if (hca_list->hl_port_attr[jj].pa_port_guid ==
4821 port_guid) {
4822 break;
4823 }
4824 }
4825 if (jj != hca_list->hl_nports)
4826 break;
4827 hca_list = hca_list->hl_next;
4828 }
4829
4830 if (ii == ibdm.ibdm_hca_count) {
4831 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found");
4832 mutex_exit(&ibdm.ibdm_hl_mutex);
4833 return (NULL);
4834 }
4835
4836 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t),
4837 KM_SLEEP);
4838 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr,
4839 sizeof (ibdm_port_attr_t));
4840 ibdm_update_port_attr(port_attr);
4841
4842 mutex_exit(&ibdm.ibdm_hl_mutex);
4843 return (port_attr);
4844 }
4845
4846
4847 /*
4848 * ibdm_ibnex_free_port_attr()
4849 */
4850 void
4851 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr)
4852 {
4853 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:");
4854 if (port_attr) {
4855 if (port_attr->pa_pkey_tbl != NULL) {
4856 kmem_free(port_attr->pa_pkey_tbl,
4857 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t)));
4858 }
4859 kmem_free(port_attr, sizeof (ibdm_port_attr_t));
4860 }
4861 }
4862
4863
4864 /*
4865 * ibdm_ibnex_get_hca_list()
4866 * Returns port information for all the ports of all the HCAs
4867 */
4868 void
4869 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count)
4870 {
4871 ibdm_hca_list_t *head = NULL, *temp, *temp1;
4872 int ii;
4873
4874 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:");
4875
4876 mutex_enter(&ibdm.ibdm_hl_mutex);
4877 temp = ibdm.ibdm_hca_list_head;
4878 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
4879 temp1 = ibdm_dup_hca_attr(temp);
4880 temp1->hl_next = head;
4881 head = temp1;
4882 temp = temp->hl_next;
4883 }
4884 *count = ibdm.ibdm_hca_count;
4885 *hca = head;
4886 mutex_exit(&ibdm.ibdm_hl_mutex);
4887 }
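
/*
 * Added usage sketch.  The function below is hypothetical and never
 * compiled (IBDM_USAGE_EXAMPLE is not defined anywhere); it shows that the
 * list returned by ibdm_ibnex_get_hca_list() is a private copy which the
 * caller must release with ibdm_ibnex_free_hca_list().
 */
#ifdef IBDM_USAGE_EXAMPLE
static void
ibdm_example_walk_hca_list(void)
{
	ibdm_hca_list_t	*hca_list = NULL, *hca;
	int		nhcas = 0;

	ibdm_ibnex_get_hca_list(&hca_list, &nhcas);
	for (hca = hca_list; hca != NULL; hca = hca->hl_next) {
		/* ... consume hca->hl_hca_guid and hca->hl_port_attr ... */
	}
	if (hca_list != NULL)
		ibdm_ibnex_free_hca_list(hca_list);
}
#endif	/* IBDM_USAGE_EXAMPLE */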
4888
4889
4890 /*
4891 * ibdm_ibnex_get_hca_info_by_guid()
4892 */
4893 ibdm_hca_list_t *
4894 ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid)
4895 {
4896 ibdm_hca_list_t *head = NULL, *hca = NULL;
4897
4898 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip");
4899
4900 mutex_enter(&ibdm.ibdm_hl_mutex);
4901 head = ibdm.ibdm_hca_list_head;
4902 while (head) {
4903 if (head->hl_hca_guid == hca_guid) {
4904 hca = ibdm_dup_hca_attr(head);
4905 hca->hl_next = NULL;
4906 break;
4907 }
4908 head = head->hl_next;
4909 }
4910 mutex_exit(&ibdm.ibdm_hl_mutex);
4911 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_dip %p", hca);
4912 return (hca);
4913 }
4914
4915
4916 /*
4917 * ibdm_dup_hca_attr()
4918 * Allocates a new HCA attribute structure, initializes it with
4919 * the incoming HCA attributes and returns the allocated
4920 * structure.
4921 */
4922 static ibdm_hca_list_t *
4923 ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca)
4924 {
4925 int len;
4926 ibdm_hca_list_t *out_hca;
4927
4928 len = sizeof (ibdm_hca_list_t) +
4929 (in_hca->hl_nports * sizeof (ibdm_port_attr_t));
4930 IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len);
4931 out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP);
4932 bcopy((char *)in_hca,
4933 (char *)out_hca, sizeof (ibdm_hca_list_t));
4934 if (in_hca->hl_nports) {
4935 out_hca->hl_port_attr = (ibdm_port_attr_t *)
4936 ((char *)out_hca + sizeof (ibdm_hca_list_t));
4937 bcopy((char *)in_hca->hl_port_attr,
4938 (char *)out_hca->hl_port_attr,
4939 (in_hca->hl_nports * sizeof (ibdm_port_attr_t)));
4940 for (len = 0; len < out_hca->hl_nports; len++)
4941 ibdm_update_port_attr(&out_hca->hl_port_attr[len]);
4942 }
4943 return (out_hca);
4944 }
4945
4946
4947 /*
4948 * ibdm_ibnex_free_hca_list()
4949 * Free one/more HCA lists
4950 */
4951 void
4952 ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list)
4953 {
4954 int ii;
4955 size_t len;
4956 ibdm_hca_list_t *temp;
4957 ibdm_port_attr_t *port;
4958
4959 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:");
4960 ASSERT(hca_list);
4961 while (hca_list) {
4962 temp = hca_list;
4963 hca_list = hca_list->hl_next;
4964 for (ii = 0; ii < temp->hl_nports; ii++) {
4965 port = &temp->hl_port_attr[ii];
4966 len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
4967 if (len != 0)
4968 kmem_free(port->pa_pkey_tbl, len);
4969 }
4970 len = sizeof (ibdm_hca_list_t) + (temp->hl_nports *
4971 sizeof (ibdm_port_attr_t));
4972 kmem_free(temp, len);
4973 }
4974 }
4975
4976
4977 /*
4978 * ibdm_ibnex_probe_ioc()
4979 * Probes the IOC on the fabric and returns the IOC information
4980 * if present. Otherwise, NULL is returned
4981 */
4982 /* ARGSUSED */
4983 ibdm_ioc_info_t *
4984 ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag)
4985 {
4986 int k;
4987 ibdm_ioc_info_t *ioc_info;
4988 ibdm_dp_gidinfo_t *gid_info; /* used as index and arg */
4989 timeout_id_t *timeout_id;
4990
4991 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin",
4992 iou, ioc_guid, reprobe_flag);
4993
4994 if (ibdm_enumerate_iocs == 0)
4995 return (NULL);
4996
4997 /* Check whether we know this already */
4998 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info);
4999 if (ioc_info == NULL) {
5000 mutex_enter(&ibdm.ibdm_mutex);
5001 while (ibdm.ibdm_busy & IBDM_BUSY)
5002 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5003 ibdm.ibdm_busy |= IBDM_BUSY;
5004 mutex_exit(&ibdm.ibdm_mutex);
5005 ibdm_probe_ioc(iou, ioc_guid, 0);
5006 mutex_enter(&ibdm.ibdm_mutex);
5007 ibdm.ibdm_busy &= ~IBDM_BUSY;
5008 cv_broadcast(&ibdm.ibdm_busy_cv);
5009 mutex_exit(&ibdm.ibdm_mutex);
5010 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info);
5011 } else if (reprobe_flag) { /* Handle Reprobe for the IOC */
5012 ASSERT(gid_info != NULL);
5013 /* Free the ioc_list before reprobe; and cancel any timers */
5014 mutex_enter(&ibdm.ibdm_mutex);
5015 mutex_enter(&gid_info->gl_mutex);
5016 if (ioc_info->ioc_timeout_id) {
5017 timeout_id = ioc_info->ioc_timeout_id;
5018 ioc_info->ioc_timeout_id = 0;
5019 mutex_exit(&gid_info->gl_mutex);
5020 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: "
5021 "ioc_timeout_id = 0x%x", timeout_id);
5022 if (untimeout(timeout_id) == -1) {
5023 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: "
5024 "untimeout ioc_timeout_id failed");
5025 }
5026 mutex_enter(&gid_info->gl_mutex);
5027 }
5028 if (ioc_info->ioc_dc_timeout_id) {
5029 timeout_id = ioc_info->ioc_dc_timeout_id;
5030 ioc_info->ioc_dc_timeout_id = 0;
5031 mutex_exit(&gid_info->gl_mutex);
5032 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: "
5033 "ioc_dc_timeout_id = 0x%x", timeout_id);
5034 if (untimeout(timeout_id) == -1) {
5035 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: "
5036 "untimeout ioc_dc_timeout_id failed");
5037 }
5038 mutex_enter(&gid_info->gl_mutex);
5039 }
5040 for (k = 0; k < ioc_info->ioc_profile.ioc_service_entries; k++)
5041 if (ioc_info->ioc_serv[k].se_timeout_id) {
5042 timeout_id = ioc_info->ioc_serv[k].
5043 se_timeout_id;
5044 ioc_info->ioc_serv[k].se_timeout_id = 0;
5045 mutex_exit(&gid_info->gl_mutex);
5046 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: "
5047 "ioc_info->ioc_serv[k].se_timeout_id = %x",
5048 k, timeout_id);
5049 if (untimeout(timeout_id) == -1) {
5050 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: "
5051 "untimeout se_timeout_id %d "
5052 "failed", k);
5053 }
5054 mutex_enter(&gid_info->gl_mutex);
5055 }
5056 mutex_exit(&gid_info->gl_mutex);
5057 mutex_exit(&ibdm.ibdm_mutex);
5058 ibdm_ibnex_free_ioc_list(ioc_info);
5059
5060 mutex_enter(&ibdm.ibdm_mutex);
5061 while (ibdm.ibdm_busy & IBDM_BUSY)
5062 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5063 ibdm.ibdm_busy |= IBDM_BUSY;
5064 mutex_exit(&ibdm.ibdm_mutex);
5065
5066 ibdm_probe_ioc(iou, ioc_guid, 1);
5067
5068 /*
5069 * Skip if gl_reprobe_flag is set, this will be
5070 * a re-inserted / new GID, for which notifications
5071 * have already been sent.
5072 */
5073 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
5074 gid_info = gid_info->gl_next) {
5075 uint8_t ii, niocs;
5076 ibdm_ioc_info_t *ioc;
5077
5078 if (gid_info->gl_iou == NULL)
5079 continue;
5080
5081 if (gid_info->gl_reprobe_flag) {
5082 gid_info->gl_reprobe_flag = 0;
5083 continue;
5084 }
5085
5086 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
5087 for (ii = 0; ii < niocs; ii++) {
5088 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
5089 if (ioc->ioc_profile.ioc_guid == ioc_guid) {
5090 mutex_enter(&ibdm.ibdm_mutex);
5091 ibdm_reprobe_update_port_srv(ioc,
5092 gid_info);
5093 mutex_exit(&ibdm.ibdm_mutex);
5094 }
5095 }
5096 }
5097 mutex_enter(&ibdm.ibdm_mutex);
5098 ibdm.ibdm_busy &= ~IBDM_BUSY;
5099 cv_broadcast(&ibdm.ibdm_busy_cv);
5100 mutex_exit(&ibdm.ibdm_mutex);
5101
5102 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info);
5103 }
5104 return (ioc_info);
5105 }
5106
5107
5108 /*
5109 * ibdm_get_ioc_info_with_gid()
5110 * Returns pointer to ibdm_ioc_info_t if it finds
5111 * matching record for the ioc_guid. Otherwise NULL is returned.
5112 * The matching gid_info pointer is stored through the second argument
5113 * when a non-NULL value is returned (and the second argument is not NULL).
5114 *
5115 * Note. use the same strings as "ibnex_get_ioc_info" in
5116 * IBTF_DPRINTF() to keep compatibility.
5117 */
5118 static ibdm_ioc_info_t *
5119 ibdm_get_ioc_info_with_gid(ib_guid_t ioc_guid,
5120 ibdm_dp_gidinfo_t **gid_info)
5121 {
5122 int ii;
5123 ibdm_ioc_info_t *ioc = NULL, *tmp = NULL;
5124 ibdm_dp_gidinfo_t *gid_list;
5125 ib_dm_io_unitinfo_t *iou;
5126
5127 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid);
5128
5129 mutex_enter(&ibdm.ibdm_mutex);
5130 while (ibdm.ibdm_busy & IBDM_BUSY)
5131 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5132 ibdm.ibdm_busy |= IBDM_BUSY;
5133
5134 if (gid_info)
5135 *gid_info = NULL; /* clear the value of gid_info */
5136
5137 gid_list = ibdm.ibdm_dp_gidlist_head;
5138 while (gid_list) {
5139 mutex_enter(&gid_list->gl_mutex);
5140 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) {
5141 mutex_exit(&gid_list->gl_mutex);
5142 gid_list = gid_list->gl_next;
5143 continue;
5144 }
5145 if (gid_list->gl_iou == NULL) {
5146 IBTF_DPRINTF_L2("ibdm",
5147 "\tget_ioc_info: No IOU info");
5148 mutex_exit(&gid_list->gl_mutex);
5149 gid_list = gid_list->gl_next;
5150 continue;
5151 }
5152 iou = &gid_list->gl_iou->iou_info;
5153 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) {
5154 tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii);
5155 if ((tmp->ioc_profile.ioc_guid == ioc_guid) &&
5156 (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) {
5157 ioc = ibdm_dup_ioc_info(tmp, gid_list);
5158 if (gid_info)
5159 *gid_info = gid_list; /* set this ptr */
5160 mutex_exit(&gid_list->gl_mutex);
5161 ibdm.ibdm_busy &= ~IBDM_BUSY;
5162 cv_broadcast(&ibdm.ibdm_busy_cv);
5163 mutex_exit(&ibdm.ibdm_mutex);
5164 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End");
5165 return (ioc);
5166 }
5167 }
5168 if (ii == iou->iou_num_ctrl_slots)
5169 ioc = NULL;
5170
5171 mutex_exit(&gid_list->gl_mutex);
5172 gid_list = gid_list->gl_next;
5173 }
5174
5175 ibdm.ibdm_busy &= ~IBDM_BUSY;
5176 cv_broadcast(&ibdm.ibdm_busy_cv);
5177 mutex_exit(&ibdm.ibdm_mutex);
5178 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End");
5179 return (ioc);
5180 }
5181
5182 /*
5183 * ibdm_ibnex_get_ioc_info()
5184 * Returns pointer to ibdm_ioc_info_t if it finds
5185 * matching record for the ioc_guid, otherwise NULL
5186 * is returned
5187 *
5188 * Note. this is a wrapper function to ibdm_get_ioc_info_with_gid() now.
5189 */
5190 ibdm_ioc_info_t *
5191 ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid)
5192 {
5193 if (ibdm_enumerate_iocs == 0)
5194 return (NULL);
5195
5196 /* will not use the gid_info pointer, so the second arg is NULL */
5197 return (ibdm_get_ioc_info_with_gid(ioc_guid, NULL));
5198 }
5199
5200 /*
5201 * ibdm_ibnex_get_ioc_count()
5202 * Returns the number of successfully probed IOCs it finds.
5203 */
5204 int
5205 ibdm_ibnex_get_ioc_count(void)
5206 {
5207 int count = 0, k;
5208 ibdm_ioc_info_t *ioc;
5209 ibdm_dp_gidinfo_t *gid_list;
5210
5211 if (ibdm_enumerate_iocs == 0)
5212 return (0);
5213
5214 mutex_enter(&ibdm.ibdm_mutex);
5215 ibdm_sweep_fabric(0);
5216
5217 while (ibdm.ibdm_busy & IBDM_BUSY)
5218 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5219 ibdm.ibdm_busy |= IBDM_BUSY;
5220
5221 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
5222 gid_list = gid_list->gl_next) {
5223 mutex_enter(&gid_list->gl_mutex);
5224 if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) ||
5225 (gid_list->gl_iou == NULL)) {
5226 mutex_exit(&gid_list->gl_mutex);
5227 continue;
5228 }
5229 for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots;
5230 k++) {
5231 ioc = IBDM_GIDINFO2IOCINFO(gid_list, k);
5232 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)
5233 ++count;
5234 }
5235 mutex_exit(&gid_list->gl_mutex);
5236 }
5237 ibdm.ibdm_busy &= ~IBDM_BUSY;
5238 cv_broadcast(&ibdm.ibdm_busy_cv);
5239 mutex_exit(&ibdm.ibdm_mutex);
5240
5241 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count);
5242 return (count);
5243 }
5244
5245
5246 /*
5247 * ibdm_ibnex_get_ioc_list()
5248 * Returns information about all the IOCs present on the fabric.
5249 * Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL.
5250 * Does not sweep fabric if DONOT_PROBE is set
5251 */
5252 ibdm_ioc_info_t *
5253 ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag)
5254 {
5255 int ii;
5256 ibdm_ioc_info_t *ioc_list = NULL, *tmp, *ioc;
5257 ibdm_dp_gidinfo_t *gid_list;
5258 ib_dm_io_unitinfo_t *iou;
5259
5260 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter");
5261
5262 if (ibdm_enumerate_iocs == 0)
5263 return (NULL);
5264
5265 mutex_enter(&ibdm.ibdm_mutex);
5266 if (list_flag != IBDM_IBNEX_DONOT_PROBE)
5267 ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL);
5268
5269 while (ibdm.ibdm_busy & IBDM_BUSY)
5270 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5271 ibdm.ibdm_busy |= IBDM_BUSY;
5272
5273 gid_list = ibdm.ibdm_dp_gidlist_head;
5274 while (gid_list) {
5275 mutex_enter(&gid_list->gl_mutex);
5276 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) {
5277 mutex_exit(&gid_list->gl_mutex);
5278 gid_list = gid_list->gl_next;
5279 continue;
5280 }
5281 if (gid_list->gl_iou == NULL) {
5282 IBTF_DPRINTF_L2("ibdm",
5283 "\tget_ioc_list: No IOU info");
5284 mutex_exit(&gid_list->gl_mutex);
5285 gid_list = gid_list->gl_next;
5286 continue;
5287 }
5288 iou = &gid_list->gl_iou->iou_info;
5289 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) {
5290 ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii);
5291 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) {
5292 tmp = ibdm_dup_ioc_info(ioc, gid_list);
5293 tmp->ioc_next = ioc_list;
5294 ioc_list = tmp;
5295 }
5296 }
5297 mutex_exit(&gid_list->gl_mutex);
5298 gid_list = gid_list->gl_next;
5299 }
5300 ibdm.ibdm_busy &= ~IBDM_BUSY;
5301 cv_broadcast(&ibdm.ibdm_busy_cv);
5302 mutex_exit(&ibdm.ibdm_mutex);
5303
5304 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End");
5305 return (ioc_list);
5306 }
5307
5308 /*
5309 * ibdm_dup_ioc_info()
5310 * Duplicates the IOC information and returns the
5311 * duplicated copy.
5312 */
5313 static ibdm_ioc_info_t *
5314 ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list)
5315 {
5316 ibdm_ioc_info_t *out_ioc;
5317 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc));
5318 ASSERT(MUTEX_HELD(&gid_list->gl_mutex));
5319
5320 out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP);
5321 bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t));
5322 ibdm_update_ioc_port_gidlist(out_ioc, gid_list);
5323 out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid;
5324 out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode;
5325
5326 return (out_ioc);
5327 }
5328
5329
5330 /*
5331 * ibdm_ibnex_free_ioc_list()
5332 * Deallocates memory for the IOC list structure
5333 */
5334 void
5335 ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc)
5336 {
5337 ibdm_ioc_info_t *temp;
5338
5339 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:");
5340 while (ioc) {
5341 temp = ioc;
5342 ioc = ioc->ioc_next;
5343 kmem_free(temp->ioc_gid_list,
5344 (sizeof (ibdm_gid_t) * temp->ioc_nportgids));
5345 if (temp->ioc_hca_list)
5346 ibdm_ibnex_free_hca_list(temp->ioc_hca_list);
5347 kmem_free(temp, sizeof (ibdm_ioc_info_t));
5348 }
5349 }
5350
5351
5352 /*
5353 * ibdm_ibnex_update_pkey_tbls
5354 * Updates the DM P_Key database.
5355 * NOTE: Two cases are handled here: P_Key being added or removed.
5356 *
5357 * Arguments : NONE
5358 * Return Values : NONE
5359 */
5360 void
5361 ibdm_ibnex_update_pkey_tbls(void)
5362 {
5363 int h, pp, pidx;
5364 uint_t nports;
5365 uint_t size;
5366 ib_pkey_t new_pkey;
5367 ib_pkey_t *orig_pkey;
5368 ibdm_hca_list_t *hca_list;
5369 ibdm_port_attr_t *port;
5370 ibt_hca_portinfo_t *pinfop;
5371
5372 IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:");
5373
5374 mutex_enter(&ibdm.ibdm_hl_mutex);
5375 hca_list = ibdm.ibdm_hca_list_head;
5376
5377 for (h = 0; h < ibdm.ibdm_hca_count; h++) {
5378
5379 /* This updates P_Key Tables for all ports of this HCA */
5380 (void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop,
5381 &nports, &size);
5382
5383 /* number of ports shouldn't have changed */
5384 ASSERT(nports == hca_list->hl_nports);
5385
5386 for (pp = 0; pp < hca_list->hl_nports; pp++) {
5387 port = &hca_list->hl_port_attr[pp];
5388
5389 /*
5390 * First figure out the P_Keys from IBTL.
5391 * Three things could have happened:
5392 * New P_Keys added
5393 * Existing P_Keys removed
5394 * Both of the above two
5395 *
5396 * Loop through the P_Key indices and check if a
5397 * given P_Key_Ix matches the one seen by
5398 * IBDM. If they match, no action is needed.
5399 *
5400 * If they don't match:
5401 * 1. if orig_pkey is invalid and new_pkey is valid
5402 * ---> add new_pkey to DM database
5403 * 2. if orig_pkey is valid and new_pkey is invalid
5404 * ---> remove orig_pkey from DM database
5405 * 3. if orig_pkey and new_pkey are both valid:
5406 * ---> remove orig_pkey from DM database
5407 * ---> add new_pkey to DM database
5408 * 4. if orig_pkey and new_pkey are both invalid:
5409 * ---> just update the stored P_Key value.
5410 */
5411
5412 for (pidx = 0; pidx < port->pa_npkeys; pidx++) {
5413 new_pkey = pinfop[pp].p_pkey_tbl[pidx];
5414 orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey;
5415
5416 /* keys match - do nothing */
5417 if (*orig_pkey == new_pkey)
5418 continue;
5419
5420 if (IBDM_INVALID_PKEY(*orig_pkey) &&
5421 !IBDM_INVALID_PKEY(new_pkey)) {
5422 /* P_Key was added */
5423 IBTF_DPRINTF_L5("ibdm",
5424 "\tibnex_update_pkey_tbls: new "
5425 "P_Key added = 0x%x", new_pkey);
5426 *orig_pkey = new_pkey;
5427 ibdm_port_attr_ibmf_init(port,
5428 new_pkey, pp);
5429 } else if (!IBDM_INVALID_PKEY(*orig_pkey) &&
5430 IBDM_INVALID_PKEY(new_pkey)) {
5431 /* P_Key was removed */
5432 IBTF_DPRINTF_L5("ibdm",
5433 "\tibnex_update_pkey_tbls: P_Key "
5434 "removed = 0x%x", *orig_pkey);
5435 *orig_pkey = new_pkey;
5436 (void) ibdm_port_attr_ibmf_fini(port,
5437 pidx);
5438 } else if (!IBDM_INVALID_PKEY(*orig_pkey) &&
5439 !IBDM_INVALID_PKEY(new_pkey)) {
5440 /* P_Key was replaced */
5441 IBTF_DPRINTF_L5("ibdm",
5442 "\tibnex_update_pkey_tbls: P_Key "
5443 "replaced 0x%x with 0x%x",
5444 *orig_pkey, new_pkey);
5445 (void) ibdm_port_attr_ibmf_fini(port,
5446 pidx);
5447 *orig_pkey = new_pkey;
5448 ibdm_port_attr_ibmf_init(port,
5449 new_pkey, pp);
5450 } else {
5451 /*
5452 * Both P_Keys are invalid;
5453 * set anyway to reflect whether
5454 * INVALID_FULL was changed to
5455 * INVALID_LIMITED or vice-versa.
5456 */
5457 *orig_pkey = new_pkey;
5458 } /* end of else */
5459
5460 } /* loop of p_key index */
5461
5462 } /* loop of #ports of HCA */
5463
5464 ibt_free_portinfo(pinfop, size);
5465 hca_list = hca_list->hl_next;
5466
5467 } /* loop for all HCAs in the system */
5468
5469 mutex_exit(&ibdm.ibdm_hl_mutex);
5470 }
5471
5472
5473 /*
5474 * ibdm_send_ioc_profile()
5475 * Sends the IOC Controller Profile request. When the request is
5476 * completed, IBMF calls the ibdm_process_incoming_mad routine to
5477 * report the completion.
5478 */
5479 static int
5480 ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no)
5481 {
5482 ibmf_msg_t *msg;
5483 ib_mad_hdr_t *hdr;
5484 ibdm_ioc_info_t *ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]);
5485 ibdm_timeout_cb_args_t *cb_args;
5486
5487 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: "
5488 "gid info 0x%p, ioc_no = %d", gid_info, ioc_no);
5489
5490 /*
5491 * Send the command to get the IOC profile.
5492 * Allocate an IBMF packet and initialize it.
5493 */
5494 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
5495 &msg) != IBMF_SUCCESS) {
5496 IBTF_DPRINTF_L2("ibdm", "\tsend_ioc_profile: pkt alloc fail");
5497 return (IBDM_FAILURE);
5498 }
5499
5500 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
5501 ibdm_alloc_send_buffers(msg);
5502 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
5503
5504 mutex_enter(&gid_info->gl_mutex);
5505 ibdm_bump_transactionID(gid_info);
5506 mutex_exit(&gid_info->gl_mutex);
5507
5508 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
5509 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
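/*
 * If the DM agent previously redirected this GID, address the MAD
 * to the redirect LID/QP/P_Key/Q_Key; otherwise use the default
 * GSI QP (QP1) addressing.
 */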
5510 if (gid_info->gl_redirected == B_TRUE) {
5511 if (gid_info->gl_redirect_dlid != 0) {
5512 msg->im_local_addr.ia_remote_lid =
5513 gid_info->gl_redirect_dlid;
5514 }
5515 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
5516 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
5517 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
5518 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
5519 } else {
5520 msg->im_local_addr.ia_remote_qno = 1;
5521 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
5522 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
5523 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
5524 }
5525
5526 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
5527 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
5528 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
5529 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
5530 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
5531 hdr->Status = 0;
5532 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
5533 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE);
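/*
 * The attribute modifier selects the controller slot within the
 * IOU; slot numbering starts at 1, hence ioc_no + 1.
 */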
5534 hdr->AttributeModifier = h2b32(ioc_no + 1);
5535
5536 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS;
5537 cb_args = &ioc_info->ioc_cb_args;
5538 cb_args->cb_gid_info = gid_info;
5539 cb_args->cb_retry_count = ibdm_dft_retry_cnt;
5540 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO;
5541 cb_args->cb_ioc_num = ioc_no;
5542
5543 mutex_enter(&gid_info->gl_mutex);
5544 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
5545 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
5546 mutex_exit(&gid_info->gl_mutex);
5547
5548 IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:"
5549 "timeout %x", ioc_info->ioc_timeout_id);
5550
5551 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg,
5552 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
5553 IBTF_DPRINTF_L2("ibdm",
5554 "\tsend_ioc_profile: msg transport failed");
5555 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
5556 }
5557 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS;
5558 return (IBDM_SUCCESS);
5559 }
5560
5561
5562 /*
5563 * ibdm_port_reachable
5564 * Returns B_TRUE if the port GID is reachable, determined by sending
5565 * an SA query to get the NODE record for this port GUID.
5566 */
5567 static boolean_t
5568 ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid)
5569 {
5570 sa_node_record_t *resp;
5571 size_t length;
5572
5573 /*
5574 * Verify if it's reachable by getting the node record.
5575 */
5576 if (ibdm_get_node_record_by_port(sa_hdl, guid, &resp, &length) ==
5577 IBDM_SUCCESS) {
5578 kmem_free(resp, length);
5579 return (B_TRUE);
5580 }
5581 return (B_FALSE);
5582 }
5583
5584 /*
5585 * ibdm_get_node_record_by_port
5586 * Sends an SA query to get the NODE record for the port GUID.
5587 * Returns IBDM_SUCCESS if the port GID is reachable.
5588 *
5589 * Note: the caller is responsible for freeing the returned record
5590 * by calling kmem_free(resp, length) later.
5591 */
5592 static int
5593 ibdm_get_node_record_by_port(ibmf_saa_handle_t sa_hdl, ib_guid_t guid,
5594 sa_node_record_t **resp, size_t *length)
5595 {
5596 sa_node_record_t req;
5597 ibmf_saa_access_args_t args;
5598 int ret;
5599 ASSERT(resp != NULL && length != NULL);
5600
5601 IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx",
5602 guid);
5603
5604 bzero(&req, sizeof (sa_node_record_t));
5605 req.NodeInfo.PortGUID = guid;
5606
5607 args.sq_attr_id = SA_NODERECORD_ATTRID;
5608 args.sq_access_type = IBMF_SAA_RETRIEVE;
5609 args.sq_component_mask = SA_NODEINFO_COMPMASK_PORTGUID;
5610 args.sq_template = &req;
5611 args.sq_callback = NULL;
5612 args.sq_callback_arg = NULL;
5613
5614 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) resp);
5615 if (ret != IBMF_SUCCESS) {
5616 IBTF_DPRINTF_L2("ibdm", "\tport_reachable:"
5617 " SA Retrieve Failed: %d", ret);
5618 return (IBDM_FAILURE);
5619 }
5620 if (*resp == NULL || *length == 0) {
5621 IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records");
5622 return (IBDM_FAILURE);
5623 }
5624 /*
5625 * There is one NodeRecord on each endport on a subnet.
5626 */
5627 ASSERT(*length == sizeof (sa_node_record_t));
5628
5629 return (IBDM_SUCCESS);
5630 }
5631
5632
5633 /*
5634 * Update the gidlist for all affected IOCs when a GID becomes
5635 * available/unavailable.
5636 *
5637 * Parameters :
5638 * gid_info - Incoming / Outgoing GID.
5639 * avail_flag - 1 for GID added, 0 for GID removed,
5640 * -1 for IOC gid list updated, ioc_list required.
5641 *
5642 * This function locates the GID with the node GUID corresponding to
5643 * the port GID, then builds the IOC list from its IOU info.
5644 */
5645 static ibdm_ioc_info_t *
5646 ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag)
5647 {
5648 ibdm_dp_gidinfo_t *node_gid = NULL;
5649 uint8_t niocs, ii;
5650 ibdm_ioc_info_t *ioc, *ioc_list = NULL, *tmp;
5651
5652 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist");
5653
5654 switch (avail_flag) {
5655 case 1 :
5656 node_gid = ibdm_check_dest_nodeguid(gid_info);
5657 break;
5658 case 0 :
5659 node_gid = ibdm_handle_gid_rm(gid_info);
5660 break;
5661 case -1 :
5662 node_gid = gid_info;
5663 break;
5664 default :
5665 break;
5666 }
5667
5668 if (node_gid == NULL) {
5669 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: "
5670 "No node GID found, port gid 0x%p, avail_flag %d",
5671 gid_info, avail_flag);
5672 return (NULL);
5673 }
5674
5675 mutex_enter(&node_gid->gl_mutex);
5676 if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE &&
5677 node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) ||
5678 node_gid->gl_iou == NULL) {
5679 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist "
5680 "gl_state %x, gl_iou %p", node_gid->gl_state,
5681 node_gid->gl_iou);
5682 mutex_exit(&node_gid->gl_mutex);
5683 return (NULL);
5684 }
5685
5686 niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots;
5687 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x",
5688 niocs);
5689 for (ii = 0; ii < niocs; ii++) {
5690 ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii);
5691 /*
5692 * Skip IOCs for which the probe is not complete or
5693 * a reprobe is in progress.
5694 */
5695 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) {
5696 tmp = ibdm_dup_ioc_info(ioc, node_gid);
5697 tmp->ioc_info_updated.ib_gid_prop_updated = 1;
5698 tmp->ioc_next = ioc_list;
5699 ioc_list = tmp;
5700 }
5701 }
5702 mutex_exit(&node_gid->gl_mutex);
5703
5704 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p",
5705 ioc_list);
5706 return (ioc_list);
5707 }
5708
5709 /*
5710 * ibdm_saa_event_cb :
5711 * Event handling which does *not* require ibdm_hl_mutex to be
5712 * held is executed in the same thread. This is to prevent
5713 * deadlocks with HCA port down notifications, which hold the
5714 * ibdm_hl_mutex.
5715 *
5716 * The GID_AVAILABLE event is handled here. A taskq is spawned to
5717 * handle GID_UNAVAILABLE.
5718 *
5719 * A new mutex, ibdm_ibnex_mutex, has been introduced to protect
5720 * ibnex_callback. This has been done to prevent any possible
5721 * deadlock (described above) while handling GID_AVAILABLE.
5722 *
5723 * IBMF calls the event callback for an HCA port. The SA handle
5724 * for this port remains valid until the callback returns, so
5725 * calls IBDM makes into IBMF using that SA handle are valid.
5726 *
5727 * IBDM will additionally check (SA handle != NULL) before
5728 * calling IBMF.
5729 */
5730 /*ARGSUSED*/
5731 static void
5732 ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle,
5733 ibmf_saa_subnet_event_t ibmf_saa_event,
5734 ibmf_saa_event_details_t *event_details, void *callback_arg)
5735 {
5736 ibdm_saa_event_arg_t *event_arg;
5737 ib_gid_t sgid, dgid;
5738 ibdm_port_attr_t *hca_port;
5739 ibdm_dp_gidinfo_t *gid_info, *node_gid_info = NULL;
5740 sa_node_record_t *nrec;
5741 size_t length;
5742
5743 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg));
5744
5745 hca_port = (ibdm_port_attr_t *)callback_arg;
5746
5747 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n",
5748 ibmf_saa_handle, ibmf_saa_event, event_details,
5749 callback_arg);
5750
5751 #ifdef DEBUG
5752 if (ibdm_ignore_saa_event)
5753 return;
5754 #endif
5755
5756 if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) {
5757 /*
5758 * Ensure no other probe / sweep fabric is in
5759 * progress.
5760 */
5761 mutex_enter(&ibdm.ibdm_mutex);
5762 while (ibdm.ibdm_busy & IBDM_BUSY)
5763 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5764 ibdm.ibdm_busy |= IBDM_BUSY;
5765 mutex_exit(&ibdm.ibdm_mutex);
5766
5767 /*
5768 * If we already know about this GID, return.
5769 * GID_AVAILABLE may be reported for multiple HCA
5770 * ports.
5771 */
5772 if ((ibdm_check_dgid(event_details->ie_gid.gid_guid,
5773 event_details->ie_gid.gid_prefix)) != NULL) {
5774 mutex_enter(&ibdm.ibdm_mutex);
5775 ibdm.ibdm_busy &= ~IBDM_BUSY;
5776 cv_broadcast(&ibdm.ibdm_busy_cv);
5777 mutex_exit(&ibdm.ibdm_mutex);
5778 return;
5779 }
5780
5781 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) "
5782 "Insertion notified",
5783 event_details->ie_gid.gid_prefix,
5784 event_details->ie_gid.gid_guid);
5785
5786 /* This is a new gid, insert it to GID list */
5787 sgid.gid_prefix = hca_port->pa_sn_prefix;
5788 sgid.gid_guid = hca_port->pa_port_guid;
5789 dgid.gid_prefix = event_details->ie_gid.gid_prefix;
5790 dgid.gid_guid = event_details->ie_gid.gid_guid;
5791 gid_info = ibdm_create_gid_info(hca_port, sgid, dgid);
5792 if (gid_info == NULL) {
5793 IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: "
5794 "create_gid_info returned NULL");
5795 mutex_enter(&ibdm.ibdm_mutex);
5796 ibdm.ibdm_busy &= ~IBDM_BUSY;
5797 cv_broadcast(&ibdm.ibdm_busy_cv);
5798 mutex_exit(&ibdm.ibdm_mutex);
5799 return;
5800 }
5801 mutex_enter(&gid_info->gl_mutex);
5802 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED;
5803 mutex_exit(&gid_info->gl_mutex);
5804
5805 /* Get the node GUID */
5806 if (ibdm_get_node_record_by_port(ibmf_saa_handle, dgid.gid_guid,
5807 &nrec, &length) != IBDM_SUCCESS) {
5808 /*
5809 * Set the state to PROBE_NOT_DONE for the
5810 * next sweep to probe it
5811 */
5812 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: "
5813 "Skipping GID : port GUID not found");
5814 mutex_enter(&gid_info->gl_mutex);
5815 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE;
5816 mutex_exit(&gid_info->gl_mutex);
5817 mutex_enter(&ibdm.ibdm_mutex);
5818 ibdm.ibdm_busy &= ~IBDM_BUSY;
5819 cv_broadcast(&ibdm.ibdm_busy_cv);
5820 mutex_exit(&ibdm.ibdm_mutex);
5821 return;
5822 }
5823 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID;
5824 gid_info->gl_devid = nrec->NodeInfo.DeviceID;
5825 kmem_free(nrec, length);
5826 gid_info->gl_portguid = dgid.gid_guid;
5827
5828 /*
5829 * Get the gid info with the same node GUID.
5830 */
5831 mutex_enter(&ibdm.ibdm_mutex);
5832 node_gid_info = ibdm.ibdm_dp_gidlist_head;
5833 while (node_gid_info) {
5834 if (node_gid_info->gl_nodeguid ==
5835 gid_info->gl_nodeguid &&
5836 node_gid_info->gl_iou != NULL) {
5837 break;
5838 }
5839 node_gid_info = node_gid_info->gl_next;
5840 }
5841 mutex_exit(&ibdm.ibdm_mutex);
5842
5843 /*
5844 * Handling a new GID requires filling of gl_hca_list.
5845 * This requires the ibdm hca_list to be parsed and hence
5846 * holding the ibdm_hl_mutex. Spawning a new thread to
5847 * handle this.
5848 */
5849 if (node_gid_info == NULL) {
5850 if (taskq_dispatch(system_taskq,
5851 ibdm_saa_handle_new_gid, (void *)gid_info,
5852 TQ_NOSLEEP) == TASKQID_INVALID) {
5853 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: "
5854 "new_gid taskq_dispatch failed");
5855 return;
5856 }
5857 }
5858
5859 mutex_enter(&ibdm.ibdm_mutex);
5860 ibdm.ibdm_busy &= ~IBDM_BUSY;
5861 cv_broadcast(&ibdm.ibdm_busy_cv);
5862 mutex_exit(&ibdm.ibdm_mutex);
5863 return;
5864 }
5865
5866 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE)
5867 return;
5868
5869 /*
5870 * GID UNAVAIL EVENT: Try to locate the GID in the GID list.
5871 * If we don't find it we just return.
5872 */
5873 mutex_enter(&ibdm.ibdm_mutex);
5874 gid_info = ibdm.ibdm_dp_gidlist_head;
5875 while (gid_info) {
5876 if (gid_info->gl_portguid ==
5877 event_details->ie_gid.gid_guid) {
5878 break;
5879 }
5880 gid_info = gid_info->gl_next;
5881 }
5882 mutex_exit(&ibdm.ibdm_mutex);
5883 if (gid_info == NULL) {
5884 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: "
5885 "GID for GUID %llX not found during GID UNAVAIL event",
5886 event_details->ie_gid.gid_guid);
5887 return;
5888 }
5889
5890 /*
5891 * If this GID is DM capable, we'll have to check whether this DGID
5892 * is reachable via another port.
5893 */
5894 if (gid_info->gl_is_dm_capable == B_TRUE) {
5895 event_arg = (ibdm_saa_event_arg_t *)kmem_alloc(
5896 sizeof (ibdm_saa_event_arg_t), KM_SLEEP);
5897 event_arg->ibmf_saa_handle = ibmf_saa_handle;
5898 event_arg->ibmf_saa_event = ibmf_saa_event;
5899 bcopy(event_details, &event_arg->event_details,
5900 sizeof (ibmf_saa_event_details_t));
5901 event_arg->callback_arg = callback_arg;
5902
5903 if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq,
5904 (void *)event_arg, TQ_NOSLEEP) == TASKQID_INVALID) {
5905 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: "
5906 "taskq_dispatch failed");
5907 ibdm_free_saa_event_arg(event_arg);
5908 return;
5909 }
5910 }
5911 }
5912
5913 /*
5914 * Handle a new GID discovered by GID_AVAILABLE saa event.
5915 */
5916 void
5917 ibdm_saa_handle_new_gid(void *arg)
5918 {
5919 ibdm_dp_gidinfo_t *gid_info;
5920 ibdm_hca_list_t *hca_list = NULL;
5921 ibdm_port_attr_t *port = NULL;
5922 ibdm_ioc_info_t *ioc_list = NULL;
5923
5924 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg);
5925
5926 gid_info = (ibdm_dp_gidinfo_t *)arg;
5927
5928 /*
5929 * Ensure that no other sweep / probe has completed
5930 * probing this gid.
5931 */
5932 mutex_enter(&gid_info->gl_mutex);
5933 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) {
5934 mutex_exit(&gid_info->gl_mutex);
5935 return;
5936 }
5937 mutex_exit(&gid_info->gl_mutex);
5938
5939 /*
5940 * Parse HCAs to fill gl_hca_list
5941 */
5942 mutex_enter(&ibdm.ibdm_hl_mutex);
5943 for (ibdm_get_next_port(&hca_list, &port, 1); port;
5944 ibdm_get_next_port(&hca_list, &port, 1)) {
5945 if (ibdm_port_reachable(port->pa_sa_hdl,
5946 gid_info->gl_portguid) == B_TRUE) {
5947 ibdm_addto_glhcalist(gid_info, hca_list);
5948 }
5949 }
5950 mutex_exit(&ibdm.ibdm_hl_mutex);
5951
5952 /*
5953 * Ensure no other probe / sweep fabric is in
5954 * progress.
5955 */
5956 mutex_enter(&ibdm.ibdm_mutex);
5957 while (ibdm.ibdm_busy & IBDM_BUSY)
5958 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5959 ibdm.ibdm_busy |= IBDM_BUSY;
5960 mutex_exit(&ibdm.ibdm_mutex);
5961
5962 /*
5963 * New IOU; probe it to check for new IOCs.
5964 */
5965 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: "
5966 "new GID : probing");
5967 mutex_enter(&ibdm.ibdm_mutex);
5968 ibdm.ibdm_ngid_probes_in_progress++;
5969 mutex_exit(&ibdm.ibdm_mutex);
5970 mutex_enter(&gid_info->gl_mutex);
5971 gid_info->gl_reprobe_flag = 0;
5972 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE;
5973 mutex_exit(&gid_info->gl_mutex);
5974 ibdm_probe_gid_thread((void *)gid_info);
5975
5976 mutex_enter(&ibdm.ibdm_mutex);
5977 ibdm_wait_probe_completion();
5978 mutex_exit(&ibdm.ibdm_mutex);
5979
5980 if (gid_info->gl_iou == NULL) {
5981 mutex_enter(&ibdm.ibdm_mutex);
5982 ibdm.ibdm_busy &= ~IBDM_BUSY;
5983 cv_broadcast(&ibdm.ibdm_busy_cv);
5984 mutex_exit(&ibdm.ibdm_mutex);
5985 return;
5986 }
5987
5988 /*
5989 * Update GID list in all IOCs affected by this
5990 */
5991 ioc_list = ibdm_update_ioc_gidlist(gid_info, 1);
5992
5993 /*
5994 * Pass on the IOCs with updated GIDs to IBnexus
5995 */
5996 if (ioc_list) {
5997 mutex_enter(&ibdm.ibdm_ibnex_mutex);
5998 if (ibdm.ibdm_ibnex_callback != NULL) {
5999 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list,
6000 IBDM_EVENT_IOC_PROP_UPDATE);
6001 }
6002 mutex_exit(&ibdm.ibdm_ibnex_mutex);
6003 }
6004
6005 mutex_enter(&ibdm.ibdm_mutex);
6006 ibdm.ibdm_busy &= ~IBDM_BUSY;
6007 cv_broadcast(&ibdm.ibdm_busy_cv);
6008 mutex_exit(&ibdm.ibdm_mutex);
6009 }
6010
6011 /*
6012 * ibdm_saa_event_taskq :
6013 * GID_UNAVAILABLE Event handling requires ibdm_hl_mutex to be
6014 * held. The GID_UNAVAILABLE handling is done in a taskq to
6015 * prevent deadlocks with HCA port down notifications which hold
6016 * ibdm_hl_mutex.
6017 */
6018 void
6019 ibdm_saa_event_taskq(void *arg)
6020 {
6021 ibdm_saa_event_arg_t *event_arg;
6022 ibmf_saa_handle_t ibmf_saa_handle;
6023 ibmf_saa_subnet_event_t ibmf_saa_event;
6024 ibmf_saa_event_details_t *event_details;
6025 void *callback_arg;
6026
6027 ibdm_dp_gidinfo_t *gid_info;
6028 ibdm_port_attr_t *hca_port, *port = NULL;
6029 ibdm_hca_list_t *hca_list = NULL;
6030 int sa_handle_valid = 0;
6031 ibdm_ioc_info_t *ioc_list = NULL;
6032
6033 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg));
6034
6035 event_arg = (ibdm_saa_event_arg_t *)arg;
6036 ibmf_saa_handle = event_arg->ibmf_saa_handle;
6037 ibmf_saa_event = event_arg->ibmf_saa_event;
6038 event_details = &event_arg->event_details;
6039 callback_arg = event_arg->callback_arg;
6040
6041 ASSERT(callback_arg != NULL);
6042 ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE);
6043 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)",
6044 ibmf_saa_handle, ibmf_saa_event, event_details,
6045 callback_arg);
6046
6047 hca_port = (ibdm_port_attr_t *)callback_arg;
6048
6049 /* Check if the port_attr is still valid */
6050 mutex_enter(&ibdm.ibdm_hl_mutex);
6051 for (ibdm_get_next_port(&hca_list, &port, 0); port;
6052 ibdm_get_next_port(&hca_list, &port, 0)) {
6053 if (port == hca_port && port->pa_port_guid ==
6054 hca_port->pa_port_guid) {
6055 if (ibmf_saa_handle == hca_port->pa_sa_hdl)
6056 sa_handle_valid = 1;
6057 break;
6058 }
6059 }
6060 mutex_exit(&ibdm.ibdm_hl_mutex);
6061 if (sa_handle_valid == 0) {
6062 ibdm_free_saa_event_arg(event_arg);
6063 return;
6064 }
6065
6066 if (hca_port && (hca_port->pa_sa_hdl == NULL ||
6067 ibmf_saa_handle != hca_port->pa_sa_hdl)) {
6068 ibdm_free_saa_event_arg(event_arg);
6069 return;
6070 }
6071 hca_list = NULL;
6072 port = NULL;
6073
6074 /*
6075 * Check if the GID is visible to other HCA ports.
6076 * Return if so.
6077 */
6078 mutex_enter(&ibdm.ibdm_hl_mutex);
6079 for (ibdm_get_next_port(&hca_list, &port, 1); port;
6080 ibdm_get_next_port(&hca_list, &port, 1)) {
6081 if (ibdm_port_reachable(port->pa_sa_hdl,
6082 event_details->ie_gid.gid_guid) == B_TRUE) {
6083 mutex_exit(&ibdm.ibdm_hl_mutex);
6084 ibdm_free_saa_event_arg(event_arg);
6085 return;
6086 }
6087 }
6088 mutex_exit(&ibdm.ibdm_hl_mutex);
6089
6090 /*
6091 * Ensure no other probe / sweep fabric is in
6092 * progress.
6093 */
6094 mutex_enter(&ibdm.ibdm_mutex);
6095 while (ibdm.ibdm_busy & IBDM_BUSY)
6096 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
6097 ibdm.ibdm_busy |= IBDM_BUSY;
6098 mutex_exit(&ibdm.ibdm_mutex);
6099
6100 /*
6101 * If this GID is no longer in the GID list, return.
6102 * GID_UNAVAILABLE may be reported for multiple HCA
6103 * ports.
6104 */
6105 mutex_enter(&ibdm.ibdm_mutex);
6106 gid_info = ibdm.ibdm_dp_gidlist_head;
6107 while (gid_info) {
6108 if (gid_info->gl_portguid ==
6109 event_details->ie_gid.gid_guid) {
6110 break;
6111 }
6112 gid_info = gid_info->gl_next;
6113 }
6114 mutex_exit(&ibdm.ibdm_mutex);
6115 if (gid_info == NULL) {
6116 mutex_enter(&ibdm.ibdm_mutex);
6117 ibdm.ibdm_busy &= ~IBDM_BUSY;
6118 cv_broadcast(&ibdm.ibdm_busy_cv);
6119 mutex_exit(&ibdm.ibdm_mutex);
6120 ibdm_free_saa_event_arg(event_arg);
6121 return;
6122 }
6123
6124 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) "
6125 "Unavailable notification",
6126 event_details->ie_gid.gid_prefix,
6127 event_details->ie_gid.gid_guid);
6128
6129 /*
6130 * Update GID list in all IOCs affected by this
6131 */
6132 if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED ||
6133 gid_info->gl_state == IBDM_GID_PROBING_COMPLETE)
6134 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0);
6135
6136 /*
6137 * Remove the GID from the global GID list.
6138 * Handle the case where all port GIDs for an
6139 * IOU have been hot-removed by checking ngids in
6140 * both gid_info & ioc_info.
6141 */
6142 mutex_enter(&ibdm.ibdm_mutex);
6143 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) {
6144 mutex_enter(&gid_info->gl_mutex);
6145 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
6146 mutex_exit(&gid_info->gl_mutex);
6147 }
6148 if (gid_info->gl_prev != NULL)
6149 gid_info->gl_prev->gl_next = gid_info->gl_next;
6150 if (gid_info->gl_next != NULL)
6151 gid_info->gl_next->gl_prev = gid_info->gl_prev;
6152
6153 if (gid_info == ibdm.ibdm_dp_gidlist_head)
6154 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next;
6155 if (gid_info == ibdm.ibdm_dp_gidlist_tail)
6156 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev;
6157 ibdm.ibdm_ngids--;
6158
6159 ibdm.ibdm_busy &= ~IBDM_BUSY;
6160 cv_broadcast(&ibdm.ibdm_busy_cv);
6161 mutex_exit(&ibdm.ibdm_mutex);
6162
6163 /* free the hca_list on this gid_info */
6164 ibdm_delete_glhca_list(gid_info);
6165
6166 mutex_destroy(&gid_info->gl_mutex);
6167 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t));
6168
6169 /*
6170 * Pass on the IOCs with updated GIDs to IBnexus
6171 */
6172 if (ioc_list) {
6173 IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE "
6174 "IOC_PROP_UPDATE for %p\n", ioc_list);
6175 mutex_enter(&ibdm.ibdm_ibnex_mutex);
6176 if (ibdm.ibdm_ibnex_callback != NULL) {
6177 (*ibdm.ibdm_ibnex_callback)((void *)
6178 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
6179 }
6180 mutex_exit(&ibdm.ibdm_ibnex_mutex);
6181 }
6182
6183 ibdm_free_saa_event_arg(event_arg);
6184 }
6185
6186
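/*
 * ibdm_cmp_gid_list()
 * Returns 0 if every GID in the "new" list is also present in the
 * "prev" list (the lists may be ordered differently), 1 otherwise.
 */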
6187 static int
6188 ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev)
6189 {
6190 ibdm_gid_t *scan_new, *scan_prev;
6191 int cmp_failed = 0;
6192
6193 ASSERT(new != NULL);
6194 ASSERT(prev != NULL);
6195
6196 /*
6197 * Search for each new gid anywhere in the prev GID list.
6198 * Note that the gid list could have been re-ordered.
6199 */
6200 for (scan_new = new; scan_new; scan_new = scan_new->gid_next) {
6201 for (scan_prev = prev, cmp_failed = 1; scan_prev;
6202 scan_prev = scan_prev->gid_next) {
6203 if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi &&
6204 scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) {
6205 cmp_failed = 0;
6206 break;
6207 }
6208 }
6209
6210 if (cmp_failed)
6211 return (1);
6212 }
6213 return (0);
6214 }
6215
6216 /*
6217 * This is always called in a single thread.
6218 * This function updates the gid_list and serv_list of an IOC.
6219 * The current gid_list is in ioc_info_t (contains only port
6220 * guids for which the probe is done) & gidinfo_t (other port gids).
6221 * The gids in both locations are used for comparison.
6222 */
6223 static void
6224 ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo)
6225 {
6226 ibdm_gid_t *cur_gid_list;
6227 uint_t cur_nportgids;
6228
6229 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
6230
6231 ioc->ioc_info_updated.ib_prop_updated = 0;
6232
6233
6234 /* Current GID list in gid_info only */
6235 cur_gid_list = gidinfo->gl_gid;
6236 cur_nportgids = gidinfo->gl_ngids;
6237
6238 if (ioc->ioc_prev_serv_cnt !=
6239 ioc->ioc_profile.ioc_service_entries ||
6240 ibdm_serv_cmp(&ioc->ioc_serv[0], &ioc->ioc_prev_serv[0],
6241 ioc->ioc_prev_serv_cnt))
6242 ioc->ioc_info_updated.ib_srv_prop_updated = 1;
6243
6244 if (ioc->ioc_prev_nportgids != cur_nportgids ||
6245 ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) {
6246 ioc->ioc_info_updated.ib_gid_prop_updated = 1;
6247 } else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) {
6248 ioc->ioc_info_updated.ib_gid_prop_updated = 1;
6249 }
6250
6251 /* Zero out previous entries */
6252 ibdm_free_gid_list(ioc->ioc_prev_gid_list);
6253 if (ioc->ioc_prev_serv)
6254 kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt *
6255 sizeof (ibdm_srvents_info_t));
6256 ioc->ioc_prev_serv_cnt = 0;
6257 ioc->ioc_prev_nportgids = 0;
6258 ioc->ioc_prev_serv = NULL;
6259 ioc->ioc_prev_gid_list = NULL;
6260 }
6261
6262 /*
6263 * Handle GID removal. This returns the gid_info of a GID with the same
6264 * node GUID, if found. For a GID with IOU information, the same
6265 * gid_info is returned if no gid_info with the same node_guid is found.
6266 */
6267 static ibdm_dp_gidinfo_t *
6268 ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid)
6269 {
6270 ibdm_dp_gidinfo_t *gid_list;
6271
6272 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid);
6273
6274 if (rm_gid->gl_iou == NULL) {
6275 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou");
6276 /*
6277 * Search for a GID with same node_guid and
6278 * gl_iou != NULL
6279 */
6280 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
6281 gid_list = gid_list->gl_next) {
6282 if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid
6283 == rm_gid->gl_nodeguid))
6284 break;
6285 }
6286
6287 if (gid_list)
6288 ibdm_rmfrom_glgid_list(gid_list, rm_gid);
6289
6290 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list);
6291 return (gid_list);
6292 } else {
6293 /*
6294 * Search for a GID with same node_guid and
6295 * gl_iou == NULL
6296 */
6297 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou");
6298 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
6299 gid_list = gid_list->gl_next) {
6300 if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid
6301 == rm_gid->gl_nodeguid))
6302 break;
6303 }
6304
6305 if (gid_list) {
6306 /*
6307 * Copy the following fields from rm_gid :
6308 * 1. gl_state
6309 * 2. gl_iou
6310 * 3. gl_gid & gl_ngids
6311 *
6312 * Note : Function is synchronized by
6313 * ibdm_busy flag.
6314 *
6315 * Note : Redirect info is initialized if
6316 * any MADs for the GID fail
6317 */
6318 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm "
6319 "copying info to GID with gl_iou != NULL");
6320 gid_list->gl_state = rm_gid->gl_state;
6321 gid_list->gl_iou = rm_gid->gl_iou;
6322 gid_list->gl_gid = rm_gid->gl_gid;
6323 gid_list->gl_ngids = rm_gid->gl_ngids;
6324
6325 /* Remove the GID from gl_gid list */
6326 ibdm_rmfrom_glgid_list(gid_list, rm_gid);
6327 } else {
6328 /*
6329 * Handle a case where all GIDs to the IOU have
6330 * been removed.
6331 */
6332 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID "
6333 "to IOU");
6334
6335 ibdm_rmfrom_glgid_list(rm_gid, rm_gid);
6336 return (rm_gid);
6337 }
6338 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list);
6339 return (gid_list);
6340 }
6341 }
6342
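/*
 * ibdm_rmfrom_glgid_list()
 * Removes the entry matching rm_gid's DGID from gid_info's port
 * GID list (gl_gid) and decrements gl_ngids.
 */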
6343 static void
6344 ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info,
6345 ibdm_dp_gidinfo_t *rm_gid)
6346 {
6347 ibdm_gid_t *tmp, *prev;
6348
6349 IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)",
6350 gid_info, rm_gid);
6351
6352 for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) {
6353 if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi &&
6354 tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) {
6355 if (prev == NULL)
6356 gid_info->gl_gid = tmp->gid_next;
6357 else
6358 prev->gid_next = tmp->gid_next;
6359
6360 kmem_free(tmp, sizeof (ibdm_gid_t));
6361 gid_info->gl_ngids--;
6362 break;
6363 } else {
6364 prev = tmp;
6365 tmp = tmp->gid_next;
6366 }
6367 }
6368 }
6369
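/*
 * ibdm_addto_gidlist()
 * Duplicates the "dest" GID list and appends the copy to the list
 * pointed to by src_ptr.
 */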
6370 static void
6371 ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest)
6372 {
6373 ibdm_gid_t *head = NULL, *new, *tail;
6374
6375 /* First copy the destination */
6376 for (; dest; dest = dest->gid_next) {
6377 new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP);
6378 new->gid_dgid_hi = dest->gid_dgid_hi;
6379 new->gid_dgid_lo = dest->gid_dgid_lo;
6380 new->gid_next = head;
6381 head = new;
6382 }
6383
6384 /* Insert this to the source */
6385 if (*src_ptr == NULL)
6386 *src_ptr = head;
6387 else {
6388 for (tail = *src_ptr; tail->gid_next != NULL;
6389 tail = tail->gid_next)
6390 ;
6391
6392 tail->gid_next = head;
6393 }
6394 }
6395
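/*
 * ibdm_free_gid_list()
 * Frees every element of a port GID list.
 */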
6396 static void
6397 ibdm_free_gid_list(ibdm_gid_t *head)
6398 {
6399 ibdm_gid_t *delete;
6400
6401 for (delete = head; delete; ) {
6402 head = delete->gid_next;
6403 kmem_free(delete, sizeof (ibdm_gid_t));
6404 delete = head;
6405 }
6406 }
6407
6408 /*
6409 * This function rescans the DM capable GIDs (gl_state is
6410 * IBDM_GID_PROBING_COMPLETE or IBDM_GID_PROBING_SKIPPED). It
6411 * basically checks if the DM capable GID is reachable. If
6412 * not, this is handled the same way as GID_UNAVAILABLE,
6413 * except that notifications are not sent to IBnexus.
6414 *
6415 * This function also initializes the ioc_prev_gid_list for
6416 * a particular IOC (when called from probe_ioc, with
6417 * ioc_guidp != NULL) or all IOCs for the gid (called from
6418 * sweep_fabric, ioc_guidp == NULL).
6419 */
6420 static void
6421 ibdm_rescan_gidlist(ib_guid_t *ioc_guidp)
6422 {
6423 ibdm_dp_gidinfo_t *gid_info, *tmp;
6424 int ii, niocs, found;
6425 ibdm_hca_list_t *hca_list = NULL;
6426 ibdm_port_attr_t *port = NULL;
6427 ibdm_ioc_info_t *ioc_list;
6428
6429 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) {
6430 found = 0;
6431 if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED &&
6432 gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) {
6433 gid_info = gid_info->gl_next;
6434 continue;
6435 }
6436
6437 /*
6438 * Check if the GID is still visible to any HCA port;
6439 * if so, keep it and move on to the next GID.
6440 */
6441 mutex_enter(&ibdm.ibdm_hl_mutex);
6442 for (ibdm_get_next_port(&hca_list, &port, 1); port;
6443 ibdm_get_next_port(&hca_list, &port, 1)) {
6444 if (ibdm_port_reachable(port->pa_sa_hdl,
6445 gid_info->gl_dgid_lo) == B_TRUE) {
6446 found = 1;
6447 break;
6448 }
6449 }
6450 mutex_exit(&ibdm.ibdm_hl_mutex);
6451
6452 if (found) {
6453 if (gid_info->gl_iou == NULL) {
6454 gid_info = gid_info->gl_next;
6455 continue;
6456 }
6457
6458 /* Initialize the ioc_prev_gid_list */
6459 niocs =
6460 gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
6461 for (ii = 0; ii < niocs; ii++) {
6462 ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii);
6463
6464 if (ioc_guidp == NULL || (*ioc_guidp ==
6465 ioc_list->ioc_profile.ioc_guid)) {
6466 /* Add info of GIDs in gid_info also */
6467 ibdm_addto_gidlist(
6468 &ioc_list->ioc_prev_gid_list,
6469 gid_info->gl_gid);
6470 ioc_list->ioc_prev_nportgids =
6471 gid_info->gl_ngids;
6472 }
6473 }
6474 gid_info = gid_info->gl_next;
6475 continue;
6476 }
6477
6478 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist "
6479 "deleted port GUID %llx",
6480 gid_info->gl_dgid_lo);
6481
6482 /*
6483 * Update GID list in all IOCs affected by this
6484 */
6485 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0);
6486
6487 /*
6488 * Remove GID from the global GID list
6489 * Handle the case where all port GIDs for an
6490 * IOU have been hot-removed.
6491 */
6492 mutex_enter(&ibdm.ibdm_mutex);
6493 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) {
6494 mutex_enter(&gid_info->gl_mutex);
6495 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
6496 mutex_exit(&gid_info->gl_mutex);
6497 }
6498
6499 tmp = gid_info->gl_next;
6500 if (gid_info->gl_prev != NULL)
6501 gid_info->gl_prev->gl_next = gid_info->gl_next;
6502 if (gid_info->gl_next != NULL)
6503 gid_info->gl_next->gl_prev = gid_info->gl_prev;
6504
6505 if (gid_info == ibdm.ibdm_dp_gidlist_head)
6506 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next;
6507 if (gid_info == ibdm.ibdm_dp_gidlist_tail)
6508 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev;
6509 ibdm.ibdm_ngids--;
6510 mutex_exit(&ibdm.ibdm_mutex);
6511
6512 /* free the hca_list on this gid_info */
6513 ibdm_delete_glhca_list(gid_info);
6514
6515 mutex_destroy(&gid_info->gl_mutex);
6516 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t));
6517
6518 gid_info = tmp;
6519
6520 /*
6521 * Pass on the IOCs with updated GIDs to IBnexus
6522 */
6523 if (ioc_list) {
6524 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist "
6525 "IOC_PROP_UPDATE for %p\n", ioc_list);
6526 mutex_enter(&ibdm.ibdm_ibnex_mutex);
6527 if (ibdm.ibdm_ibnex_callback != NULL) {
6528 (*ibdm.ibdm_ibnex_callback)((void *)
6529 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
6530 }
6531 mutex_exit(&ibdm.ibdm_ibnex_mutex);
6532 }
6533 }
6534 }
6535
6536 /*
6537 * This function notifies IBnex of IOCs on this GID.
6538 * Notification is for GIDs with gl_reprobe_flag set.
6539 * The flag is set when IOC probe / fabric sweep
6540 * probes a GID starting from CLASS port info.
6541 *
6542 * IBnexus will have information of a reconnected IOC
6543 * if it had probed it before. If this is a new IOC,
6544 * IBnexus ignores the notification.
6545 *
6546 * This function should be called with no locks held.
6547 */
6548 static void
6549 ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info)
6550 {
6551 ibdm_ioc_info_t *ioc_list;
6552
6553 if (gid_info->gl_reprobe_flag == 0 ||
6554 gid_info->gl_iou == NULL)
6555 return;
6556
6557 ioc_list = ibdm_update_ioc_gidlist(gid_info, -1);
6558
6559 /*
6560 * Pass on the IOCs with updated GIDs to IBnexus
6561 */
6562 if (ioc_list) {
6563 mutex_enter(&ibdm.ibdm_ibnex_mutex);
6564 if (ibdm.ibdm_ibnex_callback != NULL) {
6565 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list,
6566 IBDM_EVENT_IOC_PROP_UPDATE);
6567 }
6568 mutex_exit(&ibdm.ibdm_ibnex_mutex);
6569 }
6570 }
6571
6572
6573 static void
6574 ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg)
6575 {
6576 if (arg != NULL)
6577 kmem_free(arg, sizeof (ibdm_saa_event_arg_t));
6578 }
6579
6580 /*
6581 * This function parses the list of HCAs and HCA ports
6582 * to return the port_attr of the next HCA port. A port
6583 * connected to the IB fabric (port state active) is returned
6584 * only if connect_flag is set.
6585 */
6586 static void
6587 ibdm_get_next_port(ibdm_hca_list_t **inp_hcap,
6588 ibdm_port_attr_t **inp_portp, int connect_flag)
6589 {
6590 int ii;
6591 ibdm_port_attr_t *port, *next_port = NULL;
6592 ibdm_port_attr_t *inp_port;
6593 ibdm_hca_list_t *hca_list;
6594 int found = 0;
6595
6596 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
6597 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)",
6598 inp_hcap, inp_portp, connect_flag);
6599
6600 hca_list = *inp_hcap;
6601 inp_port = *inp_portp;
6602
6603 if (hca_list == NULL)
6604 hca_list = ibdm.ibdm_hca_list_head;
6605
6606 for (; hca_list; hca_list = hca_list->hl_next) {
6607 for (ii = 0; ii < hca_list->hl_nports; ii++) {
6608 port = &hca_list->hl_port_attr[ii];
6609
6610 /*
6611 * inp_port != NULL;
6612 * Skip till we find the matching port
6613 */
6614 if (inp_port && !found) {
6615 if (inp_port == port)
6616 found = 1;
6617 continue;
6618 }
6619
6620 if (!connect_flag) {
6621 next_port = port;
6622 break;
6623 }
6624
6625 if (port->pa_sa_hdl == NULL)
6626 ibdm_initialize_port(port);
6627 if (port->pa_sa_hdl == NULL)
6628 (void) ibdm_fini_port(port);
6629 else if (next_port == NULL &&
6630 port->pa_sa_hdl != NULL &&
6631 port->pa_state == IBT_PORT_ACTIVE) {
6632 next_port = port;
6633 break;
6634 }
6635 }
6636
6637 if (next_port)
6638 break;
6639 }
6640
6641 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : "
6642 "returns hca_list %p port %p", hca_list, next_port);
6643 *inp_hcap = hca_list;
6644 *inp_portp = next_port;
6645 }
6646
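/*
 * ibdm_add_to_gl_gid()
 * Prepends the DGID of "addgid" to the port GID list (gl_gid) of
 * the node GID "nodegid" and bumps gl_ngids.
 */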
6647 static void
6648 ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid)
6649 {
6650 ibdm_gid_t *tmp;
6651
6652 tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP);
6653 tmp->gid_dgid_hi = addgid->gl_dgid_hi;
6654 tmp->gid_dgid_lo = addgid->gl_dgid_lo;
6655
6656 mutex_enter(&nodegid->gl_mutex);
6657 tmp->gid_next = nodegid->gl_gid;
6658 nodegid->gl_gid = tmp;
6659 nodegid->gl_ngids++;
6660 mutex_exit(&nodegid->gl_mutex);
6661 }
6662
6663 static void
6664 ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info,
6665 ibdm_hca_list_t *hca)
6666 {
6667 ibdm_hca_list_t *head, *prev = NULL, *temp;
6668
6669 IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) "
6670 ": gl_hca_list %p", gid_info, hca, gid_info->gl_hca_list);
6671 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex));
6672
6673 mutex_enter(&gid_info->gl_mutex);
6674 head = gid_info->gl_hca_list;
6675 if (head == NULL) {
6676 head = ibdm_dup_hca_attr(hca);
6677 head->hl_next = NULL;
6678 gid_info->gl_hca_list = head;
6679 mutex_exit(&gid_info->gl_mutex);
6680 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: "
6681 "gid %p, gl_hca_list %p", gid_info,
6682 gid_info->gl_hca_list);
6683 return;
6684 }
6685
6686 /* Check if already in the list */
6687 while (head) {
6688 if (head->hl_hca_guid == hca->hl_hca_guid) {
6689 mutex_exit(&gid_info->gl_mutex);
6690 IBTF_DPRINTF_L4(ibdm_string,
6691 "\taddto_glhcalist : gid %p hca %p dup",
6692 gid_info, hca);
6693 return;
6694 }
6695 prev = head;
6696 head = head->hl_next;
6697 }
6698
6699 /* Add this HCA to gl_hca_list */
6700 temp = ibdm_dup_hca_attr(hca);
6701 temp->hl_next = NULL;
6702 prev->hl_next = temp;
6703 mutex_exit(&gid_info->gl_mutex);
6704
6705 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: "
6706 "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list);
6707 }
6708
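/*
 * ibdm_delete_glhca_list()
 * Frees the per-GID HCA list (gl_hca_list) attached to gid_info.
 */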
6709 static void
6710 ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info)
6711 {
6712 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex));
6713 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex));
6714
6715 mutex_enter(&gid_info->gl_mutex);
6716 if (gid_info->gl_hca_list)
6717 ibdm_ibnex_free_hca_list(gid_info->gl_hca_list);
6718 gid_info->gl_hca_list = NULL;
6719 mutex_exit(&gid_info->gl_mutex);
6720 }
6721
6722
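/*
 * ibdm_reset_all_dgids()
 * Called with the SA handle of an HCA port that is going down.
 * Re-targets DM capable GIDs that were using that handle to another
 * HCA port, and deletes GIDs that can no longer be reached.
 */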
6723 static void
6724 ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl)
6725 {
6726 IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)",
6727 port_sa_hdl);
6728
6729 if (ibdm_enumerate_iocs == 0)
6730 return;
6731
6732 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex));
6733 ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex));
6734
6735 /* Check : Not busy in another probe / sweep */
6736 mutex_enter(&ibdm.ibdm_mutex);
6737 if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) {
6738 ibdm_dp_gidinfo_t *gid_info;
6739
6740 ibdm.ibdm_busy |= IBDM_BUSY;
6741 mutex_exit(&ibdm.ibdm_mutex);
6742
6743 /*
6744 * Check if any GID is using the SA & IBMF handle
6745 * of the HCA port going down. Reset ibdm_dp_gidinfo_t
6746 * using another HCA port which can reach the GID.
6747 * This is for DM capable GIDs only; there is no need
6748 * to do this for others.
6749 *
6750 * Delete the GID if no alternate HCA port to reach
6751 * it is found.
6752 */
6753 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) {
6754 ibdm_dp_gidinfo_t *tmp;
6755
6756 IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr "
6757 "checking gidinfo %p", gid_info);
6758
6759 if (gid_info->gl_sa_hdl == port_sa_hdl) {
6760 IBTF_DPRINTF_L3(ibdm_string,
6761 "\tevent_hdlr: down HCA port hdl "
6762 "matches gid %p", gid_info);
6763
6764 /*
6765 * The non-DM GIDs can come back
6766 * with a new subnet prefix when
6767 * the HCA port comes up again. To
6768 * avoid issues, delete non-DM
6769 * capable GIDs if the gid was
6770 * discovered using the HCA port
6771 * going down. This is ensured by
6772 * setting gl_disconnected to 1.
6773 */
6774 if (gid_info->gl_is_dm_capable == B_FALSE)
6775 gid_info->gl_disconnected = 1;
6776 else
6777 ibdm_reset_gidinfo(gid_info);
6778
6779 if (gid_info->gl_disconnected) {
6780 IBTF_DPRINTF_L3(ibdm_string,
6781 "\tevent_hdlr: deleting"
6782 " gid %p", gid_info);
6783 tmp = gid_info;
6784 gid_info = gid_info->gl_next;
6785 ibdm_delete_gidinfo(tmp);
6786 } else
6787 gid_info = gid_info->gl_next;
6788 } else
6789 gid_info = gid_info->gl_next;
6790 }
6791
6792 mutex_enter(&ibdm.ibdm_mutex);
6793 ibdm.ibdm_busy &= ~IBDM_BUSY;
6794 cv_signal(&ibdm.ibdm_busy_cv);
6795 }
6796 mutex_exit(&ibdm.ibdm_mutex);
6797 }
6798
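/*
 * ibdm_reset_gidinfo()
 * Re-resolves the path to the GID through another active local HCA
 * port and re-initializes the addressing fields of gidinfo; marks
 * it disconnected if no usable path or matching P_Key is found.
 */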
6799 static void
6800 ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo)
6801 {
6802 ibdm_hca_list_t *hca_list = NULL;
6803 ibdm_port_attr_t *port = NULL;
6804 int gid_reinited = 0;
6805 sa_node_record_t *nr, *tmp;
6806 sa_portinfo_record_t *pi;
6807 size_t nr_len = 0, pi_len = 0;
6808 size_t path_len;
6809 ib_gid_t sgid, dgid;
6810 int ret, ii, nrecords;
6811 sa_path_record_t *path;
6812 uint8_t npaths = 1;
6813 ibdm_pkey_tbl_t *pkey_tbl;
6814
6815 IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo);
6816
6817 /*
6818 * Get list of all the ports reachable from the local known HCA
6819 * ports which are active
6820 */
6821 mutex_enter(&ibdm.ibdm_hl_mutex);
6822 for (ibdm_get_next_port(&hca_list, &port, 1); port;
6823 ibdm_get_next_port(&hca_list, &port, 1)) {
6824
6825
6826 /*
6827 * Get the path and re-populate the gidinfo.
6828 * Getting the path is the same as in probe_ioc.
6829 * Init the gid info as in ibdm_create_gidinfo().
6830 */
6831 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len,
6832 gidinfo->gl_nodeguid);
6833 if (nr == NULL) {
6834 IBTF_DPRINTF_L4(ibdm_string,
6835 "\treset_gidinfo : no records");
6836 continue;
6837 }
6838
6839 nrecords = (nr_len / sizeof (sa_node_record_t));
6840 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) {
6841 if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid)
6842 break;
6843 }
6844
6845 if (ii == nrecords) {
6846 IBTF_DPRINTF_L4(ibdm_string,
6847 "\treset_gidinfo : no record for portguid");
6848 kmem_free(nr, nr_len);
6849 continue;
6850 }
6851
6852 pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID);
6853 if (pi == NULL) {
6854 IBTF_DPRINTF_L4(ibdm_string,
6855 "\treset_gidinfo : no portinfo");
6856 kmem_free(nr, nr_len);
6857 continue;
6858 }
6859
6860 sgid.gid_prefix = port->pa_sn_prefix;
6861 sgid.gid_guid = port->pa_port_guid;
6862 dgid.gid_prefix = pi->PortInfo.GidPrefix;
6863 dgid.gid_guid = tmp->NodeInfo.PortGUID;
6864
6865 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid,
6866 IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path);
6867
6868 if ((ret != IBMF_SUCCESS) || path == NULL) {
6869 IBTF_DPRINTF_L4(ibdm_string,
6870 "\treset_gidinfo : no paths");
6871 kmem_free(pi, pi_len);
6872 kmem_free(nr, nr_len);
6873 continue;
6874 }
6875
6876 gidinfo->gl_dgid_hi = path->DGID.gid_prefix;
6877 gidinfo->gl_dgid_lo = path->DGID.gid_guid;
6878 gidinfo->gl_sgid_hi = path->SGID.gid_prefix;
6879 gidinfo->gl_sgid_lo = path->SGID.gid_guid;
6880 gidinfo->gl_p_key = path->P_Key;
6881 gidinfo->gl_sa_hdl = port->pa_sa_hdl;
6882 gidinfo->gl_ibmf_hdl = port->pa_ibmf_hdl;
6883 gidinfo->gl_slid = path->SLID;
6884 gidinfo->gl_dlid = path->DLID;
6885 /* Reset redirect info, next MAD will set if redirected */
6886 gidinfo->gl_redirected = 0;
6887 gidinfo->gl_devid = (*tmp).NodeInfo.DeviceID;
6888 gidinfo->gl_SL = path->SL;
6889
6890 gidinfo->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT;
6891 for (ii = 0; ii < port->pa_npkeys; ii++) {
6892 if (port->pa_pkey_tbl == NULL)
6893 break;
6894
6895 pkey_tbl = &port->pa_pkey_tbl[ii];
6896 if ((gidinfo->gl_p_key == pkey_tbl->pt_pkey) &&
6897 (pkey_tbl->pt_qp_hdl != NULL)) {
6898 gidinfo->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
6899 break;
6900 }
6901 }
6902
6903 if (gidinfo->gl_qp_hdl == NULL)
6904 IBTF_DPRINTF_L2(ibdm_string,
6905 "\treset_gid_info: No matching Pkey");
6906 else
6907 gid_reinited = 1;
6908
6909 kmem_free(path, path_len);
6910 kmem_free(pi, pi_len);
6911 kmem_free(nr, nr_len);
6912 break;
6913 }
6914 mutex_exit(&ibdm.ibdm_hl_mutex);
6915
6916 if (!gid_reinited)
6917 gidinfo->gl_disconnected = 1;
6918 }
6919
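/*
 * ibdm_delete_gidinfo()
 * Updates the GID lists of affected IOCs, unlinks gidinfo from the
 * global GID list if present, frees its resources and notifies
 * IB nexus of the IOC property updates.
 */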
6920 static void
6921 ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo)
6922 {
6923 ibdm_ioc_info_t *ioc_list;
6924 int in_gidlist = 0;
6925
6926 /*
6927 * Check if gidinfo has been inserted into the
6928 * ibdm_dp_gidlist_head list: gl_next or gl_prev is
6929 * non-NULL, or it is the list head itself.
6930 */
6931 if (gidinfo->gl_prev != NULL ||
6932 gidinfo->gl_next != NULL ||
6933 ibdm.ibdm_dp_gidlist_head == gidinfo)
6934 in_gidlist = 1;
6935
6936 ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0);
6937
6938 /*
6939 * Remove GID from the global GID list
6940 * Handle the case where all port GIDs for an
6941 * IOU have been hot-removed.
6942 */
6943 mutex_enter(&ibdm.ibdm_mutex);
6944 if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) {
6945 mutex_enter(&gidinfo->gl_mutex);
6946 (void) ibdm_free_iou_info(gidinfo, &gidinfo->gl_iou);
6947 mutex_exit(&gidinfo->gl_mutex);
6948 }
6949
6950 /* Delete gl_hca_list */
6951 mutex_exit(&ibdm.ibdm_mutex);
6952 ibdm_delete_glhca_list(gidinfo);
6953 mutex_enter(&ibdm.ibdm_mutex);
6954
6955 if (in_gidlist) {
6956 if (gidinfo->gl_prev != NULL)
6957 gidinfo->gl_prev->gl_next = gidinfo->gl_next;
6958 if (gidinfo->gl_next != NULL)
6959 gidinfo->gl_next->gl_prev = gidinfo->gl_prev;
6960
6961 if (gidinfo == ibdm.ibdm_dp_gidlist_head)
6962 ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next;
6963 if (gidinfo == ibdm.ibdm_dp_gidlist_tail)
6964 ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev;
6965 ibdm.ibdm_ngids--;
6966 }
6967 mutex_exit(&ibdm.ibdm_mutex);
6968
6969 mutex_destroy(&gidinfo->gl_mutex);
6970 cv_destroy(&gidinfo->gl_probe_cv);
6971 kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t));
6972
6973 /*
6974 * Pass on the IOCs with updated GIDs to IBnexus
6975 */
6976 if (ioc_list) {
6977 IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo "
6978 "IOC_PROP_UPDATE for %p\n", ioc_list);
6979 mutex_enter(&ibdm.ibdm_ibnex_mutex);
6980 if (ibdm.ibdm_ibnex_callback != NULL) {
6981 (*ibdm.ibdm_ibnex_callback)((void *)
6982 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
6983 }
6984 mutex_exit(&ibdm.ibdm_ibnex_mutex);
6985 }
6986 }
6987
6988
6989 static void
6990 ibdm_fill_srv_attr_mod(ib_mad_hdr_t *hdr, ibdm_timeout_cb_args_t *cb_args)
6991 {
6992 uint32_t attr_mod;
6993
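/*
 * Pack the service-entry request into the attribute modifier: the
 * 1-based IOC slot number goes in bits 31:16, the last requested
 * service entry in bits 15:8, and the first in bits 7:0.
 */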
6994 attr_mod = (cb_args->cb_ioc_num + 1) << 16;
6995 attr_mod |= cb_args->cb_srvents_start;
6996 attr_mod |= (cb_args->cb_srvents_end) << 8;
6997 hdr->AttributeModifier = h2b32(attr_mod);
6998 }
6999
7000 static void
7001 ibdm_bump_transactionID(ibdm_dp_gidinfo_t *gid_info)
7002 {
7003 ASSERT(MUTEX_HELD(&gid_info->gl_mutex));
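/*
 * Each GID owns a private transaction ID range; wrap back to
 * gl_min_transactionID once gl_max_transactionID is reached.
 */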
7004 gid_info->gl_transactionID++;
7005 if (gid_info->gl_transactionID == gid_info->gl_max_transactionID) {
7006 IBTF_DPRINTF_L4(ibdm_string,
7007 "\tbump_transactionID(%p), wrapup", gid_info);
7008 gid_info->gl_transactionID = gid_info->gl_min_transactionID;
7009 }
7010 }
7011
7012 /*
7013 * gl_prev_iou is set for *non-reprobe* sweep requests that detected
7014 * a change in the IOU info ChangeID. The service entries may also
7015 * have changed. Check whether the service entries in each IOC differ
7016 * from the previous IOU and, if so, notify IB nexus.
7017 */
7018 static ibdm_ioc_info_t *
7019 ibdm_handle_prev_iou()
7020 {
7021 ibdm_dp_gidinfo_t *gid_info;
7022 ibdm_ioc_info_t *ioc_list_head = NULL, *ioc_list;
7023 ibdm_ioc_info_t *prev_ioc, *ioc;
7024 int ii, jj, niocs, prev_niocs;
7025
7026 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
7027
7028 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou enter");
7029 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
7030 gid_info = gid_info->gl_next) {
7031 if (gid_info->gl_prev_iou == NULL)
7032 continue;
7033
7034 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou gid %p",
7035 gid_info);
7036 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
7037 prev_niocs =
7038 gid_info->gl_prev_iou->iou_info.iou_num_ctrl_slots;
7039 for (ii = 0; ii < niocs; ii++) {
7040 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
7041
7042 /* Find matching IOC */
7043 for (jj = 0; jj < prev_niocs; jj++) {
7044 prev_ioc = (ibdm_ioc_info_t *)
7045 &gid_info->gl_prev_iou->iou_ioc_info[jj];
7046 if (prev_ioc->ioc_profile.ioc_guid ==
7047 ioc->ioc_profile.ioc_guid)
7048 break;
7049 }
7050 if (jj == prev_niocs)
7051 prev_ioc = NULL;
7052 if (ioc == NULL || prev_ioc == NULL)
7053 continue;
7054 if ((ioc->ioc_profile.ioc_service_entries !=
7055 prev_ioc->ioc_profile.ioc_service_entries) ||
7056 ibdm_serv_cmp(&ioc->ioc_serv[0],
7057 &prev_ioc->ioc_serv[0],
7058 ioc->ioc_profile.ioc_service_entries) != 0) {
7059 IBTF_DPRINTF_L4(ibdm_string,
7060 "/thandle_prev_iou modified IOC: "
7061 "current ioc %p, old ioc %p",
7062 ioc, prev_ioc);
7063 mutex_enter(&gid_info->gl_mutex);
7064 ioc_list = ibdm_dup_ioc_info(ioc, gid_info);
7065 mutex_exit(&gid_info->gl_mutex);
7066 ioc_list->ioc_info_updated.ib_prop_updated
7067 = 0;
7068 ioc_list->ioc_info_updated.ib_srv_prop_updated
7069 = 1;
7070
7071 if (ioc_list_head == NULL)
7072 ioc_list_head = ioc_list;
7073 else {
7074 ioc_list->ioc_next = ioc_list_head;
7075 ioc_list_head = ioc_list;
7076 }
7077 }
7078 }
7079
7080 mutex_enter(&gid_info->gl_mutex);
7081 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_prev_iou);
7082 mutex_exit(&gid_info->gl_mutex);
7083 }
7084 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou ret %p",
7085 ioc_list_head);
7086 return (ioc_list_head);
7087 }
7088
7089 /*
7090 * Compares two service entries lists, returns 0 if same, returns 1
7091 * if no match.
7092 */
7093 static int
7094 ibdm_serv_cmp(ibdm_srvents_info_t *serv1, ibdm_srvents_info_t *serv2,
7095 int nserv)
7096 {
7097 int ii;
7098
7099 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: enter");
7100 for (ii = 0; ii < nserv; ii++, serv1++, serv2++) {
7101 if (serv1->se_attr.srv_id != serv2->se_attr.srv_id ||
7102 bcmp(serv1->se_attr.srv_name,
7103 serv2->se_attr.srv_name,
7104 IB_DM_MAX_SVC_NAME_LEN) != 0) {
7105 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 1");
7106 return (1);
7107 }
7108 }
7109 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 0");
7110 return (0);
7111 }
7112
7113 /* For debugging purpose only */
7114 #ifdef DEBUG
7115 void
7116 ibdm_dump_mad_hdr(ib_mad_hdr_t *mad_hdr)
7117 {
7118 IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info");
7119 IBTF_DPRINTF_L4("ibdm", "\t\t ---------------");
7120
7121 IBTF_DPRINTF_L4("ibdm", "\tBase version : 0x%x"
7122 "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass);
7123 IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x"
7124 "\tR Method : 0x%x",
7125 mad_hdr->ClassVersion, mad_hdr->R_Method);
7126 IBTF_DPRINTF_L4("ibdm", "\tMAD Status : 0x%x"
7127 "\tTransaction ID : 0x%llx",
7128 b2h16(mad_hdr->Status), b2h64(mad_hdr->TransactionID));
7129 IBTF_DPRINTF_L4("ibdm", "\t Attribute ID : 0x%x"
7130 "\tAttribute Modified : 0x%lx",
7131 b2h16(mad_hdr->AttributeID), b2h32(mad_hdr->AttributeModifier));
7132 }
7133
7134
7135 void
7136 ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag)
7137 {
7138 ib_mad_hdr_t *mad_hdr;
7139
7140 IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info");
7141 IBTF_DPRINTF_L4("ibdm", "\t\t ------------------");
7142
7143 IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x"
7144 " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid,
7145 ibmf_msg->im_local_addr.ia_remote_lid,
7146 ibmf_msg->im_local_addr.ia_remote_qno);
7147 IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x"
7148 " SL : 0x%x", ibmf_msg->im_local_addr.ia_p_key,
7149 ibmf_msg->im_local_addr.ia_q_key,
7150 ibmf_msg->im_local_addr.ia_service_level);
7151
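/* flag != 0: dump the outgoing MAD header, else the received one */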
7152 if (flag)
7153 mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg);
7154 else
7155 mad_hdr = IBDM_IN_IBMFMSG_MADHDR(ibmf_msg);
7156
7157 ibdm_dump_mad_hdr(mad_hdr);
7158 }
7159
7160
7161 void
7162 ibdm_dump_path_info(sa_path_record_t *path)
7163 {
7164 IBTF_DPRINTF_L4("ibdm", "\t\t Path information");
7165 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------");
7166
7167 IBTF_DPRINTF_L4("ibdm", "\t DGID hi : %llx\tDGID lo : %llx",
7168 path->DGID.gid_prefix, path->DGID.gid_guid);
7169 IBTF_DPRINTF_L4("ibdm", "\t SGID hi : %llx\tSGID lo : %llx",
7170 path->SGID.gid_prefix, path->SGID.gid_guid);
7171 IBTF_DPRINTF_L4("ibdm", "\t SLID : %x\t\tDlID : %x",
7172 path->SLID, path->DLID);
7173 IBTF_DPRINTF_L4("ibdm", "\t P Key : %x\t\tSL : %x",
7174 path->P_Key, path->SL);
7175 }
7176
7177
7178 void
7179 ibdm_dump_classportinfo(ib_mad_classportinfo_t *classportinfo)
7180 {
7181 IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO");
7182 IBTF_DPRINTF_L4("ibdm", "\t\t --------------");
7183
7184 IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x",
7185 ((b2h32(classportinfo->RespTimeValue)) & 0x1F));
7186
7187 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi : 0x%llx",
7188 b2h64(classportinfo->RedirectGID_hi));
7189 IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo : 0x%llx",
7190 b2h64(classportinfo->RedirectGID_lo));
7191 IBTF_DPRINTF_L4("ibdm", "\t Redirected TC : 0x%x",
7192 classportinfo->RedirectTC);
7193 IBTF_DPRINTF_L4("ibdm", "\t Redirected SL : 0x%x",
7194 classportinfo->RedirectSL);
7195 IBTF_DPRINTF_L4("ibdm", "\t Redirected FL : 0x%x",
7196 classportinfo->RedirectFL);
7197 IBTF_DPRINTF_L4("ibdm", "\t Redirected LID : 0x%x",
7198 b2h16(classportinfo->RedirectLID));
7199 IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY : 0x%x",
7200 b2h16(classportinfo->RedirectP_Key));
7201 IBTF_DPRINTF_L4("ibdm", "\t Redirected QP : 0x%x",
7202 classportinfo->RedirectQP);
7203 IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY : 0x%x",
7204 b2h32(classportinfo->RedirectQ_Key));
7205 IBTF_DPRINTF_L4("ibdm", "\t Trap GID hi : 0x%llx",
7206 b2h64(classportinfo->TrapGID_hi));
7207 IBTF_DPRINTF_L4("ibdm", "\t Trap GID lo : 0x%llx",
7208 b2h64(classportinfo->TrapGID_lo));
7209 IBTF_DPRINTF_L4("ibdm", "\t Trap TC : 0x%x",
7210 classportinfo->TrapTC);
7211 IBTF_DPRINTF_L4("ibdm", "\t Trap SL : 0x%x",
7212 classportinfo->TrapSL);
7213 IBTF_DPRINTF_L4("ibdm", "\t Trap FL : 0x%x",
7214 classportinfo->TrapFL);
7215 IBTF_DPRINTF_L4("ibdm", "\t Trap LID : 0x%x",
7216 b2h16(classportinfo->TrapLID));
7217 IBTF_DPRINTF_L4("ibdm", "\t Trap P_Key : 0x%x",
7218 b2h16(classportinfo->TrapP_Key));
7219 IBTF_DPRINTF_L4("ibdm", "\t Trap HL : 0x%x",
7220 classportinfo->TrapHL);
7221 IBTF_DPRINTF_L4("ibdm", "\t Trap QP : 0x%x",
7222 classportinfo->TrapQP);
7223 IBTF_DPRINTF_L4("ibdm", "\t Trap Q_Key : 0x%x",
7224 b2h32(classportinfo->TrapQ_Key));
7225 }
7226
7227
7228 void
7229 ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info)
7230 {
7231 IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo");
7232 IBTF_DPRINTF_L4("ibdm", "\t\t ------------");
7233
7234 IBTF_DPRINTF_L4("ibdm", "\tChange ID : 0x%x",
7235 b2h16(iou_info->iou_changeid));
7236 IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots : %d",
7237 iou_info->iou_num_ctrl_slots);
7238 IBTF_DPRINTF_L4("ibdm", "\tIOU flag : 0x%x",
7239 iou_info->iou_flag);
7240 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 0 : 0x%x",
7241 iou_info->iou_ctrl_list[0]);
7242 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 1 : 0x%x",
7243 iou_info->iou_ctrl_list[1]);
7244 IBTF_DPRINTF_L4("ibdm", "\tContrl list byte 2 : 0x%x",
7245 iou_info->iou_ctrl_list[2]);
7246 }
7247
7248
7249 void
7250 ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc)
7251 {
7252 IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile");
7253 IBTF_DPRINTF_L4("ibdm", "\t\t ----------------------");
7254
7255 IBTF_DPRINTF_L4("ibdm", "\tIOC Guid : %llx", ioc->ioc_guid);
7256 IBTF_DPRINTF_L4("ibdm", "\tVendorID : 0x%x", ioc->ioc_vendorid);
7257 IBTF_DPRINTF_L4("ibdm", "\tDevice Id : 0x%x", ioc->ioc_deviceid);
7258 IBTF_DPRINTF_L4("ibdm", "\tDevice Ver : 0x%x", ioc->ioc_device_ver);
7259 IBTF_DPRINTF_L4("ibdm", "\tSubsys ID : 0x%x", ioc->ioc_subsys_id);
7260 IBTF_DPRINTF_L4("ibdm", "\tIO class : 0x%x", ioc->ioc_io_class);
7261 IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass);
7262 IBTF_DPRINTF_L4("ibdm", "\tProtocol : 0x%x", ioc->ioc_protocol);
7263 IBTF_DPRINTF_L4("ibdm", "\tProtocolV : 0x%x", ioc->ioc_protocol_ver);
7264 IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth : %d", ioc->ioc_send_msg_qdepth);
7265 IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d",
7266 ioc->ioc_rdma_read_qdepth);
7267 IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz : %d", ioc->ioc_send_msg_sz);
7268 IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz);
7269 IBTF_DPRINTF_L4("ibdm", "\topcal mask : 0x%x",
7270 ioc->ioc_ctrl_opcap_mask);
7271 IBTF_DPRINTF_L4("ibdm", "\tsrventries : %x", ioc->ioc_service_entries);
7272 }
7273
7274
7275 void
7276 ibdm_dump_service_entries(ib_dm_srv_t *srv_ents)
7277 {
7278 IBTF_DPRINTF_L4("ibdm",
7279 "\thandle_srventry_mad: service id : %llx", srv_ents->srv_id);
7280
7281 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad: "
7282 "Service Name : %s", srv_ents->srv_name);
7283 }
7284
7285 int ibdm_allow_sweep_fabric_timestamp = 1;
7286
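/*
 * Called with flag == 0 at the start of a fabric sweep to record the
 * start time, and with flag != 0 at the end to print the elapsed time.
 */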
7287 void
7288 ibdm_dump_sweep_fabric_timestamp(int flag)
7289 {
7290 static hrtime_t x;
7291 if (flag) {
7292 if (ibdm_allow_sweep_fabric_timestamp) {
7293 IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete "
7294 "sweep %lld ms", ((gethrtime() - x)/ 1000000));
7295 }
7296 x = 0;
7297 } else
7298 x = gethrtime();
7299 }
7300 #endif
7301