xref: /illumos-gate/usr/src/uts/common/io/ib/mgt/ibdma/ibdma.c (revision b1d7ec75953cd517f5b7c3d9cb427ff8ec5d7d07)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * InfiniBand Device Management Agent for IB storage.
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/priv.h>
#include <sys/sysmacros.h>

#include <sys/ib/ibtl/ibti.h>		/* IB public interfaces */

#include <sys/ib/mgt/ibdma/ibdma.h>
#include <sys/ib/mgt/ibdma/ibdma_impl.h>

/*
 * NOTE: The IB Device Management Agent function, like other IB
 * managers and agents, is best implemented as a kernel misc.
 * module.
 * Eventually we could modify IBT_DM_AGENT so that we don't need to
 * open each HCA to receive asynchronous events.
 */

#define	IBDMA_NAME_VERSION	"IB Device Management Agent"

extern struct mod_ops mod_miscops;

static void ibdma_ibt_async_handler(void *clnt, ibt_hca_hdl_t hdl,
	ibt_async_code_t code, ibt_async_event_t *event);

static void ibdma_mad_recv_cb(ibmf_handle_t ibmf_hdl,
	ibmf_msg_t *msgp, void *args);
static void ibdma_create_resp_mad(ibmf_msg_t *msgp);

/*
 * Misc. kernel module for now.
 */
static struct modlmisc modlmisc = {
	&mod_miscops,
	IBDMA_NAME_VERSION
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};

static ibt_clnt_modinfo_t ibdma_ibt_modinfo = {
	IBTI_V_CURR,
	IBT_DM_AGENT,
	ibdma_ibt_async_handler,
	NULL,
	"ibdma"
};

/*
 * Module global state allocated at init().
 */
static ibdma_mod_state_t	*ibdma = NULL;

/*
 * Init/Fini handlers and IBTL HCA management prototypes.
 */
static int ibdma_init();
static int ibdma_fini();
static int ibdma_ibt_init();
static void ibdma_ibt_fini();
static ibdma_hca_t *ibdma_hca_init(ib_guid_t guid);
static void ibdma_hca_fini(ibdma_hca_t *hca);
static ibdma_hca_t *ibdma_find_hca(ib_guid_t guid);

/*
 * DevMgmt Agent MAD attribute handler prototypes.
 */
static void ibdma_get_class_portinfo(ibmf_msg_t *msg);
static void ibdma_get_io_unitinfo(ibdma_hca_t *hca, ibmf_msg_t *msg);
static void ibdma_get_ioc_profile(ibdma_hca_t *hca, ibmf_msg_t *msg);
static void ibdma_get_ioc_services(ibdma_hca_t *hca, ibmf_msg_t *msg);

/*
 * _init()
 */
int
_init(void)
{
	int status;

	ASSERT(ibdma == NULL);

	ibdma = kmem_zalloc(sizeof (*ibdma), KM_SLEEP);
	ASSERT(ibdma != NULL);

	status = ibdma_init();
	if (status != DDI_SUCCESS) {
		kmem_free(ibdma, sizeof (*ibdma));
		ibdma = NULL;
		return (status);
	}

	status = mod_install(&modlinkage);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "_init, mod_install error (%d)", status);
		(void) ibdma_fini();
		kmem_free(ibdma, sizeof (*ibdma));
		ibdma = NULL;
	}
	return (status);
}

/*
 * _info()
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * _fini()
 */
int
_fini(void)
{
	int		status;
	int		slot;
	ibdma_hca_t	*hca;

	status = mod_remove(&modlinkage);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "_fini, mod_remove error (%d)", status);
		return (status);
	}

	/*
	 * Sanity check to see if anyone is not cleaning
	 * up appropriately.
	 */
	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = list_head(&ibdma->ms_hca_list);
	while (hca != NULL) {
		for (slot = 0; slot < IBDMA_MAX_IOC; slot++) {
			if (hca->ih_ioc[slot].ii_inuse) {
				cmn_err(CE_NOTE, "_fini, IOC %d still attached"
				    " for (0x%0llx)", slot+1,
				    (u_longlong_t)hca->ih_iou_guid);
			}
		}
		hca = list_next(&ibdma->ms_hca_list, hca);
	}
	mutex_exit(&ibdma->ms_hca_list_lock);

	(void) ibdma_fini();
	kmem_free(ibdma, sizeof (*ibdma));
	return (status);
}

/*
 * ibdma_init()
 *
 * Initialize the I/O Unit structure, generate the initial HCA list, and
 * register its ports with the IBMF.
 */
static int
ibdma_init()
{
	int		status;

	/*
	 * Global lock and I/O Unit initialization.
	 */
	mutex_init(&ibdma->ms_hca_list_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Discover IB hardware and setup for device management agent
	 * support.
	 */
	status = ibdma_ibt_init();
	if (status != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "ibdma_init, ibt_attach failed (%d)",
		    status);
		mutex_destroy(&ibdma->ms_hca_list_lock);
		return (status);
	}

	return (status);
}

/*
 * ibdma_fini()
 *
 * Release resources when the agent is no longer in use.
 */
static int
ibdma_fini()
{
	ibdma_ibt_fini();
	mutex_destroy(&ibdma->ms_hca_list_lock);
	return (DDI_SUCCESS);
}

/*
 * ibdma_ibt_async_handler()
 */
/* ARGSUSED */
static void
ibdma_ibt_async_handler(void *clnt, ibt_hca_hdl_t hdl,
	ibt_async_code_t code, ibt_async_event_t *event)
{
	ibdma_hca_t	*hca;

	switch (code) {

	case IBT_EVENT_PORT_UP:
	case IBT_ERROR_PORT_DOWN:
	case IBT_PORT_CHANGE_EVENT:
	case IBT_CLNT_REREG_EVENT:
		break;

	case IBT_HCA_ATTACH_EVENT:
		mutex_enter(&ibdma->ms_hca_list_lock);
		hca = ibdma_hca_init(event->ev_hca_guid);
		if (hca != NULL) {
			list_insert_tail(&ibdma->ms_hca_list, hca);
			cmn_err(CE_NOTE, "hca ibt hdl (%p)",
			    (void *)hca->ih_ibt_hdl);
			ibdma->ms_num_hcas++;
		}
		mutex_exit(&ibdma->ms_hca_list_lock);
		break;

	case IBT_HCA_DETACH_EVENT:
		mutex_enter(&ibdma->ms_hca_list_lock);
		hca = ibdma_find_hca(event->ev_hca_guid);
		if (hca != NULL) {
			list_remove(&ibdma->ms_hca_list, hca);
			cmn_err(CE_NOTE, "removing hca (%p) (0x%llx)",
			    (void *)hca, hca ?
			    (u_longlong_t)hca->ih_iou_guid : 0x0ll);
			ibdma_hca_fini(hca);
		}
		mutex_exit(&ibdma->ms_hca_list_lock);
		break;

	default:
#ifdef	DEBUG
		cmn_err(CE_NOTE, "ibt_async_handler, unhandled event(%d)",
		    code);
#endif
		break;
	}

}

/*
 * ibdma_ibt_init()
 */
static int
ibdma_ibt_init()
{
	int		status;
	int		hca_cnt;
	int		hca_ndx;
	ib_guid_t	*guid;
	ibdma_hca_t	*hca;

	/*
	 * Attach to IBTF and get HCA list.
	 */
	status = ibt_attach(&ibdma_ibt_modinfo, NULL,
	    ibdma, &ibdma->ms_ibt_hdl);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "ibt_init, ibt_attach failed (%d)",
		    status);
		return (status);
	}

	list_create(&ibdma->ms_hca_list, sizeof (ibdma_hca_t),
	    offsetof(ibdma_hca_t, ih_node));

	hca_cnt = ibt_get_hca_list(&guid);
	if (hca_cnt < 1) {
#ifdef	DEBUG_IBDMA
		cmn_err(CE_NOTE, "ibt_init, no HCA(s) found");
#endif
		/* not an error if no HCAs, but nothing more to do here */
		return (DDI_SUCCESS);
	}

	mutex_enter(&ibdma->ms_hca_list_lock);

	for (hca_ndx = 0; hca_ndx < hca_cnt; hca_ndx++) {
#ifdef	DEBUG_IBDMA
		cmn_err(CE_NOTE, "adding hca GUID(0x%llx)",
		    (u_longlong_t)guid[hca_ndx]);
#endif

		hca = ibdma_hca_init(guid[hca_ndx]);
		if (hca == NULL) {
			cmn_err(CE_NOTE, "ibt_init, hca_init GUID(0x%llx)"
			    " failed", (u_longlong_t)guid[hca_ndx]);
			continue;
		}
		list_insert_tail(&ibdma->ms_hca_list, hca);
		ibdma->ms_num_hcas++;
	}

	mutex_exit(&ibdma->ms_hca_list_lock);

	ibt_free_hca_list(guid, hca_cnt);
#ifdef	DEBUG_IBDMA
	cmn_err(CE_NOTE, "Added %d HCA(s)",
	    ibdma->ms_num_hcas);
#endif
	return (DDI_SUCCESS);
}

/*
 * ibdma_ibt_fini()
 */
static void
ibdma_ibt_fini()
{
	ibdma_hca_t		*hca;
	ibdma_hca_t		*next;

	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = list_head(&ibdma->ms_hca_list);
	while (hca != NULL) {
		next = list_next(&ibdma->ms_hca_list, hca);
		list_remove(&ibdma->ms_hca_list, hca);
#ifdef	DEBUG_IBDMA
		cmn_err(CE_NOTE, "removing hca (%p) (0x%llx)",
		    (void *)hca, hca ?
		    (u_longlong_t)hca->ih_iou_guid : 0x0ll);
		cmn_err(CE_NOTE, "hca ibt hdl (%p)",
		    (void *)hca->ih_ibt_hdl);
#endif
		ibdma_hca_fini(hca);
		hca = next;
	}
	list_destroy(&ibdma->ms_hca_list);

	(void) ibt_detach(ibdma->ms_ibt_hdl);
	ibdma->ms_ibt_hdl   = NULL;
	ibdma->ms_num_hcas  = 0;
	mutex_exit(&ibdma->ms_hca_list_lock);
}

/*
 * ibdma_find_hca()
 */
static ibdma_hca_t *
ibdma_find_hca(ib_guid_t guid)
{
	ibdma_hca_t	*hca;

	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));

	hca = list_head(&ibdma->ms_hca_list);
	while (hca != NULL) {
		if (hca->ih_iou_guid == guid) {
			break;
		}
		hca = list_next(&ibdma->ms_hca_list, hca);
	}
	return (hca);
}

/*
 * ibdma_hca_init()
 */
static ibdma_hca_t *
ibdma_hca_init(ib_guid_t guid)
{
	ibt_status_t		status;
	ibdma_hca_t		*hca;
	ibdma_port_t		*port;
	ibt_hca_attr_t		hca_attr;
	int			ndx;

	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));

	status = ibt_query_hca_byguid(guid, &hca_attr);
	if (status != IBT_SUCCESS) {
		cmn_err(CE_NOTE, "hca_init HCA query error (%d)",
		    status);
		return (NULL);
	}

	if (ibdma_find_hca(guid) != NULL) {
#ifdef	DEBUG_IBDMA
		cmn_err(CE_NOTE, "hca_init HCA already exists");
#endif
		return (NULL);
	}

	hca = kmem_zalloc(sizeof (ibdma_hca_t) +
	    (hca_attr.hca_nports-1)*sizeof (ibdma_port_t), KM_SLEEP);
	ASSERT(hca != NULL);

	hca->ih_nports   = hca_attr.hca_nports;

	rw_init(&hca->ih_iou_rwlock, NULL, RW_DRIVER, NULL);
	rw_enter(&hca->ih_iou_rwlock, RW_WRITER);
	hca->ih_iou_guid		= guid;
	hca->ih_iou.iou_changeid	= h2b16(1);
	hca->ih_iou.iou_num_ctrl_slots	= IBDMA_MAX_IOC;
	hca->ih_iou.iou_flag		= IB_DM_IOU_OPTIONROM_ABSENT;
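
	/*
	 * The IOUnitInfo fields above are kept in wire (big-endian) byte
	 * order; ibdma_get_io_unitinfo() copies this structure directly
	 * into the MAD response payload, so no further byte swapping is
	 * needed on the reply path.
	 */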

	list_create(&hca->ih_hdl_list, sizeof (ibdma_hdl_impl_t),
	    offsetof(ibdma_hdl_impl_t, ih_node));
	rw_exit(&hca->ih_iou_rwlock);

	/*
	 * It would be better not to open the HCA, but IBTL only allows
	 * certain managers to receive async callbacks without opening it.
	 */
	status = ibt_open_hca(ibdma->ms_ibt_hdl, guid, &hca->ih_ibt_hdl);
	if (status != IBT_SUCCESS) {
		cmn_err(CE_NOTE, "hca_init() IBT open failed (%d)",
		    status);

		list_destroy(&hca->ih_hdl_list);
		rw_destroy(&hca->ih_iou_rwlock);
		kmem_free(hca, sizeof (ibdma_hca_t) +
		    (hca_attr.hca_nports-1)*sizeof (ibdma_port_t));
		return (NULL);
	}

	/*
	 * Register with the IB Management Framework and setup MAD call-back.
	 */
	for (ndx = 0; ndx < hca->ih_nports; ndx++) {
		port = &hca->ih_port[ndx];
		port->ip_hcap = hca;
		port->ip_ibmf_reg.ir_ci_guid	= hca->ih_iou_guid;
		port->ip_ibmf_reg.ir_port_num	= ndx + 1;
		port->ip_ibmf_reg.ir_client_class = DEV_MGT_AGENT;

		status = ibmf_register(&port->ip_ibmf_reg, IBMF_VERSION,
		    0, NULL, NULL, &port->ip_ibmf_hdl, &port->ip_ibmf_caps);
		if (status != IBMF_SUCCESS) {
			cmn_err(CE_NOTE, "hca_init, IBMF register failed (%d)",
			    status);
			port->ip_ibmf_hdl = NULL;
			ibdma_hca_fini(hca);
			return (NULL);
		}

		status = ibmf_setup_async_cb(port->ip_ibmf_hdl,
		    IBMF_QP_HANDLE_DEFAULT, ibdma_mad_recv_cb, port, 0);
		if (status != IBMF_SUCCESS) {
			cmn_err(CE_NOTE, "hca_init, IBMF cb setup failed (%d)",
			    status);
			ibdma_hca_fini(hca);
			return (NULL);
		}

		status = ibt_modify_port_byguid(hca->ih_iou_guid,
		    ndx+1, IBT_PORT_SET_DEVMGT, 0);
		if (status != IBT_SUCCESS) {
			cmn_err(CE_NOTE, "hca_init, IBT modify port caps"
			    " error (%d)", status);
			ibdma_hca_fini(hca);
			return (NULL);
		}
	}
	return (hca);
}

/*
 * ibdma_hca_fini()
 */
static void
ibdma_hca_fini(ibdma_hca_t *hca)
{
	int			status;
	int			ndx;
	ibdma_port_t		*port;
	ibdma_hdl_impl_t	*hdl;
	ibdma_hdl_impl_t	*hdl_next;

	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));
	ASSERT(hca != NULL);

	rw_enter(&hca->ih_iou_rwlock, RW_WRITER);

	/*
	 * All handles should have been de-registered, but release
	 * any that are outstanding.
	 */
	hdl = list_head(&hca->ih_hdl_list);
	while (hdl != NULL) {
		hdl_next = list_next(&hca->ih_hdl_list, hdl);
		list_remove(&hca->ih_hdl_list, hdl);
		cmn_err(CE_NOTE, "hca_fini, unexpected ibdma user handle"
		    " exists");
		kmem_free(hdl, sizeof (*hdl));
		hdl = hdl_next;
	}
	list_destroy(&hca->ih_hdl_list);

	/*
	 * Un-register with the IBMF.
	 */
	for (ndx = 0; ndx < hca->ih_nports; ndx++) {
		port = &hca->ih_port[ndx];
		port->ip_hcap = NULL;

		status = ibt_modify_port_byguid(hca->ih_iou_guid,
		    ndx+1, IBT_PORT_RESET_DEVMGT, 0);
		if (status != IBT_SUCCESS)
			cmn_err(CE_NOTE, "hca_fini, IBT modify port caps"
			    " error (%d)", status);

		if (port->ip_ibmf_hdl == NULL)
			continue;

		status = ibmf_tear_down_async_cb(port->ip_ibmf_hdl,
		    IBMF_QP_HANDLE_DEFAULT, 0);
		if (status != IBMF_SUCCESS)
			cmn_err(CE_NOTE, "hca_fini, IBMF tear down cb"
			    " error (%d)", status);

		status = ibmf_unregister(&port->ip_ibmf_hdl, 0);
		if (status != IBMF_SUCCESS)
			cmn_err(CE_NOTE, "hca_fini, IBMF un-register"
			    " error (%d)", status);
		port->ip_ibmf_hdl = NULL;
	}

	status = ibt_close_hca(hca->ih_ibt_hdl);
	if (status != IBT_SUCCESS)
		cmn_err(CE_NOTE, "hca_fini close error (%d)", status);

	rw_exit(&hca->ih_iou_rwlock);
	rw_destroy(&hca->ih_iou_rwlock);
	kmem_free(hca, sizeof (ibdma_hca_t) +
	    (hca->ih_nports-1) * sizeof (ibdma_port_t));
}

/* DM IBMF MAD handlers */
/*
 * ibdma_create_resp_mad()
 */
static void
ibdma_create_resp_mad(ibmf_msg_t *msgp)
{
	/*
	 * Allocate the send buffer and fix up the header for the response.
	 */
	msgp->im_msgbufs_send.im_bufs_mad_hdr =
	    kmem_zalloc(IBDMA_MAD_SIZE, KM_SLEEP);

	msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *)
	    msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t);
	msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDMA_DM_MAD_HDR_SIZE;
	msgp->im_msgbufs_send.im_bufs_cl_data =
	    ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr +
	    IBDMA_DM_MAD_HDR_SIZE);
	msgp->im_msgbufs_send.im_bufs_cl_data_len =
	    IBDMA_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDMA_DM_MAD_HDR_SIZE;
	(void) memcpy(msgp->im_msgbufs_send.im_bufs_mad_hdr,
	    msgp->im_msgbufs_recv.im_bufs_mad_hdr, IBDMA_MAD_SIZE);
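
	/*
	 * The response starts life as a byte-for-byte copy of the request
	 * (MAD header, DM class header, and data); the receive path then
	 * overwrites the method, status, and attribute payload as needed.
	 */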

	/*
	 * We may want to support a GRH since this is a GMP; not
	 * required for current SRP device manager platforms.
	 */
#if 0
	if (msgp->im_msg_flags & IBMF_MSG_FLAGS_GLOBAL_ADDRESS) {
		ib_gid_t	temp = msgp->im_global_addr.ig_recver_gid;

		msgp->im_global_addr.ig_recver_gid =
		    msgp->im_global_addr.ig_sender_gid;
		msgp->im_global_addr.ig_sender_gid = temp;
	}
#endif
}

/*
 * ibdma_mad_send_cb()
 */
/* ARGSUSED */
static void
ibdma_mad_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msgp, void *arg)
{
	/*
	 * Just free the buffers and release the message.
	 */
	if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL) {
		kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr,
		    IBDMA_MAD_SIZE);
		msgp->im_msgbufs_send.im_bufs_mad_hdr = NULL;
	}
	if (ibmf_free_msg(ibmf_hdl, &msgp) != IBMF_SUCCESS) {
		cmn_err(CE_NOTE, "mad_send_cb, IBMF message free error");
	}
}

/*
 * ibdma_mad_recv_cb()
 */
static void
ibdma_mad_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msgp, void *args)
{
	int		status;
	ib_mad_hdr_t	*in_mad;
	ib_mad_hdr_t	*out_mad;
	ibdma_port_t	*port = args;

	ASSERT(msgp != NULL);
	ASSERT(port != NULL);

	if (msgp->im_msg_status != IBMF_SUCCESS) {
		cmn_err(CE_NOTE, "mad_recv_cb, bad MAD receive status (%d)",
		    msgp->im_msg_status);
		goto drop;
	}

	in_mad = msgp->im_msgbufs_recv.im_bufs_mad_hdr;

	if (in_mad->MgmtClass != MAD_MGMT_CLASS_DEV_MGT) {
#ifdef	DEBUG_IBDMA
		cmn_err(CE_NOTE, "mad_recv_cb, MAD not of Dev Mgmt Class");
#endif
		goto drop;
	}

	ibdma_create_resp_mad(msgp);
	out_mad = msgp->im_msgbufs_send.im_bufs_mad_hdr;

	out_mad->R_Method = IB_DM_DEVMGT_METHOD_GET_RESP;
	out_mad->Status   = 0;

	if (in_mad->R_Method == MAD_METHOD_SET) {
#ifdef	DEBUG_IBDMA
		cmn_err(CE_NOTE, "mad_recv_cb, no attributes supported"
		    " for set");
#endif
		out_mad->Status = MAD_STATUS_UNSUPP_METHOD_ATTR;
		goto send_resp;
	}

	if (in_mad->R_Method != MAD_METHOD_GET) {
#ifdef	DEBUG_IBDMA
		cmn_err(CE_NOTE, "mad_recv_cb, unsupported method");
#endif
		out_mad->Status = MAD_STATUS_UNSUPP_METHOD;
		goto send_resp;
	}

	/*
	 * Process a GET method.
	 */
	switch (b2h16(in_mad->AttributeID)) {

	case IB_DM_ATTR_CLASSPORTINFO:
		ibdma_get_class_portinfo(msgp);
		break;

	case IB_DM_ATTR_IO_UNITINFO:
		ibdma_get_io_unitinfo(port->ip_hcap, msgp);
		break;

	case IB_DM_ATTR_IOC_CTRL_PROFILE:
		ibdma_get_ioc_profile(port->ip_hcap, msgp);
		break;

	case IB_DM_ATTR_SERVICE_ENTRIES:
		ibdma_get_ioc_services(port->ip_hcap, msgp);
		break;

	default:
		out_mad->Status = MAD_STATUS_UNSUPP_METHOD_ATTR;
		break;
	}

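	/*
	 * Hand the response to the IBMF; on success the send completion
	 * callback frees the copied MAD and releases the message, and on
	 * failure we invoke that callback directly for the same cleanup.
	 */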
send_resp:
	status = ibmf_msg_transport(ibmf_hdl, IBMF_QP_HANDLE_DEFAULT,
	    msgp, NULL, ibdma_mad_send_cb, NULL, 0);
	if (status != IBMF_SUCCESS) {
		cmn_err(CE_NOTE, "mad_recv_cb, send error (%d)", status);
		ibdma_mad_send_cb(ibmf_hdl, msgp, NULL);
	}
	return;

drop:
	status = ibmf_free_msg(ibmf_hdl, &msgp);
	if (status != IBMF_SUCCESS) {
		cmn_err(CE_NOTE, "mad_recv_cb, error dropping (%d)",
		    status);
	}
}

/*
 * ibdma_get_class_portinfo()
 */
static void
ibdma_get_class_portinfo(ibmf_msg_t *msg)
{
	ib_mad_classportinfo_t	*cpip;

	cpip = (ib_mad_classportinfo_t *)msg->im_msgbufs_send.im_bufs_cl_data;
	bzero(cpip, sizeof (*cpip));
	cpip->BaseVersion   = MAD_CLASS_BASE_VERS_1;
	cpip->ClassVersion  = IB_DM_CLASS_VERSION_1;
	cpip->RespTimeValue = h2b32(IBDMA_DM_RESP_TIME);
}

/*
 * ibdma_get_io_unitinfo()
 */
static void
ibdma_get_io_unitinfo(ibdma_hca_t *hca, ibmf_msg_t *msg)
{
	ib_dm_io_unitinfo_t	*uip;

	uip = (ib_dm_io_unitinfo_t *)msg->im_msgbufs_send.im_bufs_cl_data;
	rw_enter(&hca->ih_iou_rwlock, RW_READER);
	bcopy(&hca->ih_iou, uip, sizeof (ib_dm_io_unitinfo_t));
	rw_exit(&hca->ih_iou_rwlock);
}

/*
 * ibdma_get_ioc_profile()
 */
static void
ibdma_get_ioc_profile(ibdma_hca_t *hca, ibmf_msg_t *msg)
{
	ib_dm_ioc_ctrl_profile_t	*iocp;
	uint32_t			slot;

	ASSERT(msg != NULL);

	slot = b2h32(msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier);
	iocp = (ib_dm_ioc_ctrl_profile_t *)
	    msg->im_msgbufs_send.im_bufs_cl_data;
	if (slot == 0 || slot > IBDMA_MAX_IOC) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    MAD_STATUS_INVALID_FIELD;
		return;
	}

	slot--;
	rw_enter(&hca->ih_iou_rwlock, RW_READER);
	if (ibdma_get_ioc_state(hca, slot) == IBDMA_IOC_PRESENT) {
		bcopy(&hca->ih_ioc[slot].ii_profile, iocp,
		    sizeof (ib_dm_ioc_ctrl_profile_t));
	} else {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    IB_DM_DEVMGT_MAD_STAT_NORESP;
	}
	rw_exit(&hca->ih_iou_rwlock);
}

/*
 * ibdma_get_ioc_services()
 */
static void
ibdma_get_ioc_services(ibdma_hca_t *hca, ibmf_msg_t *msg)
{
	ib_dm_srv_t	*to_svcp;
	ib_dm_srv_t	*from_svcp;
	uint32_t	slot;
	uint8_t		hi;
	uint8_t		low;

	ASSERT(msg != NULL);

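	/*
	 * Per the DM class definition, the ServiceEntries attribute
	 * modifier encodes the 1-based IOC slot in the upper 16 bits and
	 * the last and first requested service entry indices in bits 15:8
	 * and 7:0 respectively.
	 */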
	slot = b2h32(msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier);
	hi   = (slot >> 8) & 0x00FF;
	low  = slot  & 0x00FF;
	slot = (slot >> 16) & 0x0FFFF;
	if (slot == 0 || slot > IBDMA_MAX_IOC) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    MAD_STATUS_INVALID_FIELD;
		return;
	}

	slot--;

	rw_enter(&hca->ih_iou_rwlock, RW_READER);
	if (ibdma_get_ioc_state(hca, slot) != IBDMA_IOC_PRESENT) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    IB_DM_DEVMGT_MAD_STAT_NORESP;
		rw_exit(&hca->ih_iou_rwlock);
		return;
	}

	/*
	 * A ServiceEntries response carries at most 4 service entries,
	 * so reject a range that is inverted or spans more than 4.
	 */
	if ((low > hi) || (hi - low > 3)) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    MAD_STATUS_INVALID_FIELD;
		rw_exit(&hca->ih_iou_rwlock);
		return;
	}

	if (hi >= hca->ih_ioc[slot].ii_profile.ioc_service_entries) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    MAD_STATUS_INVALID_FIELD;
		rw_exit(&hca->ih_iou_rwlock);
		return;
	}

	to_svcp = (ib_dm_srv_t *)msg->im_msgbufs_send.im_bufs_cl_data;
	from_svcp = hca->ih_ioc[slot].ii_srvcs + low;
	bcopy(from_svcp, to_svcp, sizeof (ib_dm_srv_t) * (hi - low + 1));
	rw_exit(&hca->ih_iou_rwlock);
}


/*
 * Client API internal helpers
 */

/*
 * ibdma_get_hdl_impl()
 */
ibdma_hdl_impl_t *
ibdma_get_hdl_impl(ibdma_hdl_t hdl)
{
	ibdma_hca_t		*hca;
	ibdma_hdl_impl_t	*hdl_tmp = hdl;
	ibdma_hdl_impl_t	*hdl_impl;

	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));

	if (hdl_tmp == NULL) {
		cmn_err(CE_NOTE, "get_hdl_impl, NULL handle");
		return (NULL);
	}

	hca = ibdma_find_hca(hdl_tmp->ih_iou_guid);
	if (hca == NULL) {
		cmn_err(CE_NOTE, "get_hdl_impl, invalid handle, bad IOU");
		return (NULL);
	}

	hdl_impl = list_head(&hca->ih_hdl_list);
	while (hdl_impl != NULL) {
		if (hdl_impl == hdl_tmp) {
			break;
		}
		hdl_impl = list_next(&hca->ih_hdl_list, hdl_impl);
	}
	return (hdl_impl);
}

/*
 * ibdma_set_ioc_state()
 *
 * slot should be 0 based (not DM 1 based slot).
 *
 * I/O Unit write lock should be held outside of this function.
 */
static void
ibdma_set_ioc_state(ibdma_hca_t *hca, int slot, ibdma_ioc_state_t state)
{
	uint8_t		cur;
	uint16_t	id;

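	/*
	 * Each byte of the controller list holds two 4-bit IOC states:
	 * the even slot in the high nibble and the odd slot in the low
	 * nibble. For example, setting slot 3 updates the low nibble of
	 * iou_ctrl_list[1]. Every update also bumps the big-endian change
	 * ID so that agents polling IOUnitInfo notice the change.
	 */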
	cur = hca->ih_iou.iou_ctrl_list[slot >> 1];
	if (slot & 1) {
		cur = (cur & 0xF0) | state;
	} else {
		cur = (cur & 0x0F) | (state << 4);
	}
	hca->ih_iou.iou_ctrl_list[slot >> 1] = cur;
	id = b2h16(hca->ih_iou.iou_changeid);
	id++;
	hca->ih_iou.iou_changeid = h2b16(id);
#ifdef	DEBUG_IBDMA
	cmn_err(CE_NOTE, "set_ioc_state, slot offset(%d), value(%d)",
	    slot, hca->ih_iou.iou_ctrl_list[slot >> 1]);
#endif
}

/*
 * ibdma_get_ioc_state()
 *
 * slot should be 0 based (not DM 1 based slot).
 *
 * I/O Unit read lock should be held outside of this function.
 */
static ibdma_ioc_state_t
ibdma_get_ioc_state(ibdma_hca_t *hca, int slot)
{
	uint8_t		cur;

	if (slot >= IBDMA_MAX_IOC)
		return (0xFF);

	cur = hca->ih_iou.iou_ctrl_list[slot >> 1];
	cur = slot & 1 ? cur & 0x0F : cur >> 4;
	return (cur);
}

/* CLIENT API Implementation */
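
/*
 * A typical client (for example, an SRP target driver) registers one IOC
 * profile per I/O Unit and unregisters it on teardown. A minimal sketch,
 * assuming the caller has filled in "profile" and "services":
 *
 *	ibdma_hdl_t hdl;
 *
 *	hdl = ibdma_ioc_register(iou_guid, &profile, services);
 *	if (hdl == NULL)
 *		return (DDI_FAILURE);
 *	...
 *	(void) ibdma_ioc_unregister(hdl);
 */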
/*
 * ibdma_ioc_register()
 *
 */
ibdma_hdl_t
ibdma_ioc_register(ib_guid_t iou_guid, ib_dm_ioc_ctrl_profile_t *profile,
	ib_dm_srv_t *services)
{
	int			free_slot = -1;
	int			svc_entries;
	int			slot;
	ibdma_hca_t		*hca;
	ibdma_hdl_impl_t	*hdl;

	if (profile == NULL || services == NULL) {
		cmn_err(CE_NOTE, "ioc_register, bad parameter");
		return (NULL);
	}

	svc_entries = profile->ioc_service_entries;
	if (svc_entries == 0) {
		cmn_err(CE_NOTE, "ioc_register, bad profile, no services");
		return (NULL);
	}

	/*
	 * Find the associated I/O Unit.
	 */
	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = ibdma_find_hca(iou_guid);
	if (hca == NULL) {
		mutex_exit(&ibdma->ms_hca_list_lock);
		cmn_err(CE_NOTE, "ioc_register, bad I/O Unit GUID (0x%llx)",
		    (u_longlong_t)iou_guid);
		return (NULL);
	}

	rw_enter(&hca->ih_iou_rwlock, RW_WRITER);
	for (slot = 0; slot < IBDMA_MAX_IOC; slot++) {
		if (hca->ih_ioc[slot].ii_inuse == 0) {
			if (free_slot == -1) {
				free_slot = slot;
			}
			continue;
		}

		if (profile->ioc_guid ==
		    hca->ih_ioc[slot].ii_profile.ioc_guid) {
			rw_exit(&hca->ih_iou_rwlock);
			mutex_exit(&ibdma->ms_hca_list_lock);
#ifdef	DEBUG_IBDMA
			cmn_err(CE_NOTE, "ioc_register, IOC previously"
			    " registered");
#endif
			return (NULL);
		}
	}

	if (free_slot < 0) {
		rw_exit(&hca->ih_iou_rwlock);
		mutex_exit(&ibdma->ms_hca_list_lock);
		cmn_err(CE_NOTE, "ioc_register, error - I/O Unit full");
		return (NULL);
	}
#ifdef	DEBUG_IBDMA
	cmn_err(CE_NOTE, "ibdma_ioc_register, assigned to 0 based slot (%d)",
	    free_slot);
#endif

	hca->ih_ioc[free_slot].ii_inuse = 1;
	hca->ih_ioc[free_slot].ii_slot  = free_slot;
	hca->ih_ioc[free_slot].ii_hcap  = hca;

	/*
	 * Allocate local copy of profile and services.
	 */
	hca->ih_ioc[free_slot].ii_srvcs =
	    kmem_zalloc(sizeof (ib_dm_srv_t) * svc_entries, KM_SLEEP);
	bcopy(profile, &hca->ih_ioc[free_slot].ii_profile,
	    sizeof (ib_dm_ioc_ctrl_profile_t));
	bcopy(services, hca->ih_ioc[free_slot].ii_srvcs,
	    sizeof (ib_dm_srv_t) * svc_entries);

	/*
	 * Update the profile copy with the I/O controller slot assigned.
	 * The slot occupies the lower 8 bits of the vendor ID/slot 32-bit
	 * field.
	 */
	profile->ioc_vendorid |= h2b32(free_slot);
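	/*
	 * Since the field is kept in wire order, the caller can recover
	 * its assigned 0-based slot as (b2h32(profile->ioc_vendorid) & 0xFF).
	 */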

	ibdma_set_ioc_state(hca, free_slot, IBDMA_IOC_PRESENT);

	hdl = kmem_alloc(sizeof (*hdl), KM_SLEEP);
	hdl->ih_iou_guid = hca->ih_iou_guid;
	hdl->ih_ioc_ndx = (uint8_t)free_slot;
	list_insert_tail(&hca->ih_hdl_list, hdl);

	rw_exit(&hca->ih_iou_rwlock);
	mutex_exit(&ibdma->ms_hca_list_lock);

	return ((ibdma_hdl_t)hdl);
}

/*
 * ibdma_ioc_unregister()
 *
 */
ibdma_status_t
ibdma_ioc_unregister(ibdma_hdl_t hdl)
{
	ibdma_ioc_t		*ioc;
	ibdma_hca_t		*hca;
	int			slot;
	ibdma_hdl_impl_t	*hdl_tmp = hdl;
	ibdma_hdl_impl_t	*hdl_impl;

	if (hdl == NULL) {
		cmn_err(CE_NOTE, "ioc_unregister, NULL handle");
		return (IBDMA_BAD_PARAM);
	}

	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = ibdma_find_hca(hdl_tmp->ih_iou_guid);
	if (hca == NULL) {
		cmn_err(CE_NOTE, "ioc_unregister, invalid handle, IOU"
		    " not found");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	hdl_impl = list_head(&hca->ih_hdl_list);
	while (hdl_impl != NULL) {
		if (hdl_impl == hdl_tmp) {
			break;
		}
		hdl_impl = list_next(&hca->ih_hdl_list, hdl_impl);
	}

	if (hdl_impl == NULL) {
		cmn_err(CE_NOTE, "ioc_unregister, invalid handle, not found");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	list_remove(&hca->ih_hdl_list, hdl_impl);

	if (hdl_impl->ih_ioc_ndx >= IBDMA_MAX_IOC) {
		cmn_err(CE_NOTE, "ioc_unregister, corrupted handle");
		kmem_free(hdl_impl, sizeof (*hdl_impl));
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}
	ioc = &hca->ih_ioc[hdl_impl->ih_ioc_ndx];
	kmem_free(hdl_impl, sizeof (*hdl_impl));

	if (ioc->ii_slot >= IBDMA_MAX_IOC) {
		cmn_err(CE_NOTE, "ioc_unregister, IOC corrupted, bad"
		    " slot in IOC");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	rw_enter(&ioc->ii_hcap->ih_iou_rwlock, RW_WRITER);
	if (ioc->ii_inuse == 0) {
		rw_exit(&ioc->ii_hcap->ih_iou_rwlock);
		mutex_exit(&ibdma->ms_hca_list_lock);
		cmn_err(CE_NOTE, "ioc_unregister, slot not in use (%d)",
		    ioc->ii_slot+1);
		return (IBDMA_BAD_PARAM);
	}

	ASSERT(ioc->ii_srvcs != NULL);

	slot = ioc->ii_slot;
	hca  = ioc->ii_hcap;
	kmem_free(ioc->ii_srvcs, sizeof (ib_dm_srv_t) *
	    ioc->ii_profile.ioc_service_entries);
	bzero(ioc, sizeof (ibdma_ioc_t));
	ibdma_set_ioc_state(hca, slot, IBDMA_IOC_NOT_INSTALLED);

	rw_exit(&hca->ih_iou_rwlock);
	mutex_exit(&ibdma->ms_hca_list_lock);

	return (IBDMA_SUCCESS);
}

/*
 * ibdma_ioc_update()
 *
 */
ibdma_status_t
ibdma_ioc_update(ibdma_hdl_t hdl, ib_dm_ioc_ctrl_profile_t *profile,
	ib_dm_srv_t *services)
{
	ibdma_ioc_t		*ioc;
	ibdma_hca_t		*hca;
	ibdma_hdl_impl_t	*hdl_tmp = hdl;
	ibdma_hdl_impl_t	*hdl_impl;

	if (hdl == NULL) {
		cmn_err(CE_NOTE, "ioc_update, NULL handle");
		return (IBDMA_BAD_PARAM);
	}

	if (profile == NULL || services == NULL) {
		cmn_err(CE_NOTE, "ioc_update, NULL parameter");
		return (IBDMA_BAD_PARAM);
	}

	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = ibdma_find_hca(hdl_tmp->ih_iou_guid);
	if (hca == NULL) {
		cmn_err(CE_NOTE, "ioc_update, invalid handle, IOU not found");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	hdl_impl = list_head(&hca->ih_hdl_list);
	while (hdl_impl != NULL) {
		if (hdl_impl == hdl_tmp) {
			break;
		}
		hdl_impl = list_next(&hca->ih_hdl_list, hdl_impl);
	}

	if (hdl_impl == NULL) {
		cmn_err(CE_NOTE, "ioc_update, invalid handle, not found");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	if (hdl_impl->ih_ioc_ndx >= IBDMA_MAX_IOC) {
		cmn_err(CE_NOTE, "ioc_update, corrupted handle");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}
	ioc = &hca->ih_ioc[hdl_impl->ih_ioc_ndx];

	if (ioc->ii_slot >= IBDMA_MAX_IOC || ioc->ii_hcap == NULL) {
		cmn_err(CE_NOTE, "ioc_update, bad handle (%p)",
		    (void *)hdl);
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	rw_enter(&ioc->ii_hcap->ih_iou_rwlock, RW_WRITER);
	if (ioc->ii_inuse == 0) {
		rw_exit(&ioc->ii_hcap->ih_iou_rwlock);
		mutex_exit(&ibdma->ms_hca_list_lock);
		cmn_err(CE_NOTE, "ioc_update, slot not in use (%d)",
		    ioc->ii_slot+1);
		return (IBDMA_BAD_PARAM);
	}

	ASSERT(ioc->ii_srvcs != NULL);

	kmem_free(ioc->ii_srvcs, ioc->ii_profile.ioc_service_entries *
	    sizeof (ib_dm_srv_t));
	ioc->ii_srvcs = kmem_zalloc(profile->ioc_service_entries *
	    sizeof (ib_dm_srv_t), KM_SLEEP);

	bcopy(profile, &ioc->ii_profile, sizeof (ib_dm_ioc_ctrl_profile_t));
	bcopy(services, ioc->ii_srvcs, sizeof (ib_dm_srv_t) *
	    profile->ioc_service_entries);
	/*
	 * Update the profile copy with the I/O controller slot assigned.
	 * The slot occupies the lower 8 bits of the vendor ID/slot 32-bit
	 * field.
	 */
	profile->ioc_vendorid |= h2b32(ioc->ii_slot);
	ibdma_set_ioc_state(ioc->ii_hcap, ioc->ii_slot, IBDMA_IOC_PRESENT);
	rw_exit(&ioc->ii_hcap->ih_iou_rwlock);
	mutex_exit(&ibdma->ms_hca_list_lock);

	return (IBDMA_SUCCESS);
}
1202