/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Should we maintain base lid for each port in ibmf_ci?
 */

/*
 * This file implements the UD destination resource management in IBMF.
 */
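
/*
 * A list of pre-allocated ibmf_ud_dest_t structures, each with an IBT
 * address handle already allocated, is kept per CI. ibmf_i_alloc_ud_dest()
 * takes entries from this list so that threads in interrupt context,
 * which may not allocate address handles themselves, can still obtain one.
 * A taskq thread replenishes the list up to IBMF_UD_DEST_HI_WATER_MARK
 * whenever an allocation finds it below IBMF_UD_DEST_LO_WATER_MARK.
 */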

#include <sys/ib/mgt/ibmf/ibmf_impl.h>

extern int ibmf_trace_level;
extern ibmf_state_t *ibmf_statep;
static void ibmf_i_populate_ud_dest_list(ibmf_ci_t *cip, int kmflag);

/*
 * ibmf_i_init_ud_dest():
 * Initialize a cache of UD destination structures used to send UD traffic.
 * Also create a list of pre-allocated UD destination structures to
 * satisfy requests for a UD destination structure and its associated
 * address handle, from a thread in interrupt context. Threads in interrupt
 * context are not allowed to allocate their own address handles.
 */
void
ibmf_i_init_ud_dest(ibmf_ci_t *cip)
{
	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_init_ud_dest() enter, cip = %p\n",
	    tnf_opaque, cip, cip);

	/* initialize the UD dest list mutex */
	mutex_init(&cip->ci_ud_dest_list_mutex, NULL, MUTEX_DRIVER, NULL);

	/* populate the UD dest list if possible */
	ibmf_i_pop_ud_dest_thread(cip);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_init_ud_dest() exit\n");
}

/*
 * ibmf_i_fini_ud_dest():
 * Free up the UD destination cache and the linked list.
 */
void
ibmf_i_fini_ud_dest(ibmf_ci_t *cip)
{
	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_fini_ud_dest() enter, cip = %p\n",
	    tnf_opaque, cip, cip);

	/* clean up the UD dest list */
	ibmf_i_clean_ud_dest_list(cip, B_TRUE);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_fini_ud_dest() exit\n");
}

/*
 * ibmf_i_get_ud_dest():
 *	Get a UD destination structure from the list
 */
ibmf_ud_dest_t *
ibmf_i_get_ud_dest(ibmf_ci_t *cip)
{
	ibmf_ud_dest_t		*ibmf_ud_dest;

	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_get_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_get_ud_dest() enter, cip = %p\n",
	    tnf_opaque, cip, cip);

	mutex_enter(&cip->ci_ud_dest_list_mutex);
	ibmf_ud_dest = cip->ci_ud_dest_list_head;
	if (ibmf_ud_dest != NULL) {
		cip->ci_ud_dest_list_head = ibmf_ud_dest->ud_next;
		cip->ci_ud_dest_list_count--;
	}
	mutex_exit(&cip->ci_ud_dest_list_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_get_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_get_ud_dest() exit\n");
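	/*
	 * A NULL return indicates that the pre-allocated list is empty;
	 * callers such as ibmf_i_alloc_ud_dest() treat this as a resource
	 * allocation failure.
	 */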
	return (ibmf_ud_dest);
}

/*
 * ibmf_i_put_ud_dest():
 *	Add a UD destination structure to the list
 */
void
ibmf_i_put_ud_dest(ibmf_ci_t *cip, ibmf_ud_dest_t *ud_dest)
{
	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_put_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_put_ud_dest() enter, cip = %p, "
	    "ud_dest = %p\n", tnf_opaque, cip, cip,
	    tnf_opaque, ud_dest, ud_dest);

	mutex_enter(&cip->ci_ud_dest_list_mutex);
	cip->ci_ud_dest_list_count++;
	ud_dest->ud_next = cip->ci_ud_dest_list_head;
	cip->ci_ud_dest_list_head = ud_dest;
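	/*
	 * The list may temporarily grow beyond IBMF_UD_DEST_HI_WATER_MARK;
	 * ibmf_i_clean_ud_dest_list() trims it back down when called with
	 * all set to B_FALSE.
	 */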
	mutex_exit(&cip->ci_ud_dest_list_mutex);

	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_put_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_put_ud_dest() exit, cip = %p\n",
	    tnf_opaque, cip, cip);
}

/*
 * ibmf_i_populate_ud_dest_list():
 * Maintain a list of IBMF UD destination structures to
 * satisfy requests for a UD destination structure and its associated
 * address handle, from a thread in interrupt context. Threads in interrupt
 * context are not allowed to allocate their own address handles.
 * Add to this list only if the number of entries in the list falls below
 * IBMF_UD_DEST_LO_WATER_MARK. When adding to the list, add entries up to
 * IBMF_UD_DEST_HI_WATER_MARK.
 */
static void
ibmf_i_populate_ud_dest_list(ibmf_ci_t *cip, int kmflag)
{
	ibmf_ud_dest_t		*ibmf_ud_dest;
	uint32_t		count;
	ibt_status_t		status;
	ibt_ud_dest_flags_t	ud_dest_flags = IBT_UD_DEST_NO_FLAGS;

	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_populate_ud_dest_list_start, IBMF_TNF_TRACE, "",
	    "ibmf_i_populate_ud_dest_list() enter, cip = %p, kmflag = %d \n",
	    tnf_opaque, cip, cip, tnf_int, kmflag, kmflag);

	/* do not allow a population operation if non-blocking */
	if (kmflag == KM_NOSLEEP) {
		IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L3,
		    ibmf_i_populate_ud_dest, IBMF_TNF_TRACE, "",
		    "ibmf_i_populate_ud_dest_list(): %s\n", tnf_string, msg,
		    "Skipping, called with non-blocking flag\n");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_i_populate_ud_dest_end, IBMF_TNF_TRACE, "",
		    "ibmf_i_populate_ud_dest_list() exit\n");
		/*
		 * Don't return a failure code here.
		 * If the list stays empty, ibmf_i_get_ud_dest() will return
		 * NULL and the resource allocation in ibmf_i_alloc_ud_dest()
		 * will fail.
		 */
		return;
	}

	mutex_enter(&cip->ci_ud_dest_list_mutex);
	count = cip->ci_ud_dest_list_count;

	/* nothing to do if count is above the low water mark */
	if (count > IBMF_UD_DEST_LO_WATER_MARK) {
		mutex_exit(&cip->ci_ud_dest_list_mutex);
		IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L3,
		    ibmf_i_populate_ud_dest, IBMF_TNF_TRACE, "",
		    "ibmf_i_populate_ud_dest_list(): %s\n", tnf_string, msg,
		    "Count not below low water mark\n");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_i_populate_ud_dest_end, IBMF_TNF_TRACE, "",
		    "ibmf_i_populate_ud_dest_list() exit\n");
		return;
	}

	/* populate the pool up to the high water mark */
	while (count < IBMF_UD_DEST_HI_WATER_MARK) {
		ibt_adds_vect_t adds_vect;

		ibmf_ud_dest = kmem_zalloc(sizeof (ibmf_ud_dest_t), kmflag);
		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibmf_ud_dest))

		/* Call IBTF to allocate an address handle */
		bzero(&adds_vect, sizeof (adds_vect));
		adds_vect.av_port_num = 1;
		adds_vect.av_srate = IBT_SRATE_1X;	/* assume the minimum */
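		/*
		 * The address vector values above are placeholders; the
		 * handle is set up with the real address vector later, by
		 * ibt_modify_ah() in ibmf_i_alloc_ud_dest(). Drop the list
		 * lock so it is not held across the IBTF call; it is
		 * reacquired below before the list is updated.
		 */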
		mutex_exit(&cip->ci_ud_dest_list_mutex);

		status = ibt_alloc_ah(cip->ci_ci_handle, ud_dest_flags,
		    cip->ci_pd, &adds_vect, &ibmf_ud_dest->ud_dest.ud_ah);
		if (status != IBT_SUCCESS) {
			kmem_free(ibmf_ud_dest, sizeof (ibmf_ud_dest_t));
			IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
			    ibmf_i_populate_ud_dest_err, IBMF_TNF_ERROR, "",
			    "ibmf_i_populate_ud_dest_list(): %s, status = %d\n",
			    tnf_string, msg, "ibt alloc ah failed",
			    tnf_uint, ibt_status, status);
			IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
			    ibmf_i_populate_ud_dest_end, IBMF_TNF_TRACE, "",
			    "ibmf_i_populate_ud_dest_list() exit\n");
			return;
		}

		/* Add the ud_dest to the list */
		mutex_enter(&cip->ci_ud_dest_list_mutex);

		if (cip->ci_ud_dest_list_head != NULL)
			ibmf_ud_dest->ud_next = cip->ci_ud_dest_list_head;
		else
			ibmf_ud_dest->ud_next = NULL;

		cip->ci_ud_dest_list_head = ibmf_ud_dest;
		cip->ci_ud_dest_list_count++;

		/*
		 * Get the latest count since other threads may have
		 * added to the list as well.
		 */
		count = cip->ci_ud_dest_list_count;

	}

	mutex_exit(&cip->ci_ud_dest_list_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_populate_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_populate_ud_dest_list() exit\n");
}

/*
 * ibmf_i_clean_ud_dest_list():
 * Free up entries from the linked list of IBMF UD destination structures.
 * If the "all" argument is B_TRUE, free up all the entries in the list.
 * If the "all" argument is B_FALSE, free up entries to bring the total
 * down to IBMF_UD_DEST_HI_WATER_MARK.
 */
void
ibmf_i_clean_ud_dest_list(ibmf_ci_t *cip, boolean_t all)
{
	ibmf_ud_dest_t		*ibmf_ud_dest;
	ibt_ud_dest_t		*ud_dest;
	uint32_t		count;
	ibt_status_t		status;

	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_clean_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_clean_ud_dest_list() enter, "
	    "cip = %p, all = %d\n", tnf_opaque, cip, cip,
	    tnf_uint, all, all);

	mutex_enter(&cip->ci_ud_dest_list_mutex);

	/* Determine the number of UD destination resources to free */
	if (all == B_TRUE) {
		count = cip->ci_ud_dest_list_count;
	} else if (cip->ci_ud_dest_list_count > IBMF_UD_DEST_HI_WATER_MARK) {
		count = cip->ci_ud_dest_list_count -
		    IBMF_UD_DEST_HI_WATER_MARK;
	} else
		count = 0;

	while (count) {
		ibmf_ud_dest = cip->ci_ud_dest_list_head;
		ASSERT(ibmf_ud_dest != NULL);
		if (ibmf_ud_dest != NULL) {
			/* Remove ibmf_ud_dest from the list */
			cip->ci_ud_dest_list_head = ibmf_ud_dest->ud_next;
			cip->ci_ud_dest_list_count--;
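			/*
			 * Drop the list lock across the ibt_free_ah() call;
			 * it is reacquired below.
			 */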
			mutex_exit(&cip->ci_ud_dest_list_mutex);

			ud_dest = &ibmf_ud_dest->ud_dest;
			status = ibt_free_ah(cip->ci_ci_handle, ud_dest->ud_ah);
			if (status != IBT_SUCCESS) {
				IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
				    ibmf_i_clean_ud_dest_err, IBMF_TNF_ERROR,
				    "", "ibmf_i_clean_ud_dest_list(): %s, "
				    "status = %d\n", tnf_string, msg,
				    "ibt_free_ah failed", tnf_uint, ibt_status,
				    status);
				IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
				    ibmf_i_clean_ud_dest_end, IBMF_TNF_TRACE,
				    "", "ibmf_i_clean_ud_dest_list() exit\n");
				return;
			}

			/* Free the ud_dest context */
			kmem_free(ibmf_ud_dest, sizeof (ibmf_ud_dest_t));

			mutex_enter(&cip->ci_ud_dest_list_mutex);
		}
		/* Determine the number of UD destination resources to free */
		if (all == B_TRUE) {
			count = cip->ci_ud_dest_list_count;
		} else if (cip->ci_ud_dest_list_count >
		    IBMF_UD_DEST_HI_WATER_MARK) {
			count = cip->ci_ud_dest_list_count -
			    IBMF_UD_DEST_HI_WATER_MARK;
		} else
			count = 0;
	}

	mutex_exit(&cip->ci_ud_dest_list_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_clean_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_clean_ud_dest_list() exit\n");
}

/*
 * ibmf_i_alloc_ud_dest():
 *	Allocate and set up a UD destination context
 */
/*ARGSUSED*/
int
ibmf_i_alloc_ud_dest(ibmf_client_t *clientp, ibmf_msg_impl_t *msgimplp,
    ibt_ud_dest_hdl_t *ud_dest_p, boolean_t block)
{
	ibmf_ci_t 		*cip;
	ibmf_addr_info_t	*addrp;
	ibt_status_t		status;
	ibt_adds_vect_t		adds_vec;
	ibt_ud_dest_t		*ud_dest;
	int			ibmf_status, ret;

	IBMF_TRACE_4(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_alloc_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_alloc_ud_dest() enter, "
	    "clientp = %p, msg = %p, ud_destp = %p, block = %d\n",
	    tnf_opaque, clientp, clientp, tnf_opaque, msg, msgimplp,
	    tnf_opaque, ud_dest_p, ud_dest_p, tnf_uint, block, block);

	_NOTE(ASSUMING_PROTECTED(*ud_dest_p))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ud_dest))

	addrp = &msgimplp->im_local_addr;
	cip = clientp->ic_myci;

	/*
	 * Dispatch a taskq to replenish the UD destination handle cache.
	 */
	mutex_enter(&cip->ci_ud_dest_list_mutex);
	if (cip->ci_ud_dest_list_count < IBMF_UD_DEST_LO_WATER_MARK) {
		ret = ibmf_ud_dest_tq_disp(cip);
		if (ret == 0) {
			IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L3,
			    ibmf_i_alloc_ud_dest_err, IBMF_TNF_ERROR, "",
			    "ibmf_i_alloc_ud_dest(): %s\n", tnf_string, msg,
			    "taskq dispatch of ud_dest population thread "
			    "failed");
		}
	}
	mutex_exit(&cip->ci_ud_dest_list_mutex);

	/* initialize the address vector based on global/local address */
	if (msgimplp->im_msg_flags & IBMF_MSG_FLAGS_GLOBAL_ADDRESS) {
		/* fill in the grh stuff as expected by ibt */
		adds_vec.av_flow = msgimplp->im_global_addr.ig_flow_label;
		adds_vec.av_send_grh = B_TRUE;
		adds_vec.av_tclass = msgimplp->im_global_addr.ig_tclass;
		adds_vec.av_hop = msgimplp->im_global_addr.ig_hop_limit;
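		/*
		 * For unsolicited messages, the global address describes the
		 * incoming message (the local port is the receiver), so the
		 * GIDs are swapped when building the outbound address vector.
		 */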
		if (msgimplp->im_unsolicited == B_TRUE) {
			adds_vec.av_sgid =
			    msgimplp->im_global_addr.ig_recver_gid;
			adds_vec.av_dgid =
			    msgimplp->im_global_addr.ig_sender_gid;
		} else {
			adds_vec.av_sgid =
			    msgimplp->im_global_addr.ig_sender_gid;
			adds_vec.av_dgid =
			    msgimplp->im_global_addr.ig_recver_gid;
		}
	} else {
		adds_vec.av_send_grh = B_FALSE;
	}

	/* common address vector initialization */
	adds_vec.av_dlid = addrp->ia_remote_lid;
	if ((clientp->ic_base_lid == 0) && (clientp->ic_qp->iq_qp_num != 0)) {
		/* Get the port's base LID */
		(void) ibt_get_port_state_byguid(
		    clientp->ic_client_info.ci_guid,
		    clientp->ic_client_info.port_num, NULL,
		    &clientp->ic_base_lid);
		if (clientp->ic_base_lid == 0) {
			IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
			    ibmf_i_alloc_ud_dest_err, IBMF_TNF_ERROR, "",
			    "ibmf_i_alloc_ud_dest(): %s\n", tnf_string, msg,
			    "base_lid is not defined, i.e., port is down");
			IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
			    ibmf_i_alloc_ud_dest_end, IBMF_TNF_TRACE, "",
			    "ibmf_i_alloc_ud_dest() exit\n");
			return (IBMF_BAD_PORT_STATE);
		}
	}
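	/*
	 * The source path bits are the offset of the local LID from the
	 * port's base LID (non-zero only when the port's LMC is non-zero).
	 */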
	adds_vec.av_src_path = addrp->ia_local_lid - clientp->ic_base_lid;
	adds_vec.av_srvl = addrp->ia_service_level;
	adds_vec.av_srate = IBT_SRATE_1X;
	adds_vec.av_port_num = clientp->ic_client_info.port_num;

	ud_dest = *ud_dest_p;

	/* If an IBT UD destination structure has not been allocated, do so */
	if (ud_dest == NULL) {

		ibmf_ud_dest_t *ibmf_ud_dest;

		/* Get a UD destination resource from the list */
		ibmf_ud_dest = ibmf_i_get_ud_dest(cip);
		if (ibmf_ud_dest == NULL) {
			IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
			    ibmf_i_alloc_ud_dest_err, IBMF_TNF_ERROR, "",
			    "ibmf_i_alloc_ud_dest(): %s\n",
			    tnf_string, msg, "No ud_dest available");
			IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
			    ibmf_i_alloc_ud_dest_end, IBMF_TNF_TRACE, "",
			    "ibmf_i_alloc_ud_dest() exit\n");
			return (IBMF_NO_RESOURCES);
		}
		ud_dest = &ibmf_ud_dest->ud_dest;
		msgimplp->im_ibmf_ud_dest = ibmf_ud_dest;
		ud_dest->ud_qkey = msgimplp->im_local_addr.ia_q_key;
		ud_dest->ud_dst_qpn = msgimplp->im_local_addr.ia_remote_qno;
		*ud_dest_p = ud_dest;
	} else {
		ud_dest->ud_qkey = msgimplp->im_local_addr.ia_q_key;
		ud_dest->ud_dst_qpn = msgimplp->im_local_addr.ia_remote_qno;
	}

	/* modify the address handle with the address vector information */
	status = ibt_modify_ah(cip->ci_ci_handle, ud_dest->ud_ah, &adds_vec);
	if (status != IBT_SUCCESS)
		IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_i_alloc_ud_dest_err, IBMF_TNF_ERROR, "",
		    "ibmf_i_alloc_ud_dest(): %s, status = %d\n",
		    tnf_string, msg, "ibt modify ah failed", tnf_uint,
		    ibt_status, status);

	ibmf_status = ibmf_i_ibt_to_ibmf_status(status);
	if (ibmf_status == IBMF_SUCCESS) {
		mutex_enter(&clientp->ic_kstat_mutex);
		IBMF_ADD32_KSTATS(clientp, ud_dests_alloced, 1);
		mutex_exit(&clientp->ic_kstat_mutex);
	}

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_alloc_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_alloc_ud_dest() exit\n");

	return (ibmf_status);
}

/*
 * ibmf_i_free_ud_dest():
 *	Free up the UD destination context
 */
void
ibmf_i_free_ud_dest(ibmf_client_t *clientp, ibmf_msg_impl_t *msgimplp)
{
	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_free_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_free_ud_dest() enter\n");

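	/* Return the UD destination resource to the CI-wide cache */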
	ibmf_i_put_ud_dest(clientp->ic_myci, msgimplp->im_ibmf_ud_dest);

	/* Clear the UD dest pointers so a new UD dest may be allocated */
	mutex_enter(&msgimplp->im_mutex);
	msgimplp->im_ibmf_ud_dest = NULL;
	msgimplp->im_ud_dest = NULL;
	mutex_exit(&msgimplp->im_mutex);

	mutex_enter(&clientp->ic_kstat_mutex);
	IBMF_SUB32_KSTATS(clientp, ud_dests_alloced, 1);
	mutex_exit(&clientp->ic_kstat_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_free_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_free_ud_dest() exit\n");

}

/*
 * ibmf_i_pop_ud_dest_thread()
 *
 * Wrapper function to call ibmf_i_populate_ud_dest_list() with
 * the KM_SLEEP flag.
 */
void
ibmf_i_pop_ud_dest_thread(void *argp)
{
	ibmf_ci_t *cip = (ibmf_ci_t *)argp;

	ibmf_i_populate_ud_dest_list(cip, KM_SLEEP);
}

/*
 * ibmf_ud_dest_tq_disp()
 *
 * Wrapper for taskq dispatch of the function that populates
 * the UD destination handle cache.
 */
int
ibmf_ud_dest_tq_disp(ibmf_ci_t *cip)
{
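	/*
	 * taskq_dispatch() returns 0 when the dispatch fails; the caller
	 * (ibmf_i_alloc_ud_dest()) checks for this.
	 */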
	return (taskq_dispatch(ibmf_statep->ibmf_taskq,
	    ibmf_i_pop_ud_dest_thread, cip, TQ_NOSLEEP));
}
515