/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/ksynch.h>
#include <sys/stream.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/vio_util.h>

static int vio_pool_cleanup_retries = 10;	/* Max retries to free pool */
static int vio_pool_cleanup_delay = 10000;	/* 10ms */

/*
 * Create a pool of mblks from which future vio_allocb() requests
 * will be serviced.
 *
 * NOTE: num_mblks has to be non-zero and a power of 2.
 *
 * Returns
 *	0 on success
 *	EINVAL if num_mblks is zero or not a power of 2.
 *	ENOSPC if the pool could not be created due to alloc failures.
 */
int
vio_create_mblks(uint64_t num_mblks, size_t mblk_size, uint8_t *mblk_datap,
    vio_mblk_pool_t **poolp)
{
	vio_mblk_pool_t		*vmplp;
	vio_mblk_t		*vmp;
	uint8_t			*datap;
	int			i;
	int			rv;

	if (!(num_mblks) || (!ISP2(num_mblks))) {
		*poolp = NULL;
		return (EINVAL);
	}

	vmplp = kmem_zalloc(sizeof (*vmplp), KM_SLEEP);
	vmplp->quelen = num_mblks;
	vmplp->quemask = num_mblks - 1; /* expects quelen to be a power of 2 */
	vmplp->mblk_size = mblk_size;

	mutex_init(&vmplp->hlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(DDI_INTR_SOFTPRI_DEFAULT));
	mutex_init(&vmplp->tlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(DDI_INTR_SOFTPRI_DEFAULT));

	vmplp->basep = kmem_zalloc(num_mblks * sizeof (vio_mblk_t), KM_SLEEP);
	if (mblk_datap == NULL) {
		vmplp->datap = kmem_zalloc(num_mblks * mblk_size, KM_SLEEP);
	} else {
		vmplp->datap = mblk_datap;
		vmplp->flag |= VMPL_FLAG_CLIENT_DATA;
	}
	vmplp->nextp = NULL;

	/* create a queue of pointers to free vio_mblk_t's */
	vmplp->quep = kmem_zalloc(vmplp->quelen *
	    sizeof (vio_mblk_t *), KM_SLEEP);
	vmplp->head = 0;
	vmplp->tail = 0;

	for (i = 0, datap = vmplp->datap; i < num_mblks; i++) {

		vmp = &(vmplp->basep[i]);
		vmp->vmplp = vmplp;
		vmp->datap = datap;
		vmp->reclaim.free_func = vio_freeb;
		vmp->reclaim.free_arg = (caddr_t)vmp;
		vmp->mp = desballoc(vmp->datap, mblk_size, BPRI_MED,
		    &vmp->reclaim);

		if (vmp->mp == NULL) {
			/* reset tail */
			vmplp->tail = vmplp->head;

			/*
			 * vio_destroy_mblks() frees mblks that have been
			 * allocated so far and then destroys the pool.
			 */
			rv = vio_destroy_mblks(vmplp);
			ASSERT(rv == 0);

			*poolp = NULL;
			return (ENOSPC);
		}

		if ((vmplp->flag & VMPL_FLAG_CLIENT_DATA) != 0) {
			vmp->index = i;
			vmp->state = VIO_MBLK_FREE;
		}

		/* put this vmp on the free queue */
		vmplp->quep[vmplp->tail] = vmp;
		vmplp->tail = (vmplp->tail + 1) & vmplp->quemask;

		datap += mblk_size;
	}

	*poolp = vmplp;
	return (0);
}
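
/*
 * Usage sketch (editorial example, not part of the original driver): a
 * client might create a receive buffer pool at attach time.  The names
 * rx_pool, VDEV_NRXBUFS and VDEV_RXBUF_SZ below are hypothetical;
 * VDEV_NRXBUFS must be a non-zero power of 2.
 *
 *	vio_mblk_pool_t	*rx_pool;
 *	int		rv;
 *
 *	rv = vio_create_mblks(VDEV_NRXBUFS, VDEV_RXBUF_SZ, NULL, &rx_pool);
 *	if (rv != 0)
 *		return (rv);
 */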

/*
 * Destroy the pool of mblks. This can only succeed when
 * all allocated mblks have been returned to the pool.
 *
 * It is up to the caller to ensure that no further mblks are
 * requested from the pool after destroy has been invoked.
 *
 * Returns 0 on success, EINVAL if the handle is invalid, or
 * EBUSY if not all mblks have been reclaimed yet.
 */
int
vio_destroy_mblks(vio_mblk_pool_t *vmplp)
{
	uint64_t	i;
	uint64_t	num_mblks;
	vio_mblk_t	*vmp;
	int		pool_cleanup_retries = 0;

	if (vmplp == NULL)
		return (EINVAL);

	/*
	 * We can only destroy the pool once all the mblks have
	 * been reclaimed.
	 */
	do {
		if (vmplp->head == vmplp->tail) {
			break;
		}

		/* some mblks still in use */
		drv_usecwait(vio_pool_cleanup_delay);
	} while (++pool_cleanup_retries < vio_pool_cleanup_retries);

	if (vmplp->head != vmplp->tail) {
		return (EBUSY);
	}

	num_mblks = vmplp->quelen;

	/*
	 * Set the pool flag to tell vio_freeb(), which is invoked from
	 * freeb(), that it is being called in the context of
	 * vio_destroy_mblks(). This results in freeing only the mblk_t and
	 * dblk_t structures for each mp. The associated data buffers are
	 * freed below as one big chunk through kmem_free(vmplp->datap).
	 */
	vmplp->flag |= VMPL_FLAG_DESTROYING;
	for (i = 0; i < num_mblks; i++) {
		vmp = &(vmplp->basep[i]);
		/*
		 * It is possible that mblks have been allocated only up to
		 * a certain index and the entire quelen has not been
		 * initialized. This might happen due to desballoc() failure
		 * while creating the pool. The check below handles this
		 * condition.
		 */
		if (vmp->mp != NULL)
			freeb(vmp->mp);
	}
	vmplp->flag &= ~(VMPL_FLAG_DESTROYING);

	kmem_free(vmplp->basep, num_mblks * sizeof (vio_mblk_t));
	if ((vmplp->flag & VMPL_FLAG_CLIENT_DATA) == 0) {
		kmem_free(vmplp->datap, num_mblks * vmplp->mblk_size);
	}
	kmem_free(vmplp->quep, num_mblks * sizeof (vio_mblk_t *));

	mutex_destroy(&vmplp->hlock);
	mutex_destroy(&vmplp->tlock);

	kmem_free(vmplp, sizeof (*vmplp));

	return (0);
}
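
/*
 * Teardown sketch (editorial example): vio_destroy_mblks() returns EBUSY
 * while any mblk from the pool is still outstanding, so callers commonly
 * park the pool and retry the destroy later.  rx_pool is the hypothetical
 * pool from the sketch above; vdev_defer_pool() is a placeholder for
 * whatever deferral mechanism the client uses.
 *
 *	if (vio_destroy_mblks(rx_pool) == EBUSY)
 *		vdev_defer_pool(rx_pool);
 */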

/*
 * Allocate a vio_mblk from the free pool if one is available;
 * otherwise return NULL.
 */
vio_mblk_t *
vio_allocb(vio_mblk_pool_t *vmplp)
{
	vio_mblk_t	*vmp = NULL;
	uint32_t	head;

	mutex_enter(&vmplp->hlock);
	head = (vmplp->head + 1) & vmplp->quemask;
	if (head != vmplp->tail) {
		/* we have free mblks */
		vmp = vmplp->quep[vmplp->head];
		vmplp->head = head;
		ASSERT(vmp->state == VIO_MBLK_FREE);
		vmp->state = VIO_MBLK_BOUND;
	}
	mutex_exit(&vmplp->hlock);

	return (vmp);
}
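
/*
 * Usage sketch (editorial example): a data path might pull a buffer from
 * the pool, copy data into the attached mblk and pass it upstream; when
 * the upper layers eventually freemsg() it, the desballoc() free routine
 * (vio_freeb) returns it to the pool.  rx_pool, datap and nbytes are
 * hypothetical.
 *
 *	vio_mblk_t	*vmp;
 *	mblk_t		*mp;
 *
 *	if ((vmp = vio_allocb(rx_pool)) == NULL)
 *		return;
 *	mp = vmp->mp;
 *	bcopy(datap, mp->b_rptr, nbytes);
 *	mp->b_wptr = mp->b_rptr + nbytes;
 */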

/*
 * Return an mblk to the free pool. Invoked when the upper IP
 * layers call freemsg() etc. on the mblk they were passed.
 */
void
vio_freeb(void *arg)
{
	vio_mblk_t	*vmp = (vio_mblk_t *)arg;
	vio_mblk_pool_t	*vmplp = vmp->vmplp;

	if (vmplp->flag & VMPL_FLAG_DESTROYING) {
		/*
		 * This flag indicates that freeb() is being called from
		 * vio_destroy_mblks(). We don't need to alloc a new
		 * mblk_t/dblk_t pair for this data buffer; return from here
		 * and the data buffer itself will be freed in
		 * vio_destroy_mblks().
		 */
		return;
	}

	vmp->mp = desballoc(vmp->datap, vmplp->mblk_size,
	    BPRI_MED, &vmp->reclaim);
	vmp->state = VIO_MBLK_FREE;

	mutex_enter(&vmplp->tlock);
	vmplp->quep[vmplp->tail] = vmp;
	vmplp->tail = (vmplp->tail + 1) & vmplp->quemask;
	mutex_exit(&vmplp->tlock);
}


/*
 * This function searches the given mblk pool for mblks that are in the
 * BOUND state and moves them to the FREE state. Note that only clients that
 * are operating in RxDringData mode use this function. This allows such
 * clients to reclaim buffers that are provided to the peer as shared memory,
 * before calling vio_destroy_mblks(). We don't need this in other cases
 * as the buffer is locally managed.
 */
void
vio_clobber_pool(vio_mblk_pool_t *vmplp)
{
	uint64_t	num_mblks = vmplp->quelen;
	uint64_t	i;
	vio_mblk_t	*vmp;

	mutex_enter(&vmplp->hlock);
	mutex_enter(&vmplp->tlock);
	for (i = 0; i < num_mblks; i++) {
		vmp = &(vmplp->basep[i]);
		if ((vmp->state & VIO_MBLK_BOUND) != 0) {
			/* put this vmp on the free queue */
			vmp->state = VIO_MBLK_FREE;
			ASSERT(vmplp->tail != vmplp->head);
			vmplp->quep[vmplp->tail] = vmp;
			vmplp->tail = (vmplp->tail + 1) & vmplp->quemask;
		}
	}
	mutex_exit(&vmplp->tlock);
	mutex_exit(&vmplp->hlock);
}
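
/*
 * Usage sketch (editorial example): an RxDringData-mode client that has
 * exported the pool's buffers to its peer can force any still-BOUND
 * buffers back to the FREE state before tearing the pool down.  rx_pool
 * is hypothetical.
 *
 *	vio_clobber_pool(rx_pool);
 *	(void) vio_destroy_mblks(rx_pool);
 */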

/*
 * Create multiple pools of mblks from which future vio_allocb()
 * or vio_multipool_allocb() requests will be serviced.
 *
 * Arguments:
 *	vmultip -- A pointer to a vio_multi_pool_t structure.
 *	num_pools -- Number of pools.
 *	... -- Variable arguments consisting of a list of buffer sizes for
 *		each pool followed by a list of buffer counts for each pool.
 *
 * NOTE: The restrictions of vio_create_mblks() apply to this interface also.
 *
 * Returns 0 on success or an error returned by vio_create_mblks().
 */
int
vio_init_multipools(vio_multi_pool_t *vmultip, int num_pools, ...)
{
	int		i;
	int		status;
	char		*tbuf;
	va_list		vap;
	vio_mblk_pool_t *fvmp = NULL;

	/*
	 * Allocate memory for all of the following in one allocation.
	 * 	bufsz_tbl -- sizeof (uint32_t) * num_pools
	 * 	nbuf_tbl  -- sizeof (uint32_t) * num_pools
	 *	vmpp	  -- sizeof (vio_mblk_pool_t *) * num_pools
	 */
	vmultip->tbsz = (sizeof (uint32_t) * num_pools) +
	    (sizeof (uint32_t) * num_pools) +
	    (sizeof (vio_mblk_pool_t *) * num_pools);
	tbuf = kmem_zalloc(vmultip->tbsz, KM_SLEEP);
	vmultip->bufsz_tbl = (uint32_t *)tbuf;
	vmultip->nbuf_tbl = (uint32_t *)(tbuf +
	    (sizeof (uint32_t) * num_pools));
	vmultip->vmpp = (vio_mblk_pool_t **)(tbuf +
	    (sizeof (uint32_t) * num_pools * 2));
	vmultip->num_pools = num_pools;

	/* initialize the array first */
	va_start(vap, num_pools);
	for (i = 0; i < num_pools; i++) {
		vmultip->bufsz_tbl[i] = va_arg(vap, uint32_t);
	}
	for (i = 0; i < num_pools; i++) {
		vmultip->nbuf_tbl[i] = va_arg(vap, uint32_t);
	}
	va_end(vap);

	for (i = 0; i < vmultip->num_pools; i++) {
		status = vio_create_mblks(vmultip->nbuf_tbl[i],
		    vmultip->bufsz_tbl[i], NULL, &vmultip->vmpp[i]);
		if (status != 0) {
			vio_destroy_multipools(vmultip, &fvmp);
			/* We expect to free the pools without failure here */
			ASSERT(fvmp == NULL);
			return (status);
		}
	}
	return (0);
}
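
/*
 * Usage sketch (editorial example): a driver that wants three pools of
 * 128-byte, 256-byte and 2048-byte buffers, 64 buffers each, passes the
 * sizes first and then the counts.  vmul is a hypothetical
 * vio_multi_pool_t embedded in the client's soft state.
 *
 *	int	rv;
 *
 *	rv = vio_init_multipools(&vmul, 3, 128, 256, 2048, 64, 64, 64);
 */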

/*
 * Destroy the multiple pools of mblks. This can only succeed when
 * all allocated mblks have been returned to the pool.
 *
 * If a pool of mblks couldn't be destroyed, then the failed vio_mblk_pool_t
 * pointers are returned via the fvmp list. It is the caller's
 * responsibility to check this list and free them later at an appropriate
 * time with vio_destroy_mblks().
 *
 * Arguments:
 *	vmultip -- A pointer to a vio_multi_pool_t structure.
 *	fvmp -- A list in which the pools that couldn't be destroyed are
 *		returned.
 */
void
vio_destroy_multipools(vio_multi_pool_t *vmultip, vio_mblk_pool_t **fvmp)
{
	int i;
	vio_mblk_pool_t *vmp;

	for (i = 0; i < vmultip->num_pools; i++) {
		if ((vmp = vmultip->vmpp[i]) != NULL) {
			if (vio_destroy_mblks(vmp)) {
				/*
				 * If we cannot reclaim all mblks, then
				 * return the pool in the failed vmp
				 * list (fvmp).
				 */
				vmp->nextp = *fvmp;
				*fvmp = vmp;
			}
		}
	}
	if (vmultip->tbsz != 0)
		kmem_free(vmultip->bufsz_tbl, vmultip->tbsz);
	vmultip->bufsz_tbl = NULL;
	vmultip->nbuf_tbl = NULL;
	vmultip->vmpp = NULL;
	vmultip->num_pools = 0;
	vmultip->tbsz = 0;
}
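
/*
 * Teardown sketch (editorial example): pools that could not be destroyed
 * come back chained through the fvmp list (via nextp) and must be retried
 * later with vio_destroy_mblks().  vmul is the hypothetical
 * vio_multi_pool_t from the sketch above; vdev_save_failed_pools() is a
 * placeholder for the client's deferral mechanism.
 *
 *	vio_mblk_pool_t	*fvmp = NULL;
 *
 *	vio_destroy_multipools(&vmul, &fvmp);
 *	if (fvmp != NULL)
 *		vdev_save_failed_pools(fvmp);
 */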


/*
 * Allocate a vio_mblk from one of the free pools, trying the pool that
 * best fits the requested size first.
 */
vio_mblk_t *
vio_multipool_allocb(vio_multi_pool_t *vmultip, size_t size)
{
	int i;
	vio_mblk_t *vmp = NULL;

	/* Try allocating any size that fits */
	for (i = 0; i < vmultip->num_pools; i++) {
		if (size > vmultip->bufsz_tbl[i]) {
			continue;
		}
		vmp = vio_allocb(vmultip->vmpp[i]);
		if (vmp != NULL) {
			break;
		}
	}
	return (vmp);
}
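
/*
 * Usage sketch (editorial example): allocate a buffer large enough for an
 * incoming frame.  Pools whose buffers are too small are skipped, and the
 * first pool with a free buffer of sufficient size is used.  vmul and
 * framesz are hypothetical.
 *
 *	vio_mblk_t	*vmp;
 *
 *	if ((vmp = vio_multipool_allocb(&vmul, framesz)) == NULL)
 *		return;
 */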

/*
 * -----------------------------------------------------------------------------
 * LDoms versioning functions
 *
 * Future work: the version negotiation code in the various VIO drivers
 * could be made common and placed here.
 */

/*
 * Description:
 *	This function checks to see if the supplied version tuple (major,minor)
 *	is supported by the version 'ver' negotiated during the handshake
 *	between the client and the server.
 *
 * Assumption:
 *	This function assumes that backward compatibility is not broken in
 *	newer minor versions of the protocol (e.g. v1.5 & v1.1 support v1.0)
 *
 * Return Value:
 *	B_TRUE		- The (major,minor) version is supported
 *	B_FALSE		- not supported
 */
boolean_t
vio_ver_is_supported(vio_ver_t ver, uint16_t major, uint16_t minor)
{
	if ((ver.major == major) && (ver.minor >= minor))
		return (B_TRUE);

	return (B_FALSE);
}
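
/*
 * Usage sketch (editorial example): after the handshake has settled on a
 * negotiated version, a driver can gate a feature that first appeared in
 * protocol version 1.2.  vio_ver is a hypothetical negotiated vio_ver_t.
 *
 *	boolean_t	use_feature;
 *
 *	use_feature = vio_ver_is_supported(vio_ver, 1, 2);
 */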
455