/*
 * Copyright (c) 2000-2003 Sendmail, Inc. and its suppliers.
 *	All rights reserved.
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the sendmail distribution.
 */

#include <sm/gen.h>
SM_RCSID("@(#)$Id: rpool.c,v 1.27 2003/10/09 17:49:47 ca Exp $")

/*
**  resource pools
**  For documentation, see rpool.html
*/

#include <sm/exc.h>
#include <sm/heap.h>
#include <sm/rpool.h>
#include <sm/varargs.h>
#include <sm/conf.h>
#if _FFR_PERF_RPOOL
# include <syslog.h>
#endif /* _FFR_PERF_RPOOL */

const char SmRpoolMagic[] = "sm_rpool";

typedef union
{
	SM_POOLLINK_T	link;
	char		align[SM_ALIGN_SIZE];
} SM_POOLHDR_T;

/*
**  Tune this later
*/

#define POOLSIZE		4096
#define BIG_OBJECT_RATIO	10

/*
**  SM_RPOOL_ALLOCBLOCK_X -- allocate a new block for an rpool.
**
**	Parameters:
**		rpool -- rpool to which the block should be added.
**		size -- size of block.
**
**	Returns:
**		Pointer to block.
**
**	Exceptions:
**		F:sm_heap -- out of memory
*/

static char *
sm_rpool_allocblock_x(rpool, size)
	SM_RPOOL_T *rpool;
	size_t size;
{
	SM_POOLLINK_T *p;

	p = sm_malloc_x(sizeof(SM_POOLHDR_T) + size);
	p->sm_pnext = rpool->sm_pools;
	rpool->sm_pools = p;
	return (char*) p + sizeof(SM_POOLHDR_T);
}

/*
**  SM_RPOOL_ALLOCBLOCK -- allocate a new block for an rpool.
**
**	Parameters:
**		rpool -- rpool to which the block should be added.
**		size -- size of block.
**
**	Returns:
**		Pointer to block, NULL on failure.
*/

static char *
sm_rpool_allocblock(rpool, size)
	SM_RPOOL_T *rpool;
	size_t size;
{
	SM_POOLLINK_T *p;

	p = sm_malloc(sizeof(SM_POOLHDR_T) + size);
	if (p == NULL)
		return NULL;
	p->sm_pnext = rpool->sm_pools;
	rpool->sm_pools = p;
	return (char*) p + sizeof(SM_POOLHDR_T);
}

/*
**  SM_RPOOL_MALLOC_TAGGED_X -- allocate memory from rpool
**
**	Parameters:
**		rpool -- rpool from which memory should be allocated;
**			can be NULL, in which case sm_malloc() is used.
**		size -- size of block.
**		file -- filename.
**		line -- line number in file.
**		group -- heap group for debugging.
**
**	Returns:
**		Pointer to block.
**
**	Exceptions:
**		F:sm_heap -- out of memory
**
**	Notice: XXX
**		If size == 0 and the rpool is new (no memory
**		allocated yet), NULL is returned!
**		We could solve this by
**		- wasting 1 byte (require size < avail),
**		- checking for rpool->sm_poolptr != NULL, or
**		- not asking for a 0-sized buffer.
*/

void *
#if SM_HEAP_CHECK
sm_rpool_malloc_tagged_x(rpool, size, file, line, group)
	SM_RPOOL_T *rpool;
	size_t size;
	char *file;
	int line;
	int group;
#else /* SM_HEAP_CHECK */
sm_rpool_malloc_x(rpool, size)
	SM_RPOOL_T *rpool;
	size_t size;
#endif /* SM_HEAP_CHECK */
{
	char *ptr;

	if (rpool == NULL)
		return sm_malloc_tagged_x(size, file, line, group);

	/* Ensure that size is properly aligned. */
	if (size & SM_ALIGN_BITS)
		size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;

	/* The common case.  This is optimized for speed. */
	if (size <= rpool->sm_poolavail)
	{
		ptr = rpool->sm_poolptr;
		rpool->sm_poolptr += size;
		rpool->sm_poolavail -= size;
		return ptr;
	}

	/*
	**  The slow case: we need to call malloc.
	**  The SM_REQUIRE assertion is deferred until now, for speed.
	**  That's okay: we set rpool->sm_poolavail to 0 when we free an rpool,
	**  so the common case code won't be triggered on a dangling pointer.
	*/

	SM_REQUIRE(rpool->sm_magic == SmRpoolMagic);

	/*
	**  If size > sm_poolsize, then malloc a new block especially for
	**  this request.  Future requests will be allocated from the
	**  current pool.
	**
	**  What if the current pool is mostly unallocated, and the current
	**  request is larger than the available space, but < sm_poolsize?
	**  If we discard the current pool, and start allocating from a new
	**  pool, then we will be wasting a lot of space.  For this reason,
	**  we malloc a block just for the current request if size >
	**  sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
	**  Thus, the most space that we will waste at the end of a pool
	**  is sm_bigobjectsize - 1.
	*/

	if (size > rpool->sm_bigobjectsize)
	{
#if _FFR_PERF_RPOOL
		++rpool->sm_nbigblocks;
#endif /* _FFR_PERF_RPOOL */
		return sm_rpool_allocblock_x(rpool, size);
	}
	SM_ASSERT(rpool->sm_bigobjectsize <= rpool->sm_poolsize);
	ptr = sm_rpool_allocblock_x(rpool, rpool->sm_poolsize);
	rpool->sm_poolptr = ptr + size;
	rpool->sm_poolavail = rpool->sm_poolsize - size;
#if _FFR_PERF_RPOOL
	++rpool->sm_npools;
#endif /* _FFR_PERF_RPOOL */
	return ptr;
}
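
/*
**  Illustrative sketch (not compiled into the library): how the size
**  round-up used above behaves, assuming SM_ALIGN_SIZE is a power of two
**  and SM_ALIGN_BITS == SM_ALIGN_SIZE - 1 (e.g. 8 and 7).  The helper
**  name is hypothetical.
*/

#if 0
static size_t
example_round_up(size)
	size_t size;
{
	/* same rounding as in sm_rpool_malloc_x() above */
	if (size & SM_ALIGN_BITS)
		size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;
	return size;
}

/*
**  With SM_ALIGN_SIZE == 8: sizes 1..8 round up to 8, 9..16 round up to
**  16, and 0 stays 0 -- which is why a 0-byte request from a fresh rpool
**  can return NULL, as noted in the comment above.
*/
#endif /* 0 */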

/*
**  SM_RPOOL_MALLOC_TAGGED -- allocate memory from rpool
**
**	Parameters:
**		rpool -- rpool from which memory should be allocated;
**			can be NULL, in which case sm_malloc() is used.
**		size -- size of block.
**		file -- filename.
**		line -- line number in file.
**		group -- heap group for debugging.
**
**	Returns:
**		Pointer to block, NULL on failure.
**
**	Notice: XXX
**		If size == 0 and the rpool is new (no memory
**		allocated yet), NULL is returned!
**		We could solve this by
**		- wasting 1 byte (require size < avail),
**		- checking for rpool->sm_poolptr != NULL, or
**		- not asking for a 0-sized buffer.
*/

void *
#if SM_HEAP_CHECK
sm_rpool_malloc_tagged(rpool, size, file, line, group)
	SM_RPOOL_T *rpool;
	size_t size;
	char *file;
	int line;
	int group;
#else /* SM_HEAP_CHECK */
sm_rpool_malloc(rpool, size)
	SM_RPOOL_T *rpool;
	size_t size;
#endif /* SM_HEAP_CHECK */
{
	char *ptr;

	if (rpool == NULL)
		return sm_malloc_tagged(size, file, line, group);

	/* Ensure that size is properly aligned. */
	if (size & SM_ALIGN_BITS)
		size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;

	/* The common case.  This is optimized for speed. */
	if (size <= rpool->sm_poolavail)
	{
		ptr = rpool->sm_poolptr;
		rpool->sm_poolptr += size;
		rpool->sm_poolavail -= size;
		return ptr;
	}

	/*
	**  The slow case: we need to call malloc.
	**  The SM_REQUIRE assertion is deferred until now, for speed.
	**  That's okay: we set rpool->sm_poolavail to 0 when we free an rpool,
	**  so the common case code won't be triggered on a dangling pointer.
	*/

	SM_REQUIRE(rpool->sm_magic == SmRpoolMagic);

	/*
	**  If size > sm_poolsize, then malloc a new block especially for
	**  this request.  Future requests will be allocated from the
	**  current pool.
	**
	**  What if the current pool is mostly unallocated, and the current
	**  request is larger than the available space, but < sm_poolsize?
	**  If we discard the current pool, and start allocating from a new
	**  pool, then we will be wasting a lot of space.  For this reason,
	**  we malloc a block just for the current request if size >
	**  sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
	**  Thus, the most space that we will waste at the end of a pool
	**  is sm_bigobjectsize - 1.
	*/

	if (size > rpool->sm_bigobjectsize)
	{
#if _FFR_PERF_RPOOL
		++rpool->sm_nbigblocks;
#endif /* _FFR_PERF_RPOOL */
		return sm_rpool_allocblock(rpool, size);
	}
	SM_ASSERT(rpool->sm_bigobjectsize <= rpool->sm_poolsize);
	ptr = sm_rpool_allocblock(rpool, rpool->sm_poolsize);
	if (ptr == NULL)
		return NULL;
	rpool->sm_poolptr = ptr + size;
	rpool->sm_poolavail = rpool->sm_poolsize - size;
#if _FFR_PERF_RPOOL
	++rpool->sm_npools;
#endif /* _FFR_PERF_RPOOL */
	return ptr;
}

/*
**  SM_RPOOL_NEW_X -- create a new rpool.
**
**	Parameters:
**		parent -- pointer to parent rpool, can be NULL.
**
**	Returns:
**		Pointer to new rpool.
*/

SM_RPOOL_T *
sm_rpool_new_x(parent)
	SM_RPOOL_T *parent;
{
	SM_RPOOL_T *rpool;

	rpool = sm_malloc_x(sizeof(SM_RPOOL_T));
	if (parent == NULL)
		rpool->sm_parentlink = NULL;
	else
	{
		SM_TRY
			rpool->sm_parentlink = sm_rpool_attach_x(parent,
					(SM_RPOOL_RFREE_T) sm_rpool_free,
					(void *) rpool);
		SM_EXCEPT(exc, "*")
			sm_free(rpool);
			sm_exc_raise_x(exc);
		SM_END_TRY
	}
	rpool->sm_magic = SmRpoolMagic;

	rpool->sm_poolsize = POOLSIZE - sizeof(SM_POOLHDR_T);
	rpool->sm_bigobjectsize = rpool->sm_poolsize / BIG_OBJECT_RATIO;
	rpool->sm_poolptr = NULL;
	rpool->sm_poolavail = 0;
	rpool->sm_pools = NULL;

	rpool->sm_rptr = NULL;
	rpool->sm_ravail = 0;
	rpool->sm_rlists = NULL;
#if _FFR_PERF_RPOOL
	rpool->sm_nbigblocks = 0;
	rpool->sm_npools = 0;
#endif /* _FFR_PERF_RPOOL */

	return rpool;
}
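
/*
**  Illustrative sketch (not compiled into the library): typical rpool
**  usage.  A child rpool created with a non-NULL parent is attached to
**  the parent via sm_rpool_attach_x(), so freeing the parent also frees
**  the child.  The function name is hypothetical.
*/

#if 0
static void
rpool_usage_example()
{
	SM_RPOOL_T *parent, *child;
	char *buf;

	parent = sm_rpool_new_x(NULL);
	child = sm_rpool_new_x(parent);

	/* memory lives until the rpool it came from is freed */
	buf = sm_rpool_malloc_x(child, 128);
	(void) buf;

	/* frees the attached child rpool first, then the parent itself */
	sm_rpool_free(parent);
}
#endif /* 0 */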

/*
**  SM_RPOOL_SETSIZES -- set sizes for rpool.
**
**	Parameters:
**		rpool -- rpool for which the sizes should be set.
**		poolsize -- size of a single rpool block.
**		bigobjectsize -- if this size is exceeded, an individual
**			block is allocated (must be less than or equal to
**			poolsize).
**
**	Returns:
**		none.
*/

void
sm_rpool_setsizes(rpool, poolsize, bigobjectsize)
	SM_RPOOL_T *rpool;
	size_t poolsize;
	size_t bigobjectsize;
{
	SM_REQUIRE(poolsize >= bigobjectsize);
	if (poolsize == 0)
		poolsize = POOLSIZE - sizeof(SM_POOLHDR_T);
	if (bigobjectsize == 0)
		bigobjectsize = poolsize / BIG_OBJECT_RATIO;
	rpool->sm_poolsize = poolsize;
	rpool->sm_bigobjectsize = bigobjectsize;
}
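
/*
**  Illustrative sketch (not compiled into the library): a hypothetical
**  tuning that uses larger pool blocks and treats requests above 1k as
**  big objects.  Passing 0 for bigobjectsize derives it from poolsize,
**  as shown in sm_rpool_setsizes() above.  The function name and sizes
**  are made up for the example.
*/

#if 0
static void
rpool_setsizes_example()
{
	SM_RPOOL_T *rpool;

	rpool = sm_rpool_new_x(NULL);

	/* bigger blocks; requests above 1k get their own block */
	sm_rpool_setsizes(rpool, 32768 - sizeof(SM_POOLHDR_T), 1024);
	sm_rpool_free(rpool);
}
#endif /* 0 */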

/*
**  SM_RPOOL_FREE -- free an rpool and release all of its resources.
**
**	Parameters:
**		rpool -- rpool to free.
**
**	Returns:
**		none.
*/

void
sm_rpool_free(rpool)
	SM_RPOOL_T *rpool;
{
	SM_RLIST_T *rl, *rnext;
	SM_RESOURCE_T *r, *rmax;
	SM_POOLLINK_T *pp, *pnext;

	if (rpool == NULL)
		return;

	/*
	**  It's important to free the resources before the memory pools,
	**  because the resource free functions might modify the contents
	**  of the memory pools.
	*/

	rl = rpool->sm_rlists;
	if (rl != NULL)
	{
		rmax = rpool->sm_rptr;
		for (;;)
		{
			for (r = rl->sm_rvec; r < rmax; ++r)
			{
				if (r->sm_rfree != NULL)
					r->sm_rfree(r->sm_rcontext);
			}
			rnext = rl->sm_rnext;
			sm_free(rl);
			if (rnext == NULL)
				break;
			rl = rnext;
			rmax = &rl->sm_rvec[SM_RLIST_MAX];
		}
	}

	/*
	**  Now free the memory pools.
	*/

	for (pp = rpool->sm_pools; pp != NULL; pp = pnext)
	{
		pnext = pp->sm_pnext;
		sm_free(pp);
	}

	/*
	**  Disconnect rpool from its parent.
	*/

	if (rpool->sm_parentlink != NULL)
		*rpool->sm_parentlink = NULL;

	/*
	**  Setting these fields to zero means that any future attempt
	**  to use the rpool after it is freed will cause an assertion failure.
	*/

	rpool->sm_magic = NULL;
	rpool->sm_poolavail = 0;
	rpool->sm_ravail = 0;

#if _FFR_PERF_RPOOL
	if (rpool->sm_nbigblocks > 0 || rpool->sm_npools > 1)
		syslog(LOG_NOTICE,
			"perf: rpool=%lx, sm_nbigblocks=%d, sm_npools=%d",
			(long) rpool, rpool->sm_nbigblocks, rpool->sm_npools);
	rpool->sm_nbigblocks = 0;
	rpool->sm_npools = 0;
#endif /* _FFR_PERF_RPOOL */
	sm_free(rpool);
}

/*
**  SM_RPOOL_ATTACH_X -- attach a resource to an rpool.
**
**	Parameters:
**		rpool -- rpool to which resource should be attached.
**		rfree -- function to call when rpool is freed.
**		rcontext -- argument for function to call when rpool is freed.
**
**	Returns:
**		Pointer to the slot holding the resource's free function;
**		it can later be set to NULL to deactivate the resource.
**
**	Exceptions:
**		F:sm_heap -- out of memory
*/

SM_RPOOL_ATTACH_T
sm_rpool_attach_x(rpool, rfree, rcontext)
	SM_RPOOL_T *rpool;
	SM_RPOOL_RFREE_T rfree;
	void *rcontext;
{
	SM_RLIST_T *rl;
	SM_RPOOL_ATTACH_T a;

	SM_REQUIRE_ISA(rpool, SmRpoolMagic);

	if (rpool->sm_ravail == 0)
	{
		rl = sm_malloc_x(sizeof(SM_RLIST_T));
		rl->sm_rnext = rpool->sm_rlists;
		rpool->sm_rlists = rl;
		rpool->sm_rptr = rl->sm_rvec;
		rpool->sm_ravail = SM_RLIST_MAX;
	}

	a = &rpool->sm_rptr->sm_rfree;
	rpool->sm_rptr->sm_rfree = rfree;
	rpool->sm_rptr->sm_rcontext = rcontext;
	++rpool->sm_rptr;
	--rpool->sm_ravail;
	return a;
}
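
/*
**  Illustrative sketch (not compiled into the library): attaching a
**  non-memory resource.  Here an open FILE is closed when the rpool is
**  freed; the wrapper, function names and file name are hypothetical.
**  The pointer returned by sm_rpool_attach_x() can later be used to
**  deactivate the callback by storing NULL through it, since
**  sm_rpool_free() skips NULL entries.
*/

#if 0
#include <stdio.h>

static void
close_file(ctx)
	void *ctx;
{
	(void) fclose((FILE *) ctx);
}

static void
attach_example(rpool)
	SM_RPOOL_T *rpool;
{
	FILE *fp;
	SM_RPOOL_ATTACH_T a;

	fp = fopen("/tmp/example", "w");
	if (fp == NULL)
		return;

	/* close_file(fp) will be called by sm_rpool_free(rpool) */
	a = sm_rpool_attach_x(rpool, close_file, (void *) fp);
	(void) a;
}
#endif /* 0 */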

#if DO_NOT_USE_STRCPY
/*
**  SM_RPOOL_STRDUP_X -- Create a copy of a C string
**
**	Parameters:
**		rpool -- rpool to use.
**		s -- the string to copy.
**
**	Returns:
**		pointer to newly allocated string.
*/

char *
sm_rpool_strdup_x(rpool, s)
	SM_RPOOL_T *rpool;
	const char *s;
{
	size_t l;
	char *n;

	l = strlen(s);
	SM_ASSERT(l + 1 > l);
	n = sm_rpool_malloc_x(rpool, l + 1);
	sm_strlcpy(n, s, l + 1);
	return n;
}
#endif /* DO_NOT_USE_STRCPY */