/*
 * Copyright (c) 2000-2004 Proofpoint, Inc. and its suppliers.
 *	All rights reserved.
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the sendmail distribution.
 */

#include <sm/gen.h>
SM_RCSID("@(#)$Id: rpool.c,v 1.29 2013-11-22 20:51:43 ca Exp $")

/*
**  resource pools
**  For documentation, see rpool.html
*/

#include <sm/exc.h>
#include <sm/heap.h>
#include <sm/rpool.h>
#include <sm/varargs.h>
#include <sm/conf.h>
#if _FFR_PERF_RPOOL
# include <syslog.h>
#endif /* _FFR_PERF_RPOOL */

const char SmRpoolMagic[] = "sm_rpool";

typedef union
{
	SM_POOLLINK_T	link;
	char		align[SM_ALIGN_SIZE];
} SM_POOLHDR_T;

static char	*sm_rpool_allocblock_x __P((SM_RPOOL_T *, size_t));
static char	*sm_rpool_allocblock __P((SM_RPOOL_T *, size_t));

/*
**  Tune this later
*/

#define POOLSIZE		4096
#define BIG_OBJECT_RATIO	10
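
/*
**  Editorial note, not part of the original source: an illustrative
**  reading of the defaults above.  Each default pool block is a single
**  malloc of POOLSIZE bytes, of which sizeof(SM_POOLHDR_T) bytes go to
**  the chaining header, and requests larger than a BIG_OBJECT_RATIO-th
**  of the remaining payload get a block of their own instead of being
**  carved from the current pool.  For example, if sizeof(SM_POOLHDR_T)
**  were 16:
**
**	sm_poolsize      = 4096 - 16 = 4080 bytes of payload per pool
**	sm_bigobjectsize = 4080 / 10 = 408 bytes
*/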

/*
**  SM_RPOOL_ALLOCBLOCK_X -- allocate a new block for an rpool.
**
**	Parameters:
**		rpool -- rpool to which the block should be added.
**		size -- size of block.
**
**	Returns:
**		Pointer to block.
**
**	Exceptions:
**		F:sm_heap -- out of memory
*/

static char *
sm_rpool_allocblock_x(rpool, size)
	SM_RPOOL_T *rpool;
	size_t size;
{
	SM_POOLLINK_T *p;

	p = sm_malloc_x(sizeof(SM_POOLHDR_T) + size);
	p->sm_pnext = rpool->sm_pools;
	rpool->sm_pools = p;
	return (char*) p + sizeof(SM_POOLHDR_T);
}

/*
**  SM_RPOOL_ALLOCBLOCK -- allocate a new block for an rpool.
**
**	Parameters:
**		rpool -- rpool to which the block should be added.
**		size -- size of block.
**
**	Returns:
**		Pointer to block, NULL on failure.
*/

static char *
sm_rpool_allocblock(rpool, size)
	SM_RPOOL_T *rpool;
	size_t size;
{
	SM_POOLLINK_T *p;

	p = sm_malloc(sizeof(SM_POOLHDR_T) + size);
	if (p == NULL)
		return NULL;
	p->sm_pnext = rpool->sm_pools;
	rpool->sm_pools = p;
	return (char*) p + sizeof(SM_POOLHDR_T);
}

/*
**  SM_RPOOL_MALLOC_TAGGED_X -- allocate memory from rpool
**
**	Parameters:
**		rpool -- rpool from which memory should be allocated;
**			may be NULL, in which case sm_malloc_x() is used
**			instead.
**		size -- size of block.
**		file -- filename.
**		line -- line number in file.
**		group -- heap group for debugging.
**
**	Returns:
**		Pointer to block.
**
**	Exceptions:
**		F:sm_heap -- out of memory
**
**	Notice: XXX
**		if size == 0 and the rpool is new (no memory
**		allocated yet), NULL is returned!
**		We could solve this by
**		- wasting 1 byte (size < avail)
**		- checking for rpool->sm_poolptr != NULL
**		- not asking for a 0-sized buffer
*/

void *
#if SM_HEAP_CHECK
sm_rpool_malloc_tagged_x(rpool, size, file, line, group)
	SM_RPOOL_T *rpool;
	size_t size;
	char *file;
	int line;
	int group;
#else /* SM_HEAP_CHECK */
sm_rpool_malloc_x(rpool, size)
	SM_RPOOL_T *rpool;
	size_t size;
#endif /* SM_HEAP_CHECK */
{
	char *ptr;

	if (rpool == NULL)
		return sm_malloc_tagged_x(size, file, line, group);

	/* Ensure that size is properly aligned. */
	if (size & SM_ALIGN_BITS)
		size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;

	/* The common case.  This is optimized for speed. */
	if (size <= rpool->sm_poolavail)
	{
		ptr = rpool->sm_poolptr;
		rpool->sm_poolptr += size;
		rpool->sm_poolavail -= size;
		return ptr;
	}

	/*
	**  The slow case: we need to call malloc.
	**  The SM_REQUIRE assertion is deferred until now, for speed.
	**  That's okay: we set rpool->sm_poolavail to 0 when we free an rpool,
	**  so the common case code won't be triggered on a dangling pointer.
	*/

	SM_REQUIRE(rpool->sm_magic == SmRpoolMagic);

	/*
	**  If size > sm_poolsize, then malloc a new block especially for
	**  this request.  Future requests will be allocated from the
	**  current pool.
	**
	**  What if the current pool is mostly unallocated, and the current
	**  request is larger than the available space, but < sm_poolsize?
	**  If we discard the current pool, and start allocating from a new
	**  pool, then we will be wasting a lot of space.  For this reason,
	**  we malloc a block just for the current request if size >
	**  sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
	**  Thus, the most space that we will waste at the end of a pool
	**  is sm_bigobjectsize - 1.
	*/

	if (size > rpool->sm_bigobjectsize)
	{
#if _FFR_PERF_RPOOL
		++rpool->sm_nbigblocks;
#endif /* _FFR_PERF_RPOOL */
		return sm_rpool_allocblock_x(rpool, size);
	}
	SM_ASSERT(rpool->sm_bigobjectsize <= rpool->sm_poolsize);
	ptr = sm_rpool_allocblock_x(rpool, rpool->sm_poolsize);
	rpool->sm_poolptr = ptr + size;
	rpool->sm_poolavail = rpool->sm_poolsize - size;
#if _FFR_PERF_RPOOL
	++rpool->sm_npools;
#endif /* _FFR_PERF_RPOOL */
	return ptr;
}
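
/*
**  Illustrative note, not part of the original source: requested sizes
**  are rounded up to a multiple of SM_ALIGN_SIZE before being carved from
**  the current pool, so consecutive small allocations stay aligned.  For
**  example, assuming SM_ALIGN_SIZE == 8:
**
**	char *a, *b;
**
**	a = sm_rpool_malloc_x(rpool, 3);
**	b = sm_rpool_malloc_x(rpool, 10);
**
**  The 3-byte request is rounded up to 8 bytes and the 10-byte request to
**  16 bytes, so if both are served from the same pool block, b == a + 8.
*/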

/*
**  SM_RPOOL_MALLOC_TAGGED -- allocate memory from rpool
**
**	Parameters:
**		rpool -- rpool from which memory should be allocated;
**			may be NULL, in which case sm_malloc() is used
**			instead.
**		size -- size of block.
**		file -- filename.
**		line -- line number in file.
**		group -- heap group for debugging.
**
**	Returns:
**		Pointer to block, NULL on failure.
**
**	Notice: XXX
**		if size == 0 and the rpool is new (no memory
**		allocated yet), NULL is returned!
**		We could solve this by
**		- wasting 1 byte (size < avail)
**		- checking for rpool->sm_poolptr != NULL
**		- not asking for a 0-sized buffer
*/

void *
#if SM_HEAP_CHECK
sm_rpool_malloc_tagged(rpool, size, file, line, group)
	SM_RPOOL_T *rpool;
	size_t size;
	char *file;
	int line;
	int group;
#else /* SM_HEAP_CHECK */
sm_rpool_malloc(rpool, size)
	SM_RPOOL_T *rpool;
	size_t size;
#endif /* SM_HEAP_CHECK */
{
	char *ptr;

	if (rpool == NULL)
		return sm_malloc_tagged(size, file, line, group);

	/* Ensure that size is properly aligned. */
	if (size & SM_ALIGN_BITS)
		size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;

	/* The common case.  This is optimized for speed. */
	if (size <= rpool->sm_poolavail)
	{
		ptr = rpool->sm_poolptr;
		rpool->sm_poolptr += size;
		rpool->sm_poolavail -= size;
		return ptr;
	}

	/*
	**  The slow case: we need to call malloc.
	**  The SM_REQUIRE assertion is deferred until now, for speed.
	**  That's okay: we set rpool->sm_poolavail to 0 when we free an rpool,
	**  so the common case code won't be triggered on a dangling pointer.
	*/

	SM_REQUIRE(rpool->sm_magic == SmRpoolMagic);

	/*
	**  If size > sm_poolsize, then malloc a new block especially for
	**  this request.  Future requests will be allocated from the
	**  current pool.
	**
	**  What if the current pool is mostly unallocated, and the current
	**  request is larger than the available space, but < sm_poolsize?
	**  If we discard the current pool, and start allocating from a new
	**  pool, then we will be wasting a lot of space.  For this reason,
	**  we malloc a block just for the current request if size >
	**  sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
	**  Thus, the most space that we will waste at the end of a pool
	**  is sm_bigobjectsize - 1.
	*/

	if (size > rpool->sm_bigobjectsize)
	{
#if _FFR_PERF_RPOOL
		++rpool->sm_nbigblocks;
#endif /* _FFR_PERF_RPOOL */
		return sm_rpool_allocblock(rpool, size);
	}
	SM_ASSERT(rpool->sm_bigobjectsize <= rpool->sm_poolsize);
	ptr = sm_rpool_allocblock(rpool, rpool->sm_poolsize);
	if (ptr == NULL)
		return NULL;
	rpool->sm_poolptr = ptr + size;
	rpool->sm_poolavail = rpool->sm_poolsize - size;
#if _FFR_PERF_RPOOL
	++rpool->sm_npools;
#endif /* _FFR_PERF_RPOOL */
	return ptr;
}

/*
**  SM_RPOOL_NEW_X -- create a new rpool.
**
**	Parameters:
**		parent -- pointer to parent rpool, can be NULL.
**
**	Returns:
**		Pointer to new rpool.
*/

SM_RPOOL_T *
sm_rpool_new_x(parent)
	SM_RPOOL_T *parent;
{
	SM_RPOOL_T *rpool;

	rpool = sm_malloc_x(sizeof(SM_RPOOL_T));
	if (parent == NULL)
		rpool->sm_parentlink = NULL;
	else
	{
		SM_TRY
			rpool->sm_parentlink = sm_rpool_attach_x(parent,
					(SM_RPOOL_RFREE_T) sm_rpool_free,
					(void *) rpool);
		SM_EXCEPT(exc, "*")
			sm_free(rpool);
			sm_exc_raise_x(exc);
		SM_END_TRY
	}
	rpool->sm_magic = SmRpoolMagic;

	rpool->sm_poolsize = POOLSIZE - sizeof(SM_POOLHDR_T);
	rpool->sm_bigobjectsize = rpool->sm_poolsize / BIG_OBJECT_RATIO;
	rpool->sm_poolptr = NULL;
	rpool->sm_poolavail = 0;
	rpool->sm_pools = NULL;

	rpool->sm_rptr = NULL;
	rpool->sm_ravail = 0;
	rpool->sm_rlists = NULL;
#if _FFR_PERF_RPOOL
	rpool->sm_nbigblocks = 0;
	rpool->sm_npools = 0;
#endif /* _FFR_PERF_RPOOL */

	return rpool;
}
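
/*
**  Illustrative usage sketch, not part of the original source: a typical
**  lifecycle creates an rpool, allocates from it any number of times, and
**  releases everything with a single sm_rpool_free() call.  Passing a
**  parent rpool to sm_rpool_new_x() attaches the child to the parent, so
**  freeing the parent also frees the child.
**
**	SM_RPOOL_T *rpool;
**	char *buf;
**
**	rpool = sm_rpool_new_x(NULL);
**	buf = sm_rpool_malloc_x(rpool, 128);
**	... fill and use buf ...
**	sm_rpool_free(rpool);
*/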

/*
**  SM_RPOOL_SETSIZES -- set sizes for rpool.
**
**	Parameters:
**		rpool -- rpool whose sizes should be set.
**		poolsize -- size of a single rpool block.
**		bigobjectsize -- if this size is exceeded, an individual
**			block is allocated (must be less than or equal to
**			poolsize).
**
**	Returns:
**		none.
*/

void
sm_rpool_setsizes(rpool, poolsize, bigobjectsize)
	SM_RPOOL_T *rpool;
	size_t poolsize;
	size_t bigobjectsize;
{
	SM_REQUIRE(poolsize >= bigobjectsize);
	if (poolsize == 0)
		poolsize = POOLSIZE - sizeof(SM_POOLHDR_T);
	if (bigobjectsize == 0)
		bigobjectsize = poolsize / BIG_OBJECT_RATIO;
	rpool->sm_poolsize = poolsize;
	rpool->sm_bigobjectsize = bigobjectsize;
}
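
/*
**  Illustrative sketch, not part of the original source: sizes are
**  typically adjusted right after the rpool is created, before any
**  allocation.  Passing 0 for poolsize selects the built-in default, and
**  passing 0 for bigobjectsize derives it from poolsize as in
**  sm_rpool_new_x().
**
**	SM_RPOOL_T *rpool;
**
**	rpool = sm_rpool_new_x(NULL);
**	sm_rpool_setsizes(rpool, 16384, 0);
*/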

/*
**  SM_RPOOL_FREE -- free an rpool and release all of its resources.
**
**	Parameters:
**		rpool -- rpool to free.
**
**	Returns:
**		none.
*/

void
sm_rpool_free(rpool)
	SM_RPOOL_T *rpool;
{
	SM_RLIST_T *rl, *rnext;
	SM_RESOURCE_T *r, *rmax;
	SM_POOLLINK_T *pp, *pnext;

	if (rpool == NULL)
		return;

	/*
	**  It's important to free the resources before the memory pools,
	**  because the resource free functions might modify the contents
	**  of the memory pools.
	*/

	rl = rpool->sm_rlists;
	if (rl != NULL)
	{
		rmax = rpool->sm_rptr;
		for (;;)
		{
			for (r = rl->sm_rvec; r < rmax; ++r)
			{
				if (r->sm_rfree != NULL)
					r->sm_rfree(r->sm_rcontext);
			}
			rnext = rl->sm_rnext;
			sm_free(rl);
			if (rnext == NULL)
				break;
			rl = rnext;
			rmax = &rl->sm_rvec[SM_RLIST_MAX];
		}
	}

	/*
	**  Now free the memory pools.
	*/

	for (pp = rpool->sm_pools; pp != NULL; pp = pnext)
	{
		pnext = pp->sm_pnext;
		sm_free(pp);
	}

	/*
	**  Disconnect rpool from its parent.
	*/

	if (rpool->sm_parentlink != NULL)
		*rpool->sm_parentlink = NULL;

	/*
	**  Setting these fields to zero means that any future attempt
	**  to use the rpool after it is freed will cause an assertion failure.
	*/

	rpool->sm_magic = NULL;
	rpool->sm_poolavail = 0;
	rpool->sm_ravail = 0;

#if _FFR_PERF_RPOOL
	if (rpool->sm_nbigblocks > 0 || rpool->sm_npools > 1)
		syslog(LOG_NOTICE,
			"perf: rpool=%lx, sm_nbigblocks=%d, sm_npools=%d",
			(unsigned long) rpool, rpool->sm_nbigblocks, rpool->sm_npools);
	rpool->sm_nbigblocks = 0;
	rpool->sm_npools = 0;
#endif /* _FFR_PERF_RPOOL */
	sm_free(rpool);
}

/*
**  SM_RPOOL_ATTACH_X -- attach a resource to an rpool.
**
**	Parameters:
**		rpool -- rpool to which resource should be attached.
**		rfree -- function to call when rpool is freed.
**		rcontext -- argument for function to call when rpool is freed.
**
**	Returns:
**		Pointer to the attach point (the slot holding the resource's
**		free function); it may later be set to NULL to cancel the
**		cleanup.
**
**	Exceptions:
**		F:sm_heap -- out of memory
*/

SM_RPOOL_ATTACH_T
sm_rpool_attach_x(rpool, rfree, rcontext)
	SM_RPOOL_T *rpool;
	SM_RPOOL_RFREE_T rfree;
	void *rcontext;
{
	SM_RLIST_T *rl;
	SM_RPOOL_ATTACH_T a;

	SM_REQUIRE_ISA(rpool, SmRpoolMagic);

	if (rpool->sm_ravail == 0)
	{
		rl = sm_malloc_x(sizeof(SM_RLIST_T));
		rl->sm_rnext = rpool->sm_rlists;
		rpool->sm_rlists = rl;
		rpool->sm_rptr = rl->sm_rvec;
		rpool->sm_ravail = SM_RLIST_MAX;
	}

	a = &rpool->sm_rptr->sm_rfree;
	rpool->sm_rptr->sm_rfree = rfree;
	rpool->sm_rptr->sm_rcontext = rcontext;
	++rpool->sm_rptr;
	--rpool->sm_ravail;
	return a;
}
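
/*
**  Illustrative sketch, not part of the original source: any resource
**  whose destructor matches SM_RPOOL_RFREE_T (a function taking a single
**  void * argument) can be tied to an rpool, and the returned attach
**  point can later be cleared to cancel the cleanup.  The names my_close
**  and my_handle below are hypothetical.
**
**	extern void my_close __P((void *));
**	SM_RPOOL_ATTACH_T a;
**
**	a = sm_rpool_attach_x(rpool, my_close, my_handle);
**	... later, if the resource was already released by hand ...
**	*a = NULL;
*/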

#if DO_NOT_USE_STRCPY
/*
**  SM_RPOOL_STRDUP_X -- Create a copy of a C string
**
**	Parameters:
**		rpool -- rpool to use.
**		s -- the string to copy.
**
**	Returns:
**		pointer to newly allocated string.
*/

char *
sm_rpool_strdup_x(rpool, s)
	SM_RPOOL_T *rpool;
	const char *s;
{
	size_t l;
	char *n;

	l = strlen(s);
	SM_ASSERT(l + 1 > l);
	n = sm_rpool_malloc_x(rpool, l + 1);
	sm_strlcpy(n, s, l + 1);
	return n;
}
#endif /* DO_NOT_USE_STRCPY */
525