/*
 * Copyright (c) 2000-2004 Sendmail, Inc. and its suppliers.
 *	All rights reserved.
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the sendmail distribution.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sm/gen.h>
SM_RCSID("@(#)$Id: rpool.c,v 1.28 2004/08/03 20:44:04 ca Exp $")

/*
**  Resource pools.
**  For documentation, see rpool.html.
*/

#include <sm/exc.h>
#include <sm/heap.h>
#include <sm/rpool.h>
#include <sm/varargs.h>
#include <sm/conf.h>
#if _FFR_PERF_RPOOL
# include <syslog.h>
#endif /* _FFR_PERF_RPOOL */

const char SmRpoolMagic[] = "sm_rpool";

typedef union
{
	SM_POOLLINK_T	link;
	char		align[SM_ALIGN_SIZE];
} SM_POOLHDR_T;

static char	*sm_rpool_allocblock_x __P((SM_RPOOL_T *, size_t));
static char	*sm_rpool_allocblock __P((SM_RPOOL_T *, size_t));

/*
**  Tune this later.
*/

#define POOLSIZE		4096
#define BIG_OBJECT_RATIO	10

/*
**  SM_RPOOL_ALLOCBLOCK_X -- allocate a new block for an rpool.
**
**	Parameters:
**		rpool -- rpool to which the block should be added.
**		size -- size of block.
**
**	Returns:
**		Pointer to block.
**
**	Exceptions:
**		F:sm_heap -- out of memory
*/

static char *
sm_rpool_allocblock_x(rpool, size)
	SM_RPOOL_T *rpool;
	size_t size;
{
	SM_POOLLINK_T *p;

	p = sm_malloc_x(sizeof(SM_POOLHDR_T) + size);
	p->sm_pnext = rpool->sm_pools;
	rpool->sm_pools = p;
	return (char *) p + sizeof(SM_POOLHDR_T);
}

/*
**  SM_RPOOL_ALLOCBLOCK -- allocate a new block for an rpool.
**
**	Parameters:
**		rpool -- rpool to which the block should be added.
**		size -- size of block.
**
**	Returns:
**		Pointer to block, NULL on failure.
*/

static char *
sm_rpool_allocblock(rpool, size)
	SM_RPOOL_T *rpool;
	size_t size;
{
	SM_POOLLINK_T *p;

	p = sm_malloc(sizeof(SM_POOLHDR_T) + size);
	if (p == NULL)
		return NULL;
	p->sm_pnext = rpool->sm_pools;
	rpool->sm_pools = p;
	return (char *) p + sizeof(SM_POOLHDR_T);
}
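
/*
**  Illustrative sketch (not part of the original source): both helpers
**  above prepend the new block, headed by an SM_POOLHDR_T, to the
**  rpool's sm_pools list and return the data area just past the header.
**  The blocks currently owned by an rpool can therefore be counted by
**  walking that list, e.g.:
**
**	size_t nblocks = 0;
**	SM_POOLLINK_T *pl;
**
**	for (pl = rpool->sm_pools; pl != NULL; pl = pl->sm_pnext)
**		++nblocks;
*/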

/*
**  SM_RPOOL_MALLOC_TAGGED_X -- allocate memory from an rpool.
**
**	Parameters:
**		rpool -- rpool from which memory should be allocated;
**			can be NULL; in that case sm_malloc() is used
**			instead.
**		size -- size of block.
**		file -- filename.
**		line -- line number in file.
**		group -- heap group for debugging.
**
**	Returns:
**		Pointer to block.
**
**	Exceptions:
**		F:sm_heap -- out of memory
**
**	Notice: XXX
**		If size == 0 and the rpool is new (no memory
**		allocated yet), NULL is returned!
**		We could solve this by
**		- wasting 1 byte (size < avail),
**		- checking for rpool->sm_poolptr != NULL, or
**		- not asking for a 0-sized buffer.
*/

void *
#if SM_HEAP_CHECK
sm_rpool_malloc_tagged_x(rpool, size, file, line, group)
	SM_RPOOL_T *rpool;
	size_t size;
	char *file;
	int line;
	int group;
#else /* SM_HEAP_CHECK */
sm_rpool_malloc_x(rpool, size)
	SM_RPOOL_T *rpool;
	size_t size;
#endif /* SM_HEAP_CHECK */
{
	char *ptr;

	if (rpool == NULL)
		return sm_malloc_tagged_x(size, file, line, group);

	/* Ensure that size is properly aligned. */
	if (size & SM_ALIGN_BITS)
		size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;

	/* The common case.  This is optimized for speed. */
	if (size <= rpool->sm_poolavail)
	{
		ptr = rpool->sm_poolptr;
		rpool->sm_poolptr += size;
		rpool->sm_poolavail -= size;
		return ptr;
	}

	/*
	**  The slow case: we need to call malloc.
	**  The SM_REQUIRE assertion is deferred until now, for speed.
	**  That's okay: we set rpool->sm_poolavail to 0 when we free an
	**  rpool, so the common case code won't be triggered on a
	**  dangling pointer.
	*/

	SM_REQUIRE(rpool->sm_magic == SmRpoolMagic);

	/*
	**  If size > sm_poolsize, then malloc a new block especially for
	**  this request.  Future requests will be allocated from the
	**  current pool.
	**
	**  What if the current pool is mostly unallocated, and the current
	**  request is larger than the available space, but < sm_poolsize?
	**  If we discard the current pool, and start allocating from a new
	**  pool, then we will be wasting a lot of space.  For this reason,
	**  we malloc a block just for the current request if size >
	**  sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
	**  Thus, the most space that we will waste at the end of a pool
	**  is sm_bigobjectsize - 1.
	*/

	if (size > rpool->sm_bigobjectsize)
	{
#if _FFR_PERF_RPOOL
		++rpool->sm_nbigblocks;
#endif /* _FFR_PERF_RPOOL */
		return sm_rpool_allocblock_x(rpool, size);
	}
	SM_ASSERT(rpool->sm_bigobjectsize <= rpool->sm_poolsize);
	ptr = sm_rpool_allocblock_x(rpool, rpool->sm_poolsize);
	rpool->sm_poolptr = ptr + size;
	rpool->sm_poolavail = rpool->sm_poolsize - size;
#if _FFR_PERF_RPOOL
	++rpool->sm_npools;
#endif /* _FFR_PERF_RPOOL */
	return ptr;
}
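
/*
**  Usage sketch (illustrative, not part of the original source; the
**  struct and variable names are hypothetical): allocating an object
**  whose lifetime is tied to an rpool.  There is no matching free call;
**  the memory is reclaimed when sm_rpool_free() is invoked on the rpool.
**
**	struct request
**	{
**		char	*r_name;
**		int	r_flags;
**	};
**
**	struct request *req;
**
**	req = (struct request *) sm_rpool_malloc_x(rpool, sizeof(*req));
**	req->r_flags = 0;
*/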

/*
**  SM_RPOOL_MALLOC_TAGGED -- allocate memory from an rpool.
**
**	Parameters:
**		rpool -- rpool from which memory should be allocated;
**			can be NULL; in that case sm_malloc() is used
**			instead.
**		size -- size of block.
**		file -- filename.
**		line -- line number in file.
**		group -- heap group for debugging.
**
**	Returns:
**		Pointer to block, NULL on failure.
**
**	Notice: XXX
**		If size == 0 and the rpool is new (no memory
**		allocated yet), NULL is returned!
**		We could solve this by
**		- wasting 1 byte (size < avail),
**		- checking for rpool->sm_poolptr != NULL, or
**		- not asking for a 0-sized buffer.
*/

void *
#if SM_HEAP_CHECK
sm_rpool_malloc_tagged(rpool, size, file, line, group)
	SM_RPOOL_T *rpool;
	size_t size;
	char *file;
	int line;
	int group;
#else /* SM_HEAP_CHECK */
sm_rpool_malloc(rpool, size)
	SM_RPOOL_T *rpool;
	size_t size;
#endif /* SM_HEAP_CHECK */
{
	char *ptr;

	if (rpool == NULL)
		return sm_malloc_tagged(size, file, line, group);

	/* Ensure that size is properly aligned. */
	if (size & SM_ALIGN_BITS)
		size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;

	/* The common case.  This is optimized for speed. */
	if (size <= rpool->sm_poolavail)
	{
		ptr = rpool->sm_poolptr;
		rpool->sm_poolptr += size;
		rpool->sm_poolavail -= size;
		return ptr;
	}

	/*
	**  The slow case: we need to call malloc.
	**  The SM_REQUIRE assertion is deferred until now, for speed.
	**  That's okay: we set rpool->sm_poolavail to 0 when we free an
	**  rpool, so the common case code won't be triggered on a
	**  dangling pointer.
	*/

	SM_REQUIRE(rpool->sm_magic == SmRpoolMagic);

	/*
	**  If size > sm_poolsize, then malloc a new block especially for
	**  this request.  Future requests will be allocated from the
	**  current pool.
	**
	**  What if the current pool is mostly unallocated, and the current
	**  request is larger than the available space, but < sm_poolsize?
	**  If we discard the current pool, and start allocating from a new
	**  pool, then we will be wasting a lot of space.  For this reason,
	**  we malloc a block just for the current request if size >
	**  sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
	**  Thus, the most space that we will waste at the end of a pool
	**  is sm_bigobjectsize - 1.
	*/

	if (size > rpool->sm_bigobjectsize)
	{
#if _FFR_PERF_RPOOL
		++rpool->sm_nbigblocks;
#endif /* _FFR_PERF_RPOOL */
		return sm_rpool_allocblock(rpool, size);
	}
	SM_ASSERT(rpool->sm_bigobjectsize <= rpool->sm_poolsize);
	ptr = sm_rpool_allocblock(rpool, rpool->sm_poolsize);
	if (ptr == NULL)
		return NULL;
	rpool->sm_poolptr = ptr + size;
	rpool->sm_poolavail = rpool->sm_poolsize - size;
#if _FFR_PERF_RPOOL
	++rpool->sm_npools;
#endif /* _FFR_PERF_RPOOL */
	return ptr;
}

/*
**  SM_RPOOL_NEW_X -- create a new rpool.
**
**	Parameters:
**		parent -- pointer to parent rpool, can be NULL.
**
**	Returns:
**		Pointer to new rpool.
*/

SM_RPOOL_T *
sm_rpool_new_x(parent)
	SM_RPOOL_T *parent;
{
	SM_RPOOL_T *rpool;

	rpool = sm_malloc_x(sizeof(SM_RPOOL_T));
	if (parent == NULL)
		rpool->sm_parentlink = NULL;
	else
	{
		SM_TRY
			rpool->sm_parentlink = sm_rpool_attach_x(parent,
					(SM_RPOOL_RFREE_T) sm_rpool_free,
					(void *) rpool);
		SM_EXCEPT(exc, "*")
			sm_free(rpool);
			sm_exc_raise_x(exc);
		SM_END_TRY
	}
	rpool->sm_magic = SmRpoolMagic;

	rpool->sm_poolsize = POOLSIZE - sizeof(SM_POOLHDR_T);
	rpool->sm_bigobjectsize = rpool->sm_poolsize / BIG_OBJECT_RATIO;
	rpool->sm_poolptr = NULL;
	rpool->sm_poolavail = 0;
	rpool->sm_pools = NULL;

	rpool->sm_rptr = NULL;
	rpool->sm_ravail = 0;
	rpool->sm_rlists = NULL;
#if _FFR_PERF_RPOOL
	rpool->sm_nbigblocks = 0;
	rpool->sm_npools = 0;
#endif /* _FFR_PERF_RPOOL */

	return rpool;
}

/*
**  SM_RPOOL_SETSIZES -- set memory block sizes for an rpool.
**
**	Parameters:
**		rpool -- rpool whose sizes should be set.
**		poolsize -- size of a single rpool block;
**			0 selects the default.
**		bigobjectsize -- if a request exceeds this size, an
**			individual block is allocated for it (must be
**			less than or equal to poolsize); 0 selects the
**			default.
**
**	Returns:
**		none.
*/

void
sm_rpool_setsizes(rpool, poolsize, bigobjectsize)
	SM_RPOOL_T *rpool;
	size_t poolsize;
	size_t bigobjectsize;
{
	SM_REQUIRE(poolsize >= bigobjectsize);
	if (poolsize == 0)
		poolsize = POOLSIZE - sizeof(SM_POOLHDR_T);
	if (bigobjectsize == 0)
		bigobjectsize = poolsize / BIG_OBJECT_RATIO;
	rpool->sm_poolsize = poolsize;
	rpool->sm_bigobjectsize = bigobjectsize;
}
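
/*
**  Usage sketch (illustrative, not part of the original source): the
**  typical life cycle of an rpool.  Passing 0 to sm_rpool_setsizes()
**  keeps the default sizes; every allocation made from the rpool is
**  released by the single sm_rpool_free() call at the end.
**
**	SM_RPOOL_T *rpool;
**	char *buf;
**
**	rpool = sm_rpool_new_x(NULL);
**	sm_rpool_setsizes(rpool, 0, 0);
**	buf = sm_rpool_malloc_x(rpool, 256);
**	(void) sm_strlcpy(buf, "hello", 256);
**	sm_rpool_free(rpool);
*/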

/*
**  SM_RPOOL_FREE -- free an rpool and release all of its resources.
**
**	Parameters:
**		rpool -- rpool to free.
**
**	Returns:
**		none.
*/

void
sm_rpool_free(rpool)
	SM_RPOOL_T *rpool;
{
	SM_RLIST_T *rl, *rnext;
	SM_RESOURCE_T *r, *rmax;
	SM_POOLLINK_T *pp, *pnext;

	if (rpool == NULL)
		return;

	/*
	**  It's important to free the resources before the memory pools,
	**  because the resource free functions might modify the contents
	**  of the memory pools.
	*/

	rl = rpool->sm_rlists;
	if (rl != NULL)
	{
		rmax = rpool->sm_rptr;
		for (;;)
		{
			for (r = rl->sm_rvec; r < rmax; ++r)
			{
				if (r->sm_rfree != NULL)
					r->sm_rfree(r->sm_rcontext);
			}
			rnext = rl->sm_rnext;
			sm_free(rl);
			if (rnext == NULL)
				break;
			rl = rnext;
			rmax = &rl->sm_rvec[SM_RLIST_MAX];
		}
	}

	/*
	**  Now free the memory pools.
	*/

	for (pp = rpool->sm_pools; pp != NULL; pp = pnext)
	{
		pnext = pp->sm_pnext;
		sm_free(pp);
	}

	/*
	**  Disconnect rpool from its parent.
	*/

	if (rpool->sm_parentlink != NULL)
		*rpool->sm_parentlink = NULL;

	/*
	**  Setting these fields to zero means that any future attempt
	**  to use the rpool after it is freed will cause an assertion
	**  failure.
	*/

	rpool->sm_magic = NULL;
	rpool->sm_poolavail = 0;
	rpool->sm_ravail = 0;

#if _FFR_PERF_RPOOL
	if (rpool->sm_nbigblocks > 0 || rpool->sm_npools > 1)
		syslog(LOG_NOTICE,
			"perf: rpool=%lx, sm_nbigblocks=%d, sm_npools=%d",
			(unsigned long) rpool, rpool->sm_nbigblocks,
			rpool->sm_npools);
	rpool->sm_nbigblocks = 0;
	rpool->sm_npools = 0;
#endif /* _FFR_PERF_RPOOL */
	sm_free(rpool);
}

/*
**  SM_RPOOL_ATTACH_X -- attach a resource to an rpool.
**
**	Parameters:
**		rpool -- rpool to which the resource should be attached.
**		rfree -- function to call when the rpool is freed.
**		rcontext -- argument for the function to call when the
**			rpool is freed.
**
**	Returns:
**		Pointer to the slot holding the stored free function;
**		setting it to NULL before the rpool is freed detaches
**		the resource again.
**
**	Exceptions:
**		F:sm_heap -- out of memory
*/

SM_RPOOL_ATTACH_T
sm_rpool_attach_x(rpool, rfree, rcontext)
	SM_RPOOL_T *rpool;
	SM_RPOOL_RFREE_T rfree;
	void *rcontext;
{
	SM_RLIST_T *rl;
	SM_RPOOL_ATTACH_T a;

	SM_REQUIRE_ISA(rpool, SmRpoolMagic);

	if (rpool->sm_ravail == 0)
	{
		rl = sm_malloc_x(sizeof(SM_RLIST_T));
		rl->sm_rnext = rpool->sm_rlists;
		rpool->sm_rlists = rl;
		rpool->sm_rptr = rl->sm_rvec;
		rpool->sm_ravail = SM_RLIST_MAX;
	}

	a = &rpool->sm_rptr->sm_rfree;
	rpool->sm_rptr->sm_rfree = rfree;
	rpool->sm_rptr->sm_rcontext = rcontext;
	++rpool->sm_rptr;
	--rpool->sm_ravail;
	return a;
}

#if DO_NOT_USE_STRCPY
/*
**  SM_RPOOL_STRDUP_X -- create a copy of a C string in an rpool.
**
**	Parameters:
**		rpool -- rpool to use.
**		s -- the string to copy.
**
**	Returns:
**		Pointer to the newly allocated copy of the string.
*/

char *
sm_rpool_strdup_x(rpool, s)
	SM_RPOOL_T *rpool;
	const char *s;
{
	size_t l;
	char *n;

	l = strlen(s);
	SM_ASSERT(l + 1 > l);
	n = sm_rpool_malloc_x(rpool, l + 1);
	sm_strlcpy(n, s, l + 1);
	return n;
}
#endif /* DO_NOT_USE_STRCPY */
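
/*
**  Usage sketch (illustrative, not part of the original source;
**  close_conn, CONNECTION_T, conn, and connection_shutdown are
**  hypothetical names): attaching a non-memory resource so that it is
**  cleaned up automatically when the rpool is freed.
**
**	static void
**	close_conn(ctx)
**		void *ctx;
**	{
**		connection_shutdown((CONNECTION_T *) ctx);
**	}
**
**	SM_RPOOL_ATTACH_T a;
**
**	a = sm_rpool_attach_x(rpool, close_conn, (void *) conn);
**
**  When sm_rpool_free(rpool) runs, close_conn(conn) is invoked; setting
**  *a = NULL beforehand would detach the resource instead.
*/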