xref: /freebsd/contrib/tcsh/tc.alloc.c (revision 87569f75a91f298c52a71823c04d41cf53c88889)
1 /* $Header: /src/pub/tcsh/tc.alloc.c,v 3.39 2005/01/05 16:06:14 christos Exp $ */
2 /*
3  * tc.alloc.c (Caltech) 2/21/82
4  * Chris Kingsley, kingsley@cit-20.
5  *
6  * This is a very fast storage allocator.  It allocates blocks of a small
7  * number of different sizes, and keeps free lists of each size.  Blocks that
8  * don't exactly fit are passed up to the next larger size.  In this
9  * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
10  * This is designed for use in a program that uses vast quantities of memory,
11  * but bombs when it runs out.
12  */
13 /*-
14  * Copyright (c) 1980, 1991 The Regents of the University of California.
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in the
24  *    documentation and/or other materials provided with the distribution.
25  * 3. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  */
41 #include "sh.h"
42 
43 RCSID("$Id: tc.alloc.c,v 3.39 2005/01/05 16:06:14 christos Exp $")
44 
45 static char   *memtop = NULL;		/* PWP: top of current memory */
46 static char   *membot = NULL;		/* PWP: bottom of allocatable memory */
47 
48 int dont_free = 0;
49 
50 #ifdef WINNT_NATIVE
51 # define malloc		fmalloc
52 # define free		ffree
53 # define calloc		fcalloc
54 # define realloc	frealloc
55 #endif /* WINNT_NATIVE */
56 
57 #ifndef SYSMALLOC
58 
59 #undef RCHECK
60 #undef DEBUG
61 
62 #ifdef SX
63 extern void* sbrk();
64 #endif
65 /*
66  * Lots of os routines are busted and try to free invalid pointers.
67  * Although our free routine is smart enough and it will pick bad
68  * pointers most of the time, in cases where we know we are going to get
69  * a bad pointer, we'd rather leak.
70  */
71 
72 #ifndef NULL
73 #define	NULL 0
74 #endif
75 
76 typedef unsigned char U_char;	/* we don't really have signed chars */
77 typedef unsigned int U_int;
78 typedef unsigned short U_short;
79 typedef unsigned long U_long;
80 
81 
82 /*
83  * The overhead on a block is at least 4 bytes.  When free, this space
84  * contains a pointer to the next free block, and the bottom two bits must
85  * be zero.  When in use, the first byte is set to MAGIC, and the second
86  * byte is the size index.  The remaining bytes are for alignment.
87  * If range checking is enabled and the size of the block fits
88  * in two bytes, then the top two bytes hold the size of the requested block
89  * plus the range checking words, and the header word MINUS ONE.
90  */
91 
92 
93 #define MEMALIGN(a) (((a) + ROUNDUP) & ~ROUNDUP)
94 
95 union overhead {
96     union overhead *ov_next;	/* when free */
97     struct {
98 	U_char  ovu_magic;	/* magic number */
99 	U_char  ovu_index;	/* bucket # */
100 #ifdef RCHECK
101 	U_short ovu_size;	/* actual block size */
102 	U_int   ovu_rmagic;	/* range magic number */
103 #endif
104     }       ovu;
105 #define	ov_magic	ovu.ovu_magic
106 #define	ov_index	ovu.ovu_index
107 #define	ov_size		ovu.ovu_size
108 #define	ov_rmagic	ovu.ovu_rmagic
109 };
110 
111 #define	MAGIC		0xfd	/* magic # on accounting info */
112 #define RMAGIC		0x55555555	/* magic # on range info */
113 #ifdef RCHECK
114 #define	RSLOP		sizeof (U_int)
115 #else
116 #define	RSLOP		0
117 #endif
118 
119 
120 #define ROUNDUP	7
121 
122 /*
123  * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
124  * smallest allocatable block is 8 bytes.  The overhead information
125  * precedes the data area returned to the user.
126  */
127 #define	NBUCKETS ((sizeof(long) << 3) - 3)
128 static union overhead *nextf[NBUCKETS] IZERO_STRUCT;
129 
130 /*
131  * nmalloc[i] is the difference between the number of mallocs and frees
132  * for a given block size.
133  */
134 static U_int nmalloc[NBUCKETS] IZERO_STRUCT;
135 
136 #ifndef lint
137 static	int	findbucket	__P((union overhead *, int));
138 static	void	morecore	__P((int));
139 #endif
140 
141 
142 #ifdef DEBUG
143 # define CHECK(a, str, p) \
144     if (a) { \
145 	xprintf(str, p);	\
146 	xprintf(" (memtop = %lx membot = %lx)\n", memtop, membot);	\
147 	abort(); \
148     }
149 #else
150 # define CHECK(a, str, p) \
151     if (a) { \
152 	xprintf(str, p);	\
153 	xprintf(" (memtop = %lx membot = %lx)\n", memtop, membot);	\
154 	return; \
155     }
156 #endif
157 
/*
 * Allocate nbytes of storage from the power-of-two free lists.
 * The request is padded with an aligned header (and, under RCHECK,
 * a trailing magic word), rounded up to the next bucket size
 * 2^(bucket+3), and served from nextf[bucket], refilling via
 * morecore() when the list is empty.  On exhaustion we raise
 * ERR_NOMEM (or abort under DEBUG) rather than return NULL.
 */
memalign_t
malloc(nbytes)
    size_t nbytes;
{
#ifndef lint
    union overhead *p;
    int bucket = 0;
    unsigned shiftr;

    /*
     * Convert amount of memory requested into closest block size stored in
     * hash buckets which satisfies request.  Account for space used per block
     * for accounting.
     */
#ifdef SUNOS4
    /*
     * SunOS localtime() overwrites the 9th byte on an 8 byte malloc()....
     * so we get one more...
     * From Michael Schroeder: This is not true. It depends on the
     * timezone string. In Europe it can overwrite the 13th byte on a
     * 12 byte malloc.
     * So we punt and we always allocate an extra byte.
     */
    nbytes++;
#endif

    /* total size = aligned header + user data + optional RCHECK trailer */
    nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead)) + nbytes + RSLOP);
    /* divide by 8 (smallest bucket) via the shift loop below */
    shiftr = (nbytes - 1) >> 2;

    /* apart from this loop, this is O(1) */
    while ((shiftr >>= 1) != 0)
	bucket++;
    /*
     * If nothing in hash bucket right now, request more memory from the
     * system.
     */
    if (nextf[bucket] == NULL)
	morecore(bucket);
    if ((p = (union overhead *) nextf[bucket]) == NULL) {
	/* morecore() failed: sbrk returned no space */
	child++;
#ifndef DEBUG
	stderror(ERR_NOMEM);
#else
	showall(NULL, NULL);
	xprintf(CGETS(19, 1, "nbytes=%d: Out of memory\n"), nbytes);
	abort();
#endif
	/* fool lint */
	return ((memalign_t) 0);
    }
    /* remove from linked list */
    nextf[bucket] = nextf[bucket]->ov_next;
    p->ov_magic = MAGIC;	/* mark block as in use */
    p->ov_index = bucket;	/* remember its size class for free() */
    nmalloc[bucket]++;
#ifdef RCHECK
    /*
     * Record allocated size of block and bound space with magic numbers.
     */
    p->ov_size = (p->ov_index <= 13) ? nbytes - 1 : 0;
    p->ov_rmagic = RMAGIC;
    *((U_int *) (((caddr_t) p) + nbytes - RSLOP)) = RMAGIC;
#endif
    /* hand the caller the address just past the aligned header */
    return ((memalign_t) (((caddr_t) p) + MEMALIGN(sizeof(union overhead))));
#else
    if (nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}
229 
230 #ifndef lint
231 /*
232  * Allocate more memory to the indicated bucket.
233  */
static void
morecore(bucket)
    int bucket;
{
    union overhead *op;
    int rnu;		/* 2^rnu bytes will be requested */
    int nblks;		/* become nblks blocks of the desired size */
    int siz;

    /* another thread of control may already have refilled this bucket */
    if (nextf[bucket])
	return;
    /*
     * Insure memory is allocated on a page boundary.  Should make getpageize
     * call?
     */
    op = (union overhead *) sbrk(0);
    memtop = (char *) op;
    if (membot == NULL)
	membot = memtop;
    /* pad the break up to the next 1K boundary before grabbing the chunk */
    if ((long) op & 0x3ff) {
	memtop = (char *) sbrk((int) (1024 - ((long) op & 0x3ff)));
	memtop += (long) (1024 - ((long) op & 0x3ff));
    }

    /* take 2k unless the block is bigger than that */
    rnu = (bucket <= 8) ? 11 : bucket + 3;
    nblks = 1 << (rnu - (bucket + 3));	/* how many blocks to get */
    memtop = (char *) sbrk(1 << rnu);	/* PWP */
    op = (union overhead *) memtop;
    /* no more room!  leave nextf[bucket] empty; malloc() reports ENOMEM */
    if ((long) op == -1)
	return;
    memtop += (long) (1 << rnu);
    /*
     * Round up to minimum allocation size boundary and deduct from block count
     * to reflect.
     */
    if (((U_long) op) & ROUNDUP) {
	op = (union overhead *) (((U_long) op + (ROUNDUP + 1)) & ~ROUNDUP);
	nblks--;
    }
    /*
     * Add new memory allocated to that on free list for this hash bucket.
     * Thread the chunk into nblks blocks of siz bytes, linked through
     * ov_next, terminated with NULL.
     */
    nextf[bucket] = op;
    siz = 1 << (bucket + 3);
    while (--nblks > 0) {
	op->ov_next = (union overhead *) (((caddr_t) op) + siz);
	op = (union overhead *) (((caddr_t) op) + siz);
    }
    op->ov_next = NULL;
}
286 
287 #endif
288 
/*
 * Return a block to its free list.  The bucket index is recovered from
 * the header written by malloc().  Sanity checks are done through the
 * CHECK macro, which (in non-DEBUG builds) silently RETURNS from this
 * function on a bad pointer instead of corrupting the free lists.
 */
void
free(cp)
    ptr_t   cp;
{
#ifndef lint
    int size;
    union overhead *op;

    /*
     * the don't free flag is there so that we avoid os bugs in routines
     * that free invalid pointers!
     */
    if (cp == NULL || dont_free)
	return;
    CHECK(!memtop || !membot,
	  CGETS(19, 2, "free(%lx) called before any allocations."), cp);
    CHECK(cp > (ptr_t) memtop,
	  CGETS(19, 3, "free(%lx) above top of memory."), cp);
    CHECK(cp < (ptr_t) membot,
	  CGETS(19, 4, "free(%lx) below bottom of memory."), cp);
    /* step back over the aligned header to reach the accounting info */
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    CHECK(op->ov_magic != MAGIC,
	  CGETS(19, 5, "free(%lx) bad block."), cp);

#ifdef RCHECK
    /* verify the trailing magic word malloc() wrote past the user data */
    if (op->ov_index <= 13)
	CHECK(*(U_int *) ((caddr_t) op + op->ov_size + 1 - RSLOP) != RMAGIC,
	      CGETS(19, 6, "free(%lx) bad range check."), cp);
#endif
    CHECK(op->ov_index >= NBUCKETS,
	  CGETS(19, 7, "free(%lx) bad block index."), cp);
    /* push the block onto the head of its bucket's free list */
    size = op->ov_index;
    op->ov_next = nextf[size];
    nextf[size] = op;

    nmalloc[size]--;

#else
    if (cp == NULL)
	return;
#endif
}
331 
332 memalign_t
333 calloc(i, j)
334     size_t  i, j;
335 {
336 #ifndef lint
337     char *cp, *scp;
338 
339     i *= j;
340     scp = cp = (char *) xmalloc((size_t) i);
341     if (i != 0)
342 	do
343 	    *cp++ = 0;
344 	while (--i);
345 
346     return ((memalign_t) scp);
347 #else
348     if (i && j)
349 	return ((memalign_t) 0);
350     else
351 	return ((memalign_t) 0);
352 #endif
353 }
354 
355 /*
356  * When a program attempts "storage compaction" as mentioned in the
357  * old malloc man page, it realloc's an already freed block.  Usually
358  * this is the last block it freed; occasionally it might be farther
359  * back.  We have to search all the free lists for the block in order
360  * to determine its bucket: 1st we make one pass thru the lists
361  * checking only the first block in each; if that fails we search
362  * ``realloc_srchlen'' blocks in each list for a match (the variable
363  * is extern so the caller can modify it).  If that fails we just copy
364  * however many bytes was given to realloc() and hope it's not huge.
365  */
366 #ifndef lint
367 /* 4 should be plenty, -1 =>'s whole list */
368 static int     realloc_srchlen = 4;
369 #endif /* lint */
370 
/*
 * Resize an allocation.  A NULL pointer degenerates to malloc().  If the
 * header magic is gone the block was already freed ("storage compaction",
 * see the comment above); we then hunt for it on the free lists via
 * findbucket() to recover its size class.  If the new size still fits the
 * current bucket the pointer is returned unchanged; otherwise we allocate,
 * copy the smaller of old/new payload sizes, and free the original.
 */
memalign_t
realloc(cp, nbytes)
    ptr_t   cp;
    size_t  nbytes;
{
#ifndef lint
    U_int onb;
    union overhead *op;
    ptr_t res;
    int i;
    int     was_alloced = 0;

    if (cp == NULL)
	return (malloc(nbytes));
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    if (op->ov_magic == MAGIC) {
	was_alloced++;
	i = op->ov_index;
    }
    else
	/*
	 * Already free, doing "compaction".
	 *
	 * Search for the old block of memory on the free list.  First, check the
	 * most common case (last element free'd), then (this failing) the last
	 * ``realloc_srchlen'' items free'd. If all lookups fail, then assume
	 * the size of the memory block being realloc'd is the smallest
	 * possible.
	 */
	if ((i = findbucket(op, 1)) < 0 &&
	    (i = findbucket(op, realloc_srchlen)) < 0)
	    i = 0;

    /* size the new request the same way malloc() would */
    onb = MEMALIGN(nbytes + MEMALIGN(sizeof(union overhead)) + RSLOP);

    /* avoid the copy if same size block */
    if (was_alloced && (onb <= (U_int) (1 << (i + 3))) &&
	(onb > (U_int) (1 << (i + 2)))) {
#ifdef RCHECK
	/* JMR: formerly this wasn't updated ! */
	nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead))+nbytes+RSLOP);
	*((U_int *) (((caddr_t) op) + nbytes - RSLOP)) = RMAGIC;
	op->ov_rmagic = RMAGIC;
	op->ov_size = (op->ov_index <= 13) ? nbytes - 1 : 0;
#endif
	return ((memalign_t) cp);
    }
    if ((res = malloc(nbytes)) == NULL)
	return ((memalign_t) NULL);
    if (cp != res) {		/* common optimization */
	/*
	 * christos: this used to copy nbytes! It should copy the
	 * smaller of the old and new size
	 */
	/* old payload capacity = bucket size minus header and RCHECK slop */
	onb = (1 << (i + 3)) - MEMALIGN(sizeof(union overhead)) - RSLOP;
	(void) memmove((ptr_t) res, (ptr_t) cp,
		       (size_t) (onb < nbytes ? onb : nbytes));
    }
    /* only free blocks we know were still live; a compacted one is
     * already on a free list */
    if (was_alloced)
	free(cp);
    return ((memalign_t) res);
#else
    if (cp && nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}
439 
440 
441 
442 #ifndef lint
443 /*
444  * Search ``srchlen'' elements of each free list for a block whose
445  * header starts at ``freep''.  If srchlen is -1 search the whole list.
446  * Return bucket number, or -1 if not found.
447  */
448 static int
449 findbucket(freep, srchlen)
450     union overhead *freep;
451     int     srchlen;
452 {
453     union overhead *p;
454     size_t i;
455     int j;
456 
457     for (i = 0; i < NBUCKETS; i++) {
458 	j = 0;
459 	for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
460 	    if (p == freep)
461 		return (i);
462 	    j++;
463 	}
464     }
465     return (-1);
466 }
467 
468 #endif
469 
470 
471 #else				/* SYSMALLOC */
472 
473 /**
474  ** ``Protected versions'' of malloc, realloc, calloc, and free
475  **
476  ** On many systems:
477  **
478  ** 1. malloc(0) is bad
479  ** 2. free(0) is bad
480  ** 3. realloc(0, n) is bad
481  ** 4. realloc(n, 0) is bad
482  **
483  ** Also we call our error routine if we run out of memory.
484  **/
485 memalign_t
486 smalloc(n)
487     size_t  n;
488 {
489     ptr_t   ptr;
490 
491     n = n ? n : 1;
492 
493 #ifdef HAVE_SBRK
494     if (membot == NULL)
495 	membot = (char*) sbrk(0);
496 #endif /* HAVE_SBRK */
497 
498     if ((ptr = malloc(n)) == (ptr_t) 0) {
499 	child++;
500 	stderror(ERR_NOMEM);
501     }
502 #ifndef HAVE_SBRK
503     if (memtop < ((char *) ptr) + n)
504 	memtop = ((char *) ptr) + n;
505     if (membot == NULL)
506 	membot = (char*) ptr;
507 #endif /* !HAVE_SBRK */
508     return ((memalign_t) ptr);
509 }
510 
511 memalign_t
512 srealloc(p, n)
513     ptr_t   p;
514     size_t  n;
515 {
516     ptr_t   ptr;
517 
518     n = n ? n : 1;
519 
520 #ifdef HAVE_SBRK
521     if (membot == NULL)
522 	membot = (char*) sbrk(0);
523 #endif /* HAVE_SBRK */
524 
525     if ((ptr = (p ? realloc(p, n) : malloc(n))) == (ptr_t) 0) {
526 	child++;
527 	stderror(ERR_NOMEM);
528     }
529 #ifndef HAVE_SBRK
530     if (memtop < ((char *) ptr) + n)
531 	memtop = ((char *) ptr) + n;
532     if (membot == NULL)
533 	membot = (char*) ptr;
534 #endif /* !HAVE_SBRK */
535     return ((memalign_t) ptr);
536 }
537 
538 memalign_t
539 scalloc(s, n)
540     size_t  s, n;
541 {
542     char   *sptr;
543     ptr_t   ptr;
544 
545     n *= s;
546     n = n ? n : 1;
547 
548 #ifdef HAVE_SBRK
549     if (membot == NULL)
550 	membot = (char*) sbrk(0);
551 #endif /* HAVE_SBRK */
552 
553     if ((ptr = malloc(n)) == (ptr_t) 0) {
554 	child++;
555 	stderror(ERR_NOMEM);
556     }
557 
558     sptr = (char *) ptr;
559     if (n != 0)
560 	do
561 	    *sptr++ = 0;
562 	while (--n);
563 
564 #ifndef HAVE_SBRK
565     if (memtop < ((char *) ptr) + n)
566 	memtop = ((char *) ptr) + n;
567     if (membot == NULL)
568 	membot = (char*) ptr;
569 #endif /* !HAVE_SBRK */
570 
571     return ((memalign_t) ptr);
572 }
573 
574 void
575 sfree(p)
576     ptr_t   p;
577 {
578     if (p && !dont_free)
579 	free(p);
580 }
581 
582 #endif /* SYSMALLOC */
583 
584 /*
585  * mstats - print out statistics about malloc
586  *
587  * Prints two lines of numbers, one showing the length of the free list
588  * for each size category, the second showing the number of mallocs -
589  * frees for each size category.
590  */
591 /*ARGSUSED*/
/*
 * Builtin hook: print the allocator statistics described above.  With the
 * private allocator this walks every bucket's free list and the nmalloc[]
 * counters; with SYSMALLOC it can only report the membot..memtop extent.
 * v and c are the usual builtin arguments and are unused here.
 */
void
showall(v, c)
    Char **v;
    struct command *c;
{
#ifndef SYSMALLOC
    size_t i, j;
    union overhead *p;
    int     totfree = 0, totused = 0;

    xprintf(CGETS(19, 8, "%s current memory allocation:\nfree:\t"), progname);
    for (i = 0; i < NBUCKETS; i++) {
	/* j = length of bucket i's free list */
	for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
	    continue;
	xprintf(" %4d", j);
	totfree += j * (1 << (i + 3));
    }
    xprintf(CGETS(19, 9, "\nused:\t"));
    for (i = 0; i < NBUCKETS; i++) {
	/* nmalloc[i] = outstanding (not yet freed) blocks in bucket i */
	xprintf(" %4u", nmalloc[i]);
	totused += nmalloc[i] * (1 << (i + 3));
    }
    xprintf(CGETS(19, 10, "\n\tTotal in use: %d, total free: %d\n"),
	    totused, totfree);
    xprintf(CGETS(19, 11,
	    "\tAllocated memory from 0x%lx to 0x%lx.  Real top at 0x%lx\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) sbrk(0));
#else
#ifdef HAVE_SBRK
    memtop = (char *) sbrk(0);
#endif /* HAVE_SBRK */
    xprintf(CGETS(19, 12, "Allocated memory from 0x%lx to 0x%lx (%ld).\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) (memtop - membot));
#endif /* SYSMALLOC */
    USE(c);
    USE(v);
}
631