/* $Header: /p/tcsh/cvsroot/tcsh/tc.alloc.c,v 3.50 2011/12/30 20:55:24 christos Exp $ */
/*
 * tc.alloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
 * This is designed for use in a program that uses vast quantities of memory,
 * but bombs when it runs out.
 */
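/*
 * Illustrative example (assuming the classic 4-byte overhead and no range
 * checking): a 28-byte request is served from the 32-byte (2^5) free list,
 * i.e. the "2^n - 4" usable size mentioned above, while a 29-byte request
 * is passed up to the 64-byte list.
 */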
/*-
 * Copyright (c) 1980, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "sh.h"
#ifdef HAVE_MALLINFO
#include <malloc.h>
#endif

RCSID("$tcsh: tc.alloc.c,v 3.50 2011/12/30 20:55:24 christos Exp $")

#define RCHECK
#define DEBUG

static char   *memtop = NULL;		/* PWP: top of current memory */
static char   *membot = NULL;		/* PWP: bottom of allocatable memory */

int dont_free = 0;

#ifdef WINNT_NATIVE
# define malloc		fmalloc
# define free		ffree
# define calloc		fcalloc
# define realloc	frealloc
#endif /* WINNT_NATIVE */

#if !defined(DEBUG) || defined(SYSMALLOC)
static void
out_of_memory (void)
{
    static const char msg[] = "Out of memory\n";

    write(didfds ? 2 : SHDIAG, msg, strlen(msg));
    _exit(1);
}
#endif

#ifndef SYSMALLOC

#ifdef SX
extern void* sbrk();
#endif
/*
 * Lots of OS routines are busted and try to free invalid pointers.
 * Although our free routine is smart enough to catch bad pointers most
 * of the time, in the cases where we know we are going to get a bad
 * pointer we'd rather leak.
 */

#ifndef NULL
#define	NULL 0
#endif

typedef unsigned char U_char;	/* we don't really have signed chars */
typedef unsigned int U_int;
typedef unsigned short U_short;
typedef unsigned long U_long;


/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled and the size of the block fits
 * in two bytes, then the top two bytes hold the size of the requested block
 * plus the range checking words, and the header word MINUS ONE.
 */


#define MEMALIGN(a) (((a) + ROUNDUP) & ~ROUNDUP)

union overhead {
    union overhead *ov_next;	/* when free */
    struct {
	U_char  ovu_magic;	/* magic number */
	U_char  ovu_index;	/* bucket # */
#ifdef RCHECK
	U_short ovu_size;	/* actual block size */
	U_int   ovu_rmagic;	/* range magic number */
#endif
    }       ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_size		ovu.ovu_size
#define	ov_rmagic	ovu.ovu_rmagic
};
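/*
 * Sketch of the layout of an in-use block when RCHECK is defined (field
 * widths are the usual ones; exact packing depends on the compiler):
 *
 *	ov_magic (1 byte) | ov_index (1) | ov_size (2) | ov_rmagic (4)
 *	... user data, starting MEMALIGN(sizeof(union overhead)) bytes in ...
 *	RMAGIC word, RSLOP bytes before the aligned end of the block
 *
 * When the block sits on a free list the same storage holds only ov_next,
 * the link to the next free block in the bucket.
 */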

#define	MAGIC		0xfd	/* magic # on accounting info */
#define RMAGIC		0x55555555	/* magic # on range info */
#ifdef RCHECK
#define	RSLOP		sizeof (U_int)
#else
#define	RSLOP		0
#endif


#define ROUNDUP	7
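/*
 * With ROUNDUP == 7, MEMALIGN() above rounds a size up to the next
 * multiple of 8; e.g. MEMALIGN(13) == 16 and MEMALIGN(16) == 16.
 */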

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS ((sizeof(long) << 3) - 3)
static union overhead *nextf[NBUCKETS] IZERO_STRUCT;
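/*
 * For reference, NBUCKETS evaluates to 29 where long is 32 bits and to 61
 * where it is 64 bits, so bucket i serves blocks of 2^(i+3) bytes up to
 * half the address space.
 */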

/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static U_int nmalloc[NBUCKETS] IZERO_STRUCT;

#ifndef lint
static	int	findbucket	(union overhead *, int);
static	void	morecore	(int);
#endif


#ifdef DEBUG
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p);	\
	xprintf(" (memtop = %p membot = %p)\n", memtop, membot);	\
	abort(); \
    }
#else
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p);	\
	xprintf(" (memtop = %p membot = %p)\n", memtop, membot);	\
	return; \
    }
#endif

memalign_t
malloc(size_t nbytes)
{
#ifndef lint
    union overhead *p;
    int bucket = 0;
    unsigned shiftr;

    /*
     * Convert the request into the nearest block size kept in the hash
     * buckets that will satisfy it, accounting for the per-block
     * bookkeeping overhead.
     */
#ifdef SUNOS4
    /*
     * SunOS localtime() overwrites the 9th byte on an 8 byte malloc()....
     * so we get one more...
     * From Michael Schroeder: This is not true. It depends on the
     * timezone string. In Europe it can overwrite the 13th byte on a
     * 12 byte malloc.
     * So we punt and we always allocate an extra byte.
     */
    nbytes++;
#endif

    nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead)) + nbytes + RSLOP);
    shiftr = (nbytes - 1) >> 2;

    /* apart from this loop, this is O(1) */
    while ((shiftr >>= 1) != 0)
	bucket++;
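    /*
     * Worked example (assuming a build where MEMALIGN(sizeof(union overhead))
     * is 8 and RCHECK makes RSLOP 4): malloc(100) gives
     * nbytes = MEMALIGN(8 + 100 + 4) = 112 and shiftr = 111 >> 2 = 27;
     * the loop shifts 27 -> 13 -> 6 -> 3 -> 1 -> 0, leaving bucket = 4,
     * i.e. the 2^(4+3) = 128-byte list, the smallest size that holds the
     * 112 bytes needed.
     */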
    /*
     * If nothing in hash bucket right now, request more memory from the
     * system.
     */
    if (nextf[bucket] == NULL)
	morecore(bucket);
    if ((p = nextf[bucket]) == NULL) {
	child++;
#ifndef DEBUG
	out_of_memory();
#else
	showall(NULL, NULL);
	xprintf(CGETS(19, 1, "nbytes=%zu: Out of memory\n"), nbytes);
	abort();
#endif
	/* fool lint */
	return ((memalign_t) 0);
    }
    /* remove from linked list */
    nextf[bucket] = nextf[bucket]->ov_next;
    p->ov_magic = MAGIC;
    p->ov_index = bucket;
    nmalloc[bucket]++;
#ifdef RCHECK
    /*
     * Record allocated size of block and bound space with magic numbers.
     */
    p->ov_size = (p->ov_index <= 13) ? nbytes - 1 : 0;
    p->ov_rmagic = RMAGIC;
    *((U_int *) (((caddr_t) p) + nbytes - RSLOP)) = RMAGIC;
#endif
    return ((memalign_t) (((caddr_t) p) + MEMALIGN(sizeof(union overhead))));
#else
    if (nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}

#ifndef lint
/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(int bucket)
{
    union overhead *op;
    int rnu;		/* 2^rnu bytes will be requested */
    int nblks;		/* become nblks blocks of the desired size */
    int siz;

    if (nextf[bucket])
	return;
    /*
     * Ensure memory is allocated on a page boundary.  Should this make a
     * getpagesize() call?
     */
    op = (union overhead *) sbrk(0);
    memtop = (char *) op;
    if (membot == NULL)
	membot = memtop;
    if ((long) op & 0x3ff) {
	memtop = sbrk((int) (1024 - ((long) op & 0x3ff)));
	memtop += (long) (1024 - ((long) op & 0x3ff));
    }

    /* take 2k unless the block is bigger than that */
    rnu = (bucket <= 8) ? 11 : bucket + 3;
    nblks = 1 << (rnu - (bucket + 3));	/* how many blocks to get */
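    /*
     * For example, bucket 4 (128-byte blocks) gives rnu = 11, so 2048 bytes
     * are requested from sbrk() and carved into 1 << (11 - 7) = 16 blocks;
     * buckets above 8 get exactly one block per request.
     */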
    memtop = sbrk(1 << rnu);	/* PWP */
    op = (union overhead *) memtop;
    /* no more room! */
    if ((long) op == -1)
	return;
    memtop += (long) (1 << rnu);
    /*
     * Round up to the minimum allocation size boundary and deduct one
     * block from the count to account for the space lost to rounding.
     */
    if (((U_long) op) & ROUNDUP) {
	op = (union overhead *) (((U_long) op + (ROUNDUP + 1)) & ~ROUNDUP);
	nblks--;
    }
    /*
     * Add new memory allocated to that on free list for this hash bucket.
     */
    nextf[bucket] = op;
    siz = 1 << (bucket + 3);
    while (--nblks > 0) {
	op->ov_next = (union overhead *) (((caddr_t) op) + siz);
	op = (union overhead *) (((caddr_t) op) + siz);
    }
    op->ov_next = NULL;
}

#endif

void
free(ptr_t cp)
{
#ifndef lint
    int size;
    union overhead *op;

    /*
     * The dont_free flag is there so that we avoid OS bugs in routines
     * that free invalid pointers!
     */
    if (cp == NULL || dont_free)
	return;
    CHECK(!memtop || !membot,
	  CGETS(19, 2, "free(%p) called before any allocations."), cp);
    CHECK(cp > (ptr_t) memtop,
	  CGETS(19, 3, "free(%p) above top of memory."), cp);
    CHECK(cp < (ptr_t) membot,
	  CGETS(19, 4, "free(%p) below bottom of memory."), cp);
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    CHECK(op->ov_magic != MAGIC,
	  CGETS(19, 5, "free(%p) bad block."), cp);

#ifdef RCHECK
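    /*
     * ov_size was stored by malloc() as the aligned block size minus one,
     * so add the 1 back before stepping RSLOP bytes back to the trailing
     * RMAGIC word.
     */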
    if (op->ov_index <= 13)
	CHECK(*(U_int *) ((caddr_t) op + op->ov_size + 1 - RSLOP) != RMAGIC,
	      CGETS(19, 6, "free(%p) bad range check."), cp);
#endif
    CHECK(op->ov_index >= NBUCKETS,
	  CGETS(19, 7, "free(%p) bad block index."), cp);
    size = op->ov_index;
    op->ov_next = nextf[size];
    nextf[size] = op;

    nmalloc[size]--;

#else
    if (cp == NULL)
	return;
#endif
}

memalign_t
calloc(size_t i, size_t j)
{
#ifndef lint
    char *cp;

    i *= j;
    cp = xmalloc(i);
    memset(cp, 0, i);

    return ((memalign_t) cp);
#else
    if (i && j)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif
}

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``realloc_srchlen'' blocks in each list for a match (the variable
 * is defined below so it can be tuned).  If that fails we just copy
 * however many bytes were given to realloc() and hope it's not huge.
 */
#ifndef lint
/* 4 should be plenty, -1 =>'s whole list */
static int     realloc_srchlen = 4;
#endif /* lint */
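/*
 * In practice, with realloc_srchlen == 4 a realloc() of an already freed
 * pointer first scans the head of every free list, then up to 4 entries of
 * every list, before giving up and assuming the smallest (8-byte) bucket
 * for the copy.
 */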

memalign_t
realloc(ptr_t cp, size_t nbytes)
{
#ifndef lint
    U_int onb;
    union overhead *op;
    ptr_t res;
    int i;
    int     was_alloced = 0;

    if (cp == NULL)
	return (malloc(nbytes));
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    if (op->ov_magic == MAGIC) {
	was_alloced++;
	i = op->ov_index;
    }
    else
	/*
	 * Already free, doing "compaction".
	 *
	 * Search for the old block of memory on the free list.  First, check the
	 * most common case (last element free'd), then (this failing) the last
	 * ``realloc_srchlen'' items free'd. If all lookups fail, then assume
	 * the size of the memory block being realloc'd is the smallest
	 * possible.
	 */
	if ((i = findbucket(op, 1)) < 0 &&
	    (i = findbucket(op, realloc_srchlen)) < 0)
	    i = 0;

    onb = MEMALIGN(nbytes + MEMALIGN(sizeof(union overhead)) + RSLOP);

    /* avoid the copy if same size block */
    if (was_alloced && (onb <= (U_int) (1 << (i + 3))) &&
	(onb > (U_int) (1 << (i + 2)))) {
#ifdef RCHECK
	/* JMR: formerly this wasn't updated ! */
	nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead))+nbytes+RSLOP);
	*((U_int *) (((caddr_t) op) + nbytes - RSLOP)) = RMAGIC;
	op->ov_rmagic = RMAGIC;
	op->ov_size = (op->ov_index <= 13) ? nbytes - 1 : 0;
#endif
	return ((memalign_t) cp);
    }
    if ((res = malloc(nbytes)) == NULL)
	return ((memalign_t) NULL);
    if (cp != res) {		/* common optimization */
	/*
	 * christos: this used to copy nbytes! It should copy the
	 * smaller of the old and new size
	 */
	onb = (1 << (i + 3)) - MEMALIGN(sizeof(union overhead)) - RSLOP;
	(void) memmove(res, cp, onb < nbytes ? onb : nbytes);
    }
    if (was_alloced)
	free(cp);
    return ((memalign_t) res);
#else
    if (cp && nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}

/*
 * On linux, _nss_nis_setnetgrent() calls this function to determine
 * the usable size of the pointer passed, but this is not a portable
 * API, so we cannot use our malloc replacement without providing one.
 * Thanks a lot glibc!
 */
#ifdef __linux__
#define M_U_S_CONST
#else
#define M_U_S_CONST
#endif
size_t malloc_usable_size(M_U_S_CONST void *);
size_t
malloc_usable_size(M_U_S_CONST void *ptr)
{
    const union overhead *op = (const union overhead *)
	(((const char *) ptr) - MEMALIGN(sizeof(*op)));
    if (op->ov_magic == MAGIC)
	    return 1 << (op->ov_index + 2);
    else
	    return 0;
}


#ifndef lint
/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(union overhead *freep, int srchlen)
{
    union overhead *p;
    size_t i;
    int j;

    for (i = 0; i < NBUCKETS; i++) {
	j = 0;
	for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
	    if (p == freep)
		return (i);
	    j++;
	}
    }
    return (-1);
}

#endif


#else				/* SYSMALLOC */

/**
 ** ``Protected versions'' of malloc, realloc, calloc, and free
 **
 ** On many systems:
 **
 ** 1. malloc(0) is bad
 ** 2. free(0) is bad
 ** 3. realloc(0, n) is bad
 ** 4. realloc(n, 0) is bad
 **
 ** Also we call our error routine if we run out of memory.
 **/
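/*
 * In other words: smalloc(0) behaves like smalloc(1), sfree(NULL) is a
 * no-op, and srealloc(NULL, n) falls back to malloc(n), so callers need
 * not guard against the cases listed above themselves.
 */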
memalign_t
smalloc(size_t n)
{
    ptr_t   ptr;

    n = n ? n : 1;

#ifdef HAVE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* HAVE_SBRK */

    if ((ptr = malloc(n)) == NULL)
	out_of_memory();
#ifndef HAVE_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !HAVE_SBRK */
    return ((memalign_t) ptr);
}

memalign_t
srealloc(ptr_t p, size_t n)
{
    ptr_t   ptr;

    n = n ? n : 1;

#ifdef HAVE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* HAVE_SBRK */

    if ((ptr = (p ? realloc(p, n) : malloc(n))) == NULL)
	out_of_memory();
#ifndef HAVE_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !HAVE_SBRK */
    return ((memalign_t) ptr);
}

memalign_t
scalloc(size_t s, size_t n)
{
    ptr_t   ptr;

    n *= s;
    n = n ? n : 1;

#ifdef HAVE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* HAVE_SBRK */

    if ((ptr = malloc(n)) == NULL)
	out_of_memory();

    memset (ptr, 0, n);

#ifndef HAVE_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !HAVE_SBRK */

    return ((memalign_t) ptr);
}

void
sfree(ptr_t p)
{
    if (p && !dont_free)
	free(p);
}

#endif /* SYSMALLOC */

/*
 * showall() ("mstats") - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
/*ARGSUSED*/
void
showall(Char **v, struct command *c)
{
#ifndef SYSMALLOC
    size_t i, j;
    union overhead *p;
    int     totfree = 0, totused = 0;

    xprintf(CGETS(19, 8, "%s current memory allocation:\nfree:\t"), progname);
    for (i = 0; i < NBUCKETS; i++) {
	for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
	    continue;
	xprintf(" %4zd", j);
	totfree += j * (1 << (i + 3));
    }
    xprintf("\n%s:\t", CGETS(19, 9, "used"));
    for (i = 0; i < NBUCKETS; i++) {
	xprintf(" %4d", nmalloc[i]);
	totused += nmalloc[i] * (1 << (i + 3));
    }
    xprintf(CGETS(19, 10, "\n\tTotal in use: %d, total free: %d\n"),
	    totused, totfree);
    xprintf(CGETS(19, 11,
	    "\tAllocated memory from 0x%lx to 0x%lx.  Real top at 0x%lx\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) sbrk(0));
#else /* SYSMALLOC */
#ifndef HAVE_MALLINFO
#ifdef HAVE_SBRK
    memtop = sbrk(0);
#endif /* HAVE_SBRK */
    xprintf(CGETS(19, 12, "Allocated memory from 0x%lx to 0x%lx (%ld).\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) (memtop - membot));
#else /* HAVE_MALLINFO */
    struct mallinfo mi;

    mi = mallinfo();
    xprintf(CGETS(19, 13, "%s current memory allocation:\n"), progname);
    xprintf(CGETS(19, 14, "Total space allocated from system: %d\n"), mi.arena);
    xprintf(CGETS(19, 15, "Number of non-inuse chunks: %d\n"), mi.ordblks);
    xprintf(CGETS(19, 16, "Number of mmapped regions: %d\n"), mi.hblks);
    xprintf(CGETS(19, 17, "Total space in mmapped regions: %d\n"), mi.hblkhd);
    xprintf(CGETS(19, 18, "Total allocated space: %d\n"), mi.uordblks);
    xprintf(CGETS(19, 19, "Total non-inuse space: %d\n"), mi.fordblks);
    xprintf(CGETS(19, 20, "Top-most, releasable space: %d\n"), mi.keepcost);
#endif /* HAVE_MALLINFO */
#endif /* SYSMALLOC */
    USE(c);
    USE(v);
}
651