/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * wizard:/space/4.3reno/usr/src/lib/libc/stdlib/malloc.c
 * Copyright (c) 1983 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that: (1) source distributions retain this entire copyright
 * notice and comment, and (2) distributions including binaries display
 * the following acknowledgement:  ``This product includes software
 * developed by the University of California, Berkeley and its contributors''
 * in the documentation or other materials provided with the distribution
 * and in all advertising materials mentioning features or use of this
 * software. Neither the name of the University nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 bytes long (ILP32)
 * or 2^n-8 bytes long (LP64).
 */
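
/*
 * Illustrative sizing (derived from the rules above): a 100-byte request
 * is served from a 128-byte block, leaving 124 usable bytes on ILP32
 * (4-byte header) or 120 usable bytes on LP64 (8-byte header).
 */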

/*LINTLIBRARY*/
#include <sys/types.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <limits.h>

/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 * Overhead is 4 bytes for ILP32, 8 bytes for LP64.
 */
union overhead {
	union overhead	*ov_next;	/* when free */
	struct {
#if defined(_LITTLE_ENDIAN)
		uchar_t	ovu_magic;	/* magic number */
		uchar_t	ovu_index;	/* bucket # */
		uchar_t	ovu_pad[sizeof (union overhead *) - 2];
#elif defined(_BIG_ENDIAN)
		uchar_t	ovu_pad[sizeof (union overhead *) - 2];
		uchar_t	ovu_index;	/* bucket # */
		uchar_t	ovu_magic;	/* magic number */
#else
#error "Endianness is not defined"
#endif
	} ovu;
};

#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index

#define	MAGIC		0xef		/* magic # on accounting info */
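
/*
 * For illustration, an allocated block on little-endian ILP32 looks like:
 *
 *	+----------+----------+-----------+---------------------------+
 *	| ov_magic | ov_index |  padding  | user data (2^n - 4 bytes) |
 *	|  (0xef)  | (bucket) | (2 bytes) |                           |
 *	+----------+----------+-----------+---------------------------+
 *
 * The pointer handed back to the caller points just past this header.
 * While the block is free, the same word holds ov_next instead.
 */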

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+EXP).
 * The smallest allocatable block is 8 bytes (ILP32) or 16 bytes (LP64).
 * The overhead information precedes the data area returned to the user.
 */
#ifdef _LP64
#define	EXP		4
#define	NBUCKETS	60
#else
#define	EXP		3
#define	NBUCKETS	29
#endif
static	union overhead *nextf[NBUCKETS];

static	int	pagesz;			/* page size */
static	long	sbrk_adjust;		/* in case sbrk() does alignment */
static	int	pagebucket;		/* page size bucket */
static	void	morecore(int);
static	int	findbucket(union overhead *, int);

void *
malloc(size_t nbytes)
{
	union overhead *op;
	int bucket;
	ssize_t n;
	size_t amt;

	/*
	 * First time malloc is called, setup page size and
	 * align break pointer so all data will be page aligned.
	 */
	if (pagesz == 0) {
		pagesz = getpagesize();
		op = sbrk(0);
		n = pagesz - sizeof (*op) - ((uintptr_t)op & (pagesz - 1));
		if (n < 0)
			n += pagesz;
		if (n) {
			if (sbrk(n) == (void *)-1)
				return (NULL);
			/*
			 * We were careful to arrange that
			 * sbrk(0) + sizeof (union overhead)
			 * should end up on a page boundary.
			 * If the underlying sbrk() performs alignment
			 * then this is false.  We compute the adjustment.
			 */
			op = sbrk(0);
			sbrk_adjust = (uintptr_t)(op + 1) & (pagesz - 1);
		} else {
			sbrk_adjust = 0;
		}
		bucket = 0;
		amt = (1UL << EXP);
		while (pagesz > amt) {
			amt <<= 1;
			bucket++;
		}
		pagebucket = bucket;
	}
	/*
	 * Convert amount of memory requested into closest block size
	 * stored in hash buckets which satisfies request.
	 * Account for space used per block for accounting.
	 */
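	/*
	 * For example, with a 4096-byte page on ILP32: a 100-byte request
	 * starts at amt = 8 with n = -4 and doubles up to amt = 128,
	 * bucket 4 (124 usable bytes).  A 5000-byte request starts at
	 * amt = pagesz, bucket = pagebucket, and fits there because such
	 * blocks carry an extra page (see morecore()), for 2*pagesz - 4
	 * usable bytes.
	 */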
	if (nbytes <= (n = pagesz - sizeof (*op))) {
		amt = (1UL << EXP);	/* size of first bucket */
		bucket = 0;
		n = -(ssize_t)(sizeof (*op));
	} else {
		amt = pagesz;
		bucket = pagebucket;
	}
	while (nbytes > amt + n) {
		amt <<= 1;
		if (amt == 0)
			return (NULL);
		bucket++;
	}
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
	if ((op = nextf[bucket]) == NULL) {
		morecore(bucket);
		if ((op = nextf[bucket]) == NULL)
			return (NULL);
	}
	/* remove from linked list */
	nextf[bucket] = op->ov_next;
	op->ov_magic = MAGIC;
	op->ov_index = (uchar_t)bucket;
	return (op + 1);
}

/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(int bucket)
{
	union overhead *op;
	size_t sz;		/* size of desired block */
	ssize_t amt;		/* amount to allocate */
	long nblks;		/* how many blocks we get */

	sz = 1UL << (bucket + EXP);
	if (sz == 0)
		return;
	if (sz < pagesz) {
		amt = pagesz;
		nblks = amt / sz;
	} else {
		amt = sz + pagesz;
		nblks = 1;
	}
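	/*
	 * E.g., for bucket 1 on ILP32 (16-byte blocks) with 4096-byte pages,
	 * amt is one page and nblks is 256; for buckets of a page or more,
	 * a single block of sz + pagesz bytes is taken.
	 */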
	if (amt <= 0)
		return;
	if (amt > LONG_MAX) {
		intptr_t delta;
		/*
		 * The value required is too big for sbrk() to deal with
		 * in one go, so use sbrk() at most 2 times instead.
		 */
		op = sbrk(0);
		delta = LONG_MAX;
		while (delta > 0) {
			if (sbrk(delta) == (void *)-1) {
				if (op != sbrk(0))
					(void) sbrk(-LONG_MAX);
				return;
			}
			amt -= LONG_MAX;
			delta = amt;
		}
	} else {
		op = sbrk(amt);
	}
	/* no more room! */
	if (op == (union overhead *)-1)
		return;
	/* LINTED improper alignment */
	op = (union overhead *)((caddr_t)op - sbrk_adjust);
	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	nextf[bucket] = op;
	while (--nblks > 0) {
		/* LINTED improper alignment */
		op->ov_next = (union overhead *)((caddr_t)op + sz);
		/* LINTED improper alignment */
		op = (union overhead *)((caddr_t)op + sz);
	}
}

void
free(void *cp)
{
	int size;
	union overhead *op;

	if (cp == NULL)
		return;
	/* LINTED improper alignment */
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
	if (op->ov_magic != MAGIC)
		return;				/* previously freed? */
	size = op->ov_index;
	op->ov_next = nextf[size];	/* also clobbers ov_magic */
	nextf[size] = op;
}
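
/*
 * Note that free() only pushes the block back onto its bucket's free list;
 * the allocator never shrinks the heap when blocks are freed.
 */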

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``realloc_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes were given to realloc() and hope it's not huge.
 */
int realloc_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */
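
/*
 * A minimal usage sketch (hypothetical caller code): a program that
 * knowingly realloc()s freed blocks can widen the search by declaring
 *
 *	extern int realloc_srchlen;
 *
 * and setting it to -1 (search each free list exhaustively) before
 * calling realloc().
 */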

void *
realloc(void *cp, size_t nbytes)
{
	size_t onb;
	int i;
	union overhead *op;
	char *res;
	int was_alloced = 0;

	if (cp == NULL)
		return (malloc(nbytes));
	/* LINTED improper alignment */
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
	if (op->ov_magic == MAGIC) {
		was_alloced++;
		i = op->ov_index;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``realloc_srchlen'' items free'd.
		 * If all lookups fail, then just malloc() the
		 * space and copy the size of the new space.
		 */
		if ((i = findbucket(op, 1)) < 0 &&
		    (i = findbucket(op, realloc_srchlen)) < 0) {
			if ((res = malloc(nbytes)) != NULL)
				(void) memmove(res, cp, nbytes);
			return (res);
		}
	}
	onb = 1UL << (i + EXP);
	if (onb < pagesz)
		onb -= sizeof (*op);
	else
		onb += pagesz - sizeof (*op);
	/* avoid the copy if same size block */
	if (was_alloced) {
		size_t sz = 0;
		if (i) {
			sz = 1UL << (i + EXP - 1);
			if (sz < pagesz)
				sz -= sizeof (*op);
			else
				sz += pagesz - sizeof (*op);
		}
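		/*
		 * E.g., on ILP32 a bucket-4 block holds up to 124 bytes and
		 * a bucket-3 block up to 60, so any new size in 61..124
		 * keeps the existing block.
		 */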
		if (nbytes <= onb && nbytes > sz) {
			return (cp);
		} else {
			free(cp);
		}
	}
	if ((res = malloc(nbytes)) == NULL)
		return (NULL);
	if (cp != res)		/* common optimization if "compacting" */
		(void) memmove(res, cp, (nbytes < onb) ? nbytes : onb);
	return (res);
}

/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(union overhead *freep, int srchlen)
{
	union overhead *p;
	int i, j;

	for (i = 0; i < NBUCKETS; i++) {
		j = 0;
		for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
			if (p == freep)
				return (i);
			j++;
		}
	}
	return (-1);
}