/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	All Rights Reserved	*/

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>


/*
 * Simplified version of malloc(), free() and realloc(), to be linked with
 * utilities that use [s]brk() and do not define their own version of the
 * routines.
 *
 * The algorithm used to obtain extra memory space by mmap'ing /dev/zero.
 * This breaks if the application closes the open descriptor, so the
 * allocator now uses mmap's MAP_ANON feature instead.
 *
 * Each call to mmap() creates a page. The pages are linked in a list.
 * Each page is divided into blocks. There is at least one block in a page.
 * New memory chunks are allocated on a first-fit basis.
 * Freed blocks are joined into larger blocks. Free pages are unmapped.
 * (A sketch of the page layout follows the structure definitions below.)
 */
#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <thread.h>
#include <pthread.h>
#include <synch.h>
#include <string.h>

static mutex_t lock = DEFAULTMUTEX;

struct block {
	size_t size;		/* Space available for user */
	struct page *page;	/* Backwards reference to page */
	int status;
	struct block *next;
	void *memstart[1];
};

struct page {
	size_t size;		/* Total page size (incl. header) */
	struct page *next;
	struct block block[1];
};

#define	FREE	0
#define	BUSY	1

#define	HDR_BLOCK	(sizeof (struct block) - sizeof (void *))
#define	HDR_PAGE	(sizeof (struct page) - sizeof (void *))
#define	MINSZ		sizeof (double)
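
/*
 * Layout sketch (assumes typical structure padding). A page obtained from
 * mmap() starts with the struct page header, whose embedded block[1] array
 * doubles as the header of the first block; each block's user area begins
 * at its memstart field:
 *
 *	| page: size, next | block: size, page, status, next | user data ...
 *	  \____________ HDR_PAGE bytes up to the first memstart ___________/
 *
 * HDR_BLOCK is the distance from a block header to its memstart field,
 * which is why free() and realloc() recover the header with
 * (struct block *)((char *)ptr - HDR_BLOCK). MINSZ keeps every user area
 * at least the size of a double.
 */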

/* for convenience */
#ifndef	NULL
#define	NULL	(0)
#endif

struct page *memstart;
static int pagesize;
static void defrag(struct page *);
static void split(struct block *, size_t);
static void *malloc_unlocked(size_t);
static size_t align(size_t, int);

void *
malloc(size_t size)
{
	void *retval;
	(void) mutex_lock(&lock);
	retval = malloc_unlocked(size);
	(void) mutex_unlock(&lock);
	return (retval);
}


static void *
malloc_unlocked(size_t size)
{
	struct block *block;
	struct page *page;

	if (pagesize == 0)
		pagesize = (int)sysconf(_SC_PAGESIZE);

	size = align(size, MINSZ);

	/*
	 * Try to locate a large enough free block (first fit). If none is
	 * found, the loops terminate with page == NULL and control falls
	 * through the label to allocate a fresh page below.
	 */
	for (page = memstart; page; page = page->next) {
		for (block = page->block; block; block = block->next) {
			if (block->status == FREE && block->size >= size)
				goto found;
		}
	}
found:

	/*
	 * Need to allocate a new page
	 */
	if (!page) {
		size_t totsize = size + HDR_PAGE;
		size_t totpage = align(totsize, pagesize);

		if ((page = (struct page *)mmap(0, totpage,
		    PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0))
		    == MAP_FAILED)
			return (0);

		page->next = memstart;
		memstart = page;
		page->size = totpage;
		block = page->block;
		block->next = 0;
		block->status = FREE;
		block->size = totpage - HDR_PAGE;
		block->page = page;
	}

	split(block, size);

	block->status = BUSY;
	return (&block->memstart);
}

void *
realloc(void *ptr, size_t size)
{
	struct block *block;
	size_t osize;
	void *newptr;

	(void) mutex_lock(&lock);
	if (ptr == NULL) {
		newptr = malloc_unlocked(size);
		(void) mutex_unlock(&lock);
		return (newptr);
	}
	block = (struct block *)((char *)ptr - HDR_BLOCK);
	size = align(size, MINSZ);
	osize = block->size;

	/*
	 * Join block with next one if it is free
	 */
	if (block->next && block->next->status == FREE) {
		block->size += block->next->size + HDR_BLOCK;
		block->next = block->next->next;
	}

	if (size <= block->size) {
		split(block, size);
		(void) mutex_unlock(&lock);
		return (ptr);
	}

	newptr = malloc_unlocked(size);
	if (newptr == NULL) {
		/* Allocation failed: leave the original block intact. */
		(void) mutex_unlock(&lock);
		return (NULL);
	}
	(void) memcpy(newptr, ptr, osize);
	block->status = FREE;
	defrag(block->page);
	(void) mutex_unlock(&lock);
	return (newptr);
}

void
free(void *ptr)
{
	struct block *block;

	(void) mutex_lock(&lock);
	if (ptr == NULL) {
		(void) mutex_unlock(&lock);
		return;
	}
	block = (struct block *)((char *)ptr - HDR_BLOCK);
	block->status = FREE;

	defrag(block->page);
	(void) mutex_unlock(&lock);
}

/*
 * Align size on an appropriate boundary
 */
static size_t
align(size_t size, int bound)
{
	if (size < bound)
		return ((size_t)bound);
	else
		return (size + bound - 1 - (size + bound - 1) % bound);
}
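
/*
 * Worked examples (assuming sizeof (double) == 8):
 *	align(1, 8)  == 8	(requests below the bound round up to it)
 *	align(8, 8)  == 8
 *	align(13, 8) == 16
 *	align(4000, 4096) == 4096 on a system with 4096-byte pages
 */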

static void
split(struct block *block, size_t size)
{
	if (block->size > size + sizeof (struct block)) {
		struct block *newblock;
		newblock = (struct block *)((char *)block + HDR_BLOCK + size);
		newblock->next = block->next;
		block->next = newblock;
		newblock->status = FREE;
		newblock->page = block->page;
		newblock->size = block->size - size - HDR_BLOCK;
		block->size = size;
	}
}
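
/*
 * Example (sizes illustrative): splitting a 256-byte free block to satisfy
 * a 64-byte request trims the block to 64 bytes and chains a new FREE block
 * of 256 - 64 - HDR_BLOCK bytes behind it. If the leftover would be too
 * small to hold a block header plus any payload, the block is handed out
 * whole.
 */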

/*
 * Defragmentation
 */
static void
defrag(struct page *page)
{
	struct block *block;

	for (block = page->block; block; block = block->next) {
		struct block *block2;

		if (block->status == BUSY)
			continue;
		for (block2 = block->next; block2 && block2->status == FREE;
		    block2 = block2->next) {
			block->next = block2->next;
			block->size += block2->size + HDR_BLOCK;
		}
	}

	/*
	 * Free page
	 */
	if (page->block->size == page->size - HDR_PAGE) {
		if (page == memstart)
			memstart = page->next;
		else {
			struct page *page2;
			for (page2 = memstart; page2->next;
			    page2 = page2->next) {
				if (page2->next == page) {
					page2->next = page->next;
					break;
				}
			}
		}
		(void) munmap((caddr_t)page, page->size);
	}
}
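
/*
 * Example: three neighbouring blocks FREE(32), FREE(48), BUSY(64) collapse
 * into FREE(32 + 48 + HDR_BLOCK), BUSY(64). When coalescing leaves the
 * page's first block covering the entire page, the page is unmapped.
 */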
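
/*
 * Fork handlers: pthread_atfork() arranges for malloc_prepare() to run in
 * the parent just before fork() and for malloc_release() to run in both
 * the parent and the child afterwards, so the child never inherits the
 * allocator lock in a locked state from some other thread.
 */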
static void
malloc_prepare()
{
	(void) mutex_lock(&lock);
}

static void
malloc_release()
{
	(void) mutex_unlock(&lock);
}

#pragma init(malloc_init)
static void
malloc_init(void)
{
	(void) pthread_atfork(malloc_prepare, malloc_release, malloc_release);
}