/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

#include <sys/types.h>

/*
 * Simplified versions of malloc(), free() and realloc(), to be linked with
 * utilities that use [s]brk() and do not define their own versions of
 * these routines.
 *
 * The algorithm formerly obtained extra memory space by mmap'ing /dev/zero.
 * That breaks if the application closes the open descriptor, so it now uses
 * mmap's MAP_ANON feature.
 *
 * Each call to mmap() creates a "page" (one or more system pages); the
 * pages are linked in a list. Each page is divided into blocks, with at
 * least one block per page. New memory chunks are allocated on a first-fit
 * basis. Freed blocks are coalesced into larger blocks, and fully free
 * pages are unmapped.
 */
#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <thread.h>
#include <pthread.h>
#include <synch.h>
#include <string.h>

/* Protects all allocator state below */
static mutex_t lock = DEFAULTMUTEX;

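/*
 * Every allocation is preceded by a struct block header; memstart marks
 * the first byte handed back to the caller.  Each mapped region begins
 * with a struct page header followed by its first block.
 */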
struct block {
	size_t size;		/* Space available for user */
	struct page *page;	/* Backwards reference to page */
	int status;		/* FREE or BUSY */
	struct block *next;	/* Next block in this page */
	void *memstart[1];	/* Start of the user area */
};

struct page {
	size_t size;		/* Total page size (incl. header) */
	struct page *next;	/* Next mapped page */
	struct block block[1];	/* First block in this page */
};

#define	FREE	0
#define	BUSY	1

#define	HDR_BLOCK	(sizeof (struct block) - sizeof (void *))
#define	HDR_PAGE	(sizeof (struct page) - sizeof (void *))
#define	MINSZ		sizeof (double)

/* for convenience */
#ifndef	NULL
#define	NULL		(0)
#endif

struct page *memstart;		/* Head of the list of mapped pages */
static int pagesize;		/* System page size, set on first malloc() */
static void defrag(struct page *);
static void split(struct block *, size_t);
static void *malloc_unlocked(size_t);
static size_t align(size_t, int);

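/*
 * Public malloc(): serialize access to the allocator and defer the real
 * work to malloc_unlocked().
 */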
void *
malloc(size_t size)
{
	void *retval;
	(void) mutex_lock(&lock);
	retval = malloc_unlocked(size);
	(void) mutex_unlock(&lock);
	return (retval);
}

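/*
 * Core allocator: walk every page looking for the first free block large
 * enough for the request (first fit); if none is found, map a fresh
 * anonymous region and carve the block out of it.
 */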
static void *
malloc_unlocked(size_t size)
{
	struct block *block;
	struct page *page;

	if (pagesize == 0)
		pagesize = (int)sysconf(_SC_PAGESIZE);

	size = align(size, MINSZ);

	/*
	 * Try to locate necessary space
	 */
	for (page = memstart; page; page = page->next) {
		for (block = page->block; block; block = block->next) {
			if (block->status == FREE && block->size >= size)
				goto found;
		}
	}
found:

	/*
	 * Need to allocate a new page
	 */
	if (!page) {
		size_t totsize = size + HDR_PAGE;
		size_t totpage = align(totsize, pagesize);

		if ((page = (struct page *)mmap(0, totpage,
		    PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0))
		    == MAP_FAILED)
			return (0);

		page->next = memstart;
		memstart = page;
		page->size = totpage;
		block = page->block;
		block->next = 0;
		block->status = FREE;
		block->size = totpage - HDR_PAGE;
		block->page = page;
	}

	split(block, size);

	block->status = BUSY;
	return (&block->memstart);
}

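/*
 * realloc: first try to grow the block in place by absorbing the free
 * block that follows it; if the request still does not fit, allocate a
 * new block and copy the old contents across.
 */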
void *
realloc(void *ptr, size_t size)
{
	struct block *block;
	size_t osize;
	void *newptr;

	(void) mutex_lock(&lock);
	if (ptr == NULL) {
		newptr = malloc_unlocked(size);
		(void) mutex_unlock(&lock);
		return (newptr);
	}
	block = (struct block *)((char *)ptr - HDR_BLOCK);
	size = align(size, MINSZ);
	osize = block->size;

	/*
	 * Join block with next one if it is free
	 */
	if (block->next && block->next->status == FREE) {
		block->size += block->next->size + HDR_BLOCK;
		block->next = block->next->next;
	}

	if (size <= block->size) {
		split(block, size);
		(void) mutex_unlock(&lock);
		return (ptr);
	}

	/*
	 * Move to a new block; on failure leave the old block untouched.
	 */
	newptr = malloc_unlocked(size);
	if (newptr != NULL) {
		(void) memcpy(newptr, ptr, osize);
		block->status = FREE;
		defrag(block->page);
	}
	(void) mutex_unlock(&lock);
	return (newptr);
}

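/*
 * free: mark the block free and coalesce neighbouring free blocks; the
 * backing page is unmapped once it is entirely free.
 */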
void
free(void *ptr)
{
	struct block *block;

	(void) mutex_lock(&lock);
	if (ptr == NULL) {
		(void) mutex_unlock(&lock);
		return;
	}
	block = (struct block *)((char *)ptr - HDR_BLOCK);
	block->status = FREE;

	defrag(block->page);
	(void) mutex_unlock(&lock);
}

/*
 * Round size up to the next multiple of bound (and to at least bound)
 */
static size_t
align(size_t size, int bound)
{
	if (size < bound)
		return ((size_t)bound);
	else
		return (size + bound - 1 - (size + bound - 1) % bound);
}

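/*
 * Split a free block in two when it is big enough to hold the request
 * plus another block header; the remainder becomes a new free block.
 */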
static void
split(struct block *block, size_t size)
{
	if (block->size > size + sizeof (struct block)) {
		struct block *newblock;
		newblock = (struct block *)((char *)block + HDR_BLOCK + size);
		newblock->next = block->next;
		block->next = newblock;
		newblock->status = FREE;
		newblock->page = block->page;
		newblock->size = block->size - size - HDR_BLOCK;
		block->size = size;
	}
}

/*
 * Coalesce adjacent free blocks; unmap the page once it is entirely free
 */
static void
defrag(struct page *page)
{
	struct block *block;

	for (block = page->block; block; block = block->next) {
		struct block *block2;

		if (block->status == BUSY)
			continue;
		for (block2 = block->next; block2 && block2->status == FREE;
		    block2 = block2->next) {
			block->next = block2->next;
			block->size += block2->size + HDR_BLOCK;
		}
	}

	/*
	 * The page is one big free block: give it back to the system
	 */
	if (page->block->size == page->size - HDR_PAGE) {
		if (page == memstart)
			memstart = page->next;
		else {
			struct page *page2;
			for (page2 = memstart; page2->next;
			    page2 = page2->next) {
				if (page2->next == page) {
					page2->next = page->next;
					break;
				}
			}
		}
		(void) munmap((caddr_t)page, page->size);
	}
}

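/*
 * fork() handlers: hold the allocator lock across fork() so that a child
 * created while another thread is inside malloc() does not inherit the
 * lock in a locked state.
 */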
static void
malloc_prepare(void)
{
	(void) mutex_lock(&lock);
}

static void
malloc_release(void)
{
	(void) mutex_unlock(&lock);
}

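/*
 * Run at library load time (via #pragma init) to register the fork
 * handlers above.
 */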
#pragma init(malloc_init)
static void
malloc_init(void)
{
	(void) pthread_atfork(malloc_prepare, malloc_release, malloc_release);
}