/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"	/* SVR4/MNLS 1.1.2.1 */

/*LINTLIBRARY*/

#include <sys/types.h>


/*
 * Simplified versions of malloc(), free() and realloc(), to be linked with
 * utilities that use [s]brk() and do not define their own versions of
 * these routines.
 *
 * The allocator used to obtain extra memory space by mmap'ing /dev/zero.
 * That breaks if the application closes the open descriptor, so it now
 * uses mmap's MAP_ANON feature instead.
 *
 * Each call to mmap() creates a page. The pages are linked in a list.
 * Each page is divided into blocks; there is at least one block per page.
 * New memory chunks are allocated on a first-fit basis.
 * Freed blocks are coalesced into larger blocks. Free pages are unmapped.
 */
#include <c_synonyms.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <thread.h>
#include <pthread.h>
#include <synch.h>
#include <string.h>

static mutex_t lock = DEFAULTMUTEX;

struct block {
	size_t size;		/* Space available for user */
	struct page *page;	/* Backwards reference to page */
	int status;
	struct block *next;
	void *memstart[1];
};

struct page {
	size_t size;		/* Total page size (incl. header) */
	struct page *next;
	struct block block[1];
};

#define	FREE	0
#define	BUSY	1

#define	HDR_BLOCK	(sizeof (struct block) - sizeof (void *))
#define	HDR_PAGE	(sizeof (struct page) - sizeof (void *))
#define	MINSZ		sizeof (double)
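
/*
 * Informal sketch of the arena layout implied by the structures above:
 *
 *	[ page header | block header | user data | block header | ... ]
 *	  (one mmap'ed region)
 *
 * HDR_BLOCK is the size of a block header up to its memstart field, so
 * &block->memstart, the pointer handed back to the caller, lies HDR_BLOCK
 * bytes into the block.  HDR_PAGE is the page header plus one block
 * header, so a freshly mapped region of totpage bytes gives its single
 * initial block totpage - HDR_PAGE bytes of user space.
 */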

/* for convenience */
#ifndef	NULL
#define	NULL		(0)
#endif

struct page *memstart;
static int pagesize;
static void defrag(struct page *);
static void split(struct block *, size_t);
static void *malloc_unlocked(size_t);
static size_t align(size_t, int);

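/*
 * Public entry point: serialize on the allocator lock and defer to
 * malloc_unlocked() for the actual allocation.
 */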
void *
malloc(size_t size)
{
	void *retval;
	(void) mutex_lock(&lock);
	retval = malloc_unlocked(size);
	(void) mutex_unlock(&lock);
	return (retval);
}


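/*
 * Allocate size bytes with the lock already held: round the request up,
 * first-fit scan the page list for a FREE block that is large enough,
 * mmap() a fresh anonymous region when none is found, then split off any
 * excess and mark the chosen block BUSY.
 */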
static void *
malloc_unlocked(size_t size)
{
	struct block *block;
	struct page *page;

	if (pagesize == 0)
		pagesize = (int)sysconf(_SC_PAGESIZE);

	size = align(size, MINSZ);

	/*
	 * Try to locate necessary space
	 */
	for (page = memstart; page; page = page->next) {
		for (block = page->block; block; block = block->next) {
			if (block->status == FREE && block->size >= size)
				goto found;
		}
	}
found:

	/*
	 * Need to allocate a new page
	 */
	if (!page) {
		size_t totsize = size + HDR_PAGE;
		size_t totpage = align(totsize, pagesize);

		if ((page = (struct page *)mmap(0, totpage,
		    PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0))
		    == MAP_FAILED)
			return (0);

		page->next = memstart;
		memstart = page;
		page->size = totpage;
		block = page->block;
		block->next = 0;
		block->status = FREE;
		block->size = totpage - HDR_PAGE;
		block->page = page;
	}

	split(block, size);

	block->status = BUSY;
	return (&block->memstart);
}
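/*
 * Resize an allocation: grow in place when the following block is free
 * and large enough, otherwise allocate a new block, copy the old
 * contents, and release the old block.
 */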
void *
realloc(void *ptr, size_t size)
{
	struct block *block;
	size_t osize;
	void *newptr;

	(void) mutex_lock(&lock);
	if (ptr == NULL) {
		newptr = malloc_unlocked(size);
		(void) mutex_unlock(&lock);
		return (newptr);
	}
	block = (struct block *)((char *)ptr - HDR_BLOCK);
	size = align(size, MINSZ);
	osize = block->size;

	/*
	 * Join block with next one if it is free
	 */
	if (block->next && block->next->status == FREE) {
		block->size += block->next->size + HDR_BLOCK;
		block->next = block->next->next;
	}

	if (size <= block->size) {
		split(block, size);
		(void) mutex_unlock(&lock);
		return (ptr);
	}

	/*
	 * Move to a new block; on allocation failure leave the original
	 * block untouched, as realloc() is specified to do.
	 */
	newptr = malloc_unlocked(size);
	if (newptr == NULL) {
		(void) mutex_unlock(&lock);
		return (NULL);
	}
	(void) memcpy(newptr, ptr, osize);
	block->status = FREE;
	defrag(block->page);
	(void) mutex_unlock(&lock);
	return (newptr);
}
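/*
 * Release an allocation: mark its block FREE and let defrag() coalesce
 * neighbouring free blocks and unmap the page if it becomes empty.
 */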
void
free(void *ptr)
{
	struct block *block;

	(void) mutex_lock(&lock);
	if (ptr == NULL) {
		(void) mutex_unlock(&lock);
		return;
	}
	block = (struct block *)((char *)ptr - HDR_BLOCK);
	block->status = FREE;

	defrag(block->page);
	(void) mutex_unlock(&lock);
}

/*
 * Round size up to a multiple of bound, with a minimum of bound bytes
 */
static size_t
align(size_t size, int bound)
{
	if (size < bound)
		return ((size_t)bound);
	else
		return (size + bound - 1 - (size + bound - 1) % bound);
}
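/*
 * Carve a block down to size bytes when it is large enough that the
 * remainder can hold a block header plus some data; the tail becomes a
 * new FREE block linked after the original one.
 */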
static void
split(struct block *block, size_t size)
{
	if (block->size > size + sizeof (struct block)) {
		struct block *newblock;
		newblock = (struct block *)((char *)block + HDR_BLOCK + size);
		newblock->next = block->next;
		block->next = newblock;
		newblock->status = FREE;
		newblock->page = block->page;
		newblock->size = block->size - size - HDR_BLOCK;
		block->size = size;
	}
}

/*
 * Coalesce adjacent free blocks on a page, and unmap the page once its
 * first block has grown to cover the whole page, which can only happen
 * when every block on it is free.
 */
static void
defrag(struct page *page)
{
	struct block *block;

	for (block = page->block; block; block = block->next) {
		struct block *block2;

		if (block->status == BUSY)
			continue;
		for (block2 = block->next; block2 && block2->status == FREE;
		    block2 = block2->next) {
			block->next = block2->next;
			block->size += block2->size + HDR_BLOCK;
		}
	}

	/*
	 * Free page
	 */
	if (page->block->size == page->size - HDR_PAGE) {
		if (page == memstart)
			memstart = page->next;
		else {
			struct page *page2;
			for (page2 = memstart; page2->next;
			    page2 = page2->next) {
				if (page2->next == page) {
					page2->next = page->next;
					break;
				}
			}
		}
		(void) munmap((caddr_t)page, page->size);
	}
}
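/*
 * fork() handlers: the allocator lock is taken before fork() and released
 * in both the parent and the child afterwards, so the child never starts
 * out with the lock held by a thread that does not exist in it.
 */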
static void
malloc_prepare(void)
{
	(void) mutex_lock(&lock);
}

static void
malloc_release(void)
{
	(void) mutex_unlock(&lock);
}

#pragma init(malloc_init)
static void
malloc_init(void)
{
	(void) pthread_atfork(malloc_prepare, malloc_release, malloc_release);
}