xref: /titanic_41/usr/src/lib/libast/common/vmalloc/vmlast.c (revision 581cede61ac9c14d8d4ea452562a567189eead78)
1 /***********************************************************************
2 *                                                                      *
3 *               This software is part of the ast package               *
4 *          Copyright (c) 1985-2008 AT&T Intellectual Property          *
5 *                      and is licensed under the                       *
6 *                  Common Public License, Version 1.0                  *
7 *                    by AT&T Intellectual Property                     *
8 *                                                                      *
9 *                A copy of the License is available at                 *
10 *            http://www.opensource.org/licenses/cpl1.0.txt             *
11 *         (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)         *
12 *                                                                      *
13 *              Information and Software Systems Research               *
14 *                            AT&T Research                             *
15 *                           Florham Park NJ                            *
16 *                                                                      *
17 *                 Glenn Fowler <gsf@research.att.com>                  *
18 *                  David Korn <dgk@research.att.com>                   *
19 *                   Phong Vo <kpv@research.att.com>                    *
20 *                                                                      *
21 ***********************************************************************/
22 #if defined(_UWIN) && defined(_BLD_ast)
23 
/* On UWIN builds of ast the real implementation below is compiled out;
** emit a stub symbol so the object file is not empty. */
24 void _STUB_vmlast(){}
25 
26 #else
27 
28 #include	"vmhdr.h"
29 
30 /*	Allocation with freeing and reallocing of last allocated block only.
31 **
32 **	Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
33 */
34 
/*
** lastalloc: allocate a block of at least `size` bytes.  In the "last"
** method only the most recently allocated block (vd->free) may later be
** freed or resized, so bookkeeping is minimal: each segment keeps at
** most one free block (seg->free) and remembers its last allocation
** (seg->last).  Returns the new block, or NIL on failure/contention.
*/
35 #if __STD_C
36 static Void_t* lastalloc(Vmalloc_t* vm, size_t size)
37 #else
38 static Void_t* lastalloc(vm, size)
39 Vmalloc_t*	vm;
40 size_t		size;
41 #endif
42 {
43 	reg Block_t	*tp, *next;
44 	reg Seg_t	*seg, *last;
45 	reg size_t	s;
46 	reg Vmdata_t*	vd = vm->data;
47 	reg int		local, inuse;
48 	size_t		orgsize = 0;
49 
50 	SETINUSE(vd, inuse);
	/* non-trusting region: serialize access; give up if region is busy */
51 	if(!(local = vd->mode&VM_TRUST))
52 	{	GETLOCAL(vd,local);
53 		if(ISLOCK(vd,local))
54 		{	CLRINUSE(vd, inuse);
55 			return NIL(Void_t*);
56 		}
57 		SETLOCK(vd,local);
58 		orgsize = size;	/* caller's original size, kept for tracing */
59 	}
60 
	/* round the request up to the allocation alignment */
61 	size = size < ALIGN ? ALIGN : ROUND(size,ALIGN);
62 	for(;;)
	/* scan segments for one whose free block can satisfy the request */
63 	{	for(last = NIL(Seg_t*), seg = vd->seg; seg; last = seg, seg = seg->next)
64 		{	if(!(tp = seg->free) || (SIZE(tp)+sizeof(Head_t)) < size)
65 				continue;
			/* move the found segment to the head of the list */
66 			if(last)
67 			{	last->next = seg->next;
68 				seg->next = vd->seg;
69 				vd->seg = seg;
70 			}
71 			goto got_block;
72 		}
73 
74 		/* there is no usable free space in region, try extending */
75 		if((tp = (*_Vmextend)(vm,size,NIL(Vmsearch_f))) )
76 		{	seg = SEG(tp);
77 			goto got_block;
78 		}
79 		else if(vd->mode&VM_AGAIN)
80 			vd->mode &= ~VM_AGAIN;	/* clear the flag and retry once more */
81 		else	goto done;	/* tp is NIL here: allocation failed */
82 	}
83 
84 got_block:
	/* split off the tail of the free block, if any, as the new seg->free */
85 	if((s = SIZE(tp)) >= size)
86 	{	next = (Block_t*)((Vmuchar_t*)tp+size);
87 		SIZE(next) = s - size;
88 		SEG(next) = seg;
89 		seg->free = next;
90 	}
91 	else	seg->free = NIL(Block_t*);
92 
	/* record tp as the one block eligible for later free/resize */
93 	vd->free = seg->last = tp;
94 
95 	if(!local && (vd->mode&VM_TRACE) && _Vmtrace)
96 		(*_Vmtrace)(vm, NIL(Vmuchar_t*), (Vmuchar_t*)tp, orgsize, 0);
97 
98 done:
99 	CLRLOCK(vd,local);
100 	ANNOUNCE(local, vm, VM_ALLOC, (Void_t*)tp, vm->disc);
101 	CLRINUSE(vd, inuse);
102 	return (Void_t*)tp;
103 }
104 
/*
** lastfree: free a block.  Only the most recent allocation (vd->free)
** may be freed; any other address raises VM_BADADDR via the discipline
** exception handler.  The freed space (through to the end of its
** segment) becomes that segment's free block, available for reuse.
** Returns 0 on success, -1 on error.
*/
105 #if __STD_C
106 static int lastfree(Vmalloc_t* vm, reg Void_t* data )
107 #else
108 static int lastfree(vm, data)
109 Vmalloc_t*	vm;
110 reg Void_t*	data;
111 #endif
112 {
113 	reg Seg_t*	seg;
114 	reg Block_t*	fp;
115 	reg size_t	s;
116 	reg Vmdata_t*	vd = vm->data;
117 	reg int		local, inuse;
118 
	/* freeing NIL is a no-op */
119 	if(!data)
120 		return 0;
121 
122 	SETINUSE(vd, inuse);
123 	if(!(local = vd->mode&VM_TRUST) )
124 	{	GETLOCAL(vd, local);
125 		if(ISLOCK(vd, local))
126 		{	CLRINUSE(vd, inuse);
127 			return -1;
128 		}
129 		SETLOCK(vd, local);
130 	}
	/* only the last allocated block may be freed in this method */
131 	if(data != (Void_t*)vd->free)
132 	{	if(!local && vm->disc->exceptf)
133 			(void)(*vm->disc->exceptf)(vm,VM_BADADDR,data,vm->disc);
134 		CLRLOCK(vd, local);
135 		CLRINUSE(vd, inuse);
136 		return -1;
137 	}
138 
	/* the last allocation's segment was moved to the list head by lastalloc */
139 	seg = vd->seg;
140 	if(!local && (vd->mode&VM_TRACE) && _Vmtrace)
	/* traced size: distance to the segment's free block, else to its end */
141 	{	if(seg->free )
142 			s = (Vmuchar_t*)(seg->free) - (Vmuchar_t*)data;
143 		else	s = (Vmuchar_t*)BLOCK(seg->baddr) - (Vmuchar_t*)data;
144 		(*_Vmtrace)(vm, (Vmuchar_t*)data, NIL(Vmuchar_t*), s, 0);
145 	}
146 
	/* turn the block plus everything after it in the segment into free space */
147 	vd->free = NIL(Block_t*);
148 	fp = (Block_t*)data;
149 	SEG(fp)  = seg;
150 	SIZE(fp) = ((Vmuchar_t*)BLOCK(seg->baddr) - (Vmuchar_t*)data) - sizeof(Head_t);
151 	seg->free = fp;
152 	seg->last = NIL(Block_t*);
153 
154 	CLRLOCK(vd, local);
155 	ANNOUNCE(local, vm, VM_FREE, data, vm->disc);
156 	CLRINUSE(vd, inuse);
157 	return 0;
158 }
159 
/*
** lastresize: resize `data` to at least `size` bytes.  `data` should be
** the last allocated block; other addresses inside the region are
** accepted only after the validity checks below, and then cannot be
** grown in place.  `type` is a bit-set of VM_RSMOVE (block may move),
** VM_RSCOPY (old contents copied on move) and VM_RSZERO (extension is
** zero-filled).  NIL data degenerates to lastalloc; size 0 degenerates
** to lastfree.  Returns the (possibly moved) block, or NIL on failure.
*/
160 #if __STD_C
161 static Void_t* lastresize(Vmalloc_t* vm, reg Void_t* data, size_t size, int type )
162 #else
163 static Void_t* lastresize(vm, data, size, type )
164 Vmalloc_t*	vm;
165 reg Void_t*	data;
166 size_t		size;
167 int		type;
168 #endif
169 {
170 	reg Block_t*	tp;
171 	reg Seg_t	*seg;
172 	reg size_t	oldsize;
173 	reg ssize_t	s, ds;
174 	reg Vmdata_t*	vd = vm->data;
175 	reg int		local, inuse;
176 	reg Void_t*	addr;
177 	Void_t*		orgdata = NIL(Void_t*);
178 	size_t		orgsize = 0;
179 
180 	SETINUSE(vd, inuse);
	/* resizing NIL is an allocation */
181 	if(!data)
182 	{	oldsize = 0;
183 		data = lastalloc(vm,size);
184 		goto done;
185 	}
	/* resizing to nothing is a free */
186 	if(size <= 0)
187 	{	(void)lastfree(vm,data);
188 		CLRINUSE(vd, inuse);
189 		return NIL(Void_t*);
190 	}
191 
192 	if(!(local = vd->mode&VM_TRUST))
193 	{	GETLOCAL(vd, local);
194 		if(ISLOCK(vd, local))
195 		{	CLRINUSE(vd, inuse);
196 			return NIL(Void_t*);
197 		}
198 		SETLOCK(vd, local);
199 		orgdata = data;	/* caller's address and size, kept for tracing */
200 		orgsize = size;
201 	}
202 
203 	if(data == (Void_t*)vd->free)
204 		seg = vd->seg;	/* last block: it lives in the head segment */
205 	else
206 	{	/* see if it was one of ours */
207 		for(seg = vd->seg; seg; seg = seg->next)
208 			if(data >= seg->addr && data < (Void_t*)seg->baddr)
209 				break;
		/* reject addresses outside the region, misaligned ones, or
		** ones past the segment's last allocation */
210 		if(!seg || (VLONG(data)%ALIGN) != 0 ||
211 		   (seg->last && (Vmuchar_t*)data > (Vmuchar_t*)seg->last) )
		/* NOTE(review): unlocks with literal 0 rather than `local`,
		** unlike the other error paths in this file — verify intent */
212 		{	CLRLOCK(vd,0);
213 			CLRINUSE(vd, inuse);
214 			return NIL(Void_t*);
215 		}
216 	}
217 
218 	/* set 's' to be the current available space */
219 	if(data != seg->last)
	/* not the last allocation: s = -1 marks "cannot grow in place" */
220 	{	if(seg->last && (Vmuchar_t*)data < (Vmuchar_t*)seg->last)
221 			oldsize = (Vmuchar_t*)seg->last - (Vmuchar_t*)data;
222 		else	oldsize = (Vmuchar_t*)BLOCK(seg->baddr) - (Vmuchar_t*)data;
223 		s = -1;
224 	}
225 	else
	/* last allocation: all space to the segment end is usable */
226 	{	s = (Vmuchar_t*)BLOCK(seg->baddr) - (Vmuchar_t*)data;
227 		if(!(tp = seg->free) )
228 			oldsize = s;
229 		else
230 		{	oldsize = (Vmuchar_t*)tp - (Vmuchar_t*)data;
231 			seg->free = NIL(Block_t*);	/* absorb the trailing free block */
232 		}
233 	}
234 
235 	size = size < ALIGN ? ALIGN : ROUND(size,ALIGN);
236 	if(s < 0 || (ssize_t)size > s)
237 	{	if(s >= 0) /* amount to extend */
238 		{	ds = size-s; ds = ROUND(ds,vd->incr);
			/* ask the discipline memory function to grow the segment in place */
239 			addr = (*vm->disc->memoryf)(vm, seg->addr, seg->extent,
240 						    seg->extent+ds, vm->disc);
241 			if(addr == seg->addr)
242 			{	s += ds;
243 				seg->size += ds;
244 				seg->extent += ds;
245 				seg->baddr += ds;
246 				SIZE(BLOCK(seg->baddr)) = BUSY;	/* new end-of-segment sentinel */
247 			}
248 			else	goto do_alloc;
249 		}
250 		else
251 		{ do_alloc:
			/* in-place growth impossible/failed: allocate anew if moving is allowed */
252 			if(!(type&(VM_RSMOVE|VM_RSCOPY)) )
253 				data = NIL(Void_t*);
254 			else
255 			{	tp = vd->free;	/* save so it can be restored on failure */
256 				if(!(addr = KPVALLOC(vm,size,lastalloc)) )
257 				{	vd->free = tp;
258 					data = NIL(Void_t*);
259 				}
260 				else
261 				{	if(type&VM_RSCOPY)
262 					{	ds = oldsize < size ? oldsize : size;
263 						memcpy(addr, data, ds);
264 					}
265 
					/* give the old space back to its (non-head) segment */
266 					if(s >= 0 && seg != vd->seg)
267 					{	tp = (Block_t*)data;
268 						SEG(tp) = seg;
269 						SIZE(tp) = s - sizeof(Head_t);
270 						seg->free = tp;
271 					}
272 
273 					/* new block and size */
274 					data = addr;
275 					seg = vd->seg;
276 					s = (Vmuchar_t*)BLOCK(seg->baddr) -
277 					    (Vmuchar_t*)data;
278 					seg->free = NIL(Block_t*);
279 				}
280 			}
281 		}
282 	}
283 
284 	if(data)
	/* return any leftover tail space to the segment as its free block */
285 	{	if(s >= (ssize_t)(size+sizeof(Head_t)) )
286 		{	tp = (Block_t*)((Vmuchar_t*)data + size);
287 			SEG(tp) = seg;
288 			SIZE(tp) = (s - size) - sizeof(Head_t);
289 			seg->free = tp;
290 		}
291 
		/* the resized block becomes the new "last" allocation */
292 		vd->free = seg->last = (Block_t*)data;
293 
294 		if(!local && (vd->mode&VM_TRACE) && _Vmtrace)
295 			(*_Vmtrace)(vm,(Vmuchar_t*)orgdata,(Vmuchar_t*)data,orgsize,0);
296 	}
297 
298 	CLRLOCK(vd, local);
299 	ANNOUNCE(local, vm, VM_RESIZE, data, vm->disc);
300 
	/* zero-fill the newly exposed space if requested */
301 done:	if(data && (type&VM_RSZERO) && size > oldsize)
302 		memset((Void_t*)((Vmuchar_t*)data + oldsize), 0, size-oldsize);
303 
304 	CLRINUSE(vd, inuse);
305 	return data;
306 }
307 
308 
/*
** lastaddr: if `addr` lies inside the last allocated block, return its
** offset from the start of that block; otherwise return -1.
*/
309 #if __STD_C
310 static long lastaddr(Vmalloc_t* vm, Void_t* addr)
311 #else
312 static long lastaddr(vm, addr)
313 Vmalloc_t*	vm;
314 Void_t*		addr;
315 #endif
316 {
317 	reg Vmdata_t*	vd = vm->data;
318 
319 	if(!(vd->mode&VM_TRUST) && ISLOCK(vd,0))
320 		return -1L;
	/* the last block runs from vd->free toward the head segment's end */
321 	if(!vd->free || addr < (Void_t*)vd->free || addr >= (Void_t*)vd->seg->baddr)
322 		return -1L;
323 	else	return (Vmuchar_t*)addr - (Vmuchar_t*)vd->free;
324 }
325 
/*
** lastsize: return the usable size of the last allocated block, or -1
** if `addr` is not exactly that block (or the region is busy).
*/
326 #if __STD_C
327 static long lastsize(Vmalloc_t* vm, Void_t* addr)
328 #else
329 static long lastsize(vm, addr)
330 Vmalloc_t*	vm;
331 Void_t*		addr;
332 #endif
333 {
334 	reg Vmdata_t*	vd = vm->data;
335 
336 	if(!(vd->mode&VM_TRUST) && ISLOCK(vd,0))
337 		return -1L;
338 	if(!vd->free || addr != (Void_t*)vd->free )
339 		return -1L;
	/* the block runs to the segment's free block, else to the segment end */
340 	else if(vd->seg->free)
341 		return (Vmuchar_t*)vd->seg->free - (Vmuchar_t*)addr;
342 	else	return (Vmuchar_t*)vd->seg->baddr - (Vmuchar_t*)addr - sizeof(Head_t);
343 }
344 
/*
** lastcompact: give each segment's free space back to the system by
** truncating segments via _Vmtruncate.  Returns 0 on success, -1 if
** the region is busy.
*/
345 #if __STD_C
346 static int lastcompact(Vmalloc_t* vm)
347 #else
348 static int lastcompact(vm)
349 Vmalloc_t*	vm;
350 #endif
351 {
352 	reg Block_t*	fp;
353 	reg Seg_t	*seg, *next;
354 	reg size_t	s;
355 	reg Vmdata_t*	vd = vm->data;
356 	reg int		inuse;
357 
358 	SETINUSE(vd, inuse);
359 	if(!(vd->mode&VM_TRUST))
360 	{	if(ISLOCK(vd,0))
361 		{	CLRINUSE(vd, inuse);
362 			return -1;
363 		}
364 		SETLOCK(vd,0);
365 	}
366 
367 	for(seg = vd->seg; seg; seg = next)
368 	{	next = seg->next;	/* saved first — presumably _Vmtruncate can release seg; verify */
369 
370 		if(!(fp = seg->free))
371 			continue;
372 
373 		seg->free = NIL(Block_t*);
		/* free block spans the whole segment: offer the full extent back */
374 		if(seg->size == (s = SIZE(fp)&~BITS))
375 			s = seg->extent;
376 		else	s += sizeof(Head_t);
377 
		/* restore the free block only when _Vmtruncate reports s back
		** unchanged — TODO confirm _Vmtruncate return semantics */
378 		if((*_Vmtruncate)(vm,seg,s,1) == s)
379 			seg->free = fp;
380 	}
381 
382 	if((vd->mode&VM_TRACE) && _Vmtrace)
383 		(*_Vmtrace)(vm,(Vmuchar_t*)0,(Vmuchar_t*)0,0,0);
384 
385 	CLRLOCK(vd,0);
386 	CLRINUSE(vd, inuse);
387 	return 0;
388 }
389 
/*
** lastalign: allocate `size` bytes whose address is a multiple of
** `align`.  Implemented by over-allocating size+align via lastalloc,
** bumping the address up to alignment, and returning the unused tail
** to the segment.  Returns the aligned block, or NIL on failure.
*/
390 #if __STD_C
391 static Void_t* lastalign(Vmalloc_t* vm, size_t size, size_t align)
392 #else
393 static Void_t* lastalign(vm, size, align)
394 Vmalloc_t*	vm;
395 size_t		size;
396 size_t		align;
397 #endif
398 {
399 	reg Vmuchar_t*	data;
400 	reg Seg_t*	seg;
401 	reg Block_t*	next;
402 	reg int		local, inuse;
403 	reg size_t	s, orgsize = 0, orgalign = 0;
404 	reg Vmdata_t*	vd = vm->data;
405 
406 	if(size <= 0 || align <= 0)
407 		return NIL(Void_t*);
408 
409 	SETINUSE(vd, inuse);
410 	if(!(local = vd->mode&VM_TRUST) )
411 	{	GETLOCAL(vd,local);
412 		if(ISLOCK(vd,local) )
413 		{	CLRINUSE(vd, inuse);
414 			return NIL(Void_t*);
415 		}
416 		SETLOCK(vd,local);
417 		orgsize = size;	/* caller's values, kept for tracing */
418 		orgalign = align;
419 	}
420 
421 	size = size <= TINYSIZE ? TINYSIZE : ROUND(size,ALIGN);
422 	align = MULTIPLE(align,ALIGN);
423 
	/* over-allocate so an aligned address always fits inside the block */
424 	s = size + align;
425 	if(!(data = (Vmuchar_t*)KPVALLOC(vm,s,lastalloc)) )
426 		goto done;
427 
428 	/* find the segment containing this block */
429 	for(seg = vd->seg; seg; seg = seg->next)
430 		if(seg->last == (Block_t*)data)
431 			break;
432 	/**/ASSERT(seg);
433 
434 	/* get a suitably aligned address */
435 	if((s = (size_t)(VLONG(data)%align)) != 0)
436 		data += align-s; /**/ASSERT((VLONG(data)%align) == 0);
437 
438 	/* free the unused tail */
439 	next = (Block_t*)(data+size);
440 	if((s = (seg->baddr - (Vmuchar_t*)next)) >= sizeof(Block_t))
441 	{	SEG(next) = seg;
442 		SIZE(next) = s - sizeof(Head_t);
443 		seg->free = next;
444 	}
445 
	/* the aligned address becomes the block eligible for free/resize */
446 	vd->free = seg->last = (Block_t*)data;
447 
448 	if(!local && !(vd->mode&VM_TRUST) && _Vmtrace && (vd->mode&VM_TRACE) )
449 		(*_Vmtrace)(vm,NIL(Vmuchar_t*),data,orgsize,orgalign);
450 
451 done:
452 	CLRLOCK(vd,local);
453 	ANNOUNCE(local, vm, VM_ALLOC, (Void_t*)data, vm->disc);
454 
455 	CLRINUSE(vd, inuse);
456 	return (Void_t*)data;
457 }
458 
459 /* Public method for free-1 allocation */
/* Method table wiring the last-block functions into the vmalloc
** framework; slot order follows Vmethod_t (declared in vmhdr.h). */
460 static Vmethod_t _Vmlast =
461 {
462 	lastalloc,
463 	lastresize,
464 	lastfree,
465 	lastaddr,
466 	lastsize,
467 	lastcompact,
468 	lastalign,
469 	VM_MTLAST
470 };
471 
/* Export the public handle Vmlast as a pointer to the method table. */
472 __DEFINE__(Vmethod_t*,Vmlast,&_Vmlast);
473 
474 #ifdef NoF
475 NoF(vmlast)
476 #endif
477 
478 #endif
479