xref: /freebsd/sys/vm/vm_glue.c (revision 0c43d89a0d8e976ca494d4837f4c1f3734d2c300)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_glue.c,v 1.5 1994/08/09 10:42:41 davidg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <sys/kernel.h>
#include <sys/dkstat.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>

#include <machine/stdarg.h>

extern char kstack[];
int	avefree = 0;		/* XXX */
int	readbuffers = 0;	/* XXX allow kgdb to read kernel buffer pool */
/* vm_map_t upages_map; */

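/*
 * Check whether the kernel map allows the given access (B_READ means
 * read, anything else write) to the range [addr, addr + len).  Returns
 * true only if every page in the page-rounded range has the required
 * protection in kernel_map.
 */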
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr+len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return(rv == TRUE);
}

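/*
 * Like kernacc, but checks the current process' map.  Rejects any range
 * that extends past VM_MAXUSER_ADDRESS or wraps around, so the user
 * area and user page tables are never considered accessible.
 */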
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was
	 * once only used (as an end address) in trap.c.  Use it as an end
	 * address here too.  This bogusness has spread.  I just fixed
	 * where it was used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}

	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr+len), prot);
	return(rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so the debugger can plant a breakpoint).
 * All addresses are assumed to reside in the Sysmap.
 */
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	vm_map_protect(kernel_map, trunc_page(addr),
		       round_page(addr+len), prot, FALSE);
}
#endif
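
/*
 * vslock/vsunlock wire and unwire the pages spanning [addr, addr + len)
 * in the current process' map, e.g. around physical I/O to user
 * buffers.  The "dirtied" argument to vsunlock is currently unused.
 */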
void
vslock(addr, len)
	caddr_t	addr;
	u_int	len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
			round_page(addr+len), FALSE);
}

void
vsunlock(addr, len, dirtied)
	caddr_t	addr;
	u_int	len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
		round_page(addr+len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr, ptaddr;
	int i;
	struct vm_map *vp;

	while (cnt.v_free_count < cnt.v_free_min)
		VM_WAIT;

	/*
	 * Avoid copying any of the parent's pagetables or other per-process
	 * objects that reside in the map by marking all of them
	 * non-inheritable.
	 */
	(void)vm_map_inherit(&p1->p_vmspace->vm_map,
		UPT_MIN_ADDRESS - UPAGES * NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the
	 * process.
	 */

	addr = (vm_offset_t) kstack;

	vp = &p2->p_vmspace->vm_map;

	/* ream out old pagetables and kernel stack */
	(void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);

	/* get new pagetables and kernel stack */
	(void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);

	/* force in the page table encompassing the UPAGES */
	ptaddr = trunc_page((u_int)vtopte(addr));
	vm_map_pageable(vp, ptaddr, ptaddr + NBPG, FALSE);

	/* and force in (demand-zero) the UPAGES */
	vm_map_pageable(vp, addr, addr + UPAGES * NBPG, FALSE);

	/* get a kernel virtual address for the UPAGES for this proc */
	up = (struct user *)kmem_alloc_pageable(kernel_map, UPAGES * NBPG);

	/* and force-map the upages into the kernel pmap */
	for (i = 0; i < UPAGES; i++)
		pmap_enter(vm_map_pmap(kernel_map),
			((vm_offset_t) up) + NBPG * i,
			pmap_extract(vp->pmap, addr + NBPG * i),
			VM_PROT_READ|VM_PROT_WRITE, 1);

	/* and allow the UPAGES page table entry to be paged (at the vm system level) */
	vm_map_pageable(vp, ptaddr, ptaddr + NBPG, TRUE);

	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts point at fields within the user struct,
	 * reached through p_addr rather than the global &u.
	 * Copy p_sigacts and parts of p_stats; zero the rest
	 * of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
	    (caddr_t)&up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t)&up->u_stats.pstat_endcopy -
	     (caddr_t)&up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the kernel stack and pcb,
	 * and make the child ready to run.  It marks the child
	 * so that it can return differently than the parent.
	 * It returns twice, once in the parent process and
	 * once in the child.
	 */
	return (cpu_fork(p1, p2));
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 */
void
vm_init_limits(p)
	register struct proc *p;
{
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.
	 * Set the maximum resident set size to be half
	 * of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only
	 * when the system is out of memory - half of
	 * main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* don't let the RSS soft limit fall below 128K (32 pages) */
	rss_limit = max(cnt.v_free_count / 2, 32);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define SDB_SWAPIN	2
#define SDB_SWAPOUT	4
#endif

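/*
 * Bring the user area (upages) of a swapped-out process back into
 * memory: wire the page table page and the upages in the process map,
 * re-enter the upages into the kernel pmap at p->p_addr, mark the
 * process P_INMEM, and put it back on the run queue if it is runnable.
 */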
void
faultin(p)
	struct proc *p;
{
	vm_offset_t i;
	vm_offset_t ptaddr;
	int s;

	if ((p->p_flag & P_INMEM) == 0) {
		vm_map_t map;

		++p->p_lock;

		map = &p->p_vmspace->vm_map;
		/* force in the page table encompassing the kernel stack (upages) */
		ptaddr = trunc_page((u_int)vtopte(kstack));
		vm_map_pageable(map, ptaddr, ptaddr + NBPG, FALSE);

		/* wire in the UPAGES */
		vm_map_pageable(map, (vm_offset_t) kstack,
			(vm_offset_t) kstack + UPAGES * NBPG, FALSE);

		/* and map them nicely into the kernel pmap */
		for (i = 0; i < UPAGES; i++) {
			vm_offset_t off = i * NBPG;
			vm_offset_t pa = (vm_offset_t)
				pmap_extract(&p->p_vmspace->vm_pmap,
				(vm_offset_t) kstack + off);
			pmap_enter(vm_map_pmap(kernel_map),
				((vm_offset_t)p->p_addr) + off,
				pa, VM_PROT_READ|VM_PROT_WRITE, 1);
		}

		/* and let the page table pages go (at least above pmap level) */
		vm_map_pageable(map, ptaddr, ptaddr + NBPG, TRUE);

		s = splhigh();

		if (p->p_stat == SRUN)
			setrunqueue(p);

		p->p_flag |= P_INMEM;

		/* release the hold acquired above */
		--p->p_lock;
		splx(s);
	}
}

int swapinreq;
int percentactive;
/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
void
scheduler()
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;
	int lastidle, lastrun;
	int curidle, currun;
	int forceload;
	int percent;
	int ntries;

	lastidle = 0;
	lastrun = 0;

loop:
	ntries = 0;

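	/*
	 * Estimate how busy the system has been since the last pass:
	 * the percentage of non-idle ticks accumulated in cp_time.
	 */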
	curidle = cp_time[CP_IDLE];
	currun = cp_time[CP_USER] + cp_time[CP_SYS] + cp_time[CP_NICE];
	percent = (100*(currun-lastrun)) / (1 + (currun-lastrun) + (curidle-lastidle));
	lastrun = currun;
	lastidle = curidle;
	if (percent > 100)
		percent = 100;
	percentactive = percent;

	if (percentactive < 25)
		forceload = 1;
	else
		forceload = 0;

loop1:
	pp = NULL;
	ppri = INT_MIN;
	for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
		if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
			int mempri;
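			/*
			 * The swapin priority rises with time swapped out
			 * and time asleep, and falls for positively niced
			 * processes.
			 */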
			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			mempri = pri > 0 ? pri : 0;
			/*
			 * if this process is higher priority and there is
			 * enough space, then select this process instead
			 * of the previous selection.
			 */
			if (pri > ppri &&
				(((cnt.v_free_count + (mempri * (4*PAGE_SIZE) / PAGE_SIZE) >= (p->p_vmspace->vm_swrss)) || (ntries > 0 && forceload)))) {
				pp = p;
				ppri = pri;
			}
		}
	}

	if ((pp == NULL) && (ntries == 0) && forceload) {
		++ntries;
		goto loop1;
	}

	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		tsleep((caddr_t)&proc0, PVM, "sched", 0);
		goto loop;
	}

	/*
	 * We would like to bring someone in (only if there is space).
	 */
/*
	printf("swapin: %d, free: %d, res: %d, min: %d\n",
		p->p_pid, cnt.v_free_count, cnt.v_free_reserved, cnt.v_free_min);
*/
	(void) splhigh();
	if ((forceload && (cnt.v_free_count > (cnt.v_free_reserved + UPAGES + 1))) ||
	    (cnt.v_free_count >= cnt.v_free_min)) {
		spl0();
		faultin(p);
		p->p_swtime = 0;
		goto loop;
	}
	/*
	 * log the memory shortage
	 */
	swapinreq += p->p_vmspace->vm_swrss;
	/*
	 * Not enough memory, jab the pageout daemon and wait until the
	 * coast is clear.
	 */
	if (cnt.v_free_count < cnt.v_free_min) {
		VM_WAIT;
	} else {
		tsleep((caddr_t)&proc0, PVM, "sched", 0);
	}
	(void) spl0();
	goto loop;
}

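/*
 * A process may be swapped out only if it has no outstanding holds
 * (p_lock == 0), is resident (P_INMEM), and is not traced, a system
 * process, exiting, doing physical I/O, or marked noswap.
 */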
#define	swappable(p) \
	(((p)->p_lock == 0) && \
		((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO)) == P_INMEM)

extern int vm_pageout_free_min;
/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_threads()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int tpri;
	int didswap = 0;
	int swapneeded = swapinreq;
	extern int maxslp;
	int runnablenow;

swapmore:
	runnablenow = 0;
	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
	for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
			/*
			 * count the process as being in a runnable state
			 */
			++runnablenow;
			if ((tpri = p->p_swtime + p->p_nice * 8) > outpri2) {
				outp2 = p;
				outpri2 = tpri;
			}
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * Do not swap out a process that is waiting for VM
			 * data structures; there is a possible deadlock.
			 */
			if (!lock_try_write(&p->p_vmspace->vm_map.lock)) {
				continue;
			}
			vm_map_unlock(&p->p_vmspace->vm_map);
			if (p->p_slptime > maxslp) {
				swapout(p);
				didswap++;
			} else if ((tpri = p->p_slptime + p->p_nice * 8) > outpri) {
				outp = p;
				outpri = tpri;
			}
			continue;
		}
	}
	/*
	 * We swap out only if there are more than two runnable processes
	 * or if another process needs space to swap in.
	 */
	if ((swapinreq || ((percentactive > 90) && (runnablenow > 2))) &&
			(((cnt.v_free_count + cnt.v_inactive_count) <= (cnt.v_free_target + cnt.v_inactive_target)) ||
			(cnt.v_free_count < cnt.v_free_min))) {
		if ((p = outp) == NULL) {
			p = outp2;
		}

		if (p) {
			swapout(p);
			didswap = 1;
		}
	}

	/*
	 * If we previously found a process to swap out and we need to
	 * swap out more, try again.
	 */
#if 0
	if (p && swapinreq)
		goto swapmore;
#endif

	/*
	 * If we swapped something out, and another process needed memory,
	 * then wake up the sched process.
	 */
	if (didswap) {
		if (swapneeded)
			wakeup((caddr_t)&proc0);
		swapinreq = 0;
	}
}

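/*
 * Unwire a process' upages and mark it swapped out: record its
 * resident set size for the eventual swapin, take it off the run
 * queue, remove the upages from the kernel pmap, and let the pages
 * backing the kernel stack be paged out.
 */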
void
swapout(p)
	register struct proc *p;
{
	vm_map_t map = &p->p_vmspace->vm_map;

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss =
			p->p_vmspace->vm_pmap.pm_stats.resident_count;
	/*
	 * and decrement the amount of needed space
	 */
	swapinreq -= min(swapinreq, p->p_vmspace->vm_pmap.pm_stats.resident_count);

	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();

	++p->p_lock;
	/* let the upages be paged */
	pmap_remove(vm_map_pmap(kernel_map),
		(vm_offset_t) p->p_addr, ((vm_offset_t) p->p_addr) + UPAGES * NBPG);

	vm_map_pageable(map, (vm_offset_t) kstack,
		(vm_offset_t) kstack + UPAGES * NBPG, TRUE);

	--p->p_lock;
	p->p_swtime = 0;
}

/*
 * The rest of these routines fake thread handling
 */

#ifndef assert_wait
void
assert_wait(event, ruptible)
	int event;
	boolean_t ruptible;
{
#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
}
#endif

void
thread_block(char *msg)
{
	if (curproc->p_thread)
		tsleep((caddr_t)curproc->p_thread, PVM, msg, 0);
}

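/*
 * Note the event in p_thread, release the given simple lock, and
 * sleep on the event.  Part of the fake thread interface above.
 */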
void
thread_sleep_(event, lock, wmesg)
	int event;
	simple_lock_t lock;
	char *wmesg;
{
	curproc->p_thread = event;
	simple_unlock(lock);
	if (curproc->p_thread) {
		tsleep((caddr_t)event, PVM, wmesg, 0);
	}
}

#ifndef thread_wakeup
void
thread_wakeup(event)
	int event;
{
	wakeup((caddr_t)event);
}
#endif

/*
 * DEBUG stuff
 */

int indent = 0;

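/*
 * Indented printf for the debugging routines: emits `indent' columns
 * of whitespace, then hands fmt and the argument list to printf via
 * the kernel-private %r format (see subr_prf.c).
 */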
/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt, ...)
#else
iprintf(fmt /* , va_alist */)
	char *fmt;
	/* va_dcl */
#endif
{
	register int i;
	va_list ap;

	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	printf("%r", fmt, ap);
	va_end(ap);
}
684