xref: /freebsd/sys/vm/vm_glue.c (revision afe61c15161c324a7af299a9b8457aba5afc92db)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <sys/kernel.h>
#include <sys/dkstat.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>

#include <machine/stdarg.h>

extern char kstack[];
int	avefree = 0;		/* XXX */
int	readbuffers = 0;	/* XXX allow kgdb to read kernel buffer pool */
/* vm_map_t upages_map; */

void	swapout(struct proc *p);
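
/*
 * kernacc:
 *	Check whether the kernel map grants the requested access
 *	(B_READ or B_WRITE) to the byte range [addr, addr + len).
 */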
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

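/*
 * useracc:
 *	Like kernacc, but checks the current process's map and rejects
 *	ranges that pass VM_MAXUSER_ADDRESS or wrap around.  A typical
 *	caller (a sketch, not taken from this file) validates a user
 *	buffer before a raw transfer:
 *
 *		if (!useracc(bp->b_un.b_addr, bp->b_bcount, B_WRITE))
 *			return (EFAULT);
 */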
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/*
	 * XXX - specially disallow access to user page tables - they are
	 * in the map.
	 *
	 * XXX - don't specially disallow access to the user area - treat
	 * it as incorrectly as elsewhere.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was
	 * only used (as an end address) in trap.c.  Use it as an end
	 * address here too.
	 */
	if ((vm_offset_t) addr >= VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len > VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len <= (vm_offset_t) addr) {
		return (FALSE);
	}

	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr + len), prot);
	return (rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so a debugger can plant a breakpoint).
 * All addresses are assumed to reside in the Sysmap.
 */
void
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	vm_map_protect(kernel_map, trunc_page(addr),
		       round_page(addr + len), prot, FALSE);
}
#endif
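
/*
 * vslock:
 *	Wire down the user pages spanning [addr, addr + len) so they
 *	stay resident, e.g. while a device transfers into them.
 */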
void
vslock(addr, len)
	caddr_t	addr;
	u_int	len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
			round_page(addr + len), FALSE);
}

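/*
 * vsunlock:
 *	Unwire pages previously wired by vslock.  The "dirtied" argument
 *	is accepted for interface compatibility but is unused here.
 *	Typical pairing (a sketch):
 *
 *		vslock(base, len);
 *		... device or kernel writes into the buffer ...
 *		vsunlock(base, len, 1);
 */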
void
vsunlock(addr, len, dirtied)
	caddr_t	addr;
	u_int	len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
			round_page(addr + len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr, ptaddr;
	int i;
	struct vm_map *vp;

	while (cnt.v_free_count < cnt.v_free_min)
		VM_WAIT;

	/*
	 * avoid copying any of the parent's pagetables or other per-process
	 * objects that reside in the map by marking all of them non-inheritable
	 */
	(void) vm_map_inherit(&p1->p_vmspace->vm_map,
	    UPT_MIN_ADDRESS - UPAGES * NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the process
	 */

	addr = (vm_offset_t) kstack;

	vp = &p2->p_vmspace->vm_map;

	/* ream out old pagetables and kernel stack */
	(void) vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);

	/* get new pagetables and kernel stack */
	(void) vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);

	/* force in the page table encompassing the UPAGES */
	ptaddr = trunc_page((u_int) vtopte(addr));
	vm_map_pageable(vp, ptaddr, ptaddr + NBPG, FALSE);

	/* and force in (demand-zero) the UPAGES */
	vm_map_pageable(vp, addr, addr + UPAGES * NBPG, FALSE);

	/* get a kernel virtual address for the UPAGES for this proc */
	up = (struct user *) kmem_alloc_pageable(kernel_map, UPAGES * NBPG);

	/* and force-map the upages into the kernel pmap */
	for (i = 0; i < UPAGES; i++)
		pmap_enter(vm_map_pmap(kernel_map),
			((vm_offset_t) up) + NBPG * i,
			pmap_extract(vp->pmap, addr + NBPG * i),
			VM_PROT_READ|VM_PROT_WRITE, 1);
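
	/*
	 * Note: the trailing "1" above asks pmap_enter for a wired
	 * mapping, so the kernel alias of the U-pages cannot fault.
	 */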

	/* and allow the UPAGES page table entry to be paged (at the vm system level) */
	vm_map_pageable(vp, ptaddr, ptaddr + NBPG, TRUE);

	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts point at fields within the user struct,
	 * addressed via p_addr rather than the global &u.
	 * Copy p_sigacts and parts of p_stats; zero the rest of p_stats
	 * (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
	    (caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
	     (caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the kernel stack and pcb,
	 * and make the child ready to run.  It marks the child
	 * so that it can return differently than the parent.
	 * It returns twice, once in the parent process and
	 * once in the child.
	 */
	return (cpu_fork(p1, p2));
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 */
void
vm_init_limits(p)
	register struct proc *p;
{
	int tmp;

	/*
	 * Set up the initial limits on process VM.
	 * Set the maximum resident set size to be all
	 * of (reasonably) available memory.  This causes
	 * any single, large process to start random page
	 * replacement once it fills memory.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	tmp = ((2 * cnt.v_free_count) / 3) - 32;
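	/*
	 * For example, with 4096 free pages the soft RSS limit becomes
	 * (2 * 4096) / 3 - 32 = 2698 pages; the clamp below instead
	 * grants everything that is free on small (< 512 page) systems.
	 */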
	if (cnt.v_free_count < 512)
		tmp = cnt.v_free_count;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(tmp);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif

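/*
 * faultin:
 *	Bring a swapped-out process's U-area back into physical memory,
 *	rewire its kernel-stack pages, and put it back on the run queue.
 */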
void
faultin(p)
	struct proc *p;
{
	vm_offset_t i;
	vm_offset_t ptaddr;
	int s;

	if ((p->p_flag & P_INMEM) == 0) {
		vm_map_t map;

		++p->p_lock;

		map = &p->p_vmspace->vm_map;
		/* force the page table encompassing the kernel stack (upages) */
		ptaddr = trunc_page((u_int) vtopte(kstack));
		vm_map_pageable(map, ptaddr, ptaddr + NBPG, FALSE);

		/* wire in the UPAGES */
		vm_map_pageable(map, (vm_offset_t) kstack,
			(vm_offset_t) kstack + UPAGES * NBPG, FALSE);

		/* and map them nicely into the kernel pmap */
		for (i = 0; i < UPAGES; i++) {
			vm_offset_t off = i * NBPG;
			vm_offset_t pa = (vm_offset_t)
				pmap_extract(&p->p_vmspace->vm_pmap,
				(vm_offset_t) kstack + off);
			pmap_enter(vm_map_pmap(kernel_map),
				((vm_offset_t) p->p_addr) + off,
				pa, VM_PROT_READ|VM_PROT_WRITE, 1);
		}

		/* and let the page table pages go (at least above pmap level) */
		vm_map_pageable(map, ptaddr, ptaddr + NBPG, TRUE);

		s = splhigh();

		if (p->p_stat == SRUN)
			setrunqueue(p);

		p->p_flag |= P_INMEM;

		/* release the hold taken with p_lock above */
		--p->p_lock;
		splx(s);
	}
}

int swapinreq;
int percentactive;
/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
void
scheduler()
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;
	int lastidle, lastrun;
	int curidle, currun;
	int forceload;
	int percent;
	int ntries;

	lastidle = 0;
	lastrun = 0;

loop:
	ntries = 0;
	vmmeter();

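	/*
	 * Estimate recent CPU activity as the percentage of non-idle
	 * ticks accumulated since the last pass; the "+ 1" below only
	 * guards against a zero divisor.
	 */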
	curidle = cp_time[CP_IDLE];
	currun = cp_time[CP_USER] + cp_time[CP_SYS] + cp_time[CP_NICE];
	percent = (100 * (currun - lastrun)) /
	    (1 + (currun - lastrun) + (curidle - lastidle));
	lastrun = currun;
	lastidle = curidle;
	if (percent > 100)
		percent = 100;
	percentactive = percent;

	if (percentactive < 25)
		forceload = 1;
	else
		forceload = 0;

loop1:
	pp = NULL;
	ppri = INT_MIN;
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
			int mempri;

			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			mempri = pri > 0 ? pri : 0;
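			/*
			 * The space test below credits the process
			 * (mempri * (4 * PAGE_SIZE) / PAGE_SIZE), i.e.
			 * four pages per priority point, against its
			 * swapped-out resident size vm_swrss.
			 */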
			/*
			 * if this process is higher priority and there is
			 * enough space, then select this process instead
			 * of the previous selection.
			 */
			if (pri > ppri &&
			    ((cnt.v_free_count +
				(mempri * (4 * PAGE_SIZE) / PAGE_SIZE) >=
				p->p_vmspace->vm_swrss) ||
			    (ntries > 0 && forceload))) {
				pp = p;
				ppri = pri;
			}
		}
	}

	if ((pp == NULL) && (ntries == 0) && forceload) {
		++ntries;
		goto loop1;
	}

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep((caddr_t) &proc0, PVM, "sched", 0);
		goto loop;
	}

	/*
	 * We would like to bring someone in (only if there is space).
	 */
/*
	printf("swapin: %d, free: %d, res: %d, min: %d\n",
		p->p_pid, cnt.v_free_count, cnt.v_free_reserved, cnt.v_free_min);
*/
	(void) splhigh();
	if ((forceload && (cnt.v_free_count > (cnt.v_free_reserved + UPAGES + 1))) ||
	    (cnt.v_free_count >= cnt.v_free_min)) {
		spl0();
		faultin(p);
		p->p_swtime = 0;
		goto loop;
	}
	/*
	 * log the memory shortage
	 */
	swapinreq += p->p_vmspace->vm_swrss;
	/*
	 * Not enough memory, jab the pageout daemon and wait until the
	 * coast is clear.
	 */
	if (cnt.v_free_count < cnt.v_free_min) {
		VM_WAIT;
	} else {
		tsleep((caddr_t) &proc0, PVM, "sched", 0);
	}
	(void) spl0();
	goto loop;
}

#define	swappable(p) \
	(((p)->p_lock == 0) && \
		((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO)) == P_INMEM)

extern int vm_pageout_free_min;
/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped out.  Otherwise, we swap out the longest-sleeping or
 * stopped process, if any, and failing that the longest-resident process.
 */
void
swapout_threads()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int tpri;
	int didswap = 0;
	int swapneeded = swapinreq;
	extern int maxslp;
	int runnablenow;

swapmore:
	runnablenow = 0;
	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
			/*
			 * Count the process as runnable, and remember the
			 * longest-resident runnable process as a fallback
			 * swap-out candidate.
			 */
			++runnablenow;
			if ((tpri = p->p_swtime + p->p_nice * 8) > outpri2) {
				outp2 = p;
				outpri2 = tpri;
			}
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * Do not swap out a process that is waiting on VM
			 * data structures; there is a possible deadlock.
			 */
			if (!lock_try_write(&p->p_vmspace->vm_map.lock)) {
				continue;
			}
			vm_map_unlock(&p->p_vmspace->vm_map);
			if (p->p_slptime > maxslp) {
				swapout(p);
				didswap++;
			} else if ((tpri = p->p_slptime + p->p_nice * 8) > outpri) {
				outp = p;
				outpri = tpri;
			}
			continue;
		}
	}
	/*
	 * We swap out only if there are more than two runnable processes
	 * or if another process needs some space to swap in.
	 */
	if ((swapinreq || ((percentactive > 90) && (runnablenow > 2))) &&
	    (((cnt.v_free_count + cnt.v_inactive_count) <=
	      (cnt.v_free_target + cnt.v_inactive_target)) ||
	     (cnt.v_free_count < cnt.v_free_min))) {
		if ((p = outp) == NULL) {
			p = outp2;
		}

		if (p) {
			swapout(p);
			didswap = 1;
		}
	}

	/*
	 * If we previously found a process to swap out and more space is
	 * still needed, try again.
	 */
#if 0
	if (p && swapinreq)
		goto swapmore;
#endif

	/*
	 * If we swapped something out, and another process needed memory,
	 * then wake up the sched process.
	 */
	if (didswap) {
		if (swapneeded)
			wakeup((caddr_t) &proc0);
		swapinreq = 0;
	}
}

void
swapout(p)
	register struct proc *p;
{
	vm_map_t map = &p->p_vmspace->vm_map;

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss =
			p->p_vmspace->vm_pmap.pm_stats.resident_count;
	/*
	 * and decrement the amount of needed space
	 */
	swapinreq -= min(swapinreq, p->p_vmspace->vm_pmap.pm_stats.resident_count);

	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();

	++p->p_lock;
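	/*
	 * Holding p_lock keeps swappable() false for this process, so
	 * it cannot be chosen again while its U-area is being unwired.
	 */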
	/* let the upages be paged */
	pmap_remove(vm_map_pmap(kernel_map),
		(vm_offset_t) p->p_addr, ((vm_offset_t) p->p_addr) + UPAGES * NBPG);

	vm_map_pageable(map, (vm_offset_t) kstack,
		(vm_offset_t) kstack + UPAGES * NBPG, TRUE);

	--p->p_lock;
	p->p_swtime = 0;
}

/*
 * The rest of these routines fake thread handling.
 */
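/*
 * They emulate the Mach kernel-thread interface on top of the BSD
 * sleep machinery: p_thread records the event being waited on, and
 * tsleep()/wakeup() use that event as the wait channel.
 */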

#ifndef assert_wait
void
assert_wait(event, ruptible)
	int event;
	boolean_t ruptible;
{
#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
}
#endif

void
thread_block(char *msg)
{
	if (curproc->p_thread)
		tsleep((caddr_t) curproc->p_thread, PVM, msg, 0);
}

void
thread_sleep_(event, lock, wmesg)
	int event;
	simple_lock_t lock;
	char *wmesg;
{
	curproc->p_thread = event;
	simple_unlock(lock);
	if (curproc->p_thread) {
		tsleep((caddr_t) event, PVM, wmesg, 0);
	}
}

#ifndef thread_wakeup
void
thread_wakeup(event)
	int event;
{
	wakeup((caddr_t) event);
}
#endif

/*
 * DEBUG stuff
 */

int indent = 0;

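/*
 * iprintf:
 *	printf prefixed with "indent" columns of whitespace.  The "%r"
 *	specifier (handled by the kernel printf in subr_prf.c) formats
 *	fmt recursively against the captured argument list.
 */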
/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt, ...)
#else
iprintf(fmt /* , va_alist */)
	char *fmt;
	/* va_dcl */
#endif
{
	register int i;
	va_list ap;

	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	printf("%r", fmt, ap);
	va_end(ap);
}
686