xref: /freebsd/sys/i386/i386/vm_machdep.c (revision a8445737e740901f5f2c8d24c12ef7fc8b00134e)
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.108 1998/05/19 00:00:10 tegge Exp $
 */

#include "npx.h"
#include "opt_bounce.h"
#include "opt_user_ldt.h"
#include "opt_vm86.h"
#ifdef PC98
#include "opt_pc98.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef VM86
#include <machine/pcb_ext.h>
#include <machine/vm86.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#ifdef PC98
#include <pc98/pc98/pc98.h>
#else
#include <i386/isa/isa.h>
#endif

static void	cpu_reset_real __P((void));
#ifdef SMP
static void	cpu_reset_proxy __P((void));
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif

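/*
 * Bounce buffer support (BOUNCE_BUFFERS): ISA DMA controllers can only
 * address the low 16MB of physical memory, so I/O buffers whose pages
 * lie above that limit are remapped onto low "bounce" pages for the
 * duration of the transfer, with the data copied to/from the original
 * pages as needed.
 */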
#ifdef BOUNCE_BUFFERS
static vm_offset_t
		vm_bounce_kva __P((int size, int waitok));
static void	vm_bounce_kva_free __P((vm_offset_t addr, vm_offset_t size,
					int now));
static vm_offset_t
		vm_bounce_page_find __P((int count));
static void	vm_bounce_page_free __P((vm_offset_t pa, int count));

static volatile int	kvasfreecnt;

caddr_t		bouncememory;
static int	bpwait;
static vm_offset_t	*bouncepa;
static int		bmwait, bmfreeing;

#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
static int		bounceallocarraysize;
static unsigned	*bounceallocarray;
static int		bouncefree;

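/*
 * DMA boundary: pages at or above this physical address must be bounced.
 */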
#if defined(PC98) && defined (EPSON_BOUNCEDMA)
#define SIXTEENMEG (3840*4096)			/* 15MB boundary */
#else
#define SIXTEENMEG (4096*4096)
#endif
#define MAXBKVA 1024
int		maxbkva = MAXBKVA*PAGE_SIZE;

/* special list that can be used at interrupt time for eventual kva free */
static struct kvasfree {
	vm_offset_t addr;
	vm_offset_t size;
} kvaf[MAXBKVA];

/*
 * get bounce buffer pages (count physically contiguous)
 * (only 1 implemented now)
 */
static vm_offset_t
vm_bounce_page_find(count)
	int count;
{
	int bit;
	int s, i;

	if (count != 1)
		panic("vm_bounce_page_find -- no support for > 1 page yet!!!");

	s = splbio();
retry:
	for (i = 0; i < bounceallocarraysize; i++) {
		if (bounceallocarray[i] != 0xffffffff) {
			bit = ffs(~bounceallocarray[i]);
			if (bit) {
				bounceallocarray[i] |= 1 << (bit - 1);
				bouncefree -= count;
				splx(s);
				return bouncepa[(i * BITS_IN_UNSIGNED + (bit - 1))];
			}
		}
	}
	bpwait = 1;
	tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
	goto retry;
}

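/*
 * queue a bounce kva range for deferred release; the actual unmap and
 * kmem_free happen later in vm_bounce_kva().  if "now" is set, process
 * the free list immediately (this also does the wakeups).
 */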
static void
vm_bounce_kva_free(addr, size, now)
	vm_offset_t addr;
	vm_offset_t size;
	int now;
{
	int s = splbio();
	kvaf[kvasfreecnt].addr = addr;
	kvaf[kvasfreecnt].size = size;
	++kvasfreecnt;
	if (now) {
		/*
		 * this will do wakeups
		 */
		vm_bounce_kva(0, 0);
	} else {
		if (bmwait) {
			/*
			 * if anyone is waiting on the bounce-map, then wakeup
			 */
			wakeup((caddr_t) io_map);
			bmwait = 0;
		}
	}
	splx(s);
}

/*
 * free count bounce buffer pages
 */
static void
vm_bounce_page_free(pa, count)
	vm_offset_t pa;
	int count;
{
	int allocindex;
	int index;
	int bit;

	if (count != 1)
		panic("vm_bounce_page_free -- no support for > 1 page yet!!!");

	for (index = 0; index < bouncepages; index++) {
		if (pa == bouncepa[index])
			break;
	}

	if (index == bouncepages)
		panic("vm_bounce_page_free: invalid bounce buffer");

	allocindex = index / BITS_IN_UNSIGNED;
	bit = index % BITS_IN_UNSIGNED;

	bounceallocarray[allocindex] &= ~(1 << bit);

	bouncefree += count;
	if (bpwait) {
		bpwait = 0;
		wakeup((caddr_t) &bounceallocarray);
	}
}

/*
 * allocate "size" bytes of bounce buffer kva; a size of 0 just
 * processes the deferred-free list built by vm_bounce_kva_free()
 */
static vm_offset_t
vm_bounce_kva(size, waitok)
	int size;
	int waitok;
{
	int i;
	vm_offset_t kva = 0;
	vm_offset_t off;
	int s = splbio();
more:
	if (!bmfreeing && kvasfreecnt) {
		bmfreeing = 1;
		for (i = 0; i < kvasfreecnt; i++) {
			for (off = 0; off < kvaf[i].size; off += PAGE_SIZE) {
				pmap_kremove(kvaf[i].addr + off);
			}
			kmem_free_wakeup(io_map, kvaf[i].addr,
				kvaf[i].size);
		}
		kvasfreecnt = 0;
		bmfreeing = 0;
		if (bmwait) {
			bmwait = 0;
			wakeup((caddr_t) io_map);
		}
	}

	if (size == 0) {
		splx(s);
		return 0;
	}

	if ((kva = kmem_alloc_pageable(io_map, size)) == 0) {
		if (!waitok) {
			splx(s);
			return 0;
		}
		bmwait = 1;
		tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0);
		goto more;
	}
	splx(s);
	return kva;
}

/*
 * like vm_bounce_kva, but also allocates and maps the underlying
 * bounce pages (takes a page count rather than a byte size)
 */
vm_offset_t
vm_bounce_kva_alloc(count)
	int count;
{
	int i;
	vm_offset_t kva;
	vm_offset_t pa;
	if (bouncepages == 0) {
		kva = (vm_offset_t) malloc(count*PAGE_SIZE, M_TEMP, M_WAITOK);
		return kva;
	}
	kva = vm_bounce_kva(count*PAGE_SIZE, 1);
	for (i = 0; i < count; i++) {
		pa = vm_bounce_page_find(1);
		pmap_kenter(kva + i * PAGE_SIZE, pa);
	}
	return kva;
}

/*
 * counterpart of vm_bounce_kva_alloc -- unmaps and frees the bounce
 * pages as well as the kva
 */
void
vm_bounce_kva_alloc_free(kva, count)
	vm_offset_t kva;
	int count;
{
	int i;
	vm_offset_t pa;
	if (bouncepages == 0) {
		free((caddr_t) kva, M_TEMP);
		return;
	}
	for (i = 0; i < count; i++) {
		pa = pmap_kextract(kva + i * PAGE_SIZE);
		vm_bounce_page_free(pa, 1);
	}
	vm_bounce_kva_free(kva, count*PAGE_SIZE, 0);
}

/*
 * do the things necessary to the struct buf to implement
 * bounce buffers...  inserted before the disk sort
 */
void
vm_bounce_alloc(bp)
	struct buf *bp;
{
	int countvmpg;
	vm_offset_t vastart, vaend;
	vm_offset_t vapstart, vapend;
	vm_offset_t va, kva;
	vm_offset_t pa;
	int dobounceflag = 0;
	int i;

	if (bouncepages == 0)
		return;

	if (bp->b_flags & B_BOUNCE) {
		printf("vm_bounce_alloc: called recursively???\n");
		return;
	}

	if (bp->b_bufsize < bp->b_bcount) {
		printf(
		    "vm_bounce_alloc: b_bufsize(0x%lx) < b_bcount(0x%lx) !!\n",
			bp->b_bufsize, bp->b_bcount);
		panic("vm_bounce_alloc");
	}

/*
 *  This is not really necessary
 *	if( bp->b_bufsize != bp->b_bcount) {
 *		printf("size: %d, count: %d\n", bp->b_bufsize, bp->b_bcount);
 *	}
 */

	vastart = (vm_offset_t) bp->b_data;
	vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;

	vapstart = trunc_page(vastart);
	vapend = round_page(vaend);
	countvmpg = (vapend - vapstart) / PAGE_SIZE;

/*
 * if any page is above 16MB, then go into bounce-buffer mode
 */
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG)
			++dobounceflag;
		if (pa == 0)
			panic("vm_bounce_alloc: Unmapped page");
		va += PAGE_SIZE;
	}
	if (dobounceflag == 0)
		return;

	if (bouncepages < dobounceflag)
		panic("Not enough bounce buffers!!!");

/*
 * allocate a replacement kva for b_addr
 */
	kva = vm_bounce_kva(countvmpg*PAGE_SIZE, 1);
#if 0
	printf("%s: vapstart: %x, vapend: %x, countvmpg: %d, kva: %x ",
		(bp->b_flags & B_READ) ? "read":"write",
			vapstart, vapend, countvmpg, kva);
#endif
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG) {
			/*
			 * allocate a replacement page
			 */
			vm_offset_t bpa = vm_bounce_page_find(1);
			pmap_kenter(kva + (PAGE_SIZE * i), bpa);
#if 0
			printf("r(%d): (%x,%x,%x) ", i, va, pa, bpa);
#endif
			/*
			 * if we are writing, then copy the data into the page
			 */
			if ((bp->b_flags & B_READ) == 0) {
				bcopy((caddr_t) va, (caddr_t) kva + (PAGE_SIZE * i), PAGE_SIZE);
			}
		} else {
			/*
			 * use original page
			 */
			pmap_kenter(kva + (PAGE_SIZE * i), pa);
		}
		va += PAGE_SIZE;
	}

/*
 * flag the buffer as being bounced
 */
	bp->b_flags |= B_BOUNCE;
/*
 * save the original buffer kva
 */
	bp->b_savekva = bp->b_data;
/*
 * put our new kva into the buffer (offset by original offset)
 */
	bp->b_data = (caddr_t) (((vm_offset_t) kva) |
				((vm_offset_t) bp->b_savekva & PAGE_MASK));
#if 0
	printf("b_savekva: %x, newva: %x\n", bp->b_savekva, bp->b_data);
#endif
	return;
}

/*
 * hook into biodone to free bounce buffer
 */
void
vm_bounce_free(bp)
	struct buf *bp;
{
	int i;
	vm_offset_t origkva, bouncekva, bouncekvaend;

/*
 * if this isn't a bounced buffer, then just return
 */
	if ((bp->b_flags & B_BOUNCE) == 0)
		return;

/*
 *  This check is not necessary
 *	if (bp->b_bufsize != bp->b_bcount) {
 *		printf("vm_bounce_free: b_bufsize=%d, b_bcount=%d\n",
 *			bp->b_bufsize, bp->b_bcount);
 *	}
 */

	origkva = (vm_offset_t) bp->b_savekva;
	bouncekva = (vm_offset_t) bp->b_data;
/*
	printf("free: %d ", bp->b_bufsize);
*/

/*
 * check every page in the kva space for b_addr
 */
	for (i = 0; i < bp->b_bufsize; ) {
		vm_offset_t mybouncepa;
		vm_offset_t copycount;

		copycount = round_page(bouncekva + 1) - bouncekva;
		mybouncepa = pmap_kextract(trunc_page(bouncekva));

/*
 * if this is a bounced pa, then process as one
 */
		if (mybouncepa != pmap_kextract(trunc_page(origkva))) {
			vm_offset_t tocopy = copycount;
			if (i + tocopy > bp->b_bufsize)
				tocopy = bp->b_bufsize - i;
/*
 * if this is a read, then copy from bounce buffer into original buffer
 */
			if (bp->b_flags & B_READ)
				bcopy((caddr_t) bouncekva, (caddr_t) origkva, tocopy);
/*
 * free the bounce allocation
 */

/*
			printf("(kva: %x, pa: %x)", bouncekva, mybouncepa);
*/
			vm_bounce_page_free(mybouncepa, 1);
		}

		origkva += copycount;
		bouncekva += copycount;
		i += copycount;
	}

/*
	printf("\n");
*/
/*
 * add the old kva into the "to free" list
 */

	bouncekva = trunc_page((vm_offset_t) bp->b_data);
	bouncekvaend = round_page((vm_offset_t)bp->b_data + bp->b_bufsize);

/*
	printf("freeva: %d\n", (bouncekvaend - bouncekva) / PAGE_SIZE);
*/
	vm_bounce_kva_free(bouncekva, (bouncekvaend - bouncekva), 0);
	bp->b_data = bp->b_savekva;
	bp->b_savekva = 0;
	bp->b_flags &= ~B_BOUNCE;

	return;
}

/*
 * init the bounce buffer system
 */
void
vm_bounce_init()
{
	int i;

	kvasfreecnt = 0;

	if (bouncepages == 0)
		return;

	bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
	bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);

	if (!bounceallocarray)
		panic("Cannot allocate bounce resource array");

	bouncepa = malloc(bouncepages * sizeof(vm_offset_t), M_TEMP, M_NOWAIT);
	if (!bouncepa)
		panic("Cannot allocate physical memory array");

	for (i = 0; i < bounceallocarraysize; i++) {
		bounceallocarray[i] = 0xffffffff;
	}

	for (i = 0; i < bouncepages; i++) {
		vm_offset_t pa;
		if ((pa = pmap_kextract((vm_offset_t) bouncememory + i * PAGE_SIZE)) >= SIXTEENMEG) {
			printf("vm_bounce_init: bounce memory out of range -- bounce disabled\n");
			free(bounceallocarray, M_TEMP);
			bounceallocarray = NULL;
			free(bouncepa, M_TEMP);
			bouncepa = NULL;
			bouncepages = 0;
			break;
		}
		if (pa == 0)
			panic("bounce memory not resident");
		bouncepa[i] = pa;
		bounceallocarray[i/(8*sizeof(int))] &= ~(1<<(i%(8*sizeof(int))));
	}
	bouncefree = bouncepages;

}
#endif /* BOUNCE_BUFFERS */

/*
 * quick version of vm_fault
 */
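/*
 * Touching the byte with fubyte()/subyte() faults the page in; writing
 * it back through subyte() also forces any copy-on-write to happen up
 * front, without the cost of a full vm_fault() call.
 */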
void
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	if (prot & VM_PROT_WRITE)
		subyte(v, fubyte(v));
	else
		fubyte(v);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	struct pcb *pcb2 = &p2->p_addr->u_pcb;

#if NNPX > 0
	/* Ensure that p1's pcb is up to date. */
	if (npxproc == p1)
		npxsave(&p1->p_addr->u_pcb.pcb_savefpu);
#endif

	/* Copy p1's pcb. */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies the user mode register values.
	 */
	p2->p_md.md_regs = (struct trapframe *)
#ifdef VM86
			   ((int)p2->p_addr + UPAGES * PAGE_SIZE - 16) - 1;
#else
			   ((int)p2->p_addr + UPAGES * PAGE_SIZE) - 1;
#endif /* VM86 */
	*p2->p_md.md_regs = *p1->p_md.md_regs;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_cr3 = vtophys(p2->p_vmspace->vm_pmap.pm_pdir);
	pcb2->pcb_edi = p2->p_md.md_regs->tf_edi;
	pcb2->pcb_esi = (int)fork_return;
	pcb2->pcb_ebp = p2->p_md.md_regs->tf_ebp;
	pcb2->pcb_esp = (int)p2->p_md.md_regs - sizeof(void *);
	pcb2->pcb_ebx = (int)p2;
	pcb2->pcb_eip = (int)fork_trampoline;
	/*
	 * pcb2->pcb_ldt:	duplicated below, if necessary.
	 * pcb2->pcb_ldt_len:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above (always 0 here?).
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 */

#ifdef VM86
	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;
#endif

#ifdef USER_LDT
	/* Copy the LDT, if necessary. */
	if (pcb2->pcb_ldt != 0) {
		union descriptor *new_ldt;
		size_t len = pcb2->pcb_ldt_len * sizeof(union descriptor);

		new_ldt = (union descriptor *)kmem_alloc(kernel_map, len);
		bcopy(pcb2->pcb_ldt, new_ldt, len);
		pcb2->pcb_ldt = (caddr_t)new_ldt;
	}
#endif

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch().
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user mode.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(p, func, arg)
	struct proc *p;
	void (*func) __P((void *));
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	p->p_addr->u_pcb.pcb_esi = (int) func;	/* function */
	p->p_addr->u_pcb.pcb_ebx = (int) arg;	/* first arg */
}

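/*
 * Release the machine-dependent resources held by an exiting process
 * (FPU state, VM86 extension pages, private LDT) and switch away for
 * the last time; cpu_switch() never returns here.
 */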
void
cpu_exit(p)
	register struct proc *p;
{
#if defined(USER_LDT) || defined(VM86)
	struct pcb *pcb = &p->p_addr->u_pcb;
#endif

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
#ifdef VM86
	if (pcb->pcb_ext != 0) {
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = 0;
	}
#endif
#ifdef USER_LDT
	if (pcb->pcb_ldt != 0) {
		if (pcb == curpcb) {
			lldt(_default_ldt);
			currentldt = _default_ldt;
		}
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
			pcb->pcb_ldt_len * sizeof(union descriptor));
		pcb->pcb_ldt_len = (int)pcb->pcb_ldt = 0;
	}
#endif
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_dispose_proc(p);

	/* and clean-out the vmspace */
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
	int error;
	caddr_t tempuser;

	tempuser = malloc(ctob(UPAGES), M_TEMP, M_WAITOK);
	if (!tempuser)
		return EINVAL;

	bzero(tempuser, ctob(UPAGES));
	bcopy(p->p_addr, tempuser, sizeof(struct user));
	bcopy(p->p_md.md_regs,
	      tempuser + ((caddr_t) p->p_md.md_regs - (caddr_t) p->p_addr),
	      sizeof(struct trapframe));

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t) tempuser,
			ctob(UPAGES),
			(off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT,
			cred, (int *)NULL, p);

	free(tempuser, M_TEMP);

	return error;
}

#ifdef notyet
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr, v, kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE, v += PAGE_SIZE) {
		/*
		 * Do the vm_fault if needed; do the copy-on-write thing
		 * when reading stuff off device into memory.
		 */
		vm_fault_quick(addr,
			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		if (pa == 0)
			panic("vmapbuf: page not present");
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
		pmap_kenter((vm_offset_t) v, pa);
	}

	kva = bp->b_saveaddr;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE) {
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		pmap_kremove((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}

	bp->b_data = bp->b_saveaddr;
}

/*
 * Force reset the processor by invalidating the entire address space!
 */

#ifdef SMP
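/*
 * cpu_reset_proxy() runs on the BSP on behalf of another cpu that wants
 * to reset the machine.  The handshake is driven through
 * cpu_reset_proxy_active:
 *	0 -> proxy not yet started
 *	1 -> proxy running; requesting cpu must disable interrupts
 *	2 -> requesting cpu has interrupts disabled
 *	3 -> proxy holds the mp lock; requesting cpu re-enables interrupts
 *	4 -> requesting cpu is spinning; proxy stops it and resets
 */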
static void
cpu_reset_proxy()
{
	u_int saved_mp_lock;

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to disable interrupts */
	saved_mp_lock = mp_lock;
	mp_lock = 1;
	printf("cpu_reset_proxy: Grabbed mp lock for BSP\n");
	cpu_reset_proxy_active = 3;
	while (cpu_reset_proxy_active == 3)
		;	/* Wait for other cpu to enable interrupts */
	stop_cpus((1<<cpu_reset_proxyid));
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif

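/*
 * Reset the machine.  On SMP the actual reset is always performed on
 * the BSP (CPU #0); if called on another cpu, the request is handed
 * off to the BSP via cpu_reset_proxy() above.
 */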
void
cpu_reset()
{
#ifdef SMP
	if (smp_active == 0) {
		cpu_reset_real();
		/* NOTREACHED */
	} else {

		u_int map;
		int cnt;
		printf("cpu_reset called on cpu#%d\n", cpuid);

		map = other_cpus & ~stopped_cpus;

		if (map != 0) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);		/* Stop all other CPUs */
		}

		if (cpuid == 0) {
			DELAY(1000000);
			cpu_reset_real();
			/* NOTREACHED */
		} else {
			/* We are not BSP (CPU #0) */

			cpu_reset_proxyid = cpuid;
			cpustop_restartfunc = cpu_reset_proxy;
			printf("cpu_reset: Restarting BSP\n");
			started_cpus = (1<<0);		/* Restart CPU #0 */

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			__asm __volatile("cli" : : : "memory");
			cpu_reset_proxy_active = 2;
			cnt = 0;
			while (cpu_reset_proxy_active == 2 && cnt < 10000000)
				cnt++;	/* Do nothing */
			if (cpu_reset_proxy_active == 2) {
				printf("cpu_reset: BSP did not grab mp lock\n");
				cpu_reset_real();	/* XXX: Bogus ? */
			}
			cpu_reset_proxy_active = 4;
			__asm __volatile("sti" : : : "memory");
			while (1);
			/* NOTREACHED */
		}
	}
#else
	cpu_reset_real();
#endif
}


static void
cpu_reset_real()
{

#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	disable_intr();
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);		/* SHUT0 = 0. */
		outb(0x37, 0x0b);		/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);		/* Reset. */
#else
	/*
	 * Attempt to do a CPU reset via the keyboard controller.
	 * Do not turn off GateA20, as any machine that fails to do
	 * the reset here would then end up in no man's land.
	 */

#if !defined(BROKEN_KEYBOARD_RESET)
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif
#endif /* PC98 */
	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, PAGE_SIZE);

	/* "good night, sweet prince .... <THUNK!>" */
	invltlb();
	/* NOTREACHED */
	while(1);
}

/*
 * Grow the user stack to allow for 'sp'. This version grows the stack in
 *	chunks of SGROWSIZ.
 */
int
grow(p, sp)
	struct proc *p;
	u_int sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
		return (1);

	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
	    SGROWSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit. This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested rounded up to
		 * the nearest SGROWSIZ to provide for some hysteresis.
		 */
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
		    SGROWSIZ) - grow_amount;
		/*
		 * If there isn't enough room to extend by SGROWSIZ, then
		 * just extend to the maximum size
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if ((grow_amount == 0) || (vm_map_find(&vm->vm_map, NULL, 0, (vm_offset_t *)&v,
		    grow_amount, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0) != KERN_SUCCESS)) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}

static int cnt_prezero;

SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */
int
vm_page_zero_idle()
{
	static int free_rover;
	vm_page_t m;
	int s;

	/*
	 * XXX
	 * We stop zeroing pages when there are sufficient prezeroed pages.
	 * This threshold isn't really needed, except we want to
	 * bypass unneeded calls to vm_page_list_find, and the
	 * associated cache flush and latency.  The pre-zero will
	 * still be called when there are significantly more
	 * non-prezeroed pages than zeroed pages.  The threshold
	 * of half the number of reserved pages is arbitrary, but
	 * approximately the right amount.  Eventually, we should
	 * perhaps interrupt the zero operation when a process
	 * is found to be ready to run.
	 */
	if (cnt.v_free_count - vm_page_zero_count <= cnt.v_free_reserved / 2)
		return (0);
#ifdef SMP
	if (try_mplock()) {
#endif
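		/*
		 * XXX the sti/cli pair below assumes the idle loop calls
		 * us with interrupts disabled; enable them while zeroing
		 * and disable them again before returning.
		 */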
		s = splvm();
		__asm __volatile("sti" : : : "memory");
		m = vm_page_list_find(PQ_FREE, free_rover);
		if (m != NULL) {
			--(*vm_page_queues[m->queue].lcnt);
			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
			m->queue = PQ_NONE;
			splx(s);
#if 0
			rel_mplock();
#endif
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
#if 0
			get_mplock();
#endif
			(void)splvm();
			m->queue = PQ_ZERO + m->pc;
			++(*vm_page_queues[m->queue].lcnt);
			TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m,
			    pageq);
			free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
			++vm_page_zero_count;
			++cnt_prezero;
		}
		splx(s);
		__asm __volatile("cli" : : : "memory");
#ifdef SMP
		rel_mplock();
#endif
		return (1);
#ifdef SMP
	}
#endif
	return (0);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm()
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

#include "isa.h"

int
is_physical_memory(addr)
	vm_offset_t addr;
{

#if NISA > 0
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return 0;
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}
1143