xref: /freebsd/sys/i386/i386/vm_machdep.c (revision 6e8394b8baa7d5d9153ab90de6824bcd19b3b4e1)
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.121 1999/04/19 14:14:13 peter Exp $
 */

#include "npx.h"
#include "opt_user_ldt.h"
#ifdef PC98
#include "opt_pc98.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/pcb_ext.h>
#include <machine/vm86.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#ifdef PC98
#include <pc98/pc98/pc98.h>
#else
#include <i386/isa/isa.h>
#endif

static void	cpu_reset_real __P((void));
#ifdef SMP
static void	cpu_reset_proxy __P((void));
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif

/*
 * Quick version of vm_fault.  Touch the page containing v to fault it
 * in: a read probe uses fubyte(); for a write we read the byte and
 * store it back with subyte() to force any copy-on-write.
 */
void
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	if (prot & VM_PROT_WRITE)
		subyte(v, fubyte(v));
	else
		fubyte(v);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	struct pcb *pcb2 = &p2->p_addr->u_pcb;

#if NNPX > 0
	/* Ensure that p1's pcb is up to date. */
	if (npxproc == p1)
		npxsave(&p1->p_addr->u_pcb.pcb_savefpu);
#endif

	/* Copy p1's pcb. */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies the user mode register values.
	 */
	p2->p_md.md_regs = (struct trapframe *)
			   ((int)p2->p_addr + UPAGES * PAGE_SIZE - 16) - 1;
	*p2->p_md.md_regs = *p1->p_md.md_regs;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
	pcb2->pcb_edi = p2->p_md.md_regs->tf_edi;
	pcb2->pcb_esi = (int)fork_return;
	pcb2->pcb_ebp = p2->p_md.md_regs->tf_ebp;
	pcb2->pcb_esp = (int)p2->p_md.md_regs - sizeof(void *);
	pcb2->pcb_ebx = (int)p2;
	pcb2->pcb_eip = (int)fork_trampoline;
	/*
	 * pcb2->pcb_ldt:	duplicated below, if necessary.
	 * pcb2->pcb_ldt_len:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above (always 0 here?).
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 */

#ifdef SMP
	pcb2->pcb_mpnest = 1;
#endif
	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

#ifdef USER_LDT
	/* Copy the LDT, if necessary. */
	if (pcb2->pcb_ldt != 0) {
		union descriptor *new_ldt;
		size_t len = pcb2->pcb_ldt_len * sizeof(union descriptor);

		new_ldt = (union descriptor *)kmem_alloc(kernel_map, len);
		bcopy(pcb2->pcb_ldt, new_ldt, len);
		pcb2->pcb_ldt = (caddr_t)new_ldt;
	}
#endif

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to proc_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  proc_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}
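
/*
 * Illustrative sketch (not from the source) of the child's kernel stack
 * as laid out by cpu_fork() above; the 16 spare bytes at the top are
 * assumed to leave room for the vm86 trapframe extension:
 *
 *	(int)p2->p_addr + UPAGES * PAGE_SIZE	top of u-area / stack
 *	  - 16					spare (vm86 segment regs?)
 *	  - sizeof(struct trapframe)		p2->p_md.md_regs
 *	  - sizeof(void *)			pcb_esp: the return-address
 *						slot through which
 *						cpu_switch() "returns" into
 *						fork_trampoline
 */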

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(p, func, arg)
	struct proc *p;
	void (*func) __P((const void *));
	const void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	p->p_addr->u_pcb.pcb_esi = (int) func;	/* function */
	p->p_addr->u_pcb.pcb_ebx = (int) arg;	/* first arg */
}
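
/*
 * Schematic use (a sketch, not the exact in-tree API): a kernel-thread
 * creator forks a process sharing the kernel address space, then points
 * its return path at a kernel function instead of fork_return():
 *
 *	error = fork1(&proc0, RFMEM | RFFDG | RFPROC);
 *	if (error == 0)
 *		cpu_set_fork_handler(p2, kthread_main, arg);
 *
 * where p2 is the new process and kthread_main/arg are placeholders.
 * The child then starts in kthread_main(arg, frame) and never returns
 * to user mode.
 */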

void
cpu_exit(p)
	register struct proc *p;
{
	struct pcb *pcb = &p->p_addr->u_pcb;

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
	if (pcb->pcb_ext != 0) {
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = 0;
	}
#ifdef USER_LDT
	if (pcb->pcb_ldt != 0) {
		if (pcb == curpcb) {
			lldt(_default_ldt);
			currentldt = _default_ldt;
		}
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
			pcb->pcb_ldt_len * sizeof(union descriptor));
		pcb->pcb_ldt_len = (int)pcb->pcb_ldt = 0;
	}
#endif
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_dispose_proc(p);

	/* and clean-out the vmspace */
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
	int error;
	caddr_t tempuser;

	tempuser = malloc(ctob(UPAGES), M_TEMP, M_WAITOK);
	if (!tempuser)
		return EINVAL;

	bzero(tempuser, ctob(UPAGES));
	bcopy(p->p_addr, tempuser, sizeof(struct user));
	bcopy(p->p_md.md_regs,
	      tempuser + ((caddr_t) p->p_md.md_regs - (caddr_t) p->p_addr),
	      sizeof(struct trapframe));

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t) tempuser,
			ctob(UPAGES),
			(off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT,
			cred, (int *)NULL, p);

	free(tempuser, M_TEMP);

	return error;
}
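
/*
 * Note (descriptive, not from the source): the header written above is
 * an image of the whole u-area, ctob(UPAGES) bytes, with struct user at
 * the front and the user-mode trapframe copied in at its live offset,
 * so register state can be recovered from the dump.
 */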

#ifdef notyet
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr, v, kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE, v += PAGE_SIZE) {
		/*
		 * Do the vm_fault if needed; do the copy-on-write thing
		 * when reading stuff off device into memory.
		 */
		vm_fault_quick(addr,
			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		if (pa == 0)
			panic("vmapbuf: page not present");
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
		pmap_kenter((vm_offset_t) v, pa);
	}

	kva = bp->b_saveaddr;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_data.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE) {
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		pmap_kremove((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}

	bp->b_data = bp->b_saveaddr;
}
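
/*
 * Schematic caller (a sketch; see physio() for the real sequence): a
 * raw-device transfer brackets the I/O with the two routines above:
 *
 *	vmapbuf(bp);			wire and map the user pages
 *	(*devsw->d_strategy)(bp);	start the transfer
 *	biowait(bp);			wait for completion
 *	vunmapbuf(bp);			unhold pages, restore b_data
 */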

/*
 * Force reset the processor by invalidating the entire address space!
 */

#ifdef SMP
static void
cpu_reset_proxy()
{
	u_int saved_mp_lock;

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to disable interrupts */
	saved_mp_lock = mp_lock;
	mp_lock = 1;
	printf("cpu_reset_proxy: Grabbed mp lock for BSP\n");
	cpu_reset_proxy_active = 3;
	while (cpu_reset_proxy_active == 3)
		;	/* Wait for other cpu to enable interrupts */
	stop_cpus((1<<cpu_reset_proxyid));
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif

void
cpu_reset()
{
#ifdef SMP
	if (smp_active == 0) {
		cpu_reset_real();
		/* NOTREACHED */
	} else {
		u_int map;
		int cnt;

		printf("cpu_reset called on cpu#%d\n", cpuid);

		map = other_cpus & ~stopped_cpus;

		if (map != 0) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);		/* Stop all other CPUs */
		}

		if (cpuid == 0) {
			DELAY(1000000);
			cpu_reset_real();
			/* NOTREACHED */
		} else {
			/* We are not BSP (CPU #0) */

			cpu_reset_proxyid = cpuid;
			cpustop_restartfunc = cpu_reset_proxy;
			printf("cpu_reset: Restarting BSP\n");
			started_cpus = (1<<0);		/* Restart CPU #0 */

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			__asm __volatile("cli" : : : "memory");
			cpu_reset_proxy_active = 2;
			cnt = 0;
			while (cpu_reset_proxy_active == 2 && cnt < 10000000)
				cnt++;	/* Do nothing */
			if (cpu_reset_proxy_active == 2) {
				printf("cpu_reset: BSP did not grab mp lock\n");
				cpu_reset_real();	/* XXX: Bogus ? */
			}
			cpu_reset_proxy_active = 4;
			__asm __volatile("sti" : : : "memory");
			while (1);
			/* NOTREACHED */
		}
	}
#else
	cpu_reset_real();
#endif
}
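
/*
 * Descriptive summary (not from the source) of the handshake between
 * the rebooting AP above and the BSP in cpu_reset_proxy(), in terms of
 * cpu_reset_proxy_active:
 *
 *	0	initial; AP restarts the BSP and waits for the proxy
 *	1	BSP is in cpu_reset_proxy(); AP may disable interrupts
 *	2	AP has interrupts disabled; BSP may take the MP lock
 *	3	BSP holds the MP lock; AP re-enables interrupts
 *	4	AP is spinning; BSP stops it and calls cpu_reset_real()
 */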

static void
cpu_reset_real()
{

#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	disable_intr();
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);		/* SHUT0 = 0. */
		outb(0x37, 0x0b);		/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);		/* Reset. */
#else
	/*
	 * Attempt to do a CPU reset via the keyboard controller.  Do not
	 * turn off GateA20, as any machine that fails to do the reset
	 * here would then end up in no man's land.
	 */

#if !defined(BROKEN_KEYBOARD_RESET)
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif
#endif /* PC98 */
	/* Force a shutdown by unmapping the entire address space! */
	bzero((caddr_t) PTD, PAGE_SIZE);

	/* "good night, sweet prince .... <THUNK!>" */
	invltlb();
	/* NOTREACHED */
	while(1);
}

int
grow_stack(p, sp)
	struct proc *p;
	u_int sp;
{
	int rv;

	rv = vm_map_growstack(p, sp);
	if (rv != KERN_SUCCESS)
		return (0);

	return (1);
}

SYSCTL_DECL(_vm_stats_misc);

static int cnt_prezero;

SYSCTL_INT(_vm_stats_misc, OID_AUTO,
	cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */

#define ZIDLE_LO(v)	((v) * 2 / 3)
#define ZIDLE_HI(v)	((v) * 4 / 5)
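
/*
 * Worked example (illustrative): with cnt.v_free_count == 3000,
 * ZIDLE_LO is 2000 and ZIDLE_HI is 2400.  With zero_state set we stop
 * zeroing once vm_page_zero_count reaches 2000 and resume only after
 * it drops below that; with zero_state clear we zero up to 2400.  The
 * [2000, 2400] band is the hysteresis that avoids flutter.
 */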

int
vm_page_zero_idle()
{
	static int free_rover;
	static int zero_state;
	vm_page_t m;
	int s;

	/*
	 * Attempt to maintain approximately 1/2 of our free pages in a
	 * PG_ZERO'd state.   Add some hysteresis to (attempt to) avoid
	 * generally zeroing a page when the system is near steady-state.
	 * Otherwise we might get 'flutter' during disk I/O / IPC or
	 * fast sleeps.  We also do not want to be continuously zeroing
	 * pages because doing so may flush our L1 and L2 caches too much.
	 */

	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
		return(0);
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
		return(0);

#ifdef SMP
	if (try_mplock()) {
#endif
		s = splvm();
		__asm __volatile("sti" : : : "memory");
		zero_state = 0;
		m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
		if (m != NULL && (m->flags & PG_ZERO) == 0) {
			--(*vm_page_queues[m->queue].lcnt);
			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
			m->queue = PQ_NONE;
			splx(s);
#if 0
			rel_mplock();
#endif
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
#if 0
			get_mplock();
#endif
			(void)splvm();
			vm_page_flag_set(m, PG_ZERO);
			m->queue = PQ_FREE + m->pc;
			++(*vm_page_queues[m->queue].lcnt);
			TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m,
			    pageq);
			++vm_page_zero_count;
			++cnt_prezero;
			if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
				zero_state = 1;
		}
		free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
		splx(s);
		__asm __volatile("cli" : : : "memory");
#ifdef SMP
		rel_mplock();
#endif
		return (1);
#ifdef SMP
	}
#endif
	return (0);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm()
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

#include "isa.h"

int
is_physical_memory(addr)
	vm_offset_t addr;
{

#if NISA > 0
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return 0;
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}