/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD$
 */

#include "npx.h"
#include "opt_user_ldt.h"
#ifdef PC98
#include "opt_pc98.h"
#endif
#include "opt_reset.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/vm86.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#ifdef PC98
#include <pc98/pc98/pc98.h>
#else
#include <i386/isa/isa.h>
#endif

static void	cpu_reset_real __P((void));
#ifdef SMP
static void	cpu_reset_proxy __P((void));
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif
extern int	_ucodesel, _udatasel;

/*
 * Quick version of vm_fault: touch a single byte at v so that the
 * backing page is resident.  For a write, subyte(v, fubyte(v))
 * rewrites the byte in place, forcing any copy-on-write to resolve.
 */
int
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	int r;

	if (prot & VM_PROT_WRITE)
		r = subyte(v, fubyte(v));
	else
		r = fubyte(v);
	return(r);
}
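
/*
 * NB: vmapbuf() below relies on this to force user pages resident
 * (resolving copy-on-write for device reads) before they are held
 * and entered into the kernel pmap.
 */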

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(p1, p2, flags)
	register struct proc *p1, *p2;
	int flags;
{
	struct pcb *pcb2;

	if ((flags & RFPROC) == 0) {
#ifdef USER_LDT
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct pcb *pcb1 = &p1->p_addr->u_pcb;
			struct pcb_ldt *pcb_ldt = pcb1->pcb_ldt;
			if (pcb_ldt && pcb_ldt->ldt_refcnt > 1) {
				pcb_ldt = user_ldt_alloc(pcb1,
				    pcb_ldt->ldt_len);
				user_ldt_free(pcb1);
				pcb1->pcb_ldt = pcb_ldt;
				set_user_ldt(pcb1);
			}
		}
#endif
		return;
	}

#if NNPX > 0
	/* Ensure that p1's pcb is up to date. */
	if (npxproc == p1)
		npxsave(&p1->p_addr->u_pcb.pcb_savefpu);
#endif

	/* Copy p1's pcb. */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	pcb2 = &p2->p_addr->u_pcb;

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies the user mode register values.
	 */
	p2->p_md.md_regs = (struct trapframe *)
			   ((int)p2->p_addr + UPAGES * PAGE_SIZE - 16) - 1;
	bcopy(p1->p_md.md_regs, p2->p_md.md_regs, sizeof(*p2->p_md.md_regs));

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)p2->p_md.md_regs - sizeof(void *);
	pcb2->pcb_ebx = (int)p2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	/*
	 * pcb2->pcb_ldt:	duplicated below, if necessary.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above (always 0 here?).
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 */

	pcb2->pcb_schednest = 0;

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

#ifdef USER_LDT
	/* Copy the LDT, if necessary. */
	if (pcb2->pcb_ldt != 0) {
		if (flags & RFMEM) {
			pcb2->pcb_ldt->ldt_refcnt++;
		} else {
			pcb2->pcb_ldt = user_ldt_alloc(pcb2,
			    pcb2->pcb_ldt->ldt_len);
		}
	}
#endif

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}
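
/*
 * Sketch of the child's kernel stack as built above (grows down from
 * the top of the UPAGES area):
 *
 *	p2->p_addr + UPAGES * PAGE_SIZE
 *	  -16 bytes:   apparently room for the extra vm86 segment
 *	               register slots of an extended trapframe
 *	  trapframe:   copy of p1's user registers (p2->p_md.md_regs)
 *	  return slot: pcb_esp points here, so cpu_switch()'s "ret"
 *	               enters fork_trampoline()
 */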

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(p, func, arg)
	struct proc *p;
	void (*func) __P((void *));
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	p->p_addr->u_pcb.pcb_esi = (int) func;	/* function */
	p->p_addr->u_pcb.pcb_ebx = (int) arg;	/* first arg */
}
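
/*
 * Typical use, as a sketch only (the fork1() flags and the callback
 * shown are illustrative, not a contract):
 *
 *	error = fork1(&proc0, RFMEM | RFFDG | RFPROC, &p);
 *	if (error == 0)
 *		cpu_set_fork_handler(p, my_kthread_main, sc);
 *
 * where my_kthread_main(void *) runs entirely in kernel mode.
 */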

void
cpu_exit(p)
	register struct proc *p;
{
	struct pcb *pcb = &p->p_addr->u_pcb;

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
	if (pcb->pcb_ext != 0) {
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = 0;
	}
#ifdef USER_LDT
	user_ldt_free(pcb);
#endif
	if (pcb->pcb_flags & PCB_DBREGS) {
		/*
		 * Disable all hardware breakpoints.
		 */
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
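	/*
	 * Acquire sched_lock, which is held across the final context
	 * switch, and release Giant without switching: this process
	 * never runs again.
	 */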
	mtx_enter(&sched_lock, MTX_SPIN);
	mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH);
	mtx_assert(&Giant, MA_NOTOWNED);
	cnt.v_swtch++;
	cpu_throw();
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_dispose_proc(p);

	/* and clean out the vmspace */
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
	int error;
	caddr_t tempuser;

	tempuser = malloc(ctob(UPAGES), M_TEMP, M_WAITOK);
	if (!tempuser)
		return EINVAL;

	bzero(tempuser, ctob(UPAGES));
	bcopy(p->p_addr, tempuser, sizeof(struct user));
	bcopy(p->p_md.md_regs,
	      tempuser + ((caddr_t) p->p_md.md_regs - (caddr_t) p->p_addr),
	      sizeof(struct trapframe));

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t) tempuser,
			ctob(UPAGES),
			(off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT,
			cred, (int *)NULL, p);

	free(tempuser, M_TEMP);

	return error;
}
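
/*
 * The header written above is thus an image of the process's UPAGES:
 * the struct user at offset 0 and the current trapframe at the offset
 * it occupies in the live u. area, with everything else zeroed.
 */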

#ifdef notyet
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Convert a kernel virtual address to a physical address.
 */
u_long
kvtop(void *addr)
{
	vm_offset_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return((u_long)pa);
}

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr, v, kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	for (v = bp->b_saveaddr,
	    addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE, v += PAGE_SIZE) {
		/*
		 * Do the vm_fault if needed; do the copy-on-write thing
		 * when reading stuff off device into memory.
		 */
		vm_fault_quick(addr,
		    (bp->b_iocmd == BIO_READ) ?
		    (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ);
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		if (pa == 0)
			panic("vmapbuf: page not present");
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
		pmap_kenter((vm_offset_t) v, pa);
	}

	kva = bp->b_saveaddr;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
}
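
/*
 * On entry b_saveaddr holds the kernel VA to map into (e.g. a pbuf's
 * KVA) and b_data the user address; on return the two are swapped,
 * so b_data points at the kernel mapping and vunmapbuf() can restore
 * the original user address.
 */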

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_data.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE) {
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		pmap_kremove((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}

	bp->b_data = bp->b_saveaddr;
}

/*
 * Force a reset of the processor by invalidating the entire address space!
 */

#ifdef SMP
static void
cpu_reset_proxy()
{

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to see that we've started */
	stop_cpus((1<<cpu_reset_proxyid));
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif
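
/*
 * The proxy handshake used by cpu_reset() below, in order:
 *	AP:  sets cpu_reset_proxy_active = 0, restarts the BSP and
 *	     spins until the BSP bumps the flag;
 *	BSP: comes out of its stop loop into cpu_reset_proxy(), sets
 *	     the flag to 1 and spins while it stays 1;
 *	AP:  sets the flag to 2 and spins forever;
 *	BSP: stops the AP, pauses, and calls cpu_reset_real().
 */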

void
cpu_reset()
{
#ifdef SMP
	if (smp_active == 0) {
		cpu_reset_real();
		/* NOTREACHED */
	} else {
		u_int map;
		int cnt;

		printf("cpu_reset called on cpu#%d\n", cpuid);

		map = other_cpus & ~stopped_cpus;
		if (map != 0) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);		/* Stop all other CPUs */
		}

		if (cpuid == 0) {
			DELAY(1000000);
			cpu_reset_real();
			/* NOTREACHED */
		} else {
			/* We are not BSP (CPU #0) */
			cpu_reset_proxyid = cpuid;
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");
			started_cpus = (1<<0);		/* Restart CPU #0 */

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			enable_intr();
			cpu_reset_proxy_active = 2;

			while (1)
				;
			/* NOTREACHED */
		}
	}
#else
	cpu_reset_real();
#endif
}

static void
cpu_reset_real()
{

#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	disable_intr();
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);		/* SHUT0 = 0. */
		outb(0x37, 0x0b);		/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);		/* Reset. */
#else
	/*
	 * Attempt to do a CPU reset via the keyboard controller.
	 * Do not turn off the GateA20, as any machine that fails to do
	 * the reset here would then end up in no man's land.
	 */

#if !defined(BROKEN_KEYBOARD_RESET)
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif
#endif /* PC98 */
	/* Force a shutdown by unmapping the entire address space! */
	bzero((caddr_t) PTD, PAGE_SIZE);

	/* "good night, sweet prince .... <THUNK!>" */
	invltlb();
	/* NOTREACHED */
	while (1)
		;
}

int
grow_stack(p, sp)
	struct proc *p;
	u_int sp;
{
	int rv;

	rv = vm_map_growstack(p, sp);
	if (rv != KERN_SUCCESS)
		return (0);

	return (1);
}

SYSCTL_DECL(_vm_stats_misc);

static int cnt_prezero;

SYSCTL_INT(_vm_stats_misc, OID_AUTO,
	cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */

#define ZIDLE_LO(v)	((v) * 2 / 3)
#define ZIDLE_HI(v)	((v) * 4 / 5)
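
/*
 * The two thresholds form a hysteresis band: zeroing stops once
 * ZIDLE_HI (4/5 of the free pages) are pre-zeroed and does not
 * resume until the count falls below ZIDLE_LO (2/3), so the idle
 * loop does not flip-flop around a single mark.
 */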

int
vm_page_zero_idle()
{
	static int free_rover;
	static int zero_state;
	vm_page_t m;
	int s;

	/*
	 * Attempt to maintain approximately 1/2 of our free pages in a
	 * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
	 * generally zeroing a page when the system is near steady-state.
	 * Otherwise we might get 'flutter' during disk I/O / IPC or
	 * fast sleeps.  We also do not want to be continuously zeroing
	 * pages because doing so may flush our L1 and L2 caches too much.
	 */

	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
		return(0);
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
		return(0);

	if (mtx_try_enter(&Giant, MTX_DEF)) {
		s = splvm();
		zero_state = 0;
		m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
		if (m != NULL && (m->flags & PG_ZERO) == 0) {
			/* Pull the page off its free queue. */
			vm_page_queues[m->queue].lcnt--;
			TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
			m->queue = PQ_NONE;
			/* Zero it at low spl; it is ours while PQ_NONE. */
			splx(s);
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
			(void)splvm();
			/* Mark it zeroed and put it back at the tail. */
			vm_page_flag_set(m, PG_ZERO);
			m->queue = PQ_FREE + m->pc;
			vm_page_queues[m->queue].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
			    pageq);
			++vm_page_zero_count;
			++cnt_prezero;
			if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
				zero_state = 1;
		}
		free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
		splx(s);
		mtx_exit(&Giant, MTX_DEF);
		return (1);
	}
	return (0);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

#include "isa.h"

int
is_physical_memory(addr)
	vm_offset_t addr;
{

#if NISA > 0
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return 0;
#endif

	/*
	 * Stuff other tests for known memory-mapped devices (PCI?)
	 * here.
	 */

	return 1;
}
627