xref: /freebsd/sys/i386/i386/vm_machdep.c (revision 7660b554bc59a07be0431c17e0e33815818baa69)
1 /*-
2  * Copyright (c) 1982, 1986 The Regents of the University of California.
3  * Copyright (c) 1989, 1990 William Jolitz
4  * Copyright (c) 1994 John Dyson
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * the Systems Programming Group of the University of Utah Computer
9  * Science Department, and William Jolitz.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the University of
22  *	California, Berkeley and its contributors.
23  * 4. Neither the name of the University nor the names of its contributors
24  *    may be used to endorse or promote products derived from this software
25  *    without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
40  *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include "opt_npx.h"
47 #ifdef PC98
48 #include "opt_pc98.h"
49 #endif
50 #include "opt_reset.h"
51 #include "opt_isa.h"
52 #include "opt_kstack_pages.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/malloc.h>
57 #include <sys/proc.h>
58 #include <sys/kse.h>
59 #include <sys/bio.h>
60 #include <sys/buf.h>
61 #include <sys/vnode.h>
62 #include <sys/vmmeter.h>
63 #include <sys/kernel.h>
64 #include <sys/ktr.h>
65 #include <sys/mbuf.h>
66 #include <sys/mutex.h>
67 #include <sys/smp.h>
68 #include <sys/socketvar.h>
69 #include <sys/sysctl.h>
70 #include <sys/unistd.h>
71 
72 #include <machine/cpu.h>
73 #include <machine/md_var.h>
74 #include <machine/pcb.h>
75 #include <machine/pcb_ext.h>
76 #include <machine/vm86.h>
77 
78 #include <vm/vm.h>
79 #include <vm/vm_param.h>
80 #include <sys/lock.h>
81 #include <vm/vm_kern.h>
82 #include <vm/vm_page.h>
83 #include <vm/vm_map.h>
84 #include <vm/vm_extern.h>
85 
86 #include <sys/user.h>
87 
88 #ifdef PC98
89 #include <pc98/pc98/pc98.h>
90 #else
91 #include <i386/isa/isa.h>
92 #endif
93 
static void	cpu_reset_real(void);
#ifdef SMP
static void	cpu_reset_proxy(void);
/* cpuid of the (non-BSP) CPU that requested the reset; see cpu_reset(). */
static u_int	cpu_reset_proxyid;
/* Handshake with the BSP proxy: 0 = idle, 1 = proxy running, 2 = proceed. */
static volatile u_int	cpu_reset_proxy_active;
#endif
static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)

/*
 * Expanded sf_freelist head.  Really an SLIST_HEAD() in disguise, with
 * the list head protected by the sf_lock mutex.
 */
static struct {
	SLIST_HEAD(, sf_buf) sf_head;	/* free sf_buf entries */
	struct mtx sf_lock;		/* protects sf_head and sf_buf_alloc_want */
} sf_freelist;

/* Number of threads sleeping in sf_buf_alloc() waiting for a free sf_buf. */
static u_int	sf_buf_alloc_want;

/* User code/data segment selectors, defined elsewhere in MD startup code. */
extern int	_ucodesel, _udatasel;
115 
116 /*
117  * Finish a fork operation, with process p2 nearly set up.
118  * Copy and update the pcb, set up the stack so that the child
119  * ready to run and return to user mode.
120  */
121 void
122 cpu_fork(td1, p2, td2, flags)
123 	register struct thread *td1;
124 	register struct proc *p2;
125 	struct thread *td2;
126 	int flags;
127 {
128 	register struct proc *p1;
129 	struct pcb *pcb2;
130 	struct mdproc *mdp2;
131 #ifdef DEV_NPX
132 	register_t savecrit;
133 #endif
134 
135 	p1 = td1->td_proc;
136 	if ((flags & RFPROC) == 0) {
137 		if ((flags & RFMEM) == 0) {
138 			/* unshare user LDT */
139 			struct mdproc *mdp1 = &p1->p_md;
140 			struct proc_ldt *pldt = mdp1->md_ldt;
141 			if (pldt && pldt->ldt_refcnt > 1) {
142 				pldt = user_ldt_alloc(mdp1, pldt->ldt_len);
143 				if (pldt == NULL)
144 					panic("could not copy LDT");
145 				mdp1->md_ldt = pldt;
146 				set_user_ldt(mdp1);
147 				user_ldt_free(td1);
148 			}
149 		}
150 		return;
151 	}
152 
153 	/* Ensure that p1's pcb is up to date. */
154 #ifdef DEV_NPX
155 	if (td1 == curthread)
156 		td1->td_pcb->pcb_gs = rgs();
157 	savecrit = intr_disable();
158 	if (PCPU_GET(fpcurthread) == td1)
159 		npxsave(&td1->td_pcb->pcb_save);
160 	intr_restore(savecrit);
161 #endif
162 
163 	/* Point the pcb to the top of the stack */
164 	pcb2 = (struct pcb *)(td2->td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
165 	td2->td_pcb = pcb2;
166 
167 	/* Copy p1's pcb */
168 	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
169 
170 	/* Point mdproc and then copy over td1's contents */
171 	mdp2 = &p2->p_md;
172 	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));
173 
174 	/*
175 	 * Create a new fresh stack for the new process.
176 	 * Copy the trap frame for the return to user mode as if from a
177 	 * syscall.  This copies most of the user mode register values.
178 	 * The -16 is so we can expand the trapframe if we go to vm86.
179 	 */
180 	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1;
181 	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));
182 
183 	td2->td_frame->tf_eax = 0;		/* Child returns zero */
184 	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
185 	td2->td_frame->tf_edx = 1;
186 
187 	/*
188 	 * Set registers for trampoline to user mode.  Leave space for the
189 	 * return address on stack.  These are the kernel mode register values.
190 	 */
191 #ifdef PAE
192 	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
193 #else
194 	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
195 #endif
196 	pcb2->pcb_edi = 0;
197 	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
198 	pcb2->pcb_ebp = 0;
199 	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
200 	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
201 	pcb2->pcb_eip = (int)fork_trampoline;
202 	pcb2->pcb_psl = PSL_KERNEL;		/* ints disabled */
203 	pcb2->pcb_gs = rgs();
204 	/*-
205 	 * pcb2->pcb_dr*:	cloned above.
206 	 * pcb2->pcb_savefpu:	cloned above.
207 	 * pcb2->pcb_flags:	cloned above.
208 	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
209 	 * pcb2->pcb_gs:	cloned above.
210 	 * pcb2->pcb_ext:	cleared below.
211 	 */
212 
213 	/*
214 	 * XXX don't copy the i/o pages.  this should probably be fixed.
215 	 */
216 	pcb2->pcb_ext = 0;
217 
218         /* Copy the LDT, if necessary. */
219 	mtx_lock_spin(&sched_lock);
220         if (mdp2->md_ldt != 0) {
221 		if (flags & RFMEM) {
222 			mdp2->md_ldt->ldt_refcnt++;
223 		} else {
224 			mdp2->md_ldt = user_ldt_alloc(mdp2,
225 			    mdp2->md_ldt->ldt_len);
226 			if (mdp2->md_ldt == NULL)
227 				panic("could not copy LDT");
228 		}
229         }
230 	mtx_unlock_spin(&sched_lock);
231 
232 	/*
233 	 * Now, cpu_switch() can schedule the new process.
234 	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
235 	 * containing the return address when exiting cpu_switch.
236 	 * This will normally be to fork_trampoline(), which will have
237 	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
238 	 * will set up a stack to call fork_return(p, frame); to complete
239 	 * the return to user-mode.
240 	 */
241 }
242 
243 /*
244  * Intercept the return address from a freshly forked process that has NOT
245  * been scheduled yet.
246  *
247  * This is needed to make kernel threads stay in kernel mode.
248  */
249 void
250 cpu_set_fork_handler(td, func, arg)
251 	struct thread *td;
252 	void (*func)(void *);
253 	void *arg;
254 {
255 	/*
256 	 * Note that the trap frame follows the args, so the function
257 	 * is really called like this:  func(arg, frame);
258 	 */
259 	td->td_pcb->pcb_esi = (int) func;	/* function */
260 	td->td_pcb->pcb_ebx = (int) arg;	/* first arg */
261 }
262 
/*
 * Machine-dependent part of process exit: detach the user LDT (after
 * steering %gs away from any selector it might define) and disarm any
 * hardware debug registers the process had enabled.
 */
void
cpu_exit(struct thread *td)
{
	struct mdproc *mdp;
	struct pcb *pcb = td->td_pcb;


	/* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
	mdp = &td->td_proc->p_md;
	if (mdp->md_ldt) {
		/*
		 * Load the plain user data selector first so that
		 * freeing the LDT cannot leave a stale %gs loaded.
		 */
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	}
	if (pcb->pcb_flags & PCB_DBREGS) {
		/* disable all hardware breakpoints */
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
}
283 
284 void
285 cpu_thread_exit(struct thread *td)
286 {
287 	struct pcb *pcb = td->td_pcb;
288 #ifdef DEV_NPX
289 	npxexit(td);
290 #endif
291         if (pcb->pcb_flags & PCB_DBREGS) {
292 		/* disable all hardware breakpoints */
293                 reset_dbregs();
294                 pcb->pcb_flags &= ~PCB_DBREGS;
295         }
296 }
297 
/*
 * Free per-thread MD resources that may linger on a recycled thread:
 * currently only the extension pages (pcb_ext) holding the per-thread
 * TSS / i/o permission bitmap, if one was ever allocated.
 */
void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != 0) {
		/* XXXKSE  XXXSMP  not SMP SAFE.. what locks do we have? */
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		/* kmem_free() requires Giant at this point in time. */
		mtx_lock(&Giant);
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		mtx_unlock(&Giant);
		pcb->pcb_ext = 0;
	}
}
318 
/*
 * MD hook run when a thread's kernel stack is swapped back in.
 * Nothing to do on i386.
 */
void
cpu_thread_swapin(struct thread *td)
{
}
323 
/*
 * MD hook run before a thread's kernel stack is swapped out.
 * Nothing to do on i386.
 */
void
cpu_thread_swapout(struct thread *td)
{
}
328 
/*
 * MD scheduler-exit hook.  Nothing to do on i386.
 */
void
cpu_sched_exit(td)
	register struct thread *td;
{
}
334 
/*
 * Lay out the MD parts of a new thread: the pcb lives at the very top
 * of the kernel stack, with the trap frame immediately below it.
 */
void
cpu_thread_setup(struct thread *td)
{

	td->td_pcb =
	     (struct pcb *)(td->td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	/* The -16 leaves room to expand the trapframe if we enter vm86. */
	td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb - 16) - 1;
	td->td_pcb->pcb_ext = NULL;
}
344 
345 /*
346  * Initialize machine state (pcb and trap frame) for a new thread about to
347  * upcall. Pu t enough state in the new thread's PCB to get it to go back
348  * userret(), where we can intercept it again to set the return (upcall)
349  * Address and stack, along with those from upcals that are from other sources
350  * such as those generated in thread_userret() itself.
351  */
352 void
353 cpu_set_upcall(struct thread *td, struct thread *td0)
354 {
355 	struct pcb *pcb2;
356 
357 	/* Point the pcb to the top of the stack. */
358 	pcb2 = td->td_pcb;
359 
360 	/*
361 	 * Copy the upcall pcb.  This loads kernel regs.
362 	 * Those not loaded individually below get their default
363 	 * values here.
364 	 *
365 	 * XXXKSE It might be a good idea to simply skip this as
366 	 * the values of the other registers may be unimportant.
367 	 * This would remove any requirement for knowing the KSE
368 	 * at this time (see the matching comment below for
369 	 * more analysis) (need a good safe default).
370 	 */
371 	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
372 
373 	/*
374 	 * Create a new fresh stack for the new thread.
375 	 * The -16 is so we can expand the trapframe if we go to vm86.
376 	 * Don't forget to set this stack value into whatever supplies
377 	 * the address for the fault handlers.
378 	 * The contexts are filled in at the time we actually DO the
379 	 * upcall as only then do we know which KSE we got.
380 	 */
381 	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
382 
383 	/*
384 	 * Set registers for trampoline to user mode.  Leave space for the
385 	 * return address on stack.  These are the kernel mode register values.
386 	 */
387 #ifdef PAE
388 	pcb2->pcb_cr3 = vtophys(vmspace_pmap(td->td_proc->p_vmspace)->pm_pdpt);
389 #else
390 	pcb2->pcb_cr3 = vtophys(vmspace_pmap(td->td_proc->p_vmspace)->pm_pdir);
391 #endif
392 	pcb2->pcb_edi = 0;
393 	pcb2->pcb_esi = (int)fork_return;		    /* trampoline arg */
394 	pcb2->pcb_ebp = 0;
395 	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
396 	pcb2->pcb_ebx = (int)td;			    /* trampoline arg */
397 	pcb2->pcb_eip = (int)fork_trampoline;
398 	pcb2->pcb_psl &= ~(PSL_I);	/* interrupts must be disabled */
399 	pcb2->pcb_gs = rgs();
400 	/*
401 	 * If we didn't copy the pcb, we'd need to do the following registers:
402 	 * pcb2->pcb_dr*:	cloned above.
403 	 * pcb2->pcb_savefpu:	cloned above.
404 	 * pcb2->pcb_flags:	cloned above.
405 	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
406 	 * pcb2->pcb_gs:	cloned above.  XXXKSE ???
407 	 * pcb2->pcb_ext:	cleared below.
408 	 */
409 	 pcb2->pcb_ext = NULL;
410 }
411 
412 /*
413  * Set that machine state for performing an upcall that has to
414  * be done in thread_userret() so that those upcalls generated
415  * in thread_userret() itself can be done as well.
416  */
417 void
418 cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
419 {
420 
421 	/*
422 	 * Do any extra cleaning that needs to be done.
423 	 * The thread may have optional components
424 	 * that are not present in a fresh thread.
425 	 * This may be a recycled thread so make it look
426 	 * as though it's newly allocated.
427 	 */
428 	cpu_thread_clean(td);
429 
430 	/*
431 	 * Set the trap frame to point at the beginning of the uts
432 	 * function.
433 	 */
434 	td->td_frame->tf_esp =
435 	    (int)ku->ku_stack.ss_sp + ku->ku_stack.ss_size - 16;
436 	td->td_frame->tf_eip = (int)ku->ku_func;
437 
438 	/*
439 	 * Pass the address of the mailbox for this kse to the uts
440 	 * function as a parameter on the stack.
441 	 */
442 	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
443 	    (int)ku->ku_mailbox);
444 }
445 
446 /*
447  * Convert kernel VA to physical address
448  */
449 vm_paddr_t
450 kvtop(void *addr)
451 {
452 	vm_paddr_t pa;
453 
454 	pa = pmap_kextract((vm_offset_t)addr);
455 	if (pa == 0)
456 		panic("kvtop: zero page frame");
457 	return (pa);
458 }
459 
460 /*
461  * Force reset the processor by invalidating the entire address space!
462  */
463 
464 #ifdef SMP
465 static void
466 cpu_reset_proxy()
467 {
468 
469 	cpu_reset_proxy_active = 1;
470 	while (cpu_reset_proxy_active == 1)
471 		;	 /* Wait for other cpu to see that we've started */
472 	stop_cpus((1<<cpu_reset_proxyid));
473 	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
474 	DELAY(1000000);
475 	cpu_reset_real();
476 }
477 #endif
478 
/*
 * Reset the machine.  On SMP, the actual reset is done on the BSP
 * (CPU #0): other CPUs are stopped first, and if we are not the BSP we
 * restart it with cpu_reset_proxy() and hand the reset off to it.
 */
void
cpu_reset()
{
#ifdef SMP
	if (smp_active == 0) {
		/* Other CPUs were never started; reset directly. */
		cpu_reset_real();
		/* NOTREACHED */
	} else {

		u_int map;
		int cnt;
		printf("cpu_reset called on cpu#%d\n", PCPU_GET(cpuid));

		/* CPUs other than us that are not already stopped. */
		map = PCPU_GET(other_cpus) & ~ stopped_cpus;

		if (map != 0) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);		/* Stop all other CPUs */
		}

		if (PCPU_GET(cpuid) == 0) {
			DELAY(1000000);
			cpu_reset_real();
			/* NOTREACHED */
		} else {
			/* We are not BSP (CPU #0) */

			/*
			 * Arrange for the BSP to run cpu_reset_proxy()
			 * on restart, then restart it and wait for it
			 * to announce itself (proxy_active becomes 1).
			 */
			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");
			started_cpus = (1<<0);		/* Restart CPU #0 */

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			enable_intr();
			/* Acknowledge the proxy; it will now stop us. */
			cpu_reset_proxy_active = 2;

			while (1);
			/* NOTREACHED */
		}
	}
#else
	cpu_reset_real();
#endif
}
528 
/*
 * Actually reset the machine: try the platform reset mechanism (PC-98
 * reset port, or the AT keyboard controller pulse), and if that does
 * not take effect, force a fault by unmapping the whole address space.
 */
static void
cpu_reset_real()
{

#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	disable_intr();
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);		/* SHUT0 = 0. */
		outb(0x37, 0x0b);		/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);		/* Reset. */
#else
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off the GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */

#if !defined(BROKEN_KEYBOARD_RESET)
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif
#endif /* PC98 */
	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t)PTD, NBPTD);

	/* "good night, sweet prince .... <THUNK!>" */
	invltlb();
	/* NOTREACHED */
	while(1);
}
565 
566 /*
567  * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
568  */
569 static void
570 sf_buf_init(void *arg)
571 {
572 	struct sf_buf *sf_bufs;
573 	vm_offset_t sf_base;
574 	int i;
575 
576 	mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
577 	mtx_lock(&sf_freelist.sf_lock);
578 	SLIST_INIT(&sf_freelist.sf_head);
579 	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
580 	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
581 	    M_NOWAIT | M_ZERO);
582 	for (i = 0; i < nsfbufs; i++) {
583 		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
584 		SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
585 	}
586 	sf_buf_alloc_want = 0;
587 	mtx_unlock(&sf_freelist.sf_lock);
588 }
589 
590 /*
591  * Get an sf_buf from the freelist. Will block if none are available.
592  */
593 struct sf_buf *
594 sf_buf_alloc(struct vm_page *m)
595 {
596 	struct sf_buf *sf;
597 	int error;
598 
599 	mtx_lock(&sf_freelist.sf_lock);
600 	while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
601 		sf_buf_alloc_want++;
602 		error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
603 		    "sfbufa", 0);
604 		sf_buf_alloc_want--;
605 
606 		/*
607 		 * If we got a signal, don't risk going back to sleep.
608 		 */
609 		if (error)
610 			break;
611 	}
612 	if (sf != NULL) {
613 		SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
614 		sf->m = m;
615 		pmap_qenter(sf->kva, &sf->m, 1);
616 	}
617 	mtx_unlock(&sf_freelist.sf_lock);
618 	return (sf);
619 }
620 
621 /*
622  * Detatch mapped page and release resources back to the system.
623  */
624 void
625 sf_buf_free(void *addr, void *args)
626 {
627 	struct sf_buf *sf;
628 	struct vm_page *m;
629 
630 	sf = args;
631 	pmap_qremove((vm_offset_t)addr, 1);
632 	m = sf->m;
633 	vm_page_lock_queues();
634 	vm_page_unwire(m, 0);
635 	/*
636 	 * Check for the object going away on us. This can
637 	 * happen since we don't hold a reference to it.
638 	 * If so, we're responsible for freeing the page.
639 	 */
640 	if (m->wire_count == 0 && m->object == NULL)
641 		vm_page_free(m);
642 	vm_page_unlock_queues();
643 	sf->m = NULL;
644 	mtx_lock(&sf_freelist.sf_lock);
645 	SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
646 	if (sf_buf_alloc_want > 0)
647 		wakeup_one(&sf_freelist);
648 	mtx_unlock(&sf_freelist.sf_lock);
649 }
650 
651 /*
652  * Software interrupt handler for queued VM system processing.
653  */
654 void
655 swi_vm(void *dummy)
656 {
657 	if (busdma_swi_pending != 0)
658 		busdma_swi();
659 }
660 
661 /*
662  * Tell whether this address is in some physical memory region.
663  * Currently used by the kernel coredump code in order to avoid
664  * dumping the ``ISA memory hole'' which could cause indefinite hangs,
665  * or other unpredictable behaviour.
666  */
667 
668 int
669 is_physical_memory(addr)
670 	vm_offset_t addr;
671 {
672 
673 #ifdef DEV_ISA
674 	/* The ISA ``memory hole''. */
675 	if (addr >= 0xa0000 && addr < 0x100000)
676 		return 0;
677 #endif
678 
679 	/*
680 	 * stuff other tests for known memory-mapped devices (PCI?)
681 	 * here
682 	 */
683 
684 	return 1;
685 }
686