/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_isa.h"
#include "opt_npx.h"
#include "opt_reset.h"
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#ifdef CPU_ELAN
#include <machine/elan_mmcr.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#include <isa/isareg.h>

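/*
 * Sketch of the per-thread kernel stack layout implied by get_pcb_td(),
 * get_pcb_user_save_td() and cpu_thread_alloc() below, from the top of
 * the stack (high addresses) downward:
 *
 *	td_kstack + td_kstack_pages * PAGE_SIZE		(top of kernel stack)
 *	  user FPU/XSAVE save area of roundup2(cpu_max_ext_state_size,
 *	      XSAVE_AREA_ALIGN) bytes			(get_pcb_user_save_td())
 *	  struct pcb					(get_pcb_td())
 *	  16 spare bytes to grow the trapframe for vm86 mode
 *	  struct trapframe				(td_frame)
 *	  remaining space used as the thread's kernel stack
 */
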
#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif

_Static_assert(OFFSETOF_CURTHREAD == offsetof(struct pcpu, pc_curthread),
    "OFFSETOF_CURTHREAD does not correspond with offset of pc_curthread.");
_Static_assert(OFFSETOF_CURPCB == offsetof(struct pcpu, pc_curpcb),
    "OFFSETOF_CURPCB does not correspond with offset of pc_curpcb.");
_Static_assert(__OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "__OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");

static void cpu_reset_real(void);
#ifdef SMP
static void cpu_reset_proxy(void);
static u_int cpu_reset_proxyid;
static volatile u_int cpu_reset_proxy_active;
#endif

union savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
	KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
	return ((union savefpu *)p);
}

union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
	vm_offset_t p;

	p = (vm_offset_t)(pcb + 1);
	return ((union savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
	    sizeof(struct pcb);
	return ((struct pcb *)p);
}

void *
alloc_fpusave(int flags)
{
	void *res;
	struct savefpu_ymm *sf;

	res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
	if (use_xsave) {
		sf = (struct savefpu_ymm *)res;
		bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
		sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
	}
	return (res);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt, *pldt1;

			mtx_lock_spin(&dt_lock);
			if ((pldt1 = mdp1->md_ldt) != NULL &&
			    pldt1->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_deref(pldt1);
			} else
				mtx_unlock_spin(&dt_lock);
		}
		return;
	}

	/* Ensure that td1's pcb is up to date. */
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
	critical_enter();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(td1->td_pcb->pcb_save);
	critical_exit();

	/* Point the pcb to the top of the stack */
	pcb2 = get_pcb_td(td2);
	td2->td_pcb = pcb2;

	/* Copy td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Properly initialize pcb_save */
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
	    cpu_max_ext_state_size);

	/* Point mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
	td2->td_frame->tf_edx = 1;

	/*
	 * If the parent process has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame unless the debugger had set PF_FORK
	 * on the parent.  Otherwise, the child will receive a (likely
	 * unexpected) SIGTRAP when it executes the first instruction after
	 * returning to userland.
	 */
	if ((p1->p_pfsflags & PF_FORK) == 0)
		td2->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#if defined(PAE) || defined(PAE_TABLES)
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	/*-
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&dt_lock);
	if (mdp2->md_ldt != NULL) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&dt_lock);

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int) func;	/* function */
	td->td_pcb->pcb_ebx = (int) arg;	/* first arg */
}

void
cpu_exit(struct thread *td)
{

	/*
	 * If this process has a custom LDT, release it.  Reset pcb_gs
	 * and %gs before we free it in case they refer to an LDT entry.
	 */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	} else
		mtx_unlock_spin(&dt_lock);
}

void
cpu_thread_exit(struct thread *td)
{

	critical_enter();
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
	critical_exit();

	/* Disable any hardware breakpoints. */
	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != NULL) {
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_arena, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = NULL;
	}
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;
	struct xstate_hdr *xhdr;

	td->td_pcb = pcb = get_pcb_td(td);
	td->td_frame = (struct trapframe *)((caddr_t)pcb - 16) - 1;
	pcb->pcb_ext = NULL;
	pcb->pcb_save = get_pcb_user_save_pcb(pcb);
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
		bzero(xhdr, sizeof(*xhdr));
		xhdr->xstate_bv = xsave_mask;
	}
}

void
cpu_thread_free(struct thread *td)
{

	cpu_thread_clean(td);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{

	switch (error) {
	case 0:
		td->td_frame->tf_eax = td->td_retval[0];
		td->td_frame->tf_edx = td->td_retval[1];
		td->td_frame->tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
		 * 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		td->td_frame->tf_eip -= td->td_frame->tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
		td->td_frame->tf_eax = SV_ABI_ERRNO(td->td_proc, error);
		td->td_frame->tf_eflags |= PSL_C;
		break;
	}
}

/*
 * Initialize machine state, mostly pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to the fork_return(), which
 * finalizes the thread state and handles peculiarities of the first
 * return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
	    PCB_KERNNPX);
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
	    cpu_max_ext_state_size);

	/*
	 * Create a new fresh stack for the new thread.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * If the current thread has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame.  Otherwise, the new thread will
	 * receive a (likely unexpected) SIGTRAP when it executes the first
	 * instruction after returning to userland.
	 */
	td->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;		    /* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;			    /* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_cr3:	cloned above.
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the entry
	 * function.
	 */
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
	td->td_frame->tf_eip = (int)entry;

	/* Return address sentinel value to stop stack unwinding. */
	suword((void *)td->td_frame->tf_esp, 0);

	/* Pass the argument to the entry point. */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)arg);
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct segment_descriptor sd;
	uint32_t base;

	/*
	 * Construct a descriptor and store it in the pcb for
	 * the next context switch.  Also store it in the gdt
	 * so that the reload of %gs will activate it
	 * at return to userland.
	 */
	base = (uint32_t)tls_base;
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sd.sd_hilimit = 0xf;
	sd.sd_type = SDT_MEMRWA;
	sd.sd_dpl = SEL_UPL;
	sd.sd_p = 1;
	sd.sd_xx = 0;
	sd.sd_def32 = 1;
	sd.sd_gran = 1;
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	if (td == curthread) {
		PCPU_GET(fsgs_gdt)[1] = sd;
		load_gs(GSEL(GUGS_SEL, SEL_UPL));
	}
	critical_exit();
	return (0);
}

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

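/*
 * Outline of the SMP reset hand-off implemented by cpu_reset() and
 * cpu_reset_proxy() below: when cpu_reset() is invoked on an AP, it
 * stops the other CPUs, records its own CPU id in cpu_reset_proxyid,
 * installs cpu_reset_proxy() as the restart function and releases the
 * stopped BSP.  The restarted BSP runs cpu_reset_proxy(), which stops
 * the calling AP again and performs the actual reset through
 * cpu_reset_real(), so the final reset always executes on CPU 0.
 */
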
#ifdef SMP
static void
cpu_reset_proxy(void)
{
	cpuset_t tcrp;

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to see that we've started */
	CPU_SETOF(cpu_reset_proxyid, &tcrp);
	stop_cpus(tcrp);
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif

void
cpu_reset(void)
{
#ifdef SMP
	cpuset_t map;
	u_int cnt;

	if (smp_started) {
		map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &map);
		CPU_NAND(&map, &stopped_cpus);
		if (!CPU_EMPTY(&map)) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);
		}

		if (PCPU_GET(cpuid) != 0) {
			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");

			/* Restart CPU #0. */
			/* XXX: restart_cpus(1 << 0); */
			CPU_SETOF(0, &started_cpus);
			wmb();

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			enable_intr();
			cpu_reset_proxy_active = 2;

			while (1);
			/* NOTREACHED */
		}

		DELAY(1000000);
	}
#endif
	cpu_reset_real();
	/* NOTREACHED */
}

static void
cpu_reset_real(void)
{
	struct region_descriptor null_idt;
	int b;

	disable_intr();
#ifdef CPU_ELAN
	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
#endif

	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
		outl(0xcfc, 0xf);
	}

#if !defined(BROKEN_KEYBOARD_RESET)
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
#endif

	/*
	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
	 * transitions from 0 to 1.  Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset.  We try a "hard" reset.  The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2.  The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * a reset.
	 */
	outb(0xcf9, 0x2);
	outb(0xcf9, 0x6);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	/*
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
	 * preserve bit 1 while setting bit 0.  We also must clear bit
	 * 0 before setting it if it isn't already clear.
	 */
	b = inb(0x92);
	if (b != 0xff) {
		if ((b & 0x1) != 0)
			outb(0x92, b & 0xfe);
		outb(0x92, b | 0x1);
		DELAY(500000);	/* wait 0.5 sec to see if that did it */
	}

	printf("No known reset method worked, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */

	/* Wipe the IDT. */
	null_idt.rd_limit = 0;
	null_idt.rd_base = 0;
	lidt(&null_idt);

	/* "good night, sweet prince .... <THUNK!>" */
	breakpoint();

	/* NOTREACHED */
	while(1);
}

/*
 * MD part of sf_buf_alloc(): map the sf_buf's page at the sf_buf's
 * kernel virtual address.
 */
void
sf_buf_map(struct sf_buf *sf, int flags)
{
	pt_entry_t opte, *ptep;

	/*
	 * Update the sf_buf's virtual-to-physical mapping, flushing the
	 * virtual address from the TLB.  Since the reference count for
	 * the sf_buf's old mapping was zero, that mapping is not
	 * currently in use.  Consequently, there is no need to exchange
	 * the old and new PTEs atomically, even under PAE.
	 */
	ptep = vtopte(sf->kva);
	opte = *ptep;
	*ptep = VM_PAGE_TO_PHYS(sf->m) | pgeflag | PG_RW | PG_V |
	    pmap_cache_bits(sf->m->md.pat_mode, 0);

	/*
	 * Avoid unnecessary TLB invalidations: If the sf_buf's old
	 * virtual-to-physical mapping was not used, then any processor
	 * that has invalidated the sf_buf's virtual address from its TLB
	 * since the last used mapping need not invalidate again.
	 */
#ifdef SMP
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		CPU_ZERO(&sf->cpumask);

	sf_buf_shootdown(sf, flags);
#else
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		pmap_invalidate_page(kernel_pmap, sf->kva);
#endif
}

#ifdef SMP
void
sf_buf_shootdown(struct sf_buf *sf, int flags)
{
	cpuset_t other_cpus;
	u_int cpuid;

	sched_pin();
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
		CPU_SET(cpuid, &sf->cpumask);
		invlpg(sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_NAND(&other_cpus, &sf->cpumask);
		if (!CPU_EMPTY(&other_cpus)) {
			CPU_OR(&sf->cpumask, &other_cpus);
			smp_masked_invlpg(other_cpus, sf->kva);
		}
	}
	sched_unpin();
}
#endif

/*
 * MD part of sf_buf_free().
 */
int
sf_buf_unmap(struct sf_buf *sf)
{

	return (0);
}

static void
sf_buf_invalidate(struct sf_buf *sf)
{
	vm_page_t m = sf->m;

	/*
	 * Use pmap_qenter to update the pte for
	 * existing mapping, in particular, the PAT
	 * settings are recalculated.
	 */
	pmap_qenter(sf->kva, &m, 1);
	pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE, FALSE);
}

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly old) mapping of the page by an sf_buf exists.  Returns
 * TRUE when the mapping was found and the cache invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{

	return (sf_buf_process_page(m, sf_buf_invalidate));
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return 0;
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}