/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
#include "opt_isa.h"
#include "opt_npx.h"
#include "opt_reset.h"
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

_Static_assert(__OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "__OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");
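/*
 * For orientation, a rough sketch (derived from the address arithmetic in
 * the helpers below and in cpu_thread_alloc(); descriptive only, not an
 * authoritative layout definition) of how the top of a thread's kernel
 * stack is carved up:
 *
 *      td_kstack + td_kstack_pages * PAGE_SIZE        top of kernel stack
 *      [ user FPU/XSAVE save area,
 *        roundup2(cpu_max_ext_state_size,
 *        XSAVE_AREA_ALIGN) bytes ]             <- get_pcb_user_save_td()
 *      [ struct pcb ]                          <- get_pcb_td()
 *      [ VM86_STACK_SPACE ]
 *      [ struct trapframe ]                    <- td_frame
 *      ...                                     remaining kernel stack
 */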
union savefpu *
get_pcb_user_save_td(struct thread *td)
{
        vm_offset_t p;

        p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
            roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
        KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
        return ((union savefpu *)p);
}

union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
        vm_offset_t p;

        p = (vm_offset_t)(pcb + 1);
        return ((union savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{
        vm_offset_t p;

        p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
            roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
            sizeof(struct pcb);
        return ((struct pcb *)p);
}

void *
alloc_fpusave(int flags)
{
        void *res;
        struct savefpu_ymm *sf;

        res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
        if (use_xsave) {
                sf = (struct savefpu_ymm *)res;
                bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
                sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
        }
        return (res);
}

/*
 * Common code shared between cpu_fork() and cpu_copy_thread() for
 * initializing a thread.
 */
static void
copy_thread(struct thread *td1, struct thread *td2)
{
        struct pcb *pcb2;

        pcb2 = td2->td_pcb;

        /* Ensure that td1's pcb is up to date for user threads. */
        if ((td2->td_pflags & TDP_KTHREAD) == 0) {
                MPASS(td1 == curthread);
                td1->td_pcb->pcb_gs = rgs();
                critical_enter();
                if (PCPU_GET(fpcurthread) == td1)
                        npxsave(td1->td_pcb->pcb_save);
                critical_exit();
        }

        /* Copy td1's pcb */
        bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

        /* Properly initialize pcb_save */
        pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);

        /* Kernel threads start with clean NPX and segment bases. */
        if ((td2->td_pflags & TDP_KTHREAD) != 0) {
                pcb2->pcb_gs = _udatasel;
                set_fsbase(td2, 0);
                set_gsbase(td2, 0);
                pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
                    PCB_KERNNPX | PCB_KERNNPX_THR);
        } else {
                MPASS((pcb2->pcb_flags & (PCB_KERNNPX | PCB_KERNNPX_THR)) == 0);
                bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
                    cpu_max_ext_state_size);
        }

        /*
         * Set registers for trampoline to user mode.  Leave space for the
         * return address on stack.  These are the kernel mode register values.
         */
        pcb2->pcb_edi = 0;
        pcb2->pcb_esi = (int)fork_return;       /* trampoline arg */
        pcb2->pcb_ebp = 0;
        pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *); /* trampoline arg */
        pcb2->pcb_ebx = (int)td2;               /* trampoline arg */
        pcb2->pcb_eip = (int)fork_trampoline + setidt_disp;
        /*
         * If we didn't copy the pcb, we'd need to do the following registers:
         * pcb2->pcb_cr3:       cloned above.
         * pcb2->pcb_dr*:       cloned above.
         * pcb2->pcb_savefpu:   cloned above.
         * pcb2->pcb_flags:     cloned above.
         * pcb2->pcb_onfault:   cloned above (always NULL here?).
         * pcb2->pcb_gs:        cloned above.
         * pcb2->pcb_ext:       cleared below.
         */
        pcb2->pcb_ext = NULL;

        /* Setup to release spin count in fork_exit(). */
        td2->td_md.md_spinlock_count = 1;
        td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}
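/*
 * For reference, a sketch (not the actual assembler) of what
 * fork_trampoline() does with the values staged in the pcb by
 * copy_thread() above:
 *
 *      fork_exit((void (*)(void *, struct trapframe *))pcb->pcb_esi,
 *          (void *)pcb->pcb_ebx, td->td_frame);
 *
 * i.e. %esi carries the callout (fork_return() unless overridden by
 * cpu_fork_kthread_handler()), %ebx carries its argument, and %esp points
 * just below the trap frame.
 */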
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
        struct proc *p1;
        struct pcb *pcb2;
        struct mdproc *mdp2;

        p1 = td1->td_proc;
        if ((flags & RFPROC) == 0) {
                if ((flags & RFMEM) == 0) {
                        /* unshare user LDT */
                        struct mdproc *mdp1 = &p1->p_md;
                        struct proc_ldt *pldt, *pldt1;

                        mtx_lock_spin(&dt_lock);
                        if ((pldt1 = mdp1->md_ldt) != NULL &&
                            pldt1->ldt_refcnt > 1) {
                                pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
                                if (pldt == NULL)
                                        panic("could not copy LDT");
                                mdp1->md_ldt = pldt;
                                set_user_ldt(mdp1);
                                user_ldt_deref(pldt1);
                        } else
                                mtx_unlock_spin(&dt_lock);
                }
                return;
        }

        /* Point the pcb to the top of the stack */
        pcb2 = get_pcb_td(td2);
        td2->td_pcb = pcb2;

        copy_thread(td1, td2);

        /* Reset debug registers in the new process */
        x86_clear_dbregs(pcb2);

        /* Point mdproc and then copy over td1's contents */
        mdp2 = &p2->p_md;
        bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

        /*
         * Copy the trap frame for the return to user mode as if from a
         * syscall.  This copies most of the user mode register values.
         * The -VM86_STACK_SPACE (-16) is so we can expand the trapframe
         * if we go to vm86.
         */
        td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb -
            VM86_STACK_SPACE) - 1;
        bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

        /* Set child return values. */
        p2->p_sysent->sv_set_fork_retval(td2);

        /*
         * If the parent process has the trap bit set (i.e. a debugger
         * had single stepped the process to the system call), we need
         * to clear the trap flag from the new frame.
         */
        td2->td_frame->tf_eflags &= ~PSL_T;

        /* Set cr3 for the new process. */
        pcb2->pcb_cr3 = pmap_get_cr3(vmspace_pmap(p2->p_vmspace));

        /*
         * XXX don't copy the i/o pages.  this should probably be fixed.
         */
        pcb2->pcb_ext = NULL;

        /* Copy the LDT, if necessary. */
        mtx_lock_spin(&dt_lock);
        if (mdp2->md_ldt != NULL) {
                if (flags & RFMEM) {
                        mdp2->md_ldt->ldt_refcnt++;
                } else {
                        mdp2->md_ldt = user_ldt_alloc(mdp2,
                            mdp2->md_ldt->ldt_len);
                        if (mdp2->md_ldt == NULL)
                                panic("could not copy LDT");
                }
        }
        mtx_unlock_spin(&dt_lock);

        /*
         * Now, cpu_switch() can schedule the new process.
         * pcb_esp is loaded pointing to the cpu_switch() stack frame
         * containing the return address when exiting cpu_switch.
         * This will normally be to fork_trampoline(), which will have
         * %ebx loaded with the new proc's pointer.  fork_trampoline()
         * will set up a stack to call fork_return(p, frame); to complete
         * the return to user-mode.
         */
}
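/*
 * A note on x86_set_fork_retval() below (descriptive only): it applies the
 * i386 fork(2) register convention in the child: %eax is 0 (the child's
 * return value), the carry flag is cleared to indicate success, and %edx is
 * set to 1, which historical System V ABIs used to distinguish the child
 * from the parent (where %edx is 0).
 */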
void
x86_set_fork_retval(struct thread *td)
{
        struct trapframe *frame = td->td_frame;

        frame->tf_eax = 0;              /* Child returns zero */
        frame->tf_eflags &= ~PSL_C;     /* success */
        frame->tf_edx = 1;              /* System V emulation */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
        /*
         * Note that the trap frame follows the args, so the function
         * is really called like this:  func(arg, frame);
         */
        td->td_pcb->pcb_esi = (int)func;        /* function */
        td->td_pcb->pcb_ebx = (int)arg;         /* first arg */
}

void
cpu_exit(struct thread *td)
{

        /*
         * If this process has a custom LDT, release it.  Reset pcb_gs
         * and %gs before we free it in case they refer to an LDT entry.
         */
        mtx_lock_spin(&dt_lock);
        if (td->td_proc->p_md.md_ldt) {
                td->td_pcb->pcb_gs = _udatasel;
                load_gs(_udatasel);
                user_ldt_free(td);
        } else
                mtx_unlock_spin(&dt_lock);
}

void
cpu_thread_exit(struct thread *td)
{

        critical_enter();
        if (td == PCPU_GET(fpcurthread))
                npxdrop();
        critical_exit();

        /* Disable any hardware breakpoints. */
        if (td->td_pcb->pcb_flags & PCB_DBREGS) {
                reset_dbregs();
                td->td_pcb->pcb_flags &= ~PCB_DBREGS;
        }
}

void
cpu_thread_clean(struct thread *td)
{
        struct pcb *pcb;

        pcb = td->td_pcb;
        if (pcb->pcb_ext != NULL) {
                /* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
                /*
                 * XXX do we need to move the TSS off the allocated pages
                 * before freeing them?  (not done here)
                 */
                pmap_trm_free(pcb->pcb_ext, ctob(IOPAGES + 1));
                pcb->pcb_ext = NULL;
        }
}

void
cpu_thread_alloc(struct thread *td)
{
        struct pcb *pcb;
        struct xstate_hdr *xhdr;

        td->td_pcb = pcb = get_pcb_td(td);
        td->td_frame = (struct trapframe *)((caddr_t)pcb -
            VM86_STACK_SPACE) - 1;
        pcb->pcb_ext = NULL;
        pcb->pcb_save = get_pcb_user_save_pcb(pcb);
        if (use_xsave) {
                xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
                bzero(xhdr, sizeof(*xhdr));
                xhdr->xstate_bv = xsave_mask;
        }
}

void
cpu_thread_free(struct thread *td)
{

        cpu_thread_clean(td);
}

bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{

        return (true);
}

int
cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
    int com __unused, void *data __unused)
{

        return (EINVAL);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{

        switch (error) {
        case 0:
                td->td_frame->tf_eax = td->td_retval[0];
                td->td_frame->tf_edx = td->td_retval[1];
                td->td_frame->tf_eflags &= ~PSL_C;
                break;

        case ERESTART:
                /*
                 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
                 * 0x80 is 2 bytes.  We saved this in tf_err.
                 */
                td->td_frame->tf_eip -= td->td_frame->tf_err;
                break;

        case EJUSTRETURN:
                break;

        default:
                td->td_frame->tf_eax = error;
                td->td_frame->tf_eflags |= PSL_C;
                break;
        }
}
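/*
 * For reference, a hypothetical usermode syscall stub illustrating the
 * convention set up by cpu_set_syscall_retval() above (a sketch only,
 * not copied from libc):
 *
 *      int     $0x80
 *      jc      1f              # PSL_C set: %eax holds an errno value
 *      ret                     # PSL_C clear: result in %eax (and %edx)
 * 1:
 *      jmp     error_handler   # record the errno and return -1
 */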
/*
 * Initialize machine state, mostly pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go to fork_return(), which finalizes the
 * thread state and handles the peculiarities of the first return to
 * userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
        copy_thread(td0, td);

        /*
         * Copy user general-purpose registers.
         *
         * Some of these registers are rewritten by cpu_set_upcall()
         * and linux_set_upcall().
         */
        bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

        /*
         * If the current thread has the trap bit set (i.e. a debugger had
         * single stepped the process to the system call), we need to clear
         * the trap flag from the new frame.  Otherwise, the new thread will
         * receive a (likely unexpected) SIGTRAP when it executes the first
         * instruction after returning to userland.
         */
        td->td_frame->tf_eflags &= ~PSL_T;
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
int
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

        /*
         * Do any extra cleaning that needs to be done.
         * The thread may have optional components
         * that are not present in a fresh thread.
         * This may be a recycled thread so make it look
         * as though it's newly allocated.
         */
        cpu_thread_clean(td);

        /*
         * Set the trap frame to point at the beginning of the entry
         * function.
         */
        td->td_frame->tf_ebp = 0;
        td->td_frame->tf_esp =
            (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
        td->td_frame->tf_eip = (int)entry;

        /* Return address sentinel value to stop stack unwinding. */
        if (suword((void *)td->td_frame->tf_esp, 0) != 0)
                return (EFAULT);

        /* Pass the argument to the entry point. */
        if (suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
            (int)arg) != 0)
                return (EFAULT);
        return (0);
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
        struct segment_descriptor sd;
        uint32_t base;

        /*
         * Construct a descriptor and store it in the pcb for
         * the next context switch.  Also store it in the gdt
         * so that the load of tf_fs into %fs will activate it
         * at return to userland.
         */
        base = (uint32_t)tls_base;
        sd.sd_lobase = base & 0xffffff;
        sd.sd_hibase = (base >> 24) & 0xff;
        sd.sd_lolimit = 0xffff;         /* 4GB limit, wraps around */
        sd.sd_hilimit = 0xf;
        sd.sd_type = SDT_MEMRWA;
        sd.sd_dpl = SEL_UPL;
        sd.sd_p = 1;
        sd.sd_xx = 0;
        sd.sd_def32 = 1;
        sd.sd_gran = 1;
        critical_enter();
        /* set %gs */
        td->td_pcb->pcb_gsd = sd;
        if (td == curthread) {
                PCPU_GET(fsgs_gdt)[1] = sd;
                load_gs(GSEL(GUGS_SEL, SEL_UPL));
        }
        critical_exit();
        return (0);
}

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
        vm_paddr_t pa;

        pa = pmap_kextract((vm_offset_t)addr);
        if (pa == 0)
                panic("kvtop: zero page frame");
        return (pa);
}
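/*
 * MD sf_buf support.  Roughly (a descriptive note; see the MI code in
 * kern/subr_sfbuf.c for the authoritative picture): an sf_buf lends a
 * vm_page a temporary kernel virtual address, and sf->cpumask records
 * which CPUs have been brought up to date with the current mapping, so
 * that sf_buf_shootdown() only needs to invalidate the address on CPUs
 * that may still cache a stale translation.
 */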
/*
 * MD part of sf_buf_alloc(): map the sf_buf's page at its assigned kernel
 * virtual address.
 */
void
sf_buf_map(struct sf_buf *sf, int flags)
{

        pmap_sf_buf_map(sf);
#ifdef SMP
        sf_buf_shootdown(sf, flags);
#endif
}

#ifdef SMP
static void
sf_buf_shootdown_curcpu_cb(pmap_t pmap __unused,
    vm_offset_t addr1 __unused, vm_offset_t addr2 __unused)
{
}

void
sf_buf_shootdown(struct sf_buf *sf, int flags)
{
        cpuset_t other_cpus;
        u_int cpuid;

        sched_pin();
        cpuid = PCPU_GET(cpuid);
        if (!CPU_ISSET(cpuid, &sf->cpumask)) {
                CPU_SET(cpuid, &sf->cpumask);
                invlpg(sf->kva);
        }
        if ((flags & SFB_CPUPRIVATE) == 0) {
                other_cpus = all_cpus;
                CPU_CLR(cpuid, &other_cpus);
                CPU_ANDNOT(&other_cpus, &other_cpus, &sf->cpumask);
                if (!CPU_EMPTY(&other_cpus)) {
                        CPU_OR(&sf->cpumask, &sf->cpumask, &other_cpus);
                        smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap,
                            sf_buf_shootdown_curcpu_cb);
                }
        }
        sched_unpin();
}
#endif

/*
 * MD part of sf_buf_free().
 */
int
sf_buf_unmap(struct sf_buf *sf)
{

        return (0);
}

static void
sf_buf_invalidate(struct sf_buf *sf)
{
        vm_page_t m = sf->m;

        /*
         * Use pmap_qenter to update the pte for
         * existing mapping, in particular, the PAT
         * settings are recalculated.
         */
        pmap_qenter(sf->kva, &m, 1);
        pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE);
}

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly old) mapping of the page by an sf buffer exists.  Returns
 * TRUE when a mapping was found and the cache invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{

        return (sf_buf_process_page(m, sf_buf_invalidate));
}