/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
#include "opt_isa.h"
#include "opt_npx.h"
#include "opt_reset.h"
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

_Static_assert(__OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "__OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");

union savefpu *
get_pcb_user_save_td(struct thread *td)
{
        vm_offset_t p;

        p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
            roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
        KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
        return ((union savefpu *)p);
}

union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
        vm_offset_t p;

        p = (vm_offset_t)(pcb + 1);
        return ((union savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{
        vm_offset_t p;

        p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
            roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
            sizeof(struct pcb);
        return ((struct pcb *)p);
}

void *
alloc_fpusave(int flags)
{
        void *res;
        struct savefpu_ymm *sf;

        res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
        if (use_xsave) {
                sf = (struct savefpu_ymm *)res;
                bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
                sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
        }
        return (res);
}

/*
 * Common code shared between cpu_fork() and cpu_copy_thread() for
 * initializing a thread.
 */
static void
copy_thread(struct thread *td1, struct thread *td2)
{
        struct pcb *pcb2;

        pcb2 = td2->td_pcb;

        /* Ensure that td1's pcb is up to date for user threads. */
        if ((td2->td_pflags & TDP_KTHREAD) == 0) {
                MPASS(td1 == curthread);
                td1->td_pcb->pcb_gs = rgs();
                critical_enter();
                if (PCPU_GET(fpcurthread) == td1)
                        npxsave(td1->td_pcb->pcb_save);
                critical_exit();
        }

        /* Copy td1's pcb */
        bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

        /* Properly initialize pcb_save */
        pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);

        /* Kernel threads start with clean NPX and segment bases. */
        if ((td2->td_pflags & TDP_KTHREAD) != 0) {
                pcb2->pcb_gs = _udatasel;
                set_fsbase(td2, 0);
                set_gsbase(td2, 0);
                pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
                    PCB_KERNNPX | PCB_KERNNPX_THR);
        } else {
                MPASS((pcb2->pcb_flags & (PCB_KERNNPX | PCB_KERNNPX_THR)) == 0);
                bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
                    cpu_max_ext_state_size);
        }

        /*
         * Set registers for trampoline to user mode.
         * Leave space for the return address on stack.  These are the
         * kernel mode register values.
         */
        pcb2->pcb_edi = 0;
        pcb2->pcb_esi = (int)fork_return;       /* trampoline arg */
        pcb2->pcb_ebp = 0;
        pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *); /* trampoline arg */
        pcb2->pcb_ebx = (int)td2;               /* trampoline arg */
        pcb2->pcb_eip = (int)fork_trampoline + setidt_disp;
        /*
         * If we didn't copy the pcb, we'd need to do the following registers:
         * pcb2->pcb_cr3:      cloned above.
         * pcb2->pcb_dr*:      cloned above.
         * pcb2->pcb_savefpu:  cloned above.
         * pcb2->pcb_flags:    cloned above.
         * pcb2->pcb_onfault:  cloned above (always NULL here?).
         * pcb2->pcb_gs:       cloned above.
         * pcb2->pcb_ext:      cleared below.
         */
        pcb2->pcb_ext = NULL;

        /* Setup to release spin count in fork_exit(). */
        td2->td_md.md_spinlock_count = 1;
        td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
        struct proc *p1;
        struct pcb *pcb2;
        struct mdproc *mdp2;

        p1 = td1->td_proc;
        if ((flags & RFPROC) == 0) {
                if ((flags & RFMEM) == 0) {
                        /* unshare user LDT */
                        struct mdproc *mdp1 = &p1->p_md;
                        struct proc_ldt *pldt, *pldt1;

                        mtx_lock_spin(&dt_lock);
                        if ((pldt1 = mdp1->md_ldt) != NULL &&
                            pldt1->ldt_refcnt > 1) {
                                pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
                                if (pldt == NULL)
                                        panic("could not copy LDT");
                                mdp1->md_ldt = pldt;
                                set_user_ldt(mdp1);
                                user_ldt_deref(pldt1);
                        } else
                                mtx_unlock_spin(&dt_lock);
                }
                return;
        }

        /* Point the pcb to the top of the stack */
        pcb2 = get_pcb_td(td2);
        td2->td_pcb = pcb2;

        copy_thread(td1, td2);

        /* Reset debug registers in the new process */
        x86_clear_dbregs(pcb2);

        /* Point mdproc and then copy over td1's contents */
        mdp2 = &p2->p_md;
        bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

        /*
         * Copy the trap frame for the return to user mode as if from a
         * syscall.  This copies most of the user mode register values.
         * The -VM86_STACK_SPACE (-16) is so we can expand the trapframe
         * if we go to vm86.
         */
        td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb -
            VM86_STACK_SPACE) - 1;
        bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

        /* Set child return values. */
        p2->p_sysent->sv_set_fork_retval(td2);

        /*
         * If the parent process has the trap bit set (i.e. a debugger
         * had single stepped the process to the system call), we need
         * to clear the trap flag from the new frame.
         */
        td2->td_frame->tf_eflags &= ~PSL_T;

        /* Set cr3 for the new process. */
        pcb2->pcb_cr3 = pmap_get_cr3(vmspace_pmap(p2->p_vmspace));

        /*
         * XXX don't copy the i/o pages.  this should probably be fixed.
         */
        pcb2->pcb_ext = NULL;

        /* Copy the LDT, if necessary. */
        mtx_lock_spin(&dt_lock);
        if (mdp2->md_ldt != NULL) {
                if (flags & RFMEM) {
                        mdp2->md_ldt->ldt_refcnt++;
                } else {
                        mdp2->md_ldt = user_ldt_alloc(mdp2,
                            mdp2->md_ldt->ldt_len);
                        if (mdp2->md_ldt == NULL)
                                panic("could not copy LDT");
                }
        }
        mtx_unlock_spin(&dt_lock);

        /*
         * Now, cpu_switch() can schedule the new process.
         * pcb_esp is loaded pointing to the cpu_switch() stack frame
         * containing the return address when exiting cpu_switch.
         * This will normally be to fork_trampoline(), which will have
         * %ebx loaded with the new proc's pointer.  fork_trampoline()
         * will set up a stack to call fork_return(p, frame); to complete
         * the return to user-mode.
         */
}

void
x86_set_fork_retval(struct thread *td)
{
        struct trapframe *frame = td->td_frame;

        frame->tf_eax = 0;              /* Child returns zero */
        frame->tf_eflags &= ~PSL_C;     /* success */
        frame->tf_edx = 1;              /* System V emulation */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
        /*
         * Note that the trap frame follows the args, so the function
         * is really called like this:  func(arg, frame);
         */
        td->td_pcb->pcb_esi = (int) func;       /* function */
        td->td_pcb->pcb_ebx = (int) arg;        /* first arg */
}

void
cpu_exit(struct thread *td)
{

        /*
         * If this process has a custom LDT, release it.  Reset pcb->pcb_gs
         * and %gs before we free it in case they refer to an LDT entry.
         */
        mtx_lock_spin(&dt_lock);
        if (td->td_proc->p_md.md_ldt) {
                td->td_pcb->pcb_gs = _udatasel;
                load_gs(_udatasel);
                user_ldt_free(td);
        } else
                mtx_unlock_spin(&dt_lock);
}

void
cpu_thread_exit(struct thread *td)
{

        critical_enter();
        if (td == PCPU_GET(fpcurthread))
                npxdrop();
        critical_exit();

        /* Disable any hardware breakpoints. */
        if (td->td_pcb->pcb_flags & PCB_DBREGS) {
                reset_dbregs();
                td->td_pcb->pcb_flags &= ~PCB_DBREGS;
        }
}

void
cpu_thread_clean(struct thread *td)
{
        struct pcb *pcb;

        pcb = td->td_pcb;
        if (pcb->pcb_ext != NULL) {
                /* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
                /*
                 * XXX do we need to move the TSS off the allocated pages
                 * before freeing them?
                 * (not done here)
                 */
                pmap_trm_free(pcb->pcb_ext, ctob(IOPAGES + 1));
                pcb->pcb_ext = NULL;
        }
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
        struct pcb *pcb;
        struct xstate_hdr *xhdr;

        td->td_pcb = pcb = get_pcb_td(td);
        td->td_frame = (struct trapframe *)((caddr_t)pcb -
            VM86_STACK_SPACE) - 1;
        pcb->pcb_ext = NULL;
        pcb->pcb_save = get_pcb_user_save_pcb(pcb);
        if (use_xsave) {
                xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
                bzero(xhdr, sizeof(*xhdr));
                xhdr->xstate_bv = xsave_mask;
        }
}

void
cpu_thread_free(struct thread *td)
{

        cpu_thread_clean(td);
}

bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{

        return (true);
}

int
cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
    int com __unused, void *data __unused)
{

        return (EINVAL);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{

        switch (error) {
        case 0:
                td->td_frame->tf_eax = td->td_retval[0];
                td->td_frame->tf_edx = td->td_retval[1];
                td->td_frame->tf_eflags &= ~PSL_C;
                break;

        case ERESTART:
                /*
                 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
                 * 0x80 is 2 bytes.  We saved this in tf_err.
                 */
                td->td_frame->tf_eip -= td->td_frame->tf_err;
                break;

        case EJUSTRETURN:
                break;

        default:
                td->td_frame->tf_eax = error;
                td->td_frame->tf_eflags |= PSL_C;
                break;
        }
}

/*
 * Initialize machine state, mostly pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to the fork_return(), which
 * finalizes the thread state and handles peculiarities of the first
 * return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
        copy_thread(td0, td);

        /*
         * Copy user general-purpose registers.
         *
         * Some of these registers are rewritten by cpu_set_upcall()
         * and linux_set_upcall().
         */
        bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

        /*
         * If the current thread has the trap bit set (i.e. a debugger had
         * single stepped the process to the system call), we need to clear
         * the trap flag from the new frame.  Otherwise, the new thread will
         * receive a (likely unexpected) SIGTRAP when it executes the first
         * instruction after returning to userland.
         */
        td->td_frame->tf_eflags &= ~PSL_T;
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
int
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

        /*
         * Do any extra cleaning that needs to be done.
         * The thread may have optional components
         * that are not present in a fresh thread.
         * This may be a recycled thread so make it look
         * as though it's newly allocated.
         */
        cpu_thread_clean(td);

        /*
         * Set the trap frame to point at the beginning of the entry
         * function.
         */
        td->td_frame->tf_ebp = 0;
        td->td_frame->tf_esp =
            (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
        td->td_frame->tf_eip = (int)entry;

        /* Return address sentinel value to stop stack unwinding. */
        if (suword((void *)td->td_frame->tf_esp, 0) != 0)
                return (EFAULT);

        /* Pass the argument to the entry point. */
        if (suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
            (int)arg) != 0)
                return (EFAULT);
        return (0);
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
        struct segment_descriptor sd;
        uint32_t base;

        /*
         * Construct a descriptor and store it in the pcb for
         * the next context switch.  Also store it in the gdt
         * so that the load of tf_fs into %fs will activate it
         * at return to userland.
         */
        base = (uint32_t)tls_base;
        sd.sd_lobase = base & 0xffffff;
        sd.sd_hibase = (base >> 24) & 0xff;
        sd.sd_lolimit = 0xffff;         /* 4GB limit, wraps around */
        sd.sd_hilimit = 0xf;
        sd.sd_type = SDT_MEMRWA;
        sd.sd_dpl = SEL_UPL;
        sd.sd_p = 1;
        sd.sd_xx = 0;
        sd.sd_def32 = 1;
        sd.sd_gran = 1;
        critical_enter();
        /* set %gs */
        td->td_pcb->pcb_gsd = sd;
        if (td == curthread) {
                PCPU_GET(fsgs_gdt)[1] = sd;
                load_gs(GSEL(GUGS_SEL, SEL_UPL));
        }
        critical_exit();
        return (0);
}

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
        vm_paddr_t pa;

        pa = pmap_kextract((vm_offset_t)addr);
        if (pa == 0)
                panic("kvtop: zero page frame");
        return (pa);
}

/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
void
sf_buf_map(struct sf_buf *sf, int flags)
{

        pmap_sf_buf_map(sf);
#ifdef SMP
        sf_buf_shootdown(sf, flags);
#endif
}

#ifdef SMP
static void
sf_buf_shootdown_curcpu_cb(pmap_t pmap __unused,
    vm_offset_t addr1 __unused, vm_offset_t addr2 __unused)
{
}

void
sf_buf_shootdown(struct sf_buf *sf, int flags)
{
        cpuset_t other_cpus;
        u_int cpuid;

        sched_pin();
        cpuid = PCPU_GET(cpuid);
        if (!CPU_ISSET(cpuid, &sf->cpumask)) {
                CPU_SET(cpuid, &sf->cpumask);
                invlpg(sf->kva);
        }
        if ((flags & SFB_CPUPRIVATE) == 0) {
                other_cpus = all_cpus;
                CPU_CLR(cpuid, &other_cpus);
                CPU_ANDNOT(&other_cpus, &other_cpus, &sf->cpumask);
                if (!CPU_EMPTY(&other_cpus)) {
                        CPU_OR(&sf->cpumask, &sf->cpumask, &other_cpus);
                        smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap,
                            sf_buf_shootdown_curcpu_cb);
                }
        }
        sched_unpin();
}
#endif

/*
 * MD part of sf_buf_free().
 */
int
sf_buf_unmap(struct sf_buf *sf)
{

        return (0);
}

static void
sf_buf_invalidate(struct sf_buf *sf)
{
        vm_page_t m = sf->m;

        /*
         * Use pmap_qenter to update the pte for
         * existing mapping, in particular, the PAT
         * settings are recalculated.
         */
        pmap_qenter(sf->kva, &m, 1);
        pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE);
}

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly old) mapping of the page by the sf buffer exists.  Returns
 * TRUE when the mapping was found and the cache invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{

        return (sf_buf_process_page(m, sf_buf_invalidate));
}