Lines matching +full:mm +full:- +full:0 (x86 LDT management, arch/x86/kernel/ldt.c). Only lines containing the search tokens are listed, which is why the source line numbers below are non-contiguous.

1 // SPDX-License-Identifier: GPL-2.0
19 #include <linux/mm.h>
42 void load_mm_ldt(struct mm_struct *mm)
47 ldt = READ_ONCE(mm->context.ldt);
50 * Any change to mm->context.ldt is followed by an IPI to all
51 * CPUs with the mm active. The LDT will not be freed until
65 if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
67 * Whoops -- either the new LDT isn't mapped
68 * (if slot == -1) or is mapped into a bogus
76 * If page table isolation is enabled, ldt->entries
79 * at ldt_slot_va(ldt->slot).
81 set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
83 set_ldt(ldt->entries, ldt->nr_entries);
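ldt_slot_va() itself is not among the matched lines. Under page table isolation the kernel keeps two fixed virtual-address slots for the LDT and alternates between them (hence the slot > 1 check above). A minimal sketch of such a helper, assuming the LDT_BASE_ADDR/LDT_SLOT_STRIDE names from the x86 memory-layout headers rather than anything in this listing:

static inline void *ldt_slot_va(int slot)
{
	/* Sketch only: slot 0 or 1 selects one of two fixed LDT mapping
	 * areas; the exact constants live in the x86 layout headers and
	 * are assumed here, not taken from the matched lines above. */
	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}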
93 * Load the LDT if either the old or new mm had an LDT.
95 * An mm will never go from having an LDT to not having an LDT. Two
98 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
99 * then prev->context.ldt will also be non-NULL.
109 if (unlikely((unsigned long)prev->context.ldt |
110 (unsigned long)next->context.ldt))
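The body of that conditional does not contain the search tokens and so is not listed; given load_mm_ldt() at the top of this listing, the branch in switch_ldt() reads, in sketch form:

	/* Sketch of the elided branch: reload the LDT for the incoming mm
	 * whenever either mm has ever had an LDT installed. */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);

The bitwise OR lets both pointers be tested with a single branch; since an mm never goes from having an LDT to not having one (per the comment above), a nonzero result is enough to know a reload may be needed.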
138 struct mm_struct *mm = __mm;
140 if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
143 load_mm_ldt(mm);
165 * Xen is very picky: it requires a page-aligned LDT that has no
171 new_ldt->entries = __vmalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
173 new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
175 if (!new_ldt->entries) {
181 new_ldt->slot = -1;
183 new_ldt->nr_entries = num_entries;
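Only the two allocation calls in alloc_ldt_struct() matched; the branch that picks between them is elided. From the Xen comment above and the two calls, the surrounding logic is roughly the following (alloc_size, standing for num_entries * LDT_ENTRY_SIZE, is an assumption here):

	/* Sketch of the elided branch: use vmalloc for multi-page LDTs,
	 * a single zeroed page otherwise; both paths return zeroed memory
	 * so no LDT page carries stale trailing bytes (the Xen requirement
	 * quoted above). */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = __vmalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);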
189 static void do_sanity_check(struct mm_struct *mm,
193 if (mm->context.ldt) {
195 * We already had an LDT. The top-level entry should already
220 if (pgd->pgd == 0)
234 static void map_ldt_struct_to_user(struct mm_struct *mm)
236 pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
243 if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
247 static void sanity_check_ldt_mapping(struct mm_struct *mm)
249 pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
256 had_kernel = (k_pmd->pmd != 0);
257 had_user = (u_pmd->pmd != 0);
259 do_sanity_check(mm, had_kernel, had_user);
264 static void map_ldt_struct_to_user(struct mm_struct *mm)
266 pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
268 if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
272 static void sanity_check_ldt_mapping(struct mm_struct *mm)
274 pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
275 bool had_kernel = (pgd->pgd != 0);
276 bool had_user = (kernel_to_user_pgdp(pgd)->pgd != 0);
278 do_sanity_check(mm, had_kernel, had_user);
285 * usermode tables for the given mm.
288 map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
296 return 0;
302 WARN_ON(ldt->slot != -1);
305 sanity_check_ldt_mapping(mm);
307 is_vmalloc = is_vmalloc_addr(ldt->entries);
309 nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
311 for (i = 0; i < nr_pages; i++) {
313 const void *src = (char *)ldt->entries + offset;
324 * and account for them in this mm.
326 ptep = get_locked_pte(mm, va, &ptl);
328 return -ENOMEM;
338 set_pte_at(mm, va, ptep, pte);
342 /* Propagate LDT mapping to the user page-table */
343 map_ldt_struct_to_user(mm);
345 ldt->slot = slot;
346 return 0;
349 static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
361 nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
363 for (i = 0; i < nr_pages; i++) {
368 va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
369 ptep = get_locked_pte(mm, va, &ptl);
371 pte_clear(mm, va, ptep);
376 va = (unsigned long)ldt_slot_va(ldt->slot);
377 flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
383 map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
385 return 0;
388 static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
393 static void free_ldt_pgtables(struct mm_struct *mm)
405 * page-tables, it also works out for kernel mappings on x86.
407 * range-tracking logic in __tlb_adjust_range().
409 tlb_gather_mmu_fullmm(&tlb, mm);
418 paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
421 static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
423 mutex_lock(&mm->context.lock);
426 smp_store_release(&mm->context.ldt, ldt);
428 /* Activate the LDT for all CPUs using current's mm. */
429 on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
431 mutex_unlock(&mm->context.lock);
439 paravirt_free_ldt(ldt->entries, ldt->nr_entries);
440 if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
441 vfree_atomic(ldt->entries);
443 free_page((unsigned long)ldt->entries);
451 int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
454 int retval = 0;
457 return 0;
459 mutex_lock(&old_mm->context.lock);
460 if (!old_mm->context.ldt)
463 new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
465 retval = -ENOMEM;
469 memcpy(new_ldt->entries, old_mm->context.ldt->entries,
470 new_ldt->nr_entries * LDT_ENTRY_SIZE);
473 retval = map_ldt_struct(mm, new_ldt, 0);
475 free_ldt_pgtables(mm);
479 mm->context.ldt = new_ldt;
482 mutex_unlock(&old_mm->context.lock);
487 * No need to lock the MM as we are the last user
489 * 64bit: Don't touch the LDT register - we're already in the next thread.
491 void destroy_context_ldt(struct mm_struct *mm)
493 free_ldt_struct(mm->context.ldt);
494 mm->context.ldt = NULL;
497 void ldt_arch_exit_mmap(struct mm_struct *mm)
499 free_ldt_pgtables(mm);
504 struct mm_struct *mm = current->mm;
508 down_read(&mm->context.ldt_usr_sem);
510 if (!mm->context.ldt) {
511 retval = 0;
518 entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
522 if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
523 retval = -EFAULT;
528 /* Zero-fill the rest and pretend we read bytecount bytes. */
529 if (clear_user(ptr + entries_size, bytecount - entries_size)) {
530 retval = -EFAULT;
537 up_read(&mm->context.ldt_usr_sem);
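The read_ldt() lines above back the read sub-function of the modify_ldt(2) syscall (case 0 in the dispatch at the end of this listing). A self-contained userspace sketch that exercises it; glibc has no wrapper, so the raw syscall number is used, and the buffer size is just the architectural maximum from <asm/ldt.h>:

#include <asm/ldt.h>		/* LDT_ENTRIES, LDT_ENTRY_SIZE */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* func 0 = read: the kernel copies nr_entries * LDT_ENTRY_SIZE
	 * bytes and zero-fills the rest, as read_ldt() above shows. */
	static unsigned char buf[LDT_ENTRIES * LDT_ENTRY_SIZE];
	long n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));

	if (n < 0) {
		perror("modify_ldt(read)");
		return 1;
	}
	printf("read %ld bytes of LDT\n", n);
	return 0;
}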
552 return -EFAULT;
563 * Xen PV does not implement ESPFIX64, which means that 16-bit
566 * provides compelling evidence that allowing broken 16-bit segments
567 * is worthwhile, disallow 16-bit segments under Xen PV.
570 pr_info_once("Warning: 16-bit segments do not work correctly in a Xen PV guest\n");
580 struct mm_struct *mm = current->mm;
587 error = -EINVAL;
590 error = -EFAULT;
594 error = -EINVAL;
600 if (ldt_info.seg_not_present == 0)
607 memset(&ldt, 0, sizeof(ldt));
610 error = -EINVAL;
616 ldt.avl = 0;
619 if (down_write_killable(&mm->context.ldt_usr_sem))
620 return -EINTR;
622 old_ldt = mm->context.ldt;
623 old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
626 error = -ENOMEM;
632 memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);
634 new_ldt->entries[ldt_info.entry_number] = ldt;
643 error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
651 free_ldt_pgtables(mm);
656 install_ldt(mm, new_ldt);
657 unmap_ldt_struct(mm, old_ldt);
659 error = 0;
662 up_write(&mm->context.ldt_usr_sem);
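The write_ldt() lines above are reached through modify_ldt(2) as well; func 0x11 takes the struct user_desc layout from <asm/ldt.h> (the oldmode=0 path shown in the dispatch below). A hedged userspace sketch that installs one flat 32-bit data descriptor in LDT slot 0; the particular base/limit values are arbitrary illustration, not anything the kernel requires:

#include <asm/ldt.h>		/* struct user_desc */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc d;

	memset(&d, 0, sizeof(d));
	d.entry_number    = 0;		/* LDT slot to fill */
	d.base_addr       = 0;		/* flat segment, for illustration */
	d.limit           = 0xfffff;	/* with limit_in_pages: 4 GiB */
	d.limit_in_pages  = 1;
	d.seg_32bit       = 1;
	d.contents        = 0;		/* data, expand-up */
	d.read_exec_only  = 0;
	d.seg_not_present = 0;
	d.useable         = 1;

	/* func 0x11 = write with struct user_desc semantics. */
	if (syscall(SYS_modify_ldt, 0x11, &d, sizeof(d)) != 0) {
		perror("modify_ldt(write)");
		return 1;
	}
	printf("installed LDT entry %u\n", d.entry_number);
	return 0;
}

Loading the resulting selector (index 0 in the LDT with RPL 3, i.e. 0x7) into a segment register is a separate step; the 16-bit variants of that step are what the allow_16bit_segments() comment above is concerned with.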
670 int ret = -ENOSYS;
673 case 0:
682 case 0x11:
683 ret = write_ldt(ptr, bytecount, 0);
689 * 'int'. This cast gives us an int-sized value in %rax
691 * the compiler does not try to sign-extend the negative
693 * taking the value from int->long.
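The matched lines stop inside the closing comment of the syscall. Read together with read_ldt()/write_ldt() above, the dispatch and the return that the comment describes are roughly as follows (a reconstruction, not part of the matched output; the legacy sub-functions 1 and 2 are omitted because they did not match the search):

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);	/* oldmode = 0 */
		break;
	}
	/* Return an int-sized value so a negative errno is not
	 * sign-extended to 64 bits on the way out (per the comment). */
	return (unsigned int)ret;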