mmu_pv.c (3b5d1afd1f13bcab85eaa28223ad396694f929e3) → mmu_pv.c (a13f2ef168cb2a033a284eb841bcc481ffbc90cf)
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Xen mmu operations
5 *
6 * This file contains the various mmu fetch and update operations.
7 * The most important job they must perform is the mapping between the
8 * domain's pfn and the overall machine mfns.

--- 72 unchanged lines hidden ---

81#include <xen/interface/version.h>
82#include <xen/interface/memory.h>
83#include <xen/hvc-console.h>
84
85#include "multicalls.h"
86#include "mmu.h"
87#include "debugfs.h"
88
89#ifdef CONFIG_X86_32
90/*
91 * Identity map, in addition to plain kernel map. This needs to be
92 * large enough to allocate page table pages to allocate the rest.
93 * Each page can map 2MB.
94 */
95#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
96static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
97#endif
98#ifdef CONFIG_X86_64
99/* l3 pud for userspace vsyscall mapping */
100static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
89/* l3 pud for userspace vsyscall mapping */
90static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
101#endif /* CONFIG_X86_64 */
102
103/*
104 * Protects atomic reservation decrease/increase against concurrent increases.
105 * Also protects non-atomic updates of current_pages and balloon lists.
106 */
107static DEFINE_SPINLOCK(xen_reservation_lock);
108
109/*

--- 165 unchanged lines hidden ---

275 return true;
276}
277
278static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
279{
280 if (!xen_batched_set_pte(ptep, pteval)) {
281 /*
282 * Could call native_set_pte() here and trap and
91
92/*
93 * Protects atomic reservation decrease/increase against concurrent increases.
94 * Also protects non-atomic updates of current_pages and balloon lists.
95 */
96static DEFINE_SPINLOCK(xen_reservation_lock);
97
98/*

--- 165 unchanged lines hidden ---

264 return true;
265}
266
267static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
268{
269 if (!xen_batched_set_pte(ptep, pteval)) {
270 /*
271 * Could call native_set_pte() here and trap and
283 * emulate the PTE write but with 32-bit guests this
284 * needs two traps (one for each of the two 32-bit
285 * words in the PTE) so do one hypercall directly
286 * instead.
272 * emulate the PTE write, but a hypercall is much cheaper.
287 */
288 struct mmu_update u;
289
290 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
291 u.val = pte_val_ma(pteval);
292 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
293 }
294}

--- 139 unchanged lines hidden ---

434 if (!xen_page_pinned(ptr)) {
435 *ptr = val;
436 return;
437 }
438
439 xen_set_pud_hyper(ptr, val);
440}
441
273 */
274 struct mmu_update u;
275
276 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
277 u.val = pte_val_ma(pteval);
278 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
279 }
280}

--- 139 unchanged lines hidden ---

420 if (!xen_page_pinned(ptr)) {
421 *ptr = val;
422 return;
423 }
424
425 xen_set_pud_hyper(ptr, val);
426}
427
442#ifdef CONFIG_X86_PAE
443static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
444{
445 trace_xen_mmu_set_pte_atomic(ptep, pte);
446 __xen_set_pte(ptep, pte);
447}
448
449static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
450{
451 trace_xen_mmu_pte_clear(mm, addr, ptep);
452 __xen_set_pte(ptep, native_make_pte(0));
453}
454
455static void xen_pmd_clear(pmd_t *pmdp)
456{
457 trace_xen_mmu_pmd_clear(pmdp);
458 set_pmd(pmdp, __pmd(0));
459}
460#endif /* CONFIG_X86_PAE */
461
462__visible pmd_t xen_make_pmd(pmdval_t pmd)
463{
464 pmd = pte_pfn_to_mfn(pmd);
465 return native_make_pmd(pmd);
466}
467PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
468
428__visible pmd_t xen_make_pmd(pmdval_t pmd)
429{
430 pmd = pte_pfn_to_mfn(pmd);
431 return native_make_pmd(pmd);
432}
433PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
434
469#ifdef CONFIG_X86_64
470__visible pudval_t xen_pud_val(pud_t pud)
471{
472 return pte_mfn_to_pfn(pud.pud);
473}
474PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
475
476__visible pud_t xen_make_pud(pudval_t pud)
477{

--- 88 unchanged lines hidden ---

566__visible p4d_t xen_make_p4d(p4dval_t p4d)
567{
568 p4d = pte_pfn_to_mfn(p4d);
569
570 return native_make_p4d(p4d);
571}
572PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
573#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
435__visible pudval_t xen_pud_val(pud_t pud)
436{
437 return pte_mfn_to_pfn(pud.pud);
438}
439PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
440
441__visible pud_t xen_make_pud(pudval_t pud)
442{

--- 88 unchanged lines hidden ---

531__visible p4d_t xen_make_p4d(p4dval_t p4d)
532{
533 p4d = pte_pfn_to_mfn(p4d);
534
535 return native_make_p4d(p4d);
536}
537PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
538#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
574#endif /* CONFIG_X86_64 */
575
576static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
577 int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
578 bool last, unsigned long limit)
579{
580 int i, nr, flush = 0;
581
582 nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;

--- 48 unchanged lines hidden ---

631 * (Yet another) pagetable walker. This one is intended for pinning a
632 * pagetable. This means that it walks a pagetable and calls the
633 * callback function on each page it finds making up the page table,
634 * at every level. It walks the entire pagetable, but it only bothers
635 * pinning pte pages which are below limit. In the normal case this
636 * will be STACK_TOP_MAX, but at boot we need to pin up to
637 * FIXADDR_TOP.
638 *
539
540static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
541 int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
542 bool last, unsigned long limit)
543{
544 int i, nr, flush = 0;
545
546 nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;

--- 48 unchanged lines hidden ---

595 * (Yet another) pagetable walker. This one is intended for pinning a
596 * pagetable. This means that it walks a pagetable and calls the
597 * callback function on each page it finds making up the page table,
598 * at every level. It walks the entire pagetable, but it only bothers
599 * pinning pte pages which are below limit. In the normal case this
600 * will be STACK_TOP_MAX, but at boot we need to pin up to
601 * FIXADDR_TOP.
602 *
639 * For 32-bit the important bit is that we don't pin beyond there,
640 * because then we start getting into Xen's ptes.
641 *
642 * For 64-bit, we must skip the Xen hole in the middle of the address
643 * space, just after the big x86-64 virtual hole.
603 * We must skip the Xen hole in the middle of the address space, just after
604 * the big x86-64 virtual hole.
644 */
645static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
646 int (*func)(struct mm_struct *mm, struct page *,
647 enum pt_level),
648 unsigned long limit)
649{
650 int i, nr, flush = 0;
651 unsigned hole_low = 0, hole_high = 0;
652
653 /* The limit is the last byte to be touched */
654 limit--;
655 BUG_ON(limit >= FIXADDR_TOP);
656
605 */
606static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
607 int (*func)(struct mm_struct *mm, struct page *,
608 enum pt_level),
609 unsigned long limit)
610{
611 int i, nr, flush = 0;
612 unsigned hole_low = 0, hole_high = 0;
613
614 /* The limit is the last byte to be touched */
615 limit--;
616 BUG_ON(limit >= FIXADDR_TOP);
617
657#ifdef CONFIG_X86_64
658 /*
659 * 64-bit has a great big hole in the middle of the address
660 * space, which contains the Xen mappings.
661 */
662 hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
663 hole_high = pgd_index(GUARD_HOLE_END_ADDR);
618 /*
619 * 64-bit has a great big hole in the middle of the address
620 * space, which contains the Xen mappings.
621 */
622 hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
623 hole_high = pgd_index(GUARD_HOLE_END_ADDR);
664#endif
665
666 nr = pgd_index(limit) + 1;
667 for (i = 0; i < nr; i++) {
668 p4d_t *p4d;
669
670 if (i >= hole_low && i < hole_high)
671 continue;
672

--- 109 unchanged lines hidden ---

782 return flush;
783}
784
785/* This is called just after a mm has been created, but it has not
786 been used yet. We need to make sure that its pagetable is all
787 read-only, and can be pinned. */
788static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
789{
624
625 nr = pgd_index(limit) + 1;
626 for (i = 0; i < nr; i++) {
627 p4d_t *p4d;
628
629 if (i >= hole_low && i < hole_high)
630 continue;
631

--- 109 unchanged lines hidden ---

741 return flush;
742}
743
744/* This is called just after a mm has been created, but it has not
745 been used yet. We need to make sure that its pagetable is all
746 read-only, and can be pinned. */
747static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
748{
749 pgd_t *user_pgd = xen_get_user_pgd(pgd);
750
790 trace_xen_mmu_pgd_pin(mm, pgd);
791
792 xen_mc_batch();
793
794 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
795 /* re-enable interrupts for flushing */
796 xen_mc_issue(0);
797
798 kmap_flush_unused();
799
800 xen_mc_batch();
801 }
802
751 trace_xen_mmu_pgd_pin(mm, pgd);
752
753 xen_mc_batch();
754
755 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
756 /* re-enable interrupts for flushing */
757 xen_mc_issue(0);
758
759 kmap_flush_unused();
760
761 xen_mc_batch();
762 }
763
803#ifdef CONFIG_X86_64
804 {
805 pgd_t *user_pgd = xen_get_user_pgd(pgd);
764 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
806
765
807 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
808
809 if (user_pgd) {
810 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
811 xen_do_pin(MMUEXT_PIN_L4_TABLE,
812 PFN_DOWN(__pa(user_pgd)));
813 }
766 if (user_pgd) {
767 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
768 xen_do_pin(MMUEXT_PIN_L4_TABLE,
769 PFN_DOWN(__pa(user_pgd)));
814 }
770 }
815#else /* CONFIG_X86_32 */
816#ifdef CONFIG_X86_PAE
817 /* Need to make sure unshared kernel PMD is pinnable */
818 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
819 PT_PMD);
820#endif
821 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
822#endif /* CONFIG_X86_64 */
771
823 xen_mc_issue(0);
824}
825
826static void xen_pgd_pin(struct mm_struct *mm)
827{
828 __xen_pgd_pin(mm, mm->pgd);
829}
830

--- 34 unchanged lines hidden ---

865 * The init_mm pagetable is really pinned as soon as its created, but
866 * that's before we have page structures to store the bits. So do all
867 * the book-keeping now once struct pages for allocated pages are
868 * initialized. This happens only after memblock_free_all() is called.
869 */
870static void __init xen_after_bootmem(void)
871{
872 static_branch_enable(&xen_struct_pages_ready);
772 xen_mc_issue(0);
773}
774
775static void xen_pgd_pin(struct mm_struct *mm)
776{
777 __xen_pgd_pin(mm, mm->pgd);
778}
779

--- 34 unchanged lines hidden ---

814 * The init_mm pagetable is really pinned as soon as its created, but
815 * that's before we have page structures to store the bits. So do all
816 * the book-keeping now once struct pages for allocated pages are
817 * initialized. This happens only after memblock_free_all() is called.
818 */
819static void __init xen_after_bootmem(void)
820{
821 static_branch_enable(&xen_struct_pages_ready);
873#ifdef CONFIG_X86_64
874 SetPagePinned(virt_to_page(level3_user_vsyscall));
822 SetPagePinned(virt_to_page(level3_user_vsyscall));
875#endif
876 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
877}
878
879static int xen_unpin_page(struct mm_struct *mm, struct page *page,
880 enum pt_level level)
881{
882 unsigned pgfl = TestClearPagePinned(page);
883

--- 30 unchanged lines hidden ---

914 }
915
916 return 0; /* never need to flush on unpin */
917}
918
919/* Release a pagetables pages back as normal RW */
920static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
921{
823 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
824}
825
826static int xen_unpin_page(struct mm_struct *mm, struct page *page,
827 enum pt_level level)
828{
829 unsigned pgfl = TestClearPagePinned(page);
830

--- 30 unchanged lines hidden ---

861 }
862
863 return 0; /* never need to flush on unpin */
864}
865
866/* Release a pagetables pages back as normal RW */
867static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
868{
869 pgd_t *user_pgd = xen_get_user_pgd(pgd);
870
922 trace_xen_mmu_pgd_unpin(mm, pgd);
923
924 xen_mc_batch();
925
926 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
927
871 trace_xen_mmu_pgd_unpin(mm, pgd);
872
873 xen_mc_batch();
874
875 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
876
928#ifdef CONFIG_X86_64
929 {
930 pgd_t *user_pgd = xen_get_user_pgd(pgd);
931
932 if (user_pgd) {
933 xen_do_pin(MMUEXT_UNPIN_TABLE,
934 PFN_DOWN(__pa(user_pgd)));
935 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
936 }
877 if (user_pgd) {
878 xen_do_pin(MMUEXT_UNPIN_TABLE,
879 PFN_DOWN(__pa(user_pgd)));
880 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
937 }
881 }
938#endif
939
882
940#ifdef CONFIG_X86_PAE
941 /* Need to make sure unshared kernel PMD is unpinned */
942 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
943 PT_PMD);
944#endif
945
946 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
947
948 xen_mc_issue(0);
949}
950
951static void xen_pgd_unpin(struct mm_struct *mm)
952{
953 __xen_pgd_unpin(mm, mm->pgd);

--- 130 unchanged lines hidden ---

1084 struct mmuext_op op;
1085
1086 op.cmd = cmd;
1087 op.arg1.mfn = pfn_to_mfn(pfn);
1088 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1089 BUG();
1090}
1091
883 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
884
885 xen_mc_issue(0);
886}
887
888static void xen_pgd_unpin(struct mm_struct *mm)
889{
890 __xen_pgd_unpin(mm, mm->pgd);

--- 130 unchanged lines hidden ---

1021 struct mmuext_op op;
1022
1023 op.cmd = cmd;
1024 op.arg1.mfn = pfn_to_mfn(pfn);
1025 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1026 BUG();
1027}
1028
1092#ifdef CONFIG_X86_64
1093static void __init xen_cleanhighmap(unsigned long vaddr,
1094 unsigned long vaddr_end)
1095{
1096 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1097 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1098
1099 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1100 * We include the PMD passed in on _both_ boundaries. */

--- 167 unchanged lines hidden ---

1268 * to use it - they are going to crash. The xen_start_info has been
1269 * taken care of already in xen_setup_kernel_pagetable. */
1270 addr = xen_start_info->pt_base;
1271 size = xen_start_info->nr_pt_frames * PAGE_SIZE;
1272
1273 xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
1274 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1275}
1029static void __init xen_cleanhighmap(unsigned long vaddr,
1030 unsigned long vaddr_end)
1031{
1032 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1033 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1034
1035 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1036 * We include the PMD passed in on _both_ boundaries. */

--- 167 unchanged lines hidden ---

1204 * to use it - they are going to crash. The xen_start_info has been
1205 * taken care of already in xen_setup_kernel_pagetable. */
1206 addr = xen_start_info->pt_base;
1207 size = xen_start_info->nr_pt_frames * PAGE_SIZE;
1208
1209 xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
1210 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1211}
1276#endif
1277
1278static void __init xen_pagetable_p2m_setup(void)
1279{
1280 xen_vmalloc_p2m_tree();
1281
1212
1213static void __init xen_pagetable_p2m_setup(void)
1214{
1215 xen_vmalloc_p2m_tree();
1216
1282#ifdef CONFIG_X86_64
1283 xen_pagetable_p2m_free();
1284
1285 xen_pagetable_cleanhighmap();
1217 xen_pagetable_p2m_free();
1218
1219 xen_pagetable_cleanhighmap();
1286#endif
1220
1287 /* And revector! Bye bye old array */
1288 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1289}
1290
1291static void __init xen_pagetable_init(void)
1292{
1293 paging_init();
1294 xen_post_allocator_init();

--- 120 unchanged lines hidden ---

1415
1416 /* Update xen_current_cr3 once the batch has actually
1417 been submitted. */
1418 xen_mc_callback(set_current_cr3, (void *)cr3);
1419 }
1420}
1421static void xen_write_cr3(unsigned long cr3)
1422{
1221 /* And revector! Bye bye old array */
1222 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1223}
1224
1225static void __init xen_pagetable_init(void)
1226{
1227 paging_init();
1228 xen_post_allocator_init();

--- 120 unchanged lines hidden ---

1349
1350 /* Update xen_current_cr3 once the batch has actually
1351 been submitted. */
1352 xen_mc_callback(set_current_cr3, (void *)cr3);
1353 }
1354}
1355static void xen_write_cr3(unsigned long cr3)
1356{
1357 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1358
1423 BUG_ON(preemptible());
1424
1425 xen_mc_batch(); /* disables interrupts */
1426
1427 /* Update while interrupts are disabled, so its atomic with
1428 respect to ipis */
1429 this_cpu_write(xen_cr3, cr3);
1430
1431 __xen_write_cr3(true, cr3);
1432
1359 BUG_ON(preemptible());
1360
1361 xen_mc_batch(); /* disables interrupts */
1362
1363 /* Update while interrupts are disabled, so its atomic with
1364 respect to ipis */
1365 this_cpu_write(xen_cr3, cr3);
1366
1367 __xen_write_cr3(true, cr3);
1368
1433#ifdef CONFIG_X86_64
1434 {
1435 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1436 if (user_pgd)
1437 __xen_write_cr3(false, __pa(user_pgd));
1438 else
1439 __xen_write_cr3(false, 0);
1440 }
1441#endif
1369 if (user_pgd)
1370 __xen_write_cr3(false, __pa(user_pgd));
1371 else
1372 __xen_write_cr3(false, 0);
1442
1443 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1444}
1445
1373
1374 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1375}
1376
1446#ifdef CONFIG_X86_64
1447/*
1448 * At the start of the day - when Xen launches a guest, it has already
1449 * built pagetables for the guest. We diligently look over them
1450 * in xen_setup_kernel_pagetable and graft as appropriate them in the
1451 * init_top_pgt and its friends. Then when we are happy we load
1452 * the new init_top_pgt - and continue on.
1453 *
1454 * The generic code starts (start_kernel) and 'init_mem_mapping' sets

--- 18 unchanged lines hidden ---

1473 /* Update while interrupts are disabled, so its atomic with
1474 respect to ipis */
1475 this_cpu_write(xen_cr3, cr3);
1476
1477 __xen_write_cr3(true, cr3);
1478
1479 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1480}
1377/*
1378 * At the start of the day - when Xen launches a guest, it has already
1379 * built pagetables for the guest. We diligently look over them
1380 * in xen_setup_kernel_pagetable and graft as appropriate them in the
1381 * init_top_pgt and its friends. Then when we are happy we load
1382 * the new init_top_pgt - and continue on.
1383 *
1384 * The generic code starts (start_kernel) and 'init_mem_mapping' sets

--- 18 unchanged lines hidden ---

1403 /* Update while interrupts are disabled, so its atomic with
1404 respect to ipis */
1405 this_cpu_write(xen_cr3, cr3);
1406
1407 __xen_write_cr3(true, cr3);
1408
1409 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1410}
1481#endif
1482
1483static int xen_pgd_alloc(struct mm_struct *mm)
1484{
1485 pgd_t *pgd = mm->pgd;
1411
1412static int xen_pgd_alloc(struct mm_struct *mm)
1413{
1414 pgd_t *pgd = mm->pgd;
1486 int ret = 0;
1415 struct page *page = virt_to_page(pgd);
1416 pgd_t *user_pgd;
1417 int ret = -ENOMEM;
1487
1488 BUG_ON(PagePinned(virt_to_page(pgd)));
1418
1419 BUG_ON(PagePinned(virt_to_page(pgd)));
1420 BUG_ON(page->private != 0);
1489
1421
1490#ifdef CONFIG_X86_64
1491 {
1492 struct page *page = virt_to_page(pgd);
1493 pgd_t *user_pgd;
1422 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1423 page->private = (unsigned long)user_pgd;
1494
1424
1495 BUG_ON(page->private != 0);
1496
1497 ret = -ENOMEM;
1498
1499 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1500 page->private = (unsigned long)user_pgd;
1501
1502 if (user_pgd != NULL) {
1425 if (user_pgd != NULL) {
1503#ifdef CONFIG_X86_VSYSCALL_EMULATION
1426#ifdef CONFIG_X86_VSYSCALL_EMULATION
1504 user_pgd[pgd_index(VSYSCALL_ADDR)] =
1505 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1427 user_pgd[pgd_index(VSYSCALL_ADDR)] =
1428 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1506#endif
1429#endif
1507 ret = 0;
1508 }
1509
1510 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1430 ret = 0;
1511 }
1431 }
1512#endif
1432
1433 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1434
1513 return ret;
1514}
1515
1516static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1517{
1435 return ret;
1436}
1437
1438static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1439{
1518#ifdef CONFIG_X86_64
1519 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1520
1521 if (user_pgd)
1522 free_page((unsigned long)user_pgd);
1440 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1441
1442 if (user_pgd)
1443 free_page((unsigned long)user_pgd);
1523#endif
1524}
1525
1526/*
1527 * Init-time set_pte while constructing initial pagetables, which
1528 * doesn't allow RO page table pages to be remapped RW.
1529 *
1530 * If there is no MFN for this PFN then this page is initially
1531 * ballooned out so clear the PTE (as in decrease_reservation() in
1532 * drivers/xen/balloon.c).
1533 *
1534 * Many of these PTE updates are done on unpinned and writable pages
1535 * and doing a hypercall for these is unnecessary and expensive. At
1536 * this point it is not possible to tell if a page is pinned or not,
1537 * so always write the PTE directly and rely on Xen trapping and
1538 * emulating any updates as necessary.
1539 */
1540__visible pte_t xen_make_pte_init(pteval_t pte)
1541{
1444}
1445
1446/*
1447 * Init-time set_pte while constructing initial pagetables, which
1448 * doesn't allow RO page table pages to be remapped RW.
1449 *
1450 * If there is no MFN for this PFN then this page is initially
1451 * ballooned out so clear the PTE (as in decrease_reservation() in
1452 * drivers/xen/balloon.c).
1453 *
1454 * Many of these PTE updates are done on unpinned and writable pages
1455 * and doing a hypercall for these is unnecessary and expensive. At
1456 * this point it is not possible to tell if a page is pinned or not,
1457 * so always write the PTE directly and rely on Xen trapping and
1458 * emulating any updates as necessary.
1459 */
1460__visible pte_t xen_make_pte_init(pteval_t pte)
1461{
1542#ifdef CONFIG_X86_64
1543 unsigned long pfn;
1544
1545 /*
1546 * Pages belonging to the initial p2m list mapped outside the default
1547 * address range must be mapped read-only. This region contains the
1548 * page tables for mapping the p2m list, too, and page tables MUST be
1549 * mapped read-only.
1550 */
1551 pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1552 if (xen_start_info->mfn_list < __START_KERNEL_map &&
1553 pfn >= xen_start_info->first_p2m_pfn &&
1554 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1555 pte &= ~_PAGE_RW;
1462 unsigned long pfn;
1463
1464 /*
1465 * Pages belonging to the initial p2m list mapped outside the default
1466 * address range must be mapped read-only. This region contains the
1467 * page tables for mapping the p2m list, too, and page tables MUST be
1468 * mapped read-only.
1469 */
1470 pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1471 if (xen_start_info->mfn_list < __START_KERNEL_map &&
1472 pfn >= xen_start_info->first_p2m_pfn &&
1473 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1474 pte &= ~_PAGE_RW;
1556#endif
1475
1557 pte = pte_pfn_to_mfn(pte);
1558 return native_make_pte(pte);
1559}
1560PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
1561
1562static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1563{
1476 pte = pte_pfn_to_mfn(pte);
1477 return native_make_pte(pte);
1478}
1479PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
1480
1481static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1482{
1564#ifdef CONFIG_X86_32
1565 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1566 if (pte_mfn(pte) != INVALID_P2M_ENTRY
1567 && pte_val_ma(*ptep) & _PAGE_PRESENT)
1568 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1569 pte_val_ma(pte));
1570#endif
1571 __xen_set_pte(ptep, pte);
1572}
1573
1574/* Early in boot, while setting up the initial pagetable, assume
1575 everything is pinned. */
1576static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1577{
1578#ifdef CONFIG_FLATMEM

--- 118 unchanged lines hidden ---

1697 xen_release_ptpage(pfn, PT_PTE);
1698}
1699
1700static void xen_release_pmd(unsigned long pfn)
1701{
1702 xen_release_ptpage(pfn, PT_PMD);
1703}
1704
1483 __xen_set_pte(ptep, pte);
1484}
1485
1486/* Early in boot, while setting up the initial pagetable, assume
1487 everything is pinned. */
1488static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1489{
1490#ifdef CONFIG_FLATMEM

--- 118 unchanged lines hidden ---

1609 xen_release_ptpage(pfn, PT_PTE);
1610}
1611
1612static void xen_release_pmd(unsigned long pfn)
1613{
1614 xen_release_ptpage(pfn, PT_PMD);
1615}
1616
1705#ifdef CONFIG_X86_64
1706static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1707{
1708 xen_alloc_ptpage(mm, pfn, PT_PUD);
1709}
1710
1711static void xen_release_pud(unsigned long pfn)
1712{
1713 xen_release_ptpage(pfn, PT_PUD);
1714}
1617static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1618{
1619 xen_alloc_ptpage(mm, pfn, PT_PUD);
1620}
1621
1622static void xen_release_pud(unsigned long pfn)
1623{
1624 xen_release_ptpage(pfn, PT_PUD);
1625}
1715#endif
1716
1626
1717void __init xen_reserve_top(void)
1718{
1719#ifdef CONFIG_X86_32
1720 unsigned long top = HYPERVISOR_VIRT_START;
1721 struct xen_platform_parameters pp;
1722
1723 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1724 top = pp.virt_start;
1725
1726 reserve_top_address(-top);
1727#endif /* CONFIG_X86_32 */
1728}
1729
1730/*
1731 * Like __va(), but returns address in the kernel mapping (which is
1732 * all we have until the physical memory mapping has been set up.
1733 */
1734static void * __init __ka(phys_addr_t paddr)
1735{
1627/*
1628 * Like __va(), but returns address in the kernel mapping (which is
1629 * all we have until the physical memory mapping has been set up.
1630 */
1631static void * __init __ka(phys_addr_t paddr)
1632{
1736#ifdef CONFIG_X86_64
1737 return (void *)(paddr + __START_KERNEL_map);
1633 return (void *)(paddr + __START_KERNEL_map);
1738#else
1739 return __va(paddr);
1740#endif
1741}
1742
1743/* Convert a machine address to physical address */
1744static unsigned long __init m2p(phys_addr_t maddr)
1745{
1746 phys_addr_t paddr;
1747
1748 maddr &= XEN_PTE_MFN_MASK;

--- 17 unchanged lines hidden ---

1766
1767 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1768 BUG();
1769}
1770static void __init set_page_prot(void *addr, pgprot_t prot)
1771{
1772 return set_page_prot_flags(addr, prot, UVMF_NONE);
1773}
1634}
1635
1636/* Convert a machine address to physical address */
1637static unsigned long __init m2p(phys_addr_t maddr)
1638{
1639 phys_addr_t paddr;
1640
1641 maddr &= XEN_PTE_MFN_MASK;

--- 17 unchanged lines hidden ---

1659
1660 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1661 BUG();
1662}
1663static void __init set_page_prot(void *addr, pgprot_t prot)
1664{
1665 return set_page_prot_flags(addr, prot, UVMF_NONE);
1666}
1774#ifdef CONFIG_X86_32
1775static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1776{
1777 unsigned pmdidx, pteidx;
1778 unsigned ident_pte;
1779 unsigned long pfn;
1780
1667
1781 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1782 PAGE_SIZE);
1783
1784 ident_pte = 0;
1785 pfn = 0;
1786 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1787 pte_t *pte_page;
1788
1789 /* Reuse or allocate a page of ptes */
1790 if (pmd_present(pmd[pmdidx]))
1791 pte_page = m2v(pmd[pmdidx].pmd);
1792 else {
1793 /* Check for free pte pages */
1794 if (ident_pte == LEVEL1_IDENT_ENTRIES)
1795 break;
1796
1797 pte_page = &level1_ident_pgt[ident_pte];
1798 ident_pte += PTRS_PER_PTE;
1799
1800 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1801 }
1802
1803 /* Install mappings */
1804 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1805 pte_t pte;
1806
1807 if (pfn > max_pfn_mapped)
1808 max_pfn_mapped = pfn;
1809
1810 if (!pte_none(pte_page[pteidx]))
1811 continue;
1812
1813 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1814 pte_page[pteidx] = pte;
1815 }
1816 }
1817
1818 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1819 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1820
1821 set_page_prot(pmd, PAGE_KERNEL_RO);
1822}
1823#endif
1824void __init xen_setup_machphys_mapping(void)
1825{
1826 struct xen_machphys_mapping mapping;
1827
1828 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1829 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1830 machine_to_phys_nr = mapping.max_mfn + 1;
1831 } else {
1832 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1833 }
1668void __init xen_setup_machphys_mapping(void)
1669{
1670 struct xen_machphys_mapping mapping;
1671
1672 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1673 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1674 machine_to_phys_nr = mapping.max_mfn + 1;
1675 } else {
1676 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1677 }
1834#ifdef CONFIG_X86_32
1835 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1836 < machine_to_phys_mapping);
1837#endif
1838}
1839
1678}
1679
1840#ifdef CONFIG_X86_64
1841static void __init convert_pfn_mfn(void *v)
1842{
1843 pte_t *pte = v;
1844 int i;
1845
1846 /* All levels are converted the same way, so just treat them
1847 as ptes. */
1848 for (i = 0; i < PTRS_PER_PTE; i++)

--- 314 unchanged lines hidden ---

2163 pfn++;
2164 }
2165
2166 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
2167 xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
2168 xen_start_info->nr_p2m_frames = n_frames;
2169}
2170
1680static void __init convert_pfn_mfn(void *v)
1681{
1682 pte_t *pte = v;
1683 int i;
1684
1685 /* All levels are converted the same way, so just treat them
1686 as ptes. */
1687 for (i = 0; i < PTRS_PER_PTE; i++)

--- 314 unchanged lines hidden ---

2002 pfn++;
2003 }
2004
2005 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
2006 xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
2007 xen_start_info->nr_p2m_frames = n_frames;
2008}
2009
2171#else /* !CONFIG_X86_64 */
2172static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2173static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2174RESERVE_BRK(fixup_kernel_pmd, PAGE_SIZE);
2175RESERVE_BRK(fixup_kernel_pte, PAGE_SIZE);
2176
2177static void __init xen_write_cr3_init(unsigned long cr3)
2178{
2179 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2180
2181 BUG_ON(read_cr3_pa() != __pa(initial_page_table));
2182 BUG_ON(cr3 != __pa(swapper_pg_dir));
2183
2184 /*
2185 * We are switching to swapper_pg_dir for the first time (from
2186 * initial_page_table) and therefore need to mark that page
2187 * read-only and then pin it.
2188 *
2189 * Xen disallows sharing of kernel PMDs for PAE
2190 * guests. Therefore we must copy the kernel PMD from
2191 * initial_page_table into a new kernel PMD to be used in
2192 * swapper_pg_dir.
2193 */
2194 swapper_kernel_pmd =
2195 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2196 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
2197 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2198 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2199 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2200
2201 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2202 xen_write_cr3(cr3);
2203 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2204
2205 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2206 PFN_DOWN(__pa(initial_page_table)));
2207 set_page_prot(initial_page_table, PAGE_KERNEL);
2208 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2209
2210 pv_ops.mmu.write_cr3 = &xen_write_cr3;
2211}
2212
2213/*
2214 * For 32 bit domains xen_start_info->pt_base is the pgd address which might be
2215 * not the first page table in the page table pool.
2216 * Iterate through the initial page tables to find the real page table base.
2217 */
2218static phys_addr_t __init xen_find_pt_base(pmd_t *pmd)
2219{
2220 phys_addr_t pt_base, paddr;
2221 unsigned pmdidx;
2222
2223 pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
2224
2225 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
2226 if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
2227 paddr = m2p(pmd[pmdidx].pmd);
2228 pt_base = min(pt_base, paddr);
2229 }
2230
2231 return pt_base;
2232}
2233
2234void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
2235{
2236 pmd_t *kernel_pmd;
2237
2238 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2239
2240 xen_pt_base = xen_find_pt_base(kernel_pmd);
2241 xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
2242
2243 initial_kernel_pmd =
2244 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2245
2246 max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
2247
2248 copy_page(initial_kernel_pmd, kernel_pmd);
2249
2250 xen_map_identity_early(initial_kernel_pmd, max_pfn);
2251
2252 copy_page(initial_page_table, pgd);
2253 initial_page_table[KERNEL_PGD_BOUNDARY] =
2254 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
2255
2256 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2257 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
2258 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2259
2260 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2261
2262 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2263 PFN_DOWN(__pa(initial_page_table)));
2264 xen_write_cr3(__pa(initial_page_table));
2265
2266 memblock_reserve(xen_pt_base, xen_pt_size);
2267}
2268#endif /* CONFIG_X86_64 */
2269
2270void __init xen_reserve_special_pages(void)
2271{
2272 phys_addr_t paddr;
2273
2274 memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
2275 if (xen_start_info->store_mfn) {
2276 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
2277 memblock_reserve(paddr, PAGE_SIZE);

--- 17 unchanged lines hidden ---

2295static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2296{
2297 pte_t pte;
2298
2299 phys >>= PAGE_SHIFT;
2300
2301 switch (idx) {
2302 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2010void __init xen_reserve_special_pages(void)
2011{
2012 phys_addr_t paddr;
2013
2014 memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
2015 if (xen_start_info->store_mfn) {
2016 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
2017 memblock_reserve(paddr, PAGE_SIZE);

--- 17 unchanged lines hidden ---

2035static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2036{
2037 pte_t pte;
2038
2039 phys >>= PAGE_SHIFT;
2040
2041 switch (idx) {
2042 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2303#ifdef CONFIG_X86_32
2304 case FIX_WP_TEST:
2305# ifdef CONFIG_HIGHMEM
2306 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2307# endif
2308#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
2043#ifdef CONFIG_X86_VSYSCALL_EMULATION
2309 case VSYSCALL_PAGE:
2310#endif
2311 /* All local page mappings */
2312 pte = pfn_pte(phys, prot);
2313 break;
2314
2315#ifdef CONFIG_X86_LOCAL_APIC
2316 case FIX_APIC_BASE: /* maps dummy local APIC */

--- 35 unchanged lines hidden ---

2352#endif
2353}
2354
2355static void __init xen_post_allocator_init(void)
2356{
2357 pv_ops.mmu.set_pte = xen_set_pte;
2358 pv_ops.mmu.set_pmd = xen_set_pmd;
2359 pv_ops.mmu.set_pud = xen_set_pud;
2044 case VSYSCALL_PAGE:
2045#endif
2046 /* All local page mappings */
2047 pte = pfn_pte(phys, prot);
2048 break;
2049
2050#ifdef CONFIG_X86_LOCAL_APIC
2051 case FIX_APIC_BASE: /* maps dummy local APIC */

--- 35 unchanged lines hidden ---

2087#endif
2088}
2089
2090static void __init xen_post_allocator_init(void)
2091{
2092 pv_ops.mmu.set_pte = xen_set_pte;
2093 pv_ops.mmu.set_pmd = xen_set_pmd;
2094 pv_ops.mmu.set_pud = xen_set_pud;
2360#ifdef CONFIG_X86_64
2361 pv_ops.mmu.set_p4d = xen_set_p4d;
2095 pv_ops.mmu.set_p4d = xen_set_p4d;
2362#endif
2363
2364 /* This will work as long as patching hasn't happened yet
2365 (which it hasn't) */
2366 pv_ops.mmu.alloc_pte = xen_alloc_pte;
2367 pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
2368 pv_ops.mmu.release_pte = xen_release_pte;
2369 pv_ops.mmu.release_pmd = xen_release_pmd;
2096
2097 /* This will work as long as patching hasn't happened yet
2098 (which it hasn't) */
2099 pv_ops.mmu.alloc_pte = xen_alloc_pte;
2100 pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
2101 pv_ops.mmu.release_pte = xen_release_pte;
2102 pv_ops.mmu.release_pmd = xen_release_pmd;
2370#ifdef CONFIG_X86_64
2371 pv_ops.mmu.alloc_pud = xen_alloc_pud;
2372 pv_ops.mmu.release_pud = xen_release_pud;
2103 pv_ops.mmu.alloc_pud = xen_alloc_pud;
2104 pv_ops.mmu.release_pud = xen_release_pud;
2373#endif
2374 pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
2375
2105 pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
2106
2376#ifdef CONFIG_X86_64
2377 pv_ops.mmu.write_cr3 = &xen_write_cr3;
2107 pv_ops.mmu.write_cr3 = &xen_write_cr3;
2378#endif
2379}
2380
2381static void xen_leave_lazy_mmu(void)
2382{
2383 preempt_disable();
2384 xen_mc_flush();
2385 paravirt_leave_lazy_mmu();
2386 preempt_enable();

--- 28 unchanged lines hidden ---

2415 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2416
2417 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2418 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2419
2420 .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
2421 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2422
2108}
2109
2110static void xen_leave_lazy_mmu(void)
2111{
2112 preempt_disable();
2113 xen_mc_flush();
2114 paravirt_leave_lazy_mmu();
2115 preempt_enable();

--- 28 unchanged lines hidden ---

2144 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2145
2146 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2147 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2148
2149 .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
2150 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2151
2423#ifdef CONFIG_X86_PAE
2424 .set_pte_atomic = xen_set_pte_atomic,
2425 .pte_clear = xen_pte_clear,
2426 .pmd_clear = xen_pmd_clear,
2427#endif /* CONFIG_X86_PAE */
2428 .set_pud = xen_set_pud_hyper,
2429
2430 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2431 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2432
2152 .set_pud = xen_set_pud_hyper,
2153
2154 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2155 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2156
2433#ifdef CONFIG_X86_64
2434 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2435 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2436 .set_p4d = xen_set_p4d_hyper,
2437
2438 .alloc_pud = xen_alloc_pmd_init,
2439 .release_pud = xen_release_pmd_init,
2440
2441#if CONFIG_PGTABLE_LEVELS >= 5
2442 .p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
2443 .make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
2444#endif
2157 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2158 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2159 .set_p4d = xen_set_p4d_hyper,
2160
2161 .alloc_pud = xen_alloc_pmd_init,
2162 .release_pud = xen_release_pmd_init,
2163
2164#if CONFIG_PGTABLE_LEVELS >= 5
2165 .p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
2166 .make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
2167#endif
2445#endif /* CONFIG_X86_64 */
2446
2447 .activate_mm = xen_activate_mm,
2448 .dup_mmap = xen_dup_mmap,
2449 .exit_mmap = xen_exit_mmap,
2450
2451 .lazy_mode = {
2452 .enter = paravirt_enter_lazy_mmu,
2453 .leave = xen_leave_lazy_mmu,

--- 349 unchanged lines hidden ---
2168
2169 .activate_mm = xen_activate_mm,
2170 .dup_mmap = xen_dup_mmap,
2171 .exit_mmap = xen_exit_mmap,
2172
2173 .lazy_mode = {
2174 .enter = paravirt_enter_lazy_mmu,
2175 .leave = xen_leave_lazy_mmu,

--- 349 unchanged lines hidden ---