xref: /linux/arch/x86/xen/mmu_pv.c (revision 9a379e77033f02c4a071891afdf0f0a01eff8ccb)
1 /*
2  * Xen mmu operations
3  *
4  * This file contains the various mmu fetch and update operations.
5  * The most important job they must perform is the mapping between the
6  * domain's pfn and the overall machine mfns.
7  *
8  * Xen allows guests to directly update the pagetable, in a controlled
9  * fashion.  In other words, the guest modifies the same pagetable
10  * that the CPU actually uses, which eliminates the overhead of having
11  * a separate shadow pagetable.
12  *
13  * In order to allow this, it falls on the guest domain to map its
14  * notion of a "physical" pfn - which is just a domain-local linear
15  * address - into a real "machine address" which the CPU's MMU can
16  * use.
17  *
18  * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19  * inserted directly into the pagetable.  When creating a new
20  * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
21  * when reading the content back with __(pgd|pmd|pte)_val, it converts
22  * the mfn back into a pfn.
23  *
24  * The other constraint is that all pages which make up a pagetable
25  * must be mapped read-only in the guest.  This prevents uncontrolled
26  * guest updates to the pagetable.  Xen strictly enforces this, and
27  * will disallow any pagetable update which will end up mapping a
28  * pagetable page RW, and will disallow using any writable page as a
29  * pagetable.
30  *
31  * Naively, when loading %cr3 with the base of a new pagetable, Xen
32  * would need to validate the whole pagetable before going on.
33  * Naturally, this is quite slow.  The solution is to "pin" a
34  * pagetable, which enforces all the constraints on the pagetable even
35  * when it is not actively in use.  This means that Xen can be assured
36  * that it is still valid when you do load it into %cr3, and doesn't
37  * need to revalidate it.
38  *
39  * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
40  */
41 #include <linux/sched/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/debugfs.h>
44 #include <linux/bug.h>
45 #include <linux/vmalloc.h>
46 #include <linux/export.h>
47 #include <linux/init.h>
48 #include <linux/gfp.h>
49 #include <linux/memblock.h>
50 #include <linux/seq_file.h>
51 #include <linux/crash_dump.h>
52 #ifdef CONFIG_KEXEC_CORE
53 #include <linux/kexec.h>
54 #endif
55 
56 #include <trace/events/xen.h>
57 
58 #include <asm/pgtable.h>
59 #include <asm/tlbflush.h>
60 #include <asm/fixmap.h>
61 #include <asm/mmu_context.h>
62 #include <asm/setup.h>
63 #include <asm/paravirt.h>
64 #include <asm/e820/api.h>
65 #include <asm/linkage.h>
66 #include <asm/page.h>
67 #include <asm/init.h>
68 #include <asm/pat.h>
69 #include <asm/smp.h>
70 
71 #include <asm/xen/hypercall.h>
72 #include <asm/xen/hypervisor.h>
73 
74 #include <xen/xen.h>
75 #include <xen/page.h>
76 #include <xen/interface/xen.h>
77 #include <xen/interface/hvm/hvm_op.h>
78 #include <xen/interface/version.h>
79 #include <xen/interface/memory.h>
80 #include <xen/hvc-console.h>
81 
82 #include "multicalls.h"
83 #include "mmu.h"
84 #include "debugfs.h"
85 
86 #ifdef CONFIG_X86_32
87 /*
88  * Identity map, in addition to plain kernel map.  This needs to be
89  * large enough to provide page table pages for allocating the rest.
90  * Each page can map 2MB.
91  */
92 #define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
93 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
94 #endif
95 #ifdef CONFIG_X86_64
96 /* l3 pud for userspace vsyscall mapping */
97 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
98 #endif /* CONFIG_X86_64 */
99 
100 /*
101  * Note about cr3 (pagetable base) values:
102  *
103  * xen_cr3 contains the current logical cr3 value; it contains the
104  * last set cr3.  This may not be the current effective cr3, because
105  * its update may be being lazily deferred.  However, a vcpu looking
106  * at its own cr3 can use this value knowing that it everything will
107  * be self-consistent.
108  *
109  * xen_current_cr3 contains the actual vcpu cr3; it is set once the
110  * hypercall to set the vcpu cr3 is complete (so it may be a little
111  * out of date, but it will never be set early).  If one vcpu is
112  * looking at another vcpu's cr3 value, it should use this variable.
113  */
114 DEFINE_PER_CPU(unsigned long, xen_cr3);	 /* cr3 stored as physaddr */
115 DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */
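/*
 * Illustrative sketch: code on one vcpu inspecting another vcpu's
 * pagetable base should therefore read
 *
 *	cr3 = per_cpu(xen_current_cr3, cpu);
 *
 * rather than per_cpu(xen_cr3, cpu), since only the former reflects a
 * value the hypervisor has actually loaded.
 */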
116 
117 static phys_addr_t xen_pt_base, xen_pt_size __initdata;
118 
119 /*
120  * Just beyond the highest usermode address.  STACK_TOP_MAX has a
121  * redzone above it, so round it up to a PGD boundary.
122  */
123 #define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
124 
125 void make_lowmem_page_readonly(void *vaddr)
126 {
127 	pte_t *pte, ptev;
128 	unsigned long address = (unsigned long)vaddr;
129 	unsigned int level;
130 
131 	pte = lookup_address(address, &level);
132 	if (pte == NULL)
133 		return;		/* vaddr missing */
134 
135 	ptev = pte_wrprotect(*pte);
136 
137 	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
138 		BUG();
139 }
140 
141 void make_lowmem_page_readwrite(void *vaddr)
142 {
143 	pte_t *pte, ptev;
144 	unsigned long address = (unsigned long)vaddr;
145 	unsigned int level;
146 
147 	pte = lookup_address(address, &level);
148 	if (pte == NULL)
149 		return;		/* vaddr missing */
150 
151 	ptev = pte_mkwrite(*pte);
152 
153 	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
154 		BUG();
155 }
156 
157 
158 static bool xen_page_pinned(void *ptr)
159 {
160 	struct page *page = virt_to_page(ptr);
161 
162 	return PagePinned(page);
163 }
164 
165 static void xen_extend_mmu_update(const struct mmu_update *update)
166 {
167 	struct multicall_space mcs;
168 	struct mmu_update *u;
169 
170 	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
171 
172 	if (mcs.mc != NULL) {
173 		mcs.mc->args[1]++;
174 	} else {
175 		mcs = __xen_mc_entry(sizeof(*u));
176 		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
177 	}
178 
179 	u = mcs.args;
180 	*u = *update;
181 }
182 
183 static void xen_extend_mmuext_op(const struct mmuext_op *op)
184 {
185 	struct multicall_space mcs;
186 	struct mmuext_op *u;
187 
188 	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
189 
190 	if (mcs.mc != NULL) {
191 		mcs.mc->args[1]++;
192 	} else {
193 		mcs = __xen_mc_entry(sizeof(*u));
194 		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
195 	}
196 
197 	u = mcs.args;
198 	*u = *op;
199 }
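/*
 * Usage sketch (illustrative): callers wrap these extension helpers in
 * a multicall batch, e.g. as xen_set_pmd_hyper() below does:
 *
 *	xen_mc_batch();
 *	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
 *	u.val = pmd_val_ma(val);
 *	xen_extend_mmu_update(&u);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 *
 * so that consecutive updates can be coalesced into a single
 * mmu_update hypercall argument list.
 */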
200 
201 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
202 {
203 	struct mmu_update u;
204 
205 	preempt_disable();
206 
207 	xen_mc_batch();
208 
209 	/* ptr may be ioremapped for 64-bit pagetable setup */
210 	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
211 	u.val = pmd_val_ma(val);
212 	xen_extend_mmu_update(&u);
213 
214 	xen_mc_issue(PARAVIRT_LAZY_MMU);
215 
216 	preempt_enable();
217 }
218 
219 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
220 {
221 	trace_xen_mmu_set_pmd(ptr, val);
222 
223 	/* If page is not pinned, we can just update the entry
224 	   directly */
225 	if (!xen_page_pinned(ptr)) {
226 		*ptr = val;
227 		return;
228 	}
229 
230 	xen_set_pmd_hyper(ptr, val);
231 }
232 
233 /*
234  * Associate a virtual page frame with a given physical page frame
235  * and protection flags for that frame.
236  */
237 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
238 {
239 	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
240 }
241 
242 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
243 {
244 	struct mmu_update u;
245 
246 	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
247 		return false;
248 
249 	xen_mc_batch();
250 
251 	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
252 	u.val = pte_val_ma(pteval);
253 	xen_extend_mmu_update(&u);
254 
255 	xen_mc_issue(PARAVIRT_LAZY_MMU);
256 
257 	return true;
258 }
259 
260 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
261 {
262 	if (!xen_batched_set_pte(ptep, pteval)) {
263 		/*
264 		 * Could call native_set_pte() here and trap and
265 		 * emulate the PTE write but with 32-bit guests this
266 		 * needs two traps (one for each of the two 32-bit
267 		 * words in the PTE) so do one hypercall directly
268 		 * instead.
269 		 */
270 		struct mmu_update u;
271 
272 		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
273 		u.val = pte_val_ma(pteval);
274 		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
275 	}
276 }
277 
278 static void xen_set_pte(pte_t *ptep, pte_t pteval)
279 {
280 	trace_xen_mmu_set_pte(ptep, pteval);
281 	__xen_set_pte(ptep, pteval);
282 }
283 
284 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
285 		    pte_t *ptep, pte_t pteval)
286 {
287 	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
288 	__xen_set_pte(ptep, pteval);
289 }
290 
291 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
292 				 unsigned long addr, pte_t *ptep)
293 {
294 	/* Just return the pte as-is.  We preserve the bits on commit */
295 	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
296 	return *ptep;
297 }
298 
299 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
300 				 pte_t *ptep, pte_t pte)
301 {
302 	struct mmu_update u;
303 
304 	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
305 	xen_mc_batch();
306 
307 	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
308 	u.val = pte_val_ma(pte);
309 	xen_extend_mmu_update(&u);
310 
311 	xen_mc_issue(PARAVIRT_LAZY_MMU);
312 }
313 
314 /* Assume pteval_t is equivalent to all the other *val_t types. */
315 static pteval_t pte_mfn_to_pfn(pteval_t val)
316 {
317 	if (val & _PAGE_PRESENT) {
318 		unsigned long mfn = (val & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
319 		unsigned long pfn = mfn_to_pfn(mfn);
320 
321 		pteval_t flags = val & PTE_FLAGS_MASK;
322 		if (unlikely(pfn == ~0))
323 			val = flags & ~_PAGE_PRESENT;
324 		else
325 			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
326 	}
327 
328 	return val;
329 }
330 
331 static pteval_t pte_pfn_to_mfn(pteval_t val)
332 {
333 	if (val & _PAGE_PRESENT) {
334 		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
335 		pteval_t flags = val & PTE_FLAGS_MASK;
336 		unsigned long mfn;
337 
338 		mfn = __pfn_to_mfn(pfn);
339 
340 		/*
341 		 * If there's no mfn for the pfn, then just create an
342 		 * empty non-present pte.  Unfortunately this loses
343 		 * information about the original pfn, so
344 		 * pte_mfn_to_pfn is asymmetric.
345 		 */
346 		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
347 			mfn = 0;
348 			flags = 0;
349 		} else
350 			mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
351 		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
352 	}
353 
354 	return val;
355 }
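/*
 * Note (illustrative): the two conversions above are not symmetric.
 * A pfn with no backing mfn (e.g. a ballooned-out page) converts to an
 * empty non-present entry, so pte_mfn_to_pfn(pte_pfn_to_mfn(val)) may
 * not return the original val, as described in the comment above.
 */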
356 
357 __visible pteval_t xen_pte_val(pte_t pte)
358 {
359 	pteval_t pteval = pte.pte;
360 
361 	return pte_mfn_to_pfn(pteval);
362 }
363 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
364 
365 __visible pgdval_t xen_pgd_val(pgd_t pgd)
366 {
367 	return pte_mfn_to_pfn(pgd.pgd);
368 }
369 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
370 
371 __visible pte_t xen_make_pte(pteval_t pte)
372 {
373 	pte = pte_pfn_to_mfn(pte);
374 
375 	return native_make_pte(pte);
376 }
377 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
378 
379 __visible pgd_t xen_make_pgd(pgdval_t pgd)
380 {
381 	pgd = pte_pfn_to_mfn(pgd);
382 	return native_make_pgd(pgd);
383 }
384 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
385 
386 __visible pmdval_t xen_pmd_val(pmd_t pmd)
387 {
388 	return pte_mfn_to_pfn(pmd.pmd);
389 }
390 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
391 
392 static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
393 {
394 	struct mmu_update u;
395 
396 	preempt_disable();
397 
398 	xen_mc_batch();
399 
400 	/* ptr may be ioremapped for 64-bit pagetable setup */
401 	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
402 	u.val = pud_val_ma(val);
403 	xen_extend_mmu_update(&u);
404 
405 	xen_mc_issue(PARAVIRT_LAZY_MMU);
406 
407 	preempt_enable();
408 }
409 
410 static void xen_set_pud(pud_t *ptr, pud_t val)
411 {
412 	trace_xen_mmu_set_pud(ptr, val);
413 
414 	/* If page is not pinned, we can just update the entry
415 	   directly */
416 	if (!xen_page_pinned(ptr)) {
417 		*ptr = val;
418 		return;
419 	}
420 
421 	xen_set_pud_hyper(ptr, val);
422 }
423 
424 #ifdef CONFIG_X86_PAE
425 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
426 {
427 	trace_xen_mmu_set_pte_atomic(ptep, pte);
428 	set_64bit((u64 *)ptep, native_pte_val(pte));
429 }
430 
431 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
432 {
433 	trace_xen_mmu_pte_clear(mm, addr, ptep);
434 	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
435 		native_pte_clear(mm, addr, ptep);
436 }
437 
438 static void xen_pmd_clear(pmd_t *pmdp)
439 {
440 	trace_xen_mmu_pmd_clear(pmdp);
441 	set_pmd(pmdp, __pmd(0));
442 }
443 #endif	/* CONFIG_X86_PAE */
444 
445 __visible pmd_t xen_make_pmd(pmdval_t pmd)
446 {
447 	pmd = pte_pfn_to_mfn(pmd);
448 	return native_make_pmd(pmd);
449 }
450 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
451 
452 #ifdef CONFIG_X86_64
453 __visible pudval_t xen_pud_val(pud_t pud)
454 {
455 	return pte_mfn_to_pfn(pud.pud);
456 }
457 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
458 
459 __visible pud_t xen_make_pud(pudval_t pud)
460 {
461 	pud = pte_pfn_to_mfn(pud);
462 
463 	return native_make_pud(pud);
464 }
465 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
466 
467 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
468 {
469 	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
470 	unsigned offset = pgd - pgd_page;
471 	pgd_t *user_ptr = NULL;
472 
473 	if (offset < pgd_index(USER_LIMIT)) {
474 		struct page *page = virt_to_page(pgd_page);
475 		user_ptr = (pgd_t *)page->private;
476 		if (user_ptr)
477 			user_ptr += offset;
478 	}
479 
480 	return user_ptr;
481 }
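/*
 * Note (illustrative): the user pgd looked up via page->private here
 * is the one stashed by xen_pgd_alloc() further down; entries at or
 * above pgd_index(USER_LIMIT) have no user counterpart and yield NULL.
 */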
482 
483 static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
484 {
485 	struct mmu_update u;
486 
487 	u.ptr = virt_to_machine(ptr).maddr;
488 	u.val = p4d_val_ma(val);
489 	xen_extend_mmu_update(&u);
490 }
491 
492 /*
493  * Raw hypercall-based set_p4d, intended for use in early boot before
494  * there's a page structure.  This implies:
495  *  1. The only existing pagetable is the kernel's
496  *  2. It is always pinned
497  *  3. It has no user pagetable attached to it
498  */
499 static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
500 {
501 	preempt_disable();
502 
503 	xen_mc_batch();
504 
505 	__xen_set_p4d_hyper(ptr, val);
506 
507 	xen_mc_issue(PARAVIRT_LAZY_MMU);
508 
509 	preempt_enable();
510 }
511 
512 static void xen_set_p4d(p4d_t *ptr, p4d_t val)
513 {
514 	pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
515 	pgd_t pgd_val;
516 
517 	trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);
518 
519 	/* If page is not pinned, we can just update the entry
520 	   directly */
521 	if (!xen_page_pinned(ptr)) {
522 		*ptr = val;
523 		if (user_ptr) {
524 			WARN_ON(xen_page_pinned(user_ptr));
525 			pgd_val.pgd = p4d_val_ma(val);
526 			*user_ptr = pgd_val;
527 		}
528 		return;
529 	}
530 
531 	/* If it's pinned, then we can at least batch the kernel and
532 	   user updates together. */
533 	xen_mc_batch();
534 
535 	__xen_set_p4d_hyper(ptr, val);
536 	if (user_ptr)
537 		__xen_set_p4d_hyper((p4d_t *)user_ptr, val);
538 
539 	xen_mc_issue(PARAVIRT_LAZY_MMU);
540 }
541 #endif	/* CONFIG_X86_64 */
542 
543 static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
544 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
545 		bool last, unsigned long limit)
546 {
547 	int i, nr, flush = 0;
548 
549 	nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
550 	for (i = 0; i < nr; i++) {
551 		if (!pmd_none(pmd[i]))
552 			flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
553 	}
554 	return flush;
555 }
556 
557 static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
558 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
559 		bool last, unsigned long limit)
560 {
561 	int i, nr, flush = 0;
562 
563 	nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
564 	for (i = 0; i < nr; i++) {
565 		pmd_t *pmd;
566 
567 		if (pud_none(pud[i]))
568 			continue;
569 
570 		pmd = pmd_offset(&pud[i], 0);
571 		if (PTRS_PER_PMD > 1)
572 			flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
573 		flush |= xen_pmd_walk(mm, pmd, func,
574 				last && i == nr - 1, limit);
575 	}
576 	return flush;
577 }
578 
579 static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
580 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
581 		bool last, unsigned long limit)
582 {
583 	int flush = 0;
584 	pud_t *pud;
585 
586 
587 	if (p4d_none(*p4d))
588 		return flush;
589 
590 	pud = pud_offset(p4d, 0);
591 	if (PTRS_PER_PUD > 1)
592 		flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
593 	flush |= xen_pud_walk(mm, pud, func, last, limit);
594 	return flush;
595 }
596 
597 /*
598  * (Yet another) pagetable walker.  This one is intended for pinning a
599  * pagetable.  This means that it walks a pagetable and calls the
600  * callback function on each page it finds making up the page table,
601  * at every level.  It walks the entire pagetable, but it only bothers
602  * pinning pte pages which are below limit.  In the normal case this
603  * will be STACK_TOP_MAX, but at boot we need to pin up to
604  * FIXADDR_TOP.
605  *
606  * For 32-bit the important bit is that we don't pin beyond there,
607  * because then we start getting into Xen's ptes.
608  *
609  * For 64-bit, we must skip the Xen hole in the middle of the address
610  * space, just after the big x86-64 virtual hole.
611  */
612 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
613 			  int (*func)(struct mm_struct *mm, struct page *,
614 				      enum pt_level),
615 			  unsigned long limit)
616 {
617 	int i, nr, flush = 0;
618 	unsigned hole_low, hole_high;
619 
620 	/* The limit is the last byte to be touched */
621 	limit--;
622 	BUG_ON(limit >= FIXADDR_TOP);
623 
624 	/*
625 	 * 64-bit has a great big hole in the middle of the address
626 	 * space, which contains the Xen mappings.  On 32-bit these
627 	 * will end up making a zero-sized hole and so is a no-op.
628 	 * will end up making a zero-sized hole, so this is a no-op.
629 	hole_low = pgd_index(USER_LIMIT);
630 	hole_high = pgd_index(PAGE_OFFSET);
631 
632 	nr = pgd_index(limit) + 1;
633 	for (i = 0; i < nr; i++) {
634 		p4d_t *p4d;
635 
636 		if (i >= hole_low && i < hole_high)
637 			continue;
638 
639 		if (pgd_none(pgd[i]))
640 			continue;
641 
642 		p4d = p4d_offset(&pgd[i], 0);
643 		flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
644 	}
645 
646 	/* Do the top level last, so that the callbacks can use it as
647 	   a cue to do final things like tlb flushes. */
648 	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
649 
650 	return flush;
651 }
652 
653 static int xen_pgd_walk(struct mm_struct *mm,
654 			int (*func)(struct mm_struct *mm, struct page *,
655 				    enum pt_level),
656 			unsigned long limit)
657 {
658 	return __xen_pgd_walk(mm, mm->pgd, func, limit);
659 }
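/*
 * Usage sketch (illustrative): the pin/unpin paths drive this walker
 * with their per-page callbacks, e.g.
 *
 *	__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);
 *
 * and the accumulated "flush" result tells the caller whether kmaps
 * need flushing before the batch can continue.
 */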
660 
661 /* If we're using split pte locks, then take the page's lock and
662    return a pointer to it.  Otherwise return NULL. */
663 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
664 {
665 	spinlock_t *ptl = NULL;
666 
667 #if USE_SPLIT_PTE_PTLOCKS
668 	ptl = ptlock_ptr(page);
669 	spin_lock_nest_lock(ptl, &mm->page_table_lock);
670 #endif
671 
672 	return ptl;
673 }
674 
675 static void xen_pte_unlock(void *v)
676 {
677 	spinlock_t *ptl = v;
678 	spin_unlock(ptl);
679 }
680 
681 static void xen_do_pin(unsigned level, unsigned long pfn)
682 {
683 	struct mmuext_op op;
684 
685 	op.cmd = level;
686 	op.arg1.mfn = pfn_to_mfn(pfn);
687 
688 	xen_extend_mmuext_op(&op);
689 }
690 
691 static int xen_pin_page(struct mm_struct *mm, struct page *page,
692 			enum pt_level level)
693 {
694 	unsigned pgfl = TestSetPagePinned(page);
695 	int flush;
696 
697 	if (pgfl)
698 		flush = 0;		/* already pinned */
699 	else if (PageHighMem(page))
700 		/* kmaps need flushing if we found an unpinned
701 		   highpage */
702 		flush = 1;
703 	else {
704 		void *pt = lowmem_page_address(page);
705 		unsigned long pfn = page_to_pfn(page);
706 		struct multicall_space mcs = __xen_mc_entry(0);
707 		spinlock_t *ptl;
708 
709 		flush = 0;
710 
711 		/*
712 		 * We need to hold the pagetable lock between the time
713 		 * we make the pagetable RO and when we actually pin
714 		 * it.  If we don't, then other users may come in and
715 		 * attempt to update the pagetable by writing it,
716 		 * which will fail because the memory is RO but not
717 		 * pinned, so Xen won't do the trap'n'emulate.
718 		 *
719 		 * If we're using split pte locks, we can't hold the
720 		 * entire pagetable's worth of locks during the
721 		 * traverse, because we may wrap the preempt count (8
722 		 * bits).  The solution is to mark RO and pin each PTE
723 		 * page while holding the lock.  This means the number
724 		 * of locks we end up holding is never more than a
725 		 * batch size (~32 entries, at present).
726 		 *
727 		 * If we're not using split pte locks, we needn't pin
728 		 * the PTE pages independently, because we're
729 		 * protected by the overall pagetable lock.
730 		 */
731 		ptl = NULL;
732 		if (level == PT_PTE)
733 			ptl = xen_pte_lock(page, mm);
734 
735 		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
736 					pfn_pte(pfn, PAGE_KERNEL_RO),
737 					level == PT_PGD ? UVMF_TLB_FLUSH : 0);
738 
739 		if (ptl) {
740 			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
741 
742 			/* Queue a deferred unlock for when this batch
743 			   is completed. */
744 			xen_mc_callback(xen_pte_unlock, ptl);
745 		}
746 	}
747 
748 	return flush;
749 }
750 
751 /* This is called just after a mm has been created, but it has not
752    been used yet.  We need to make sure that its pagetable is all
753    read-only, and can be pinned. */
754 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
755 {
756 	trace_xen_mmu_pgd_pin(mm, pgd);
757 
758 	xen_mc_batch();
759 
760 	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
761 		/* re-enable interrupts for flushing */
762 		xen_mc_issue(0);
763 
764 		kmap_flush_unused();
765 
766 		xen_mc_batch();
767 	}
768 
769 #ifdef CONFIG_X86_64
770 	{
771 		pgd_t *user_pgd = xen_get_user_pgd(pgd);
772 
773 		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
774 
775 		if (user_pgd) {
776 			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
777 			xen_do_pin(MMUEXT_PIN_L4_TABLE,
778 				   PFN_DOWN(__pa(user_pgd)));
779 		}
780 	}
781 #else /* CONFIG_X86_32 */
782 #ifdef CONFIG_X86_PAE
783 	/* Need to make sure unshared kernel PMD is pinnable */
784 	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
785 		     PT_PMD);
786 #endif
787 	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
788 #endif /* CONFIG_X86_64 */
789 	xen_mc_issue(0);
790 }
791 
792 static void xen_pgd_pin(struct mm_struct *mm)
793 {
794 	__xen_pgd_pin(mm, mm->pgd);
795 }
796 
797 /*
798  * On save, we need to pin all pagetables to make sure they get their
799  * mfns turned into pfns.  Search the list for any unpinned pgds and pin
800  * them (unpinned pgds are not currently in use, probably because the
801  * process is under construction or destruction).
802  *
803  * Expected to be called in stop_machine() ("equivalent to taking
804  * every spinlock in the system"), so the locking doesn't really
805  * matter all that much.
806  */
807 void xen_mm_pin_all(void)
808 {
809 	struct page *page;
810 
811 	spin_lock(&pgd_lock);
812 
813 	list_for_each_entry(page, &pgd_list, lru) {
814 		if (!PagePinned(page)) {
815 			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
816 			SetPageSavePinned(page);
817 		}
818 	}
819 
820 	spin_unlock(&pgd_lock);
821 }
822 
823 /*
824  * The init_mm pagetable is really pinned as soon as it's created, but
825  * that's before we have page structures to store the bits.  So do all
826  * the book-keeping now.
827  */
828 static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
829 				  enum pt_level level)
830 {
831 	SetPagePinned(page);
832 	return 0;
833 }
834 
835 static void __init xen_mark_init_mm_pinned(void)
836 {
837 	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
838 }
839 
840 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
841 			  enum pt_level level)
842 {
843 	unsigned pgfl = TestClearPagePinned(page);
844 
845 	if (pgfl && !PageHighMem(page)) {
846 		void *pt = lowmem_page_address(page);
847 		unsigned long pfn = page_to_pfn(page);
848 		spinlock_t *ptl = NULL;
849 		struct multicall_space mcs;
850 
851 		/*
852 		 * Do the converse to pin_page.  If we're using split
853 		 * pte locks, we must be holding the lock while
854 		 * the pte page is unpinned but still RO to prevent
855 		 * concurrent updates from seeing it in this
856 		 * partially-pinned state.
857 		 */
858 		if (level == PT_PTE) {
859 			ptl = xen_pte_lock(page, mm);
860 
861 			if (ptl)
862 				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
863 		}
864 
865 		mcs = __xen_mc_entry(0);
866 
867 		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
868 					pfn_pte(pfn, PAGE_KERNEL),
869 					level == PT_PGD ? UVMF_TLB_FLUSH : 0);
870 
871 		if (ptl) {
872 			/* unlock when batch completed */
873 			xen_mc_callback(xen_pte_unlock, ptl);
874 		}
875 	}
876 
877 	return 0;		/* never need to flush on unpin */
878 }
879 
880 /* Release a pagetable's pages back as normal RW */
881 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
882 {
883 	trace_xen_mmu_pgd_unpin(mm, pgd);
884 
885 	xen_mc_batch();
886 
887 	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
888 
889 #ifdef CONFIG_X86_64
890 	{
891 		pgd_t *user_pgd = xen_get_user_pgd(pgd);
892 
893 		if (user_pgd) {
894 			xen_do_pin(MMUEXT_UNPIN_TABLE,
895 				   PFN_DOWN(__pa(user_pgd)));
896 			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
897 		}
898 	}
899 #endif
900 
901 #ifdef CONFIG_X86_PAE
902 	/* Need to make sure unshared kernel PMD is unpinned */
903 	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
904 		       PT_PMD);
905 #endif
906 
907 	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
908 
909 	xen_mc_issue(0);
910 }
911 
912 static void xen_pgd_unpin(struct mm_struct *mm)
913 {
914 	__xen_pgd_unpin(mm, mm->pgd);
915 }
916 
917 /*
918  * On resume, undo any pinning done at save, so that the rest of the
919  * kernel doesn't see any unexpected pinned pagetables.
920  */
921 void xen_mm_unpin_all(void)
922 {
923 	struct page *page;
924 
925 	spin_lock(&pgd_lock);
926 
927 	list_for_each_entry(page, &pgd_list, lru) {
928 		if (PageSavePinned(page)) {
929 			BUG_ON(!PagePinned(page));
930 			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
931 			ClearPageSavePinned(page);
932 		}
933 	}
934 
935 	spin_unlock(&pgd_lock);
936 }
937 
938 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
939 {
940 	spin_lock(&next->page_table_lock);
941 	xen_pgd_pin(next);
942 	spin_unlock(&next->page_table_lock);
943 }
944 
945 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
946 {
947 	spin_lock(&mm->page_table_lock);
948 	xen_pgd_pin(mm);
949 	spin_unlock(&mm->page_table_lock);
950 }
951 
952 static void drop_mm_ref_this_cpu(void *info)
953 {
954 	struct mm_struct *mm = info;
955 
956 	if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
957 		leave_mm(smp_processor_id());
958 
959 	/*
960 	 * If this cpu still has a stale cr3 reference, then make sure
961 	 * it has been flushed.
962 	 */
963 	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
964 		xen_mc_flush();
965 }
966 
967 #ifdef CONFIG_SMP
968 /*
969  * Another cpu may still have its %cr3 pointing at the pagetable, so
970  * we need to repoint it somewhere else before we can unpin it.
971  */
972 static void xen_drop_mm_ref(struct mm_struct *mm)
973 {
974 	cpumask_var_t mask;
975 	unsigned cpu;
976 
977 	drop_mm_ref_this_cpu(mm);
978 
979 	/* Get the "official" set of cpus referring to our pagetable. */
980 	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
981 		for_each_online_cpu(cpu) {
982 			if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
983 				continue;
984 			smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
985 		}
986 		return;
987 	}
988 
989 	/*
990 	 * It's possible that a vcpu may have a stale reference to our
991  * cr3, because it's in lazy mode, and it hasn't yet flushed
992  * its set of pending hypercalls.  In this case, we can
993 	 * look at its actual current cr3 value, and force it to flush
994 	 * if needed.
995 	 */
996 	cpumask_clear(mask);
997 	for_each_online_cpu(cpu) {
998 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
999 			cpumask_set_cpu(cpu, mask);
1000 	}
1001 
1002 	smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
1003 	free_cpumask_var(mask);
1004 }
1005 #else
1006 static void xen_drop_mm_ref(struct mm_struct *mm)
1007 {
1008 	drop_mm_ref_this_cpu(mm);
1009 }
1010 #endif
1011 
1012 /*
1013  * While a process runs, Xen pins its pagetables, which means that the
1014  * hypervisor forces them to be read-only, and it controls all updates
1015  * to them.  This means that all pagetable updates have to go via the
1016  * hypervisor, which is moderately expensive.
1017  *
1018  * Since we're pulling the pagetable down, we switch to use init_mm,
1019  * unpin the old process pagetable and mark it all read-write, which
1020  * allows further operations on it to be simple memory accesses.
1021  *
1022  * The only subtle point is that another CPU may still be using the
1023  * pagetable because of lazy tlb flushing.  This means we need to
1024  * switch all CPUs off this pagetable before we can unpin it.
1025  */
1026 static void xen_exit_mmap(struct mm_struct *mm)
1027 {
1028 	get_cpu();		/* make sure we don't move around */
1029 	xen_drop_mm_ref(mm);
1030 	put_cpu();
1031 
1032 	spin_lock(&mm->page_table_lock);
1033 
1034 	/* pgd may not be pinned in the error exit path of execve */
1035 	if (xen_page_pinned(mm->pgd))
1036 		xen_pgd_unpin(mm);
1037 
1038 	spin_unlock(&mm->page_table_lock);
1039 }
1040 
1041 static void xen_post_allocator_init(void);
1042 
1043 static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1044 {
1045 	struct mmuext_op op;
1046 
1047 	op.cmd = cmd;
1048 	op.arg1.mfn = pfn_to_mfn(pfn);
1049 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1050 		BUG();
1051 }
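/*
 * Usage sketch (illustrative): this raw, unbatched helper is used while
 * the initial pagetables are being built, e.g.
 *
 *	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
 *			  PFN_DOWN(__pa_symbol(init_top_pgt)));
 *
 * in xen_setup_kernel_pagetable() below.
 */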
1052 
1053 #ifdef CONFIG_X86_64
1054 static void __init xen_cleanhighmap(unsigned long vaddr,
1055 				    unsigned long vaddr_end)
1056 {
1057 	unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1058 	pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1059 
1060 	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
1061 	 * We include the PMD passed in on _both_ boundaries. */
1062 	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
1063 			pmd++, vaddr += PMD_SIZE) {
1064 		if (pmd_none(*pmd))
1065 			continue;
1066 		if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1067 			set_pmd(pmd, __pmd(0));
1068 	}
1069 	/* In case we did something silly, we should crash in this function
1070 	 * instead of somewhere later and be confusing. */
1071 	xen_mc_flush();
1072 }
1073 
1074 /*
1075  * Make a page range writeable and free it.
1076  */
1077 static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
1078 {
1079 	void *vaddr = __va(paddr);
1080 	void *vaddr_end = vaddr + size;
1081 
1082 	for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
1083 		make_lowmem_page_readwrite(vaddr);
1084 
1085 	memblock_free(paddr, size);
1086 }
1087 
1088 static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
1089 {
1090 	unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
1091 
1092 	if (unpin)
1093 		pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
1094 	ClearPagePinned(virt_to_page(__va(pa)));
1095 	xen_free_ro_pages(pa, PAGE_SIZE);
1096 }
1097 
1098 static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
1099 {
1100 	unsigned long pa;
1101 	pte_t *pte_tbl;
1102 	int i;
1103 
1104 	if (pmd_large(*pmd)) {
1105 		pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
1106 		xen_free_ro_pages(pa, PMD_SIZE);
1107 		return;
1108 	}
1109 
1110 	pte_tbl = pte_offset_kernel(pmd, 0);
1111 	for (i = 0; i < PTRS_PER_PTE; i++) {
1112 		if (pte_none(pte_tbl[i]))
1113 			continue;
1114 		pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
1115 		xen_free_ro_pages(pa, PAGE_SIZE);
1116 	}
1117 	set_pmd(pmd, __pmd(0));
1118 	xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
1119 }
1120 
1121 static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
1122 {
1123 	unsigned long pa;
1124 	pmd_t *pmd_tbl;
1125 	int i;
1126 
1127 	if (pud_large(*pud)) {
1128 		pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
1129 		xen_free_ro_pages(pa, PUD_SIZE);
1130 		return;
1131 	}
1132 
1133 	pmd_tbl = pmd_offset(pud, 0);
1134 	for (i = 0; i < PTRS_PER_PMD; i++) {
1135 		if (pmd_none(pmd_tbl[i]))
1136 			continue;
1137 		xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
1138 	}
1139 	set_pud(pud, __pud(0));
1140 	xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
1141 }
1142 
1143 static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
1144 {
1145 	unsigned long pa;
1146 	pud_t *pud_tbl;
1147 	int i;
1148 
1149 	if (p4d_large(*p4d)) {
1150 		pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
1151 		xen_free_ro_pages(pa, P4D_SIZE);
1152 		return;
1153 	}
1154 
1155 	pud_tbl = pud_offset(p4d, 0);
1156 	for (i = 0; i < PTRS_PER_PUD; i++) {
1157 		if (pud_none(pud_tbl[i]))
1158 			continue;
1159 		xen_cleanmfnmap_pud(pud_tbl + i, unpin);
1160 	}
1161 	set_p4d(p4d, __p4d(0));
1162 	xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
1163 }
1164 
1165 /*
1166  * Since it is well isolated we can (and since it is perhaps large we should)
1167  * also free the page tables mapping the initial P->M table.
1168  */
1169 static void __init xen_cleanmfnmap(unsigned long vaddr)
1170 {
1171 	pgd_t *pgd;
1172 	p4d_t *p4d;
1173 	bool unpin;
1174 
1175 	unpin = (vaddr == 2 * PGDIR_SIZE);
1176 	vaddr &= PMD_MASK;
1177 	pgd = pgd_offset_k(vaddr);
1178 	p4d = p4d_offset(pgd, 0);
1179 	if (!p4d_none(*p4d))
1180 		xen_cleanmfnmap_p4d(p4d, unpin);
1181 }
1182 
1183 static void __init xen_pagetable_p2m_free(void)
1184 {
1185 	unsigned long size;
1186 	unsigned long addr;
1187 
1188 	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1189 
1190 	/* No memory or already called. */
1191 	if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
1192 		return;
1193 
1194 	/* using __ka address and sticking INVALID_P2M_ENTRY! */
1195 	memset((void *)xen_start_info->mfn_list, 0xff, size);
1196 
1197 	addr = xen_start_info->mfn_list;
1198 	/*
1199 	 * We could be in __ka space.
1200  * We round up to the PMD, which means that if anybody at this stage is
1201  * using the __ka address of xen_start_info or
1202  * xen_start_info->shared_info they are going to crash. Fortunately
1203 	 * we have already revectored in xen_setup_kernel_pagetable and in
1204 	 * xen_setup_shared_info.
1205 	 */
1206 	size = roundup(size, PMD_SIZE);
1207 
1208 	if (addr >= __START_KERNEL_map) {
1209 		xen_cleanhighmap(addr, addr + size);
1210 		size = PAGE_ALIGN(xen_start_info->nr_pages *
1211 				  sizeof(unsigned long));
1212 		memblock_free(__pa(addr), size);
1213 	} else {
1214 		xen_cleanmfnmap(addr);
1215 	}
1216 }
1217 
1218 static void __init xen_pagetable_cleanhighmap(void)
1219 {
1220 	unsigned long size;
1221 	unsigned long addr;
1222 
1223 	/* At this stage, cleanup_highmap has already cleaned __ka space
1224 	 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1225 	 * the ramdisk). We continue on, erasing PMD entries that point to page
1226 	 * tables - do note that they are accessible at this stage via __va.
1227 	 * As Xen is aligning the memory end to a 4MB boundary, for good
1228 	 * measure we also round up to PMD_SIZE * 2 - which means that if
1229  * anybody is using the __ka address of the initial boot-stack - and tries
1230  * to use it - they are going to crash. The xen_start_info has been
1231 	 * taken care of already in xen_setup_kernel_pagetable. */
1232 	addr = xen_start_info->pt_base;
1233 	size = xen_start_info->nr_pt_frames * PAGE_SIZE;
1234 
1235 	xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
1236 	xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1237 }
1238 #endif
1239 
1240 static void __init xen_pagetable_p2m_setup(void)
1241 {
1242 	xen_vmalloc_p2m_tree();
1243 
1244 #ifdef CONFIG_X86_64
1245 	xen_pagetable_p2m_free();
1246 
1247 	xen_pagetable_cleanhighmap();
1248 #endif
1249 	/* And revector! Bye bye old array */
1250 	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1251 }
1252 
1253 static void __init xen_pagetable_init(void)
1254 {
1255 	paging_init();
1256 	xen_post_allocator_init();
1257 
1258 	xen_pagetable_p2m_setup();
1259 
1260 	/* Allocate and initialize top and mid mfn levels for p2m structure */
1261 	xen_build_mfn_list_list();
1262 
1263 	/* Remap memory freed due to conflicts with E820 map */
1264 	xen_remap_memory();
1265 
1266 	xen_setup_shared_info();
1267 }
1268 static void xen_write_cr2(unsigned long cr2)
1269 {
1270 	this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1271 }
1272 
1273 static unsigned long xen_read_cr2(void)
1274 {
1275 	return this_cpu_read(xen_vcpu)->arch.cr2;
1276 }
1277 
1278 unsigned long xen_read_cr2_direct(void)
1279 {
1280 	return this_cpu_read(xen_vcpu_info.arch.cr2);
1281 }
1282 
1283 static void xen_flush_tlb(void)
1284 {
1285 	struct mmuext_op *op;
1286 	struct multicall_space mcs;
1287 
1288 	trace_xen_mmu_flush_tlb(0);
1289 
1290 	preempt_disable();
1291 
1292 	mcs = xen_mc_entry(sizeof(*op));
1293 
1294 	op = mcs.args;
1295 	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1296 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1297 
1298 	xen_mc_issue(PARAVIRT_LAZY_MMU);
1299 
1300 	preempt_enable();
1301 }
1302 
1303 static void xen_flush_tlb_single(unsigned long addr)
1304 {
1305 	struct mmuext_op *op;
1306 	struct multicall_space mcs;
1307 
1308 	trace_xen_mmu_flush_tlb_single(addr);
1309 
1310 	preempt_disable();
1311 
1312 	mcs = xen_mc_entry(sizeof(*op));
1313 	op = mcs.args;
1314 	op->cmd = MMUEXT_INVLPG_LOCAL;
1315 	op->arg1.linear_addr = addr & PAGE_MASK;
1316 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1317 
1318 	xen_mc_issue(PARAVIRT_LAZY_MMU);
1319 
1320 	preempt_enable();
1321 }
1322 
1323 static void xen_flush_tlb_others(const struct cpumask *cpus,
1324 				 const struct flush_tlb_info *info)
1325 {
1326 	struct {
1327 		struct mmuext_op op;
1328 		DECLARE_BITMAP(mask, NR_CPUS);
1329 	} *args;
1330 	struct multicall_space mcs;
1331 	const size_t mc_entry_size = sizeof(args->op) +
1332 		sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
1333 
1334 	trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
1335 
1336 	if (cpumask_empty(cpus))
1337 		return;		/* nothing to do */
1338 
1339 	mcs = xen_mc_entry(mc_entry_size);
1340 	args = mcs.args;
1341 	args->op.arg2.vcpumask = to_cpumask(args->mask);
1342 
1343 	/* Remove us, and any offline CPUS. */
1344 	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1345 	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1346 
1347 	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1348 	if (info->end != TLB_FLUSH_ALL &&
1349 	    (info->end - info->start) <= PAGE_SIZE) {
1350 		args->op.cmd = MMUEXT_INVLPG_MULTI;
1351 		args->op.arg1.linear_addr = info->start;
1352 	}
1353 
1354 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1355 
1356 	xen_mc_issue(PARAVIRT_LAZY_MMU);
1357 }
1358 
1359 static unsigned long xen_read_cr3(void)
1360 {
1361 	return this_cpu_read(xen_cr3);
1362 }
1363 
1364 static void set_current_cr3(void *v)
1365 {
1366 	this_cpu_write(xen_current_cr3, (unsigned long)v);
1367 }
1368 
1369 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1370 {
1371 	struct mmuext_op op;
1372 	unsigned long mfn;
1373 
1374 	trace_xen_mmu_write_cr3(kernel, cr3);
1375 
1376 	if (cr3)
1377 		mfn = pfn_to_mfn(PFN_DOWN(cr3));
1378 	else
1379 		mfn = 0;
1380 
1381 	WARN_ON(mfn == 0 && kernel);
1382 
1383 	op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1384 	op.arg1.mfn = mfn;
1385 
1386 	xen_extend_mmuext_op(&op);
1387 
1388 	if (kernel) {
1389 		this_cpu_write(xen_cr3, cr3);
1390 
1391 		/* Update xen_current_cr3 once the batch has actually
1392 		   been submitted. */
1393 		xen_mc_callback(set_current_cr3, (void *)cr3);
1394 	}
1395 }
1396 static void xen_write_cr3(unsigned long cr3)
1397 {
1398 	BUG_ON(preemptible());
1399 
1400 	xen_mc_batch();  /* disables interrupts */
1401 
1402 	/* Update while interrupts are disabled, so it's atomic with
1403 	   respect to ipis */
1404 	this_cpu_write(xen_cr3, cr3);
1405 
1406 	__xen_write_cr3(true, cr3);
1407 
1408 #ifdef CONFIG_X86_64
1409 	{
1410 		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1411 		if (user_pgd)
1412 			__xen_write_cr3(false, __pa(user_pgd));
1413 		else
1414 			__xen_write_cr3(false, 0);
1415 	}
1416 #endif
1417 
1418 	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
1419 }
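/*
 * Note (illustrative): on 64-bit both the kernel and (if present) the
 * user baseptr updates above are queued via xen_extend_mmuext_op()
 * inside a single xen_mc_batch()/xen_mc_issue() pair, so they are
 * submitted together rather than as separate hypercalls.
 */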
1420 
1421 #ifdef CONFIG_X86_64
1422 /*
1423  * At the start of the day - when Xen launches a guest, it has already
1424  * built pagetables for the guest. We diligently look over them
1425  * in xen_setup_kernel_pagetable and graft them as appropriate into the
1426  * init_top_pgt and its friends. Then when we are happy we load
1427  * the new init_top_pgt - and continue on.
1428  *
1429  * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1430  * up the rest of the pagetables. When it has completed it loads the cr3.
1431  * N.B. that baremetal would start at 'start_kernel' (and the early
1432  * #PF handler would create bootstrap pagetables) - so we are running
1433  * with the same assumptions as what to do when write_cr3 is executed
1434  * at this point.
1435  *
1436  * Since there are no user-page tables at all, we have two variants
1437  * of xen_write_cr3 - the early bootup (this one), and the late one
1438  * (xen_write_cr3). The reason we have to do that is that in 64-bit
1439  * the Linux kernel and user-space are both in ring 3 while the
1440  * hypervisor is in ring 0.
1441  */
1442 static void __init xen_write_cr3_init(unsigned long cr3)
1443 {
1444 	BUG_ON(preemptible());
1445 
1446 	xen_mc_batch();  /* disables interrupts */
1447 
1448 	/* Update while interrupts are disabled, so it's atomic with
1449 	   respect to ipis */
1450 	this_cpu_write(xen_cr3, cr3);
1451 
1452 	__xen_write_cr3(true, cr3);
1453 
1454 	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
1455 }
1456 #endif
1457 
1458 static int xen_pgd_alloc(struct mm_struct *mm)
1459 {
1460 	pgd_t *pgd = mm->pgd;
1461 	int ret = 0;
1462 
1463 	BUG_ON(PagePinned(virt_to_page(pgd)));
1464 
1465 #ifdef CONFIG_X86_64
1466 	{
1467 		struct page *page = virt_to_page(pgd);
1468 		pgd_t *user_pgd;
1469 
1470 		BUG_ON(page->private != 0);
1471 
1472 		ret = -ENOMEM;
1473 
1474 		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1475 		page->private = (unsigned long)user_pgd;
1476 
1477 		if (user_pgd != NULL) {
1478 #ifdef CONFIG_X86_VSYSCALL_EMULATION
1479 			user_pgd[pgd_index(VSYSCALL_ADDR)] =
1480 				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1481 #endif
1482 			ret = 0;
1483 		}
1484 
1485 		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1486 	}
1487 #endif
1488 	return ret;
1489 }
1490 
1491 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1492 {
1493 #ifdef CONFIG_X86_64
1494 	pgd_t *user_pgd = xen_get_user_pgd(pgd);
1495 
1496 	if (user_pgd)
1497 		free_page((unsigned long)user_pgd);
1498 #endif
1499 }
1500 
1501 /*
1502  * Init-time set_pte while constructing initial pagetables, which
1503  * doesn't allow RO page table pages to be remapped RW.
1504  *
1505  * If there is no MFN for this PFN then this page is initially
1506  * ballooned out so clear the PTE (as in decrease_reservation() in
1507  * drivers/xen/balloon.c).
1508  *
1509  * Many of these PTE updates are done on unpinned and writable pages
1510  * and doing a hypercall for these is unnecessary and expensive.  At
1511  * this point it is not possible to tell if a page is pinned or not,
1512  * so always write the PTE directly and rely on Xen trapping and
1513  * emulating any updates as necessary.
1514  */
1515 __visible pte_t xen_make_pte_init(pteval_t pte)
1516 {
1517 #ifdef CONFIG_X86_64
1518 	unsigned long pfn;
1519 
1520 	/*
1521 	 * Pages belonging to the initial p2m list mapped outside the default
1522 	 * address range must be mapped read-only. This region contains the
1523 	 * page tables for mapping the p2m list, too, and page tables MUST be
1524 	 * mapped read-only.
1525 	 */
1526 	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1527 	if (xen_start_info->mfn_list < __START_KERNEL_map &&
1528 	    pfn >= xen_start_info->first_p2m_pfn &&
1529 	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1530 		pte &= ~_PAGE_RW;
1531 #endif
1532 	pte = pte_pfn_to_mfn(pte);
1533 	return native_make_pte(pte);
1534 }
1535 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
1536 
1537 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1538 {
1539 #ifdef CONFIG_X86_32
1540 	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
1541 	if (pte_mfn(pte) != INVALID_P2M_ENTRY
1542 	    && pte_val_ma(*ptep) & _PAGE_PRESENT)
1543 		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1544 			       pte_val_ma(pte));
1545 #endif
1546 	native_set_pte(ptep, pte);
1547 }
1548 
1549 /* Early in boot, while setting up the initial pagetable, assume
1550    everything is pinned. */
1551 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1552 {
1553 #ifdef CONFIG_FLATMEM
1554 	BUG_ON(mem_map);	/* should only be used early */
1555 #endif
1556 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1557 	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1558 }
1559 
1560 /* Used for pmd and pud */
1561 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1562 {
1563 #ifdef CONFIG_FLATMEM
1564 	BUG_ON(mem_map);	/* should only be used early */
1565 #endif
1566 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1567 }
1568 
1569 /* Early release_pte assumes that all pts are pinned, since there's
1570    only init_mm and anything attached to that is pinned. */
1571 static void __init xen_release_pte_init(unsigned long pfn)
1572 {
1573 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1574 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1575 }
1576 
1577 static void __init xen_release_pmd_init(unsigned long pfn)
1578 {
1579 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1580 }
1581 
1582 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1583 {
1584 	struct multicall_space mcs;
1585 	struct mmuext_op *op;
1586 
1587 	mcs = __xen_mc_entry(sizeof(*op));
1588 	op = mcs.args;
1589 	op->cmd = cmd;
1590 	op->arg1.mfn = pfn_to_mfn(pfn);
1591 
1592 	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1593 }
1594 
1595 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1596 {
1597 	struct multicall_space mcs;
1598 	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1599 
1600 	mcs = __xen_mc_entry(0);
1601 	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1602 				pfn_pte(pfn, prot), 0);
1603 }
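/*
 * Usage sketch (illustrative): xen_alloc_ptpage() below combines these
 * helpers in one batch when the owning pagetable is pinned:
 *
 *	xen_mc_batch();
 *	__set_pfn_prot(pfn, PAGE_KERNEL_RO);
 *	if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
 *		__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 *
 * i.e. the new page is made RO (and possibly pinned) before it can be
 * wired into a pinned pagetable.
 */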
1604 
1605 /* This needs to make sure the new pte page is pinned iff it's being
1606    attached to a pinned pagetable. */
1607 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1608 				    unsigned level)
1609 {
1610 	bool pinned = PagePinned(virt_to_page(mm->pgd));
1611 
1612 	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1613 
1614 	if (pinned) {
1615 		struct page *page = pfn_to_page(pfn);
1616 
1617 		SetPagePinned(page);
1618 
1619 		if (!PageHighMem(page)) {
1620 			xen_mc_batch();
1621 
1622 			__set_pfn_prot(pfn, PAGE_KERNEL_RO);
1623 
1624 			if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1625 				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1626 
1627 			xen_mc_issue(PARAVIRT_LAZY_MMU);
1628 		} else {
1629 			/* make sure there are no stray mappings of
1630 			   this page */
1631 			kmap_flush_unused();
1632 		}
1633 	}
1634 }
1635 
1636 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1637 {
1638 	xen_alloc_ptpage(mm, pfn, PT_PTE);
1639 }
1640 
1641 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1642 {
1643 	xen_alloc_ptpage(mm, pfn, PT_PMD);
1644 }
1645 
1646 /* This should never happen until we're OK to use struct page */
1647 static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1648 {
1649 	struct page *page = pfn_to_page(pfn);
1650 	bool pinned = PagePinned(page);
1651 
1652 	trace_xen_mmu_release_ptpage(pfn, level, pinned);
1653 
1654 	if (pinned) {
1655 		if (!PageHighMem(page)) {
1656 			xen_mc_batch();
1657 
1658 			if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1659 				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1660 
1661 			__set_pfn_prot(pfn, PAGE_KERNEL);
1662 
1663 			xen_mc_issue(PARAVIRT_LAZY_MMU);
1664 		}
1665 		ClearPagePinned(page);
1666 	}
1667 }
1668 
1669 static void xen_release_pte(unsigned long pfn)
1670 {
1671 	xen_release_ptpage(pfn, PT_PTE);
1672 }
1673 
1674 static void xen_release_pmd(unsigned long pfn)
1675 {
1676 	xen_release_ptpage(pfn, PT_PMD);
1677 }
1678 
1679 #ifdef CONFIG_X86_64
1680 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1681 {
1682 	xen_alloc_ptpage(mm, pfn, PT_PUD);
1683 }
1684 
1685 static void xen_release_pud(unsigned long pfn)
1686 {
1687 	xen_release_ptpage(pfn, PT_PUD);
1688 }
1689 #endif
1690 
1691 void __init xen_reserve_top(void)
1692 {
1693 #ifdef CONFIG_X86_32
1694 	unsigned long top = HYPERVISOR_VIRT_START;
1695 	struct xen_platform_parameters pp;
1696 
1697 	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1698 		top = pp.virt_start;
1699 
1700 	reserve_top_address(-top);
1701 #endif	/* CONFIG_X86_32 */
1702 }
1703 
1704 /*
1705  * Like __va(), but returns address in the kernel mapping (which is
1706  * all we have until the physical memory mapping has been set up).
1707  */
1708 static void * __init __ka(phys_addr_t paddr)
1709 {
1710 #ifdef CONFIG_X86_64
1711 	return (void *)(paddr + __START_KERNEL_map);
1712 #else
1713 	return __va(paddr);
1714 #endif
1715 }
1716 
1717 /* Convert a machine address to physical address */
1718 static unsigned long __init m2p(phys_addr_t maddr)
1719 {
1720 	phys_addr_t paddr;
1721 
1722 	maddr &= XEN_PTE_MFN_MASK;
1723 	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1724 
1725 	return paddr;
1726 }
1727 
1728 /* Convert a machine address to kernel virtual */
1729 static void * __init m2v(phys_addr_t maddr)
1730 {
1731 	return __ka(m2p(maddr));
1732 }
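/*
 * Usage sketch (illustrative): during early setup these helpers chase
 * Xen-provided (mfn-based) entries back to kernel virtual addresses,
 * e.g. in xen_setup_kernel_pagetable():
 *
 *	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
 *	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
 */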
1733 
1734 /* Set the page permissions on identity-mapped pages */
1735 static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1736 				       unsigned long flags)
1737 {
1738 	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1739 	pte_t pte = pfn_pte(pfn, prot);
1740 
1741 	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1742 		BUG();
1743 }
1744 static void __init set_page_prot(void *addr, pgprot_t prot)
1745 {
1746 	return set_page_prot_flags(addr, prot, UVMF_NONE);
1747 }
1748 #ifdef CONFIG_X86_32
1749 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1750 {
1751 	unsigned pmdidx, pteidx;
1752 	unsigned ident_pte;
1753 	unsigned long pfn;
1754 
1755 	level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1756 				      PAGE_SIZE);
1757 
1758 	ident_pte = 0;
1759 	pfn = 0;
1760 	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1761 		pte_t *pte_page;
1762 
1763 		/* Reuse or allocate a page of ptes */
1764 		if (pmd_present(pmd[pmdidx]))
1765 			pte_page = m2v(pmd[pmdidx].pmd);
1766 		else {
1767 			/* Check for free pte pages */
1768 			if (ident_pte == LEVEL1_IDENT_ENTRIES)
1769 				break;
1770 
1771 			pte_page = &level1_ident_pgt[ident_pte];
1772 			ident_pte += PTRS_PER_PTE;
1773 
1774 			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1775 		}
1776 
1777 		/* Install mappings */
1778 		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1779 			pte_t pte;
1780 
1781 			if (pfn > max_pfn_mapped)
1782 				max_pfn_mapped = pfn;
1783 
1784 			if (!pte_none(pte_page[pteidx]))
1785 				continue;
1786 
1787 			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1788 			pte_page[pteidx] = pte;
1789 		}
1790 	}
1791 
1792 	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1793 		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1794 
1795 	set_page_prot(pmd, PAGE_KERNEL_RO);
1796 }
1797 #endif
1798 void __init xen_setup_machphys_mapping(void)
1799 {
1800 	struct xen_machphys_mapping mapping;
1801 
1802 	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1803 		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1804 		machine_to_phys_nr = mapping.max_mfn + 1;
1805 	} else {
1806 		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1807 	}
1808 #ifdef CONFIG_X86_32
1809 	WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1810 		< machine_to_phys_mapping);
1811 #endif
1812 }
1813 
1814 #ifdef CONFIG_X86_64
1815 static void __init convert_pfn_mfn(void *v)
1816 {
1817 	pte_t *pte = v;
1818 	int i;
1819 
1820 	/* All levels are converted the same way, so just treat them
1821 	   as ptes. */
1822 	for (i = 0; i < PTRS_PER_PTE; i++)
1823 		pte[i] = xen_make_pte(pte[i].pte);
1824 }
1825 static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1826 				 unsigned long addr)
1827 {
1828 	if (*pt_base == PFN_DOWN(__pa(addr))) {
1829 		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1830 		clear_page((void *)addr);
1831 		(*pt_base)++;
1832 	}
1833 	if (*pt_end == PFN_DOWN(__pa(addr))) {
1834 		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1835 		clear_page((void *)addr);
1836 		(*pt_end)--;
1837 	}
1838 }
1839 /*
1840  * Set up the initial kernel pagetable.
1841  *
1842  * We can construct this by grafting the Xen provided pagetable into
1843  * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
1844  * level2_ident_pgt, and level2_kernel_pgt.  This means that only the
1845  * kernel has a physical mapping to start with - but that's enough to
1846  * get __va working.  We need to fill in the rest of the physical
1847  * mapping once some sort of allocator has been set up.
1848  */
1849 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1850 {
1851 	pud_t *l3;
1852 	pmd_t *l2;
1853 	unsigned long addr[3];
1854 	unsigned long pt_base, pt_end;
1855 	unsigned i;
1856 
1857 	/* max_pfn_mapped is the last pfn mapped in the initial memory
1858 	 * mappings. Considering that on Xen after the kernel mappings we
1859 	 * have the mappings of some pages that don't exist in pfn space, we
1860 	 * set max_pfn_mapped to the last real pfn mapped. */
1861 	if (xen_start_info->mfn_list < __START_KERNEL_map)
1862 		max_pfn_mapped = xen_start_info->first_p2m_pfn;
1863 	else
1864 		max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1865 
1866 	pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1867 	pt_end = pt_base + xen_start_info->nr_pt_frames;
1868 
1869 	/* Zap identity mapping */
1870 	init_top_pgt[0] = __pgd(0);
1871 
1872 	/* Pre-constructed entries are in pfn, so convert to mfn */
1873 	/* L4[272] -> level3_ident_pgt  */
1874 	/* L4[511] -> level3_kernel_pgt */
1875 	convert_pfn_mfn(init_top_pgt);
1876 
1877 	/* L3_i[0] -> level2_ident_pgt */
1878 	convert_pfn_mfn(level3_ident_pgt);
1879 	/* L3_k[510] -> level2_kernel_pgt */
1880 	/* L3_k[511] -> level2_fixmap_pgt */
1881 	convert_pfn_mfn(level3_kernel_pgt);
1882 
1883 	/* L3_k[511][506] -> level1_fixmap_pgt */
1884 	convert_pfn_mfn(level2_fixmap_pgt);
1885 
1886 	/* We get [511][511] and have Xen's version of level2_kernel_pgt */
1887 	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1888 	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1889 
1890 	addr[0] = (unsigned long)pgd;
1891 	addr[1] = (unsigned long)l3;
1892 	addr[2] = (unsigned long)l2;
1893 	/* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
1894 	 * Both L4[272][0] and L4[511][510] have entries that point to the same
1895 	 * L2 (PMD) tables. Meaning that if you modify it in __va space
1896 	 * it will also be modified in the __ka space! (But if you just
1897 	 * modify the PMD table to point to other PTE's or none, then you
1898 	 * are OK - which is what cleanup_highmap does) */
1899 	copy_page(level2_ident_pgt, l2);
1900 	/* Graft it onto L4[511][510] */
1901 	copy_page(level2_kernel_pgt, l2);
1902 
1903 	/*
1904 	 * Zap execute permission from the ident map. Due to the sharing of
1905 	 * L1 entries we need to do this in the L2.
1906 	 */
1907 	if (__supported_pte_mask & _PAGE_NX) {
1908 		for (i = 0; i < PTRS_PER_PMD; ++i) {
1909 			if (pmd_none(level2_ident_pgt[i]))
1910 				continue;
1911 			level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
1912 		}
1913 	}
1914 
1915 	/* Copy the initial P->M table mappings if necessary. */
1916 	i = pgd_index(xen_start_info->mfn_list);
1917 	if (i && i < pgd_index(__START_KERNEL_map))
1918 		init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
1919 
1920 	/* Make pagetable pieces RO */
1921 	set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
1922 	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1923 	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1924 	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1925 	set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1926 	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1927 	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1928 	set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
1929 
1930 	/* Pin down new L4 */
1931 	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1932 			  PFN_DOWN(__pa_symbol(init_top_pgt)));
1933 
1934 	/* Unpin the Xen-provided pagetable */
1935 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1936 
1937 	/*
1938 	 * At this stage there can be no user pgd, and no page structure to
1939 	 * attach it to, so make sure we just set kernel pgd.
1940 	 */
1941 	xen_mc_batch();
1942 	__xen_write_cr3(true, __pa(init_top_pgt));
1943 	xen_mc_issue(PARAVIRT_LAZY_CPU);
1944 
1945 	/* We can't easily rip out the L3 and L2, as the Xen pagetables are
1946 	 * laid out this way for the initial domain: [L4], [L1], [L2], [L3],
1947 	 * [L1], [L1] ...  For guests started by the toolstack, the order is
1948 	 * [L4], [L3], [L2], [L1], [L1] ... So for dom0 we can only rip out
1949 	 * the [L4] (pgd), but for other guests we shave off three pages.
1950 	 */
1951 	for (i = 0; i < ARRAY_SIZE(addr); i++)
1952 		check_pt_base(&pt_base, &pt_end, addr[i]);
1953 
1954 	/* The Xen pagetable area we keep using, now up to three pages smaller */
1955 	xen_pt_base = PFN_PHYS(pt_base);
1956 	xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
1957 	memblock_reserve(xen_pt_base, xen_pt_size);
1958 
1959 	/* Revector the xen_start_info */
1960 	xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1961 }
1962 
1963 /*
1964  * Read a value from a physical address.
1965  */
1966 static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
1967 {
1968 	unsigned long *vaddr;
1969 	unsigned long val;
1970 
1971 	vaddr = early_memremap_ro(addr, sizeof(val));
1972 	val = *vaddr;
1973 	early_memunmap(vaddr, sizeof(val));
1974 	return val;
1975 }
1976 
1977 /*
1978  * Translate a virtual address to a physical one without relying on mapped
1979  * page tables. Don't rely on big pages being aligned in (guest) physical
1980  * space!
1981  */
1982 static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
1983 {
1984 	phys_addr_t pa;
1985 	pgd_t pgd;
1986 	pud_t pud;
1987 	pmd_t pmd;
1988 	pte_t pte;
1989 
1990 	pa = read_cr3_pa();
1991 	pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
1992 						       sizeof(pgd)));
1993 	if (!pgd_present(pgd))
1994 		return 0;
1995 
1996 	pa = pgd_val(pgd) & PTE_PFN_MASK;
1997 	pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
1998 						       sizeof(pud)));
1999 	if (!pud_present(pud))
2000 		return 0;
2001 	pa = pud_val(pud) & PTE_PFN_MASK;
2002 	if (pud_large(pud))
2003 		return pa + (vaddr & ~PUD_MASK);
2004 
2005 	pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
2006 						       sizeof(pmd)));
2007 	if (!pmd_present(pmd))
2008 		return 0;
2009 	pa = pmd_val(pmd) & PTE_PFN_MASK;
2010 	if (pmd_large(pmd))
2011 		return pa + (vaddr & ~PMD_MASK);
2012 
2013 	pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
2014 						       sizeof(pte)));
2015 	if (!pte_present(pte))
2016 		return 0;
2017 	pa = pte_pfn(pte) << PAGE_SHIFT;
2018 
2019 	return pa | (vaddr & ~PAGE_MASK);
2020 }
2021 
2022 /*
2023  * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
2024  * this area.
2025  */
2026 void __init xen_relocate_p2m(void)
2027 {
2028 	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
2029 	unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
2030 	int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
2031 	pte_t *pt;
2032 	pmd_t *pmd;
2033 	pud_t *pud;
2034 	pgd_t *pgd;
2035 	unsigned long *new_p2m;
2036 	int save_pud;
2037 
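	/*
	 * size is the byte length of the p2m list. The relocated area needs
	 * room for the p2m pages themselves (n_pte) plus the L1, L2 and L3
	 * page table pages required to map them (n_pt, n_pmd, n_pud).
	 */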
2038 	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
2039 	n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
2040 	n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
2041 	n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
2042 	n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
2043 	n_frames = n_pte + n_pt + n_pmd + n_pud;
2044 
2045 	new_area = xen_find_free_area(PFN_PHYS(n_frames));
2046 	if (!new_area) {
2047 		xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
2048 		BUG();
2049 	}
2050 
2051 	/*
2052 	 * Set up the page tables for addressing the new p2m list.
2053 	 * We have asked the hypervisor to map the p2m list at the user address
2054 	 * PGDIR_SIZE. It may have done so, or it may have used a kernel space
2055 	 * address depending on the Xen version.
2056 	 * To avoid any possible virtual address collision, just use
2057 	 * 2 * PGDIR_SIZE for the new area.
2058 	 */
2059 	pud_phys = new_area;
2060 	pmd_phys = pud_phys + PFN_PHYS(n_pud);
2061 	pt_phys = pmd_phys + PFN_PHYS(n_pmd);
2062 	p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
2063 
2064 	pgd = __va(read_cr3_pa());
2065 	new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
2066 	save_pud = n_pud;
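	/*
	 * Build the new page tables: fill each table page, make it
	 * read-only and pin it, and only then enter it into its parent
	 * table.
	 */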
2067 	for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
2068 		pud = early_memremap(pud_phys, PAGE_SIZE);
2069 		clear_page(pud);
2070 		for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
2071 				idx_pmd++) {
2072 			pmd = early_memremap(pmd_phys, PAGE_SIZE);
2073 			clear_page(pmd);
2074 			for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
2075 					idx_pt++) {
2076 				pt = early_memremap(pt_phys, PAGE_SIZE);
2077 				clear_page(pt);
2078 				for (idx_pte = 0;
2079 						idx_pte < min(n_pte, PTRS_PER_PTE);
2080 						idx_pte++) {
2081 					set_pte(pt + idx_pte,
2082 							pfn_pte(p2m_pfn, PAGE_KERNEL));
2083 					p2m_pfn++;
2084 				}
2085 				n_pte -= PTRS_PER_PTE;
2086 				early_memunmap(pt, PAGE_SIZE);
2087 				make_lowmem_page_readonly(__va(pt_phys));
2088 				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
2089 						PFN_DOWN(pt_phys));
2090 				set_pmd(pmd + idx_pt,
2091 						__pmd(_PAGE_TABLE | pt_phys));
2092 				pt_phys += PAGE_SIZE;
2093 			}
2094 			n_pt -= PTRS_PER_PMD;
2095 			early_memunmap(pmd, PAGE_SIZE);
2096 			make_lowmem_page_readonly(__va(pmd_phys));
2097 			pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
2098 					PFN_DOWN(pmd_phys));
2099 			set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
2100 			pmd_phys += PAGE_SIZE;
2101 		}
2102 		n_pmd -= PTRS_PER_PUD;
2103 		early_memunmap(pud, PAGE_SIZE);
2104 		make_lowmem_page_readonly(__va(pud_phys));
2105 		pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
2106 		set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
2107 		pud_phys += PAGE_SIZE;
2108 	}
2109 
2110 	/* Now copy the old p2m info to the new area. */
2111 	memcpy(new_p2m, xen_p2m_addr, size);
2112 	xen_p2m_addr = new_p2m;
2113 
2114 	/* Release the old p2m list and set new list info. */
2115 	p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
2116 	BUG_ON(!p2m_pfn);
2117 	p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
2118 
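	/*
	 * If the old p2m list was mapped in its own area below the kernel
	 * mapping, release all of its frames and clear the pgd entry that
	 * mapped it; otherwise release only the p2m frames themselves.
	 */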
2119 	if (xen_start_info->mfn_list < __START_KERNEL_map) {
2120 		pfn = xen_start_info->first_p2m_pfn;
2121 		pfn_end = xen_start_info->first_p2m_pfn +
2122 			  xen_start_info->nr_p2m_frames;
2123 		set_pgd(pgd + 1, __pgd(0));
2124 	} else {
2125 		pfn = p2m_pfn;
2126 		pfn_end = p2m_pfn_end;
2127 	}
2128 
2129 	memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
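	/* Make the freed frames writable again, skipping the old p2m data frames. */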
2130 	while (pfn < pfn_end) {
2131 		if (pfn == p2m_pfn) {
2132 			pfn = p2m_pfn_end;
2133 			continue;
2134 		}
2135 		make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
2136 		pfn++;
2137 	}
2138 
2139 	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
2140 	xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
2141 	xen_start_info->nr_p2m_frames = n_frames;
2142 }
2143 
2144 #else	/* !CONFIG_X86_64 */
2145 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2146 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2147 
2148 static void __init xen_write_cr3_init(unsigned long cr3)
2149 {
2150 	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2151 
2152 	BUG_ON(read_cr3_pa() != __pa(initial_page_table));
2153 	BUG_ON(cr3 != __pa(swapper_pg_dir));
2154 
2155 	/*
2156 	 * We are switching to swapper_pg_dir for the first time (from
2157 	 * initial_page_table) and therefore need to mark that page
2158 	 * read-only and then pin it.
2159 	 *
2160 	 * Xen disallows sharing of kernel PMDs for PAE
2161 	 * guests. Therefore we must copy the kernel PMD from
2162 	 * initial_page_table into a new kernel PMD to be used in
2163 	 * swapper_pg_dir.
2164 	 */
2165 	swapper_kernel_pmd =
2166 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2167 	copy_page(swapper_kernel_pmd, initial_kernel_pmd);
2168 	swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2169 		__pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2170 	set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2171 
2172 	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2173 	xen_write_cr3(cr3);
2174 	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2175 
2176 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2177 			  PFN_DOWN(__pa(initial_page_table)));
2178 	set_page_prot(initial_page_table, PAGE_KERNEL);
2179 	set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2180 
2181 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
2182 }
2183 
2184 /*
2185  * For 32 bit domains xen_start_info->pt_base is the pgd address, which might
2186  * not be the first page table in the page table pool.
2187  * Iterate through the initial page tables to find the real page table base.
2188  */
2189 static phys_addr_t __init xen_find_pt_base(pmd_t *pmd)
2190 {
2191 	phys_addr_t pt_base, paddr;
2192 	unsigned pmdidx;
2193 
2194 	pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
2195 
2196 	for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
2197 		if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
2198 			paddr = m2p(pmd[pmdidx].pmd);
2199 			pt_base = min(pt_base, paddr);
2200 		}
2201 
2202 	return pt_base;
2203 }
2204 
2205 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
2206 {
2207 	pmd_t *kernel_pmd;
2208 
2209 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2210 
2211 	xen_pt_base = xen_find_pt_base(kernel_pmd);
2212 	xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
2213 
2214 	initial_kernel_pmd =
2215 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2216 
2217 	max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
2218 
2219 	copy_page(initial_kernel_pmd, kernel_pmd);
2220 
2221 	xen_map_identity_early(initial_kernel_pmd, max_pfn);
2222 
2223 	copy_page(initial_page_table, pgd);
2224 	initial_page_table[KERNEL_PGD_BOUNDARY] =
2225 		__pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
2226 
2227 	set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2228 	set_page_prot(initial_page_table, PAGE_KERNEL_RO);
2229 	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2230 
2231 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2232 
2233 	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2234 			  PFN_DOWN(__pa(initial_page_table)));
2235 	xen_write_cr3(__pa(initial_page_table));
2236 
2237 	memblock_reserve(xen_pt_base, xen_pt_size);
2238 }
2239 #endif	/* CONFIG_X86_64 */
2240 
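/*
 * Reserve the special pages provided by the hypervisor - the start_info
 * page, the xenstore page and, for unprivileged domains, the console
 * page - so they are not handed out as normal memory.
 */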
2241 void __init xen_reserve_special_pages(void)
2242 {
2243 	phys_addr_t paddr;
2244 
2245 	memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
2246 	if (xen_start_info->store_mfn) {
2247 		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
2248 		memblock_reserve(paddr, PAGE_SIZE);
2249 	}
2250 	if (!xen_initial_domain()) {
2251 		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
2252 		memblock_reserve(paddr, PAGE_SIZE);
2253 	}
2254 }
2255 
2256 void __init xen_pt_check_e820(void)
2257 {
2258 	if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
2259 		xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
2260 		BUG();
2261 	}
2262 }
2263 
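/*
 * Page backing the fixmap slots (local APIC, IO APIC) which must not map
 * real hardware under Xen; it is filled with 0xff in xen_init_mmu_ops().
 */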
2264 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2265 
2266 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2267 {
2268 	pte_t pte;
2269 
2270 	phys >>= PAGE_SHIFT;
2271 
2272 	switch (idx) {
2273 	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2274 #ifdef CONFIG_X86_32
2275 	case FIX_WP_TEST:
2276 # ifdef CONFIG_HIGHMEM
2277 	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2278 # endif
2279 #elif defined(CONFIG_X86_VSYSCALL_EMULATION)
2280 	case VSYSCALL_PAGE:
2281 #endif
2282 	case FIX_TEXT_POKE0:
2283 	case FIX_TEXT_POKE1:
2284 		/* All local page mappings */
2285 		pte = pfn_pte(phys, prot);
2286 		break;
2287 
2288 #ifdef CONFIG_X86_LOCAL_APIC
2289 	case FIX_APIC_BASE:	/* maps dummy local APIC */
2290 		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2291 		break;
2292 #endif
2293 
2294 #ifdef CONFIG_X86_IO_APIC
2295 	case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2296 		/*
2297 		 * We just don't map the IO APIC - all access is via
2298 		 * hypercalls.  Keep the address in the pte for reference.
2299 		 */
2300 		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2301 		break;
2302 #endif
2303 
2304 	case FIX_PARAVIRT_BOOTMAP:
2305 		/* This is an MFN, but it isn't an IO mapping from the
2306 		   IO domain */
2307 		pte = mfn_pte(phys, prot);
2308 		break;
2309 
2310 	default:
2311 		/* By default, set_fixmap is used for hardware mappings */
2312 		pte = mfn_pte(phys, prot);
2313 		break;
2314 	}
2315 
2316 	__native_set_fixmap(idx, pte);
2317 
2318 #ifdef CONFIG_X86_VSYSCALL_EMULATION
2319 	/* Replicate changes to map the vsyscall page into the user
2320 	   pagetable vsyscall mapping. */
2321 	if (idx == VSYSCALL_PAGE) {
2322 		unsigned long vaddr = __fix_to_virt(idx);
2323 		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2324 	}
2325 #endif
2326 }
2327 
2328 static void __init xen_post_allocator_init(void)
2329 {
2330 	pv_mmu_ops.set_pte = xen_set_pte;
2331 	pv_mmu_ops.set_pmd = xen_set_pmd;
2332 	pv_mmu_ops.set_pud = xen_set_pud;
2333 #ifdef CONFIG_X86_64
2334 	pv_mmu_ops.set_p4d = xen_set_p4d;
2335 #endif
2336 
2337 	/* This will work as long as patching hasn't happened yet
2338 	   (which it hasn't) */
2339 	pv_mmu_ops.alloc_pte = xen_alloc_pte;
2340 	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2341 	pv_mmu_ops.release_pte = xen_release_pte;
2342 	pv_mmu_ops.release_pmd = xen_release_pmd;
2343 #ifdef CONFIG_X86_64
2344 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
2345 	pv_mmu_ops.release_pud = xen_release_pud;
2346 #endif
2347 	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
2348 
2349 #ifdef CONFIG_X86_64
2350 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
2351 	SetPagePinned(virt_to_page(level3_user_vsyscall));
2352 #endif
2353 	xen_mark_init_mm_pinned();
2354 }
2355 
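/*
 * Flush any batched multicalls before actually leaving lazy MMU mode so
 * no pending updates are left behind.
 */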
2356 static void xen_leave_lazy_mmu(void)
2357 {
2358 	preempt_disable();
2359 	xen_mc_flush();
2360 	paravirt_leave_lazy_mmu();
2361 	preempt_enable();
2362 }
2363 
2364 static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2365 	.read_cr2 = xen_read_cr2,
2366 	.write_cr2 = xen_write_cr2,
2367 
2368 	.read_cr3 = xen_read_cr3,
2369 	.write_cr3 = xen_write_cr3_init,
2370 
2371 	.flush_tlb_user = xen_flush_tlb,
2372 	.flush_tlb_kernel = xen_flush_tlb,
2373 	.flush_tlb_single = xen_flush_tlb_single,
2374 	.flush_tlb_others = xen_flush_tlb_others,
2375 
2376 	.pgd_alloc = xen_pgd_alloc,
2377 	.pgd_free = xen_pgd_free,
2378 
2379 	.alloc_pte = xen_alloc_pte_init,
2380 	.release_pte = xen_release_pte_init,
2381 	.alloc_pmd = xen_alloc_pmd_init,
2382 	.release_pmd = xen_release_pmd_init,
2383 
2384 	.set_pte = xen_set_pte_init,
2385 	.set_pte_at = xen_set_pte_at,
2386 	.set_pmd = xen_set_pmd_hyper,
2387 
2388 	.ptep_modify_prot_start = __ptep_modify_prot_start,
2389 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
2390 
2391 	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
2392 	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2393 
2394 	.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
2395 	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2396 
2397 #ifdef CONFIG_X86_PAE
2398 	.set_pte_atomic = xen_set_pte_atomic,
2399 	.pte_clear = xen_pte_clear,
2400 	.pmd_clear = xen_pmd_clear,
2401 #endif	/* CONFIG_X86_PAE */
2402 	.set_pud = xen_set_pud_hyper,
2403 
2404 	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2405 	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2406 
2407 #ifdef CONFIG_X86_64
2408 	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
2409 	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
2410 	.set_p4d = xen_set_p4d_hyper,
2411 
2412 	.alloc_pud = xen_alloc_pmd_init,
2413 	.release_pud = xen_release_pmd_init,
2414 #endif	/* CONFIG_X86_64 */
2415 
2416 	.activate_mm = xen_activate_mm,
2417 	.dup_mmap = xen_dup_mmap,
2418 	.exit_mmap = xen_exit_mmap,
2419 
2420 	.lazy_mode = {
2421 		.enter = paravirt_enter_lazy_mmu,
2422 		.leave = xen_leave_lazy_mmu,
2423 		.flush = paravirt_flush_lazy_mmu,
2424 	},
2425 
2426 	.set_fixmap = xen_set_fixmap,
2427 };
2428 
2429 void __init xen_init_mmu_ops(void)
2430 {
2431 	x86_init.paging.pagetable_init = xen_pagetable_init;
2432 
2433 	pv_mmu_ops = xen_mmu_ops;
2434 
2435 	memset(dummy_mapping, 0xff, PAGE_SIZE);
2436 }
2437 
2438 /* Protected by xen_reservation_lock. */
2439 #define MAX_CONTIG_ORDER 9 /* 2MB */
2440 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2441 
2442 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
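/*
 * Replace the PTEs of a 2^order page range with invalid entries and mark
 * the pfns as having no machine frame. The previous mfns can be recorded
 * in in_frames and the pfns in out_frames for a subsequent exchange.
 */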
2443 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2444 				unsigned long *in_frames,
2445 				unsigned long *out_frames)
2446 {
2447 	int i;
2448 	struct multicall_space mcs;
2449 
2450 	xen_mc_batch();
2451 	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2452 		mcs = __xen_mc_entry(0);
2453 
2454 		if (in_frames)
2455 			in_frames[i] = virt_to_mfn(vaddr);
2456 
2457 		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2458 		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2459 
2460 		if (out_frames)
2461 			out_frames[i] = virt_to_pfn(vaddr);
2462 	}
2463 	xen_mc_issue(0);
2464 }
2465 
2466 /*
2467  * Update the pfn-to-mfn mappings for a virtual address range, either to
2468  * point to an array of mfns, or contiguously from a single starting
2469  * mfn.
2470  */
2471 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2472 				     unsigned long *mfns,
2473 				     unsigned long first_mfn)
2474 {
2475 	unsigned i, limit;
2476 	unsigned long mfn;
2477 
2478 	xen_mc_batch();
2479 
2480 	limit = 1u << order;
2481 	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2482 		struct multicall_space mcs;
2483 		unsigned flags;
2484 
2485 		mcs = __xen_mc_entry(0);
2486 		if (mfns)
2487 			mfn = mfns[i];
2488 		else
2489 			mfn = first_mfn + i;
2490 
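		/*
		 * Only the final update of the batch requests a flush: a
		 * single-page invalidation for order 0, a full TLB flush
		 * otherwise, in both cases on all vCPUs.
		 */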
2491 		if (i < (limit - 1))
2492 			flags = 0;
2493 		else {
2494 			if (order == 0)
2495 				flags = UVMF_INVLPG | UVMF_ALL;
2496 			else
2497 				flags = UVMF_TLB_FLUSH | UVMF_ALL;
2498 		}
2499 
2500 		MULTI_update_va_mapping(mcs.mc, vaddr,
2501 				mfn_pte(mfn, PAGE_KERNEL), flags);
2502 
2503 		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2504 	}
2505 
2506 	xen_mc_issue(0);
2507 }
2508 
2509 /*
2510  * Perform the hypercall to exchange a region of our pfns to point to
2511  * memory with the required contiguous alignment.  Takes the pfns as
2512  * input, and populates mfns as output.
2513  *
2514  * Returns a success code indicating whether the hypervisor was able to
2515  * satisfy the request or not.
2516  */
2517 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2518 			       unsigned long *pfns_in,
2519 			       unsigned long extents_out,
2520 			       unsigned int order_out,
2521 			       unsigned long *mfns_out,
2522 			       unsigned int address_bits)
2523 {
2524 	long rc;
2525 	int success;
2526 
2527 	struct xen_memory_exchange exchange = {
2528 		.in = {
2529 			.nr_extents   = extents_in,
2530 			.extent_order = order_in,
2531 			.extent_start = pfns_in,
2532 			.domid        = DOMID_SELF
2533 		},
2534 		.out = {
2535 			.nr_extents   = extents_out,
2536 			.extent_order = order_out,
2537 			.extent_start = mfns_out,
2538 			.address_bits = address_bits,
2539 			.domid        = DOMID_SELF
2540 		}
2541 	};
2542 
2543 	BUG_ON(extents_in << order_in != extents_out << order_out);
2544 
2545 	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2546 	success = (exchange.nr_exchanged == extents_in);
2547 
2548 	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2549 	BUG_ON(success && (rc != 0));
2550 
2551 	return success;
2552 }
2553 
2554 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
2555 				 unsigned int address_bits,
2556 				 dma_addr_t *dma_handle)
2557 {
2558 	unsigned long *in_frames = discontig_frames, out_frame;
2559 	unsigned long  flags;
2560 	int            success;
2561 	unsigned long vstart = (unsigned long)phys_to_virt(pstart);
2562 
2563 	/*
2564 	 * Currently an auto-translated guest will not perform I/O, nor will
2565 	 * it require PAE page directories below 4GB. Therefore any calls to
2566 	 * this function are redundant and can be ignored.
2567 	 */
2568 
2569 	if (unlikely(order > MAX_CONTIG_ORDER))
2570 		return -ENOMEM;
2571 
2572 	memset((void *) vstart, 0, PAGE_SIZE << order);
2573 
2574 	spin_lock_irqsave(&xen_reservation_lock, flags);
2575 
2576 	/* 1. Zap current PTEs, remembering MFNs. */
2577 	xen_zap_pfn_range(vstart, order, in_frames, NULL);
2578 
2579 	/* 2. Get a new contiguous memory extent. */
2580 	out_frame = virt_to_pfn(vstart);
2581 	success = xen_exchange_memory(1UL << order, 0, in_frames,
2582 				      1, order, &out_frame,
2583 				      address_bits);
2584 
2585 	/* 3. Map the new extent in place of old pages. */
2586 	if (success)
2587 		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2588 	else
2589 		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2590 
2591 	spin_unlock_irqrestore(&xen_reservation_lock, flags);
2592 
2593 	*dma_handle = virt_to_machine(vstart).maddr;
2594 	return success ? 0 : -ENOMEM;
2595 }
2596 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2597 
2598 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
2599 {
2600 	unsigned long *out_frames = discontig_frames, in_frame;
2601 	unsigned long  flags;
2602 	int success;
2603 	unsigned long vstart;
2604 
2605 	if (unlikely(order > MAX_CONTIG_ORDER))
2606 		return;
2607 
2608 	vstart = (unsigned long)phys_to_virt(pstart);
2609 	memset((void *) vstart, 0, PAGE_SIZE << order);
2610 
2611 	spin_lock_irqsave(&xen_reservation_lock, flags);
2612 
2613 	/* 1. Find start MFN of contiguous extent. */
2614 	in_frame = virt_to_mfn(vstart);
2615 
2616 	/* 2. Zap current PTEs. */
2617 	xen_zap_pfn_range(vstart, order, NULL, out_frames);
2618 
2619 	/* 3. Do the exchange for non-contiguous MFNs. */
2620 	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2621 					0, out_frames, 0);
2622 
2623 	/* 4. Map new pages in place of old pages. */
2624 	if (success)
2625 		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2626 	else
2627 		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2628 
2629 	spin_unlock_irqrestore(&xen_reservation_lock, flags);
2630 }
2631 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2632 
2633 #ifdef CONFIG_KEXEC_CORE
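/*
 * For PV domains the vmcoreinfo note is reported as a machine address,
 * for all other domains as a physical address.
 */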
2634 phys_addr_t paddr_vmcoreinfo_note(void)
2635 {
2636 	if (xen_pv_domain())
2637 		return virt_to_machine(vmcoreinfo_note).maddr;
2638 	else
2639 		return __pa(vmcoreinfo_note);
2640 }
2641 #endif /* CONFIG_KEXEC_CORE */
2642