xref: /linux/arch/x86/xen/mmu.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
#include <linux/pfn.h>
#include <linux/export.h>		/* EXPORT_SYMBOL_GPL() */
#include <linux/mm.h>			/* apply_to_page_range(), struct vm_area_struct */

#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>

#include <xen/features.h>		/* xen_feature() */
#include <xen/interface/memory.h>

#include <trace/events/xen.h>		/* trace_xen_mmu_flush_tlb_all() */

#include "multicalls.h"
#include "mmu.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

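/*
 * arbitrary_virt_to_mfn - translate a kernel virtual address to its machine
 * frame number, by way of arbitrary_virt_to_machine() below.
 */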
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

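/*
 * arbitrary_virt_to_machine - translate any kernel virtual address (not just
 * one in the direct map) to its machine address, falling back to a full
 * page-table walk when the fast p2m lookup cannot be used.
 */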
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * If the address is in the linearly-mapped range, we can use the
	 * (quick) virt_to_machine() p2m lookup.
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* Otherwise we have to do a (slower) full page-table walk. */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

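/*
 * xen_flush_tlb_all - ask the hypervisor to flush the TLB on all CPUs, using
 * a single MMUEXT_TLB_FLUSH_ALL multicall.
 */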
static void xen_flush_tlb_all(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_all(0);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_ALL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

#define REMAP_BATCH_SIZE 16

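/*
 * Per-call state shared with remap_area_mfn_pte_fn(): the (array of) frame
 * number(s) to map, the protection bits to use, and a cursor into the
 * mmu_update batch being built up.
 */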
struct remap_data {
	xen_pfn_t *mfn;
	bool contiguous;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

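/*
 * Callback for apply_to_page_range(): instead of writing each PTE directly,
 * queue one mmu_update entry per PTE so that a whole batch can be handed to
 * the hypervisor in a single hypercall.
 */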
static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));

	/*
	 * If we have a contiguous range, just update the mfn itself,
	 * otherwise advance the pointer to the next mfn.
	 */
	if (rmd->contiguous)
		(*rmd->mfn)++;
	else
		rmd->mfn++;

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

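/*
 * Common worker for the two exported remap helpers below: map @nr guest
 * frames starting at *@gfn into @vma at @addr, in batches of
 * REMAP_BATCH_SIZE, recording per-frame errors in @err_ptr when one is
 * supplied.  Returns the number of frames mapped, or a negative errno.
 */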
static int do_remap_gfn(struct vm_area_struct *vma,
			unsigned long addr,
			xen_pfn_t *gfn, int nr,
			int *err_ptr, pgprot_t prot,
			unsigned domid,
			struct page **pages)
{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	unsigned long range;
	int mapped = 0;

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	rmd.mfn = gfn;
	rmd.prot = prot;
	/*
	 * We use err_ptr to indicate whether we are doing a contiguous
	 * mapping or a discontiguous one.
	 */
	rmd.contiguous = !err_ptr;

	while (nr) {
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;
		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		/*
		 * We record the error for each frame that fails, but
		 * continue mapping until the whole set is done.
		 */
		do {
			int i;

			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @gfn, so
			 * only clear it after each chunk of @gfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
		cond_resched();
	}
out:

	xen_flush_tlb_all();

	return err < 0 ? err : mapped;
}

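/*
 * xen_remap_domain_gfn_range - map @nr consecutive frames of domain @domid,
 * starting at @gfn, into @vma.  No per-frame error reporting; a single
 * failure aborts the whole operation.
 */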
int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t gfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);

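/*
 * xen_remap_domain_gfn_array - map an arbitrary array of @nr frames of
 * domain @domid into @vma, recording the per-frame result in @err_ptr so
 * callers can report or retry individual failures.
 */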
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *gfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid, struct page **pages)
{
	/*
	 * We BUG_ON because it's a programmer error to pass a NULL err_ptr,
	 * and it is quite hard later on to work out that "wrong memory was
	 * mapped in" was the actual cause of a failure.
	 */
	BUG_ON(err_ptr == NULL);
	return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
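/*
 * Illustrative sketch (not part of this file): roughly how a privcmd-style
 * caller might drive xen_remap_domain_gfn_array().  The gfn_array[], err[],
 * nr_frames and foreign_domid names below are hypothetical.
 *
 *	xen_pfn_t gfn_array[nr_frames];	// frames to map, filled in by caller
 *	int err[nr_frames];		// per-frame result, 0 on success
 *	int mapped;
 *
 *	mapped = xen_remap_domain_gfn_array(vma, vma->vm_start, gfn_array,
 *					    nr_frames, err, vma->vm_page_prot,
 *					    foreign_domid, NULL);
 *	if (mapped < 0)
 *		return mapped;		// nothing was mapped at all
 *	// otherwise inspect err[] for frames that individually failed
 */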

/*
 * Returns: 0 on success.  For PV domains (not auto-translated) there is
 * nothing to undo here, so this is a no-op; for auto-translated guests this
 * code cannot perform the unmap, so we return -EINVAL.
 */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages)
{
	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);