// SPDX-License-Identifier: GPL-2.0
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/iscsi_ibft.h>
#include <linux/sched.h>
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>
#include <linux/acpi.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* Memory map would allow PCI passthrough. */
bool xen_pv_pci_possible;

/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;

/* Number of initially usable memory pages. */
static unsigned long ini_nr_pages __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

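/*
 * Parse the Xen-supplied command line for "xen_512gb_limit". A bare
 * "xen_512gb_limit" enables the limit, "xen_512gb_limit=<bool>" sets it
 * explicitly, and a malformed value leaves the compiled-in default.
 */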
static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}

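/*
 * Remove a pfn range from the extra memory regions and free the associated
 * memblock reservation. A range at the start or end of a region simply
 * shrinks that region; a range in the middle splits the region in two.
 */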
static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Mid of region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}

	memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_TYPE_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/*
		 * If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

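/*
 * Hand a single machine frame back to the hypervisor. The hypercall returns
 * the number of extents released, so 1 indicates success.
 */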
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
						      unsigned long end_pfn)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, ini_nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping */
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	mfn_save = virt_to_mfn((void *)buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

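/*
 * Sketch of a remap information page as built by
 * xen_do_set_identity_and_remap_chunk() above:
 *
 *	mfns[0]		mfn of the info page itself (equals the list anchor)
 *	next_area_mfn	previous list head (INVALID_P2M_ENTRY ends the list)
 *	target_pfn	first pfn the saved mfns get remapped to
 *	size		number of valid mfns[] entries (at most REMAP_SIZE)
 */
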
/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 * 1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 * 2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = ini_nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= ini_nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > ini_nr_pages)
			size = ini_nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
							   cur_pfn + left);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, their VA mappings need to be
	 * zapped.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			native_make_pte(0), 0);

	return remap_pfn;
}

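/*
 * Callback for xen_foreach_remap_area(): sum up how many pages of the
 * non-RAM range [start_pfn, end_pfn) lie below the initial number of usable
 * pages and thus have RAM behind them which needs to be remapped.
 */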
static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn,
	unsigned long remap_pages)
{
	if (start_pfn >= ini_nr_pages)
		return remap_pages;

	return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
}

static unsigned long __init xen_foreach_remap_area(
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820_entry *entry = xen_e820_table.entries;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;

		if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_TYPE_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, ret_val);
			start = end;
		}
	}

	return ret_val;
}

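/*
 * Worked example for xen_foreach_remap_area() above, with a hypothetical
 * map (RAM 0-0x9f000, reserved 0x9f000-0xe8000, gap, RAM from 0x100000):
 * the reserved entry and the following gap are combined, and func() is
 * called once for pfns [0x9f, 0x100), i.e. rounded outwards to whole pages.
 */
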
/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained
 * in the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows the chunks to be remapped in arbitrary
 * order, while the resulting mapping is independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn((void *)buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);

	xen_do_remap_nonram();
}

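/*
 * Upper bound for the number of pages the domain may use: MAXMEM worth of
 * pages, further capped at 512 GB for unprivileged domains when the
 * xen_512gb_limit is active.
 */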
static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;

	return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_TYPE_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Don't allow adding memory not in E820 map while booting the
		 * system. Once the balloon driver is up it will remove that
		 * restriction again.
		 */
		max_mem_size = end;
#endif
	}

	e820__range_add(start, end - start, type);
}

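/*
 * Treat UNUSABLE entries of the machine memory map as RAM, as Xen won't
 * allow a 1:1 mapping to be created for them (see the comment at the caller
 * in xen_memory_setup()).
 */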
static void __init xen_ignore_unusable(void)
{
	struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		if (entry->type == E820_TYPE_UNUSABLE)
			entry->type = E820_TYPE_RAM;
	}
}

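/*
 * Return true unless the area [start, start + size) is fully covered by a
 * single RAM entry of the E820 map, i.e. true means the area must not be
 * used for ordinary allocations.
 */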
static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned int mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory not yet reserved and compliant with
 * the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list which
 * conflict with the E820 map to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area, which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned int mapcnt;
	phys_addr_t addr, start;
	struct e820_entry *entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
		if (entry->type != E820_TYPE_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
 * Note that the E820 map is modified accordingly, but the P2M map isn't yet.
 * The adaptation of the P2M must be deferred until page allocation is
 * possible.
 */
static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
{
	struct e820_entry *entry;
	unsigned int mapcnt;
	phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);
	phys_addr_t swap_addr, swap_size, entry_end;

	swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr);
	swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size);
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		entry_end = entry->addr + entry->size;
		if (entry->type == E820_TYPE_RAM && entry->size >= swap_size &&
		    entry_end - swap_size >= mem_end) {
			/* Reduce RAM entry by needed space (whole pages). */
			entry->size -= swap_size;

			/* Add new entry at the end of E820 map. */
			entry = xen_e820_table.entries +
				xen_e820_table.nr_entries;
			xen_e820_table.nr_entries++;

			/* Fill new entry (keep size and page offset). */
			entry->type = swap_entry->type;
			entry->addr = entry_end - swap_size +
				      swap_addr - swap_entry->addr;
			entry->size = swap_entry->size;

			/* Convert old entry to RAM, align to pages. */
			swap_entry->type = E820_TYPE_RAM;
			swap_entry->addr = swap_addr;
			swap_entry->size = swap_size;

			/* Remember PFN<->MFN relation for P2M update. */
			xen_add_remap_nonram(swap_addr, entry_end - swap_size,
					     swap_size);

			/* Order E820 table and merge entries. */
			e820__update_table(&xen_e820_table);

			return;
		}

		entry++;
	}

	xen_raw_console_write("No suitable area found for required E820 entry remapping action\n");
	BUG();
}

/*
 * Look for non-RAM memory types in a specific guest physical area and move
 * those away if possible (ACPI NVS only for now).
 */
static void __init xen_e820_resolve_conflicts(phys_addr_t start,
					      phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned int mapcnt;
	phys_addr_t end;

	if (!size)
		return;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->addr >= end)
			return;

		if (entry->addr + entry->size > start &&
		    entry->type == E820_TYPE_NVS)
			xen_e820_swap_entry_with_ram(entry);

		entry++;
	}
}

/*
 * Check for an area in physical memory to be usable for non-movable purposes.
 * An area is considered usable if the used E820 map lists it to be RAM or
 * some other type which can be moved to higher PFNs while keeping the MFNs.
 * In case the area is not usable, crash the system with an error message.
 */
void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
				   const char *component)
{
	xen_e820_resolve_conflicts(start, size);

	if (!xen_is_e820_reserved(start, size))
		return;

	xen_raw_console_write("Xen hypervisor allocated ");
	xen_raw_console_write(component);
	xen_raw_console_write(" memory conflicts with E820 map\n");
	BUG();
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	memblock_reserve(start, size);
	if (!xen_is_e820_reserved(start, size))
		return;

	xen_relocate_p2m();
	memblock_phys_free(start, size);
}

/**
 * xen_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	unsigned long maxmem_pages;
	int i;
	int op;

	xen_parse_512gb();
	ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
	mem_end = PFN_PHYS(ini_nr_pages);

	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
	xen_saved_max_mem_size = max_mem_size;
#endif

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_table.entries[0].addr = 0ULL;
		xen_e820_table.entries[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_table.entries[0].size += 8ULL << 20;
		xen_e820_table.entries[0].type = E820_TYPE_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_table.nr_entries = memmap.nr_entries;

	if (xen_initial_domain()) {
		/*
		 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
		 * regions, so if we're using the machine memory map leave the
		 * region as RAM as it is in the pseudo-physical map.
		 *
		 * UNUSABLE regions in domUs are not handled and will need
		 * a patch in the future.
		 */
		xen_ignore_unusable();

#ifdef CONFIG_ISCSI_IBFT_FIND
		/* Reserve 0.5 MiB to 1 MiB region so iBFT can be found */
		xen_e820_table.entries[xen_e820_table.nr_entries].addr = IBFT_START;
		xen_e820_table.entries[xen_e820_table.nr_entries].size = IBFT_END - IBFT_START;
		xen_e820_table.entries[xen_e820_table.nr_entries].type = E820_TYPE_RESERVED;
		xen_e820_table.nr_entries++;
#endif
	}

	/* Make sure the Xen-supplied memory map is well-ordered. */
	e820__update_table(&xen_e820_table);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	xen_chk_is_e820_usable(__pa_symbol(_text),
			       __pa_symbol(_end) - __pa_symbol(_text),
			       "kernel");

	/*
	 * Check for a conflict of the xen_start_info memory with the target
	 * E820 map.
	 */
	xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
			       "xen_start_info");

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_foreach_remap_area(xen_count_remap_pages);

	if (max_pages > ini_nr_pages)
		extra_pages += max_pages - ini_nr_pages;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 */
	maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM));
	extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages);

	i = 0;
	addr = xen_e820_table.entries[0].addr;
	size = xen_e820_table.entries[0].size;
	while (i < xen_e820_table.nr_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_table.entries[i].type;

		if (type == E820_TYPE_RESERVED)
			xen_pv_pci_possible = true;

		if (type == E820_TYPE_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else
				discard = true;
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_table.nr_entries) {
				addr = xen_e820_table.entries[i].addr;
				size = xen_e820_table.entries[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_TYPE_RESERVED);

	e820__update_table(e820_table);

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_phys_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_foreach_remap_area(xen_set_identity_and_remap_chunk);

	pr_info("Released %ld page(s)\n", xen_released_pages);

	return "Xen";
}

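/*
 * Register a callback entry point with the hypervisor, with event delivery
 * masked on entry (CALLBACKF_mask_events).
 */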
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

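/*
 * Register the compat SYSENTER entry point. If the hypervisor rejects the
 * callback, clear the feature flag so SYSENTER won't be used.
 */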
void xen_enable_sysenter(void)
{
	if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
	    register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
}

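/*
 * Register the 64-bit and compat SYSCALL entry points. Failure of the
 * 64-bit registration is fatal for 64-bit userspace; failure of the compat
 * one merely disables the feature.
 */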
void xen_enable_syscall(void)
{
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/*
		 * Pretty fatal; 64-bit userspace has no other
		 * mechanism for syscalls.
		 */
	}

	if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
	    register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}

static void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (register_callback(CALLBACKTYPE_event,
			      xen_asm_exc_xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}