// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/libnvdimm.h>
#include <linux/vmstat.h>
#include <linux/kernel.h>
#include <linux/cc_platform.h>
#include <linux/set_memory.h>
#include <linux/memregion.h>

#include <asm/e820/api.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/memtype.h>

#include "../mm_internal.h"

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgd_t		*pgd;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	unsigned long	numpages;
	unsigned long	curpage;
	unsigned long	pfn;
	unsigned int	flags;
	unsigned int	force_split		: 1,
			force_static_prot	: 1,
			force_flush_all		: 1;
	struct page	**pages;
};
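
/*
 * For illustration only: a typical primary invocation fills cpa_data
 * roughly like this hypothetical sketch before handing it to
 * __change_page_attr_set_clr(); see change_page_attr_set_clr() for the
 * real construction.
 *
 *	struct cpa_data cpa = {
 *		.vaddr		= &addr,		// one virtual start address
 *		.numpages	= numpages,		// length of the range in 4K pages
 *		.mask_set	= __pgprot(_PAGE_NX),	// bits to set ...
 *		.mask_clr	= __pgprot(0),		// ... and bits to clear
 *		.flags		= 0,			// no CPA_ARRAY/CPA_PAGES_ARRAY
 *	};
 */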

enum cpa_warn {
	CPA_CONFLICT,
	CPA_PROTECT,
	CPA_DETECT,
};

static const int cpa_warn_level = CPA_PROTECT;

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity mappings)
 * using cpa_lock, so that no CPU with stale large TLB entries can change a
 * page attribute in parallel with another CPU that is splitting a large
 * page entry and changing the attribute.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */

static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
	return __pgprot(cachemode2protval(pcm));
}

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	/* Protect against CPA */
	spin_lock(&pgd_lock);
	direct_pages_count[level] += pages;
	spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
	if (direct_pages_count[level] == 0)
		return;

	direct_pages_count[level]--;
	if (system_state == SYSTEM_RUNNING) {
		if (level == PG_LEVEL_2M)
			count_vm_event(DIRECT_MAP_LEVEL2_SPLIT);
		else if (level == PG_LEVEL_1G)
			count_vm_event(DIRECT_MAP_LEVEL3_SPLIT);
	}
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k: %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M: %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M: %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G: %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_CPA_STATISTICS

static unsigned long cpa_1g_checked;
static unsigned long cpa_1g_sameprot;
static unsigned long cpa_1g_preserved;
static unsigned long cpa_2m_checked;
static unsigned long cpa_2m_sameprot;
static unsigned long cpa_2m_preserved;
static unsigned long cpa_4k_install;

static inline void cpa_inc_1g_checked(void)
{
	cpa_1g_checked++;
}

static inline void cpa_inc_2m_checked(void)
{
	cpa_2m_checked++;
}

static inline void cpa_inc_4k_install(void)
{
	data_race(cpa_4k_install++);
}

static inline void cpa_inc_lp_sameprot(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_sameprot++;
	else
		cpa_2m_sameprot++;
}

static inline void cpa_inc_lp_preserved(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_preserved++;
	else
		cpa_2m_preserved++;
}

static int cpastats_show(struct seq_file *m, void *p)
{
	seq_printf(m, "1G pages checked: %16lu\n", cpa_1g_checked);
	seq_printf(m, "1G pages sameprot: %16lu\n", cpa_1g_sameprot);
	seq_printf(m, "1G pages preserved: %16lu\n", cpa_1g_preserved);
	seq_printf(m, "2M pages checked: %16lu\n", cpa_2m_checked);
	seq_printf(m, "2M pages sameprot: %16lu\n", cpa_2m_sameprot);
	seq_printf(m, "2M pages preserved: %16lu\n", cpa_2m_preserved);
	seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);
	return 0;
}

static int cpastats_open(struct inode *inode, struct file *file)
{
	return single_open(file, cpastats_show, NULL);
}

static const struct file_operations cpastats_fops = {
	.open		= cpastats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init cpa_stats_init(void)
{
	debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL,
			    &cpastats_fops);
	return 0;
}
late_initcall(cpa_stats_init);
#else
static inline void cpa_inc_1g_checked(void) { }
static inline void cpa_inc_2m_checked(void) { }
static inline void cpa_inc_4k_install(void) { }
static inline void cpa_inc_lp_sameprot(int level) { }
static inline void cpa_inc_lp_preserved(int level) { }
#endif


static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr <= end;
}
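
/*
 * For illustration only: within() treats 'end' as exclusive while
 * within_inclusive() treats it as the last valid value, e.g.:
 *
 *	within(0x1000, 0x1000, 0x2000)			== 1
 *	within(0x2000, 0x1000, 0x2000)			== 0
 *	within_inclusive(0x2000, 0x1000, 0x2000)	== 1
 */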

#ifdef CONFIG_X86_64

/*
 * The kernel image is mapped into two places in the virtual address space
 * (addresses without KASLR, of course):
 *
 * 1. The kernel direct map (0xffff880000000000)
 * 2. The "high kernel map" (0xffffffff81000000)
 *
 * We actually execute out of #2. If we get the address of a kernel symbol, it
 * points to #2, but almost all physical-to-virtual translations point to #1.
 *
 * This is so that we can have both a directmap of all physical memory *and*
 * take full advantage of the limited (s32) immediate addressing range (2G)
 * of x86_64.
 *
 * See Documentation/arch/x86/x86_64/mm.rst for more detail.
 */

static inline unsigned long highmap_start_pfn(void)
{
	return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	/* Do not reference physical address outside the kernel. */
	return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/*
	 * Kernel text has an alias mapping at a high address, known
	 * here as "highmap".
	 */
	return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
}

#else

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/* There is no highmap on 32-bit */
	return false;
}

#endif

/*
 * See set_mce_nospec().
 *
 * Machine check recovery code needs to change cache mode of poisoned pages to
 * UC to avoid speculative access logging another error. But passing the
 * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
 * speculative access. So we cheat and flip the top bit of the address. This
 * works fine for the code that updates the page tables. But at the end of the
 * process we need to flush the TLB and cache and the non-canonical address
 * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
 *
 * But in the common case we already have a canonical address. This code
 * will fix the top bit if needed and is a no-op otherwise.
 */
static inline unsigned long fix_addr(unsigned long addr)
{
#ifdef CONFIG_X86_64
	return (long)(addr << 1) >> 1;
#else
	return addr;
#endif
}
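
/*
 * For illustration only: the shift-left/arithmetic-shift-right pair
 * sign-extends bit 62 into bit 63, re-canonicalizing an address whose
 * top bit was flipped by set_mce_nospec() (hypothetical values):
 *
 *	fix_addr(0x7fff888012345000) == 0xffff888012345000
 *	fix_addr(0xffff888012345000) == 0xffff888012345000	// no-op
 */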

static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
{
	if (cpa->flags & CPA_PAGES_ARRAY) {
		struct page *page = cpa->pages[idx];

		if (unlikely(PageHighMem(page)))
			return 0;

		return (unsigned long)page_address(page);
	}

	if (cpa->flags & CPA_ARRAY)
		return cpa->vaddr[idx];

	return *cpa->vaddr + idx * PAGE_SIZE;
}

/*
 * Flushing functions
 */

static void clflush_cache_range_opt(void *vaddr, unsigned int size)
{
	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
	void *vend = vaddr + size;

	if (p >= vend)
		return;

	for (; p < vend; p += clflush_size)
		clflushopt(p);
}

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
 * SFENCE to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	mb();
	clflush_cache_range_opt(vaddr, size);
	mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
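
/*
 * For illustration only: a caller that has written data through a
 * mapping might flush it like this (hypothetical buffer):
 *
 *	memcpy(pmem_buf, data, len);
 *	clflush_cache_range(pmem_buf, len);	// fenced, safe default
 *
 * The _opt variant skips the fences and is only for callers that
 * provide their own ordering, as cpa_flush() does.
 */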

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_invalidate_pmem(void *addr, size_t size)
{
	clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif

#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
bool cpu_cache_has_invalidate_memregion(void)
{
	return !cpu_feature_enabled(X86_FEATURE_HYPERVISOR);
}
EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM");

int cpu_cache_invalidate_memregion(int res_desc)
{
	if (WARN_ON_ONCE(!cpu_cache_has_invalidate_memregion()))
		return -ENXIO;
	wbinvd_on_all_cpus();
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, "DEVMEM");
#endif

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86 >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_tlb(void *data)
{
	struct cpa_data *cpa = data;
	unsigned int i;

	for (i = 0; i < cpa->numpages; i++)
		flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
}

static void cpa_flush(struct cpa_data *data, int cache)
{
	struct cpa_data *cpa = data;
	unsigned int i;

	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		cpa_flush_all(cache);
		return;
	}

	if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
		flush_tlb_all();
	else
		on_each_cpu(__cpa_flush_tlb, cpa, 1);

	if (!cache)
		return;

	mb();
	for (i = 0; i < cpa->numpages; i++) {
		unsigned long addr = __cpa_addr(cpa, i);
		unsigned int level;

		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
	}
	mb();
}

static bool overlaps(unsigned long r1_start, unsigned long r1_end,
		     unsigned long r2_start, unsigned long r2_end)
{
	return (r1_start <= r2_end && r1_end >= r2_start) ||
		(r2_start <= r1_end && r2_end >= r1_start);
}
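
/*
 * For illustration only: both range ends are inclusive, so adjacent
 * ranges that share an endpoint do overlap:
 *
 *	overlaps(0x100, 0x1ff, 0x1ff, 0x2ff) == true	// shared 0x1ff
 *	overlaps(0x100, 0x1ff, 0x200, 0x2ff) == false	// disjoint
 */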

#ifdef CONFIG_PCI_BIOS
/*
 * The BIOS area between 640K and 1MB needs to be executable for PCI BIOS
 * based config access (CONFIG_PCI_GOBIOS) support.
 */
#define BIOS_PFN	PFN_DOWN(BIOS_BEGIN)
#define BIOS_PFN_END	PFN_DOWN(BIOS_END - 1)

static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END))
		return _PAGE_NX;
	return 0;
}
#else
static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	return 0;
}
#endif

/*
 * The .rodata section needs to be read-only. Using the pfn catches all
 * aliases. This also includes __ro_after_init, so do not enforce until
 * kernel_set_to_readonly is true.
 */
static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn)
{
	unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata));

	/*
	 * Note: __end_rodata is page aligned and not inclusive, so
	 * subtract 1 to get the last enforced PFN in the rodata area.
	 */
	epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1;

	if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro))
		return _PAGE_RW;
	return 0;
}

/*
 * Protect kernel text against becoming non executable by forbidding
 * _PAGE_NX. This protects only the high kernel mapping (_text -> _etext)
 * out of which the kernel actually executes. Do not protect the low
 * mapping.
 *
 * This does not cover __inittext since that is gone after boot.
 */
static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end)
{
	unsigned long t_end = (unsigned long)_etext - 1;
	unsigned long t_start = (unsigned long)_text;

	if (overlaps(start, end, t_start, t_end))
		return _PAGE_NX;
	return 0;
}

#if defined(CONFIG_X86_64)
/*
 * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
 * the kernel text mappings for the large page aligned text and rodata
 * sections will always be read-only. The kernel identity mappings
 * covering the holes caused by this alignment can be anything the user
 * asks for.
 *
 * This will preserve the large page mappings for kernel text/data at no
 * extra cost.
 */
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1;
	unsigned long t_start = (unsigned long)_text;
	unsigned int level;

	if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end))
		return 0;
	/*
	 * Don't enforce the !RW mapping for the kernel text mapping, if
	 * the current mapping is already using small page mapping. No
	 * need to work hard to preserve large page mappings in this case.
	 *
	 * This also fixes the Linux Xen paravirt guest boot failure caused
	 * by unexpected read-only mappings for kernel identity
	 * mappings. In this paravirt guest case, the kernel text mapping
	 * and the kernel identity mapping share the same page-table pages,
	 * so the protections for kernel text and identity mappings have to
	 * be the same.
	 */
	if (lookup_address(start, &level) && (level != PG_LEVEL_4K))
		return _PAGE_RW;
	return 0;
}
#else
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	return 0;
}
#endif

static inline bool conflicts(pgprot_t prot, pgprotval_t val)
{
	return (pgprot_val(prot) & ~val) != pgprot_val(prot);
}
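
/*
 * For illustration only: 'val' is the set of forbidden bits, so a
 * request conflicts when it asks for any of them, e.g.:
 *
 *	conflicts(__pgprot(_PAGE_RW), _PAGE_RW) == true	  // wants forbidden RW
 *	conflicts(__pgprot(_PAGE_NX), _PAGE_RW) == false  // disjoint bits
 */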

static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
				  unsigned long start, unsigned long end,
				  unsigned long pfn, const char *txt)
{
	static const char *lvltxt[] = {
		[CPA_CONFLICT]	= "conflict",
		[CPA_PROTECT]	= "protect",
		[CPA_DETECT]	= "detect",
	};

	if (warnlvl > cpa_warn_level || !conflicts(prot, val))
		return;

	pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n",
		lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
		(unsigned long long)val);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
					  unsigned long pfn, unsigned long npg,
					  unsigned long lpsize, int warnlvl)
{
	pgprotval_t forbidden, res;
	unsigned long end;

	/*
	 * There is no point in checking RW/NX conflicts when the requested
	 * mapping is setting the page !PRESENT.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		return prot;

	/* Operate on the virtual address */
	end = start + npg * PAGE_SIZE - 1;

	res = protect_kernel_text(start, end);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
	forbidden = res;

	/*
	 * Special case to preserve a large page. If the change spans the
	 * full large page mapping then there is no point to split it
	 * up. Happens with ftrace and is going to be removed once ftrace
	 * has switched to text_poke().
	 */
	if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
		res = protect_kernel_text_ro(start, end);
		check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
		forbidden |= res;
	}

	/* Check the PFN directly */
	res = protect_pci_bios(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
	forbidden |= res;

	res = protect_rodata(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
	forbidden |= res;

	return __pgprot(pgprot_val(prot) & ~forbidden);
}
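
/*
 * For illustration only: once the kernel is read-only, a request to make
 * a rodata page writable is silently stripped (hypothetical values):
 *
 *	prot = static_protections(__pgprot(_PAGE_PRESENT | _PAGE_RW),
 *				  rodata_vaddr, rodata_pfn, 1, 0,
 *				  CPA_PROTECT);
 *	// pgprot_val(prot) == _PAGE_PRESENT: _PAGE_RW was forbidden
 */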

/*
 * Validate strict W^X semantics.
 */
static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
				  unsigned long pfn, unsigned long npg,
				  bool nx, bool rw)
{
	unsigned long end;

	/*
	 * 32-bit has some unfixable W+X issues, like EFI code
	 * and writeable data being in the same page. Disable
	 * detection and enforcement there.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		return new;

	/* Only verify when NX is supported: */
	if (!(__supported_pte_mask & _PAGE_NX))
		return new;

	if (!((pgprot_val(old) ^ pgprot_val(new)) & (_PAGE_RW | _PAGE_NX)))
		return new;

	if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
		return new;

	/* Non-leaf translation entries can disable writing or execution. */
	if (!rw || nx)
		return new;

	end = start + npg * PAGE_SIZE - 1;
	WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
		  (unsigned long long)pgprot_val(old),
		  (unsigned long long)pgprot_val(new),
		  start, end, pfn);

	/*
	 * For now, allow all permission change attempts by returning the
	 * attempted permissions. This can 'return old' to actively
	 * refuse the permission change at a later time.
	 */
	return new;
}

/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry (or NULL if the entry does not exist),
 * the level of the entry, and the effective NX and RW bits of all
 * page table levels.
 */
pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
				  unsigned int *level, bool *nx, bool *rw)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_256T;
	*nx = false;
	*rw = true;

	if (pgd_none(*pgd))
		return NULL;

	*level = PG_LEVEL_512G;
	*nx |= pgd_flags(*pgd) & _PAGE_NX;
	*rw &= pgd_flags(*pgd) & _PAGE_RW;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;

	if (p4d_leaf(*p4d) || !p4d_present(*p4d))
		return (pte_t *)p4d;

	*level = PG_LEVEL_1G;
	*nx |= p4d_flags(*p4d) & _PAGE_NX;
	*rw &= p4d_flags(*p4d) & _PAGE_RW;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;

	if (pud_leaf(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	*level = PG_LEVEL_2M;
	*nx |= pud_flags(*pud) & _PAGE_NX;
	*rw &= pud_flags(*pud) & _PAGE_RW;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	if (pmd_leaf(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	*nx |= pmd_flags(*pmd) & _PAGE_NX;
	*rw &= pmd_flags(*pmd) & _PAGE_RW;

	return pte_offset_kernel(pmd, address);
}

/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
			     unsigned int *level)
{
	bool nx, rw;

	return lookup_address_in_pgd_attr(pgd, address, level, &nx, &rw);
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: the function returns p4d, pud or pmd either when the entry is marked
 * large or when the present bit is not set. Otherwise it returns NULL.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);
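
/*
 * For illustration only: a typical caller checks both the returned
 * pointer and the level before dereferencing, since the pointer may be
 * a cast p4d/pud/pmd for large or non-present entries:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && level == PG_LEVEL_4K && pte_present(*pte))
 *		pr_info("4K mapping at pfn %lx\n", pte_pfn(*pte));
 */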

static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
				  unsigned int *level, bool *nx, bool *rw)
{
	pgd_t *pgd;

	if (!cpa->pgd)
		pgd = pgd_offset_k(address);
	else
		pgd = cpa->pgd + pgd_index(address);

	return lookup_address_in_pgd_attr(pgd, address, level, nx, rw);
}

/*
 * Lookup the PMD entry for a virtual address. Return a pointer to the entry
 * or NULL if not present.
 */
pmd_t *lookup_pmd_address(unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_leaf(*p4d) || !p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_leaf(*pud) || !pud_present(*pud))
		return NULL;

	return pmd_offset(pud, address);
}

/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems. The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * Note that as long as the PTEs are well-formed with correct PFNs, this
 * works without checking the PRESENT bit in the leaf PTE. This is unlike
 * the similar vmalloc_to_page() and derivatives. Callers may depend on
 * this behavior.
 *
 * This could be optimized, but it is only used in paths that are not perf
 * sensitive, and keeping it unoptimized should increase the testing coverage
 * for the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
	unsigned long virt_addr = (unsigned long)__virt_addr;
	phys_addr_t phys_addr;
	unsigned long offset;
	enum pg_level level;
	pte_t *pte;

	pte = lookup_address(virt_addr, &level);
	BUG_ON(!pte);

	/*
	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
	 * make 32-bit PAE kernels work correctly.
	 */
	switch (level) {
	case PG_LEVEL_1G:
		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PUD_MASK;
		break;
	case PG_LEVEL_2M:
		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PMD_MASK;
		break;
	default:
		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
		offset = virt_addr & ~PAGE_MASK;
	}

	return (phys_addr_t)(phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);
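
/*
 * For illustration only: unlike __pa(), this resolves addresses mapped
 * through page tables other than the direct map, e.g. a hypothetical
 * vmalloc'ed buffer:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	phys_addr_t pa = slow_virt_to_phys(buf);	// walks the tables
 *	// __pa(buf) would be wrong here: buf is not in the direct map
 */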

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			p4d = p4d_offset(pgd, address);
			pud = pud_offset(p4d, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
{
	/*
	 * _PAGE_GLOBAL means "global page" for present PTEs.
	 * But, it is also used to indicate _PAGE_PROTNONE
	 * for non-present PTEs.
	 *
	 * This ensures that a _PAGE_GLOBAL PTE going from
	 * present to non-present is not confused as
	 * _PAGE_PROTNONE.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		pgprot_val(prot) &= ~_PAGE_GLOBAL;

	return prot;
}

static int __should_split_large_page(pte_t *kpte, unsigned long address,
				     struct cpa_data *cpa)
{
	unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
	pgprot_t old_prot, new_prot, req_prot, chk_prot;
	pte_t new_pte, *tmp;
	enum pg_level level;
	bool nx, rw;

	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
	if (tmp != kpte)
		return 1;

	switch (level) {
	case PG_LEVEL_2M:
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		old_pfn = pmd_pfn(*(pmd_t *)kpte);
		cpa_inc_2m_checked();
		break;
	case PG_LEVEL_1G:
		old_prot = pud_pgprot(*(pud_t *)kpte);
		old_pfn = pud_pfn(*(pud_t *)kpte);
		cpa_inc_1g_checked();
		break;
	default:
		return -EINVAL;
	}

	psize = page_level_size(level);
	pmask = page_level_mask(level);

	/*
	 * Calculate the number of pages that fit into this large
	 * page starting at address:
	 */
	lpaddr = (address + psize) & pmask;
	numpages = (lpaddr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 * Convert protection attributes to 4k-format, as cpa->mask* are set
	 * up accordingly.
	 */

	/* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
	req_prot = pgprot_large_2_4k(old_prot);

	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * req_prot is in format of 4k pages. It must be converted to large
	 * page format: the caching mode includes the PAT bit located at
	 * different bit positions in the two formats.
	 */
	req_prot = pgprot_4k_2_large(req_prot);
	req_prot = pgprot_clear_protnone_bits(req_prot);
	if (pgprot_val(req_prot) & _PAGE_PRESENT)
		pgprot_val(req_prot) |= _PAGE_PSE;

	/*
	 * old_pfn points to the large page base pfn. So we need to add the
	 * offset of the virtual address:
	 */
	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	/*
	 * Calculate the large page base address and the number of 4K pages
	 * in the large page
	 */
	lpaddr = address & pmask;
	numpages = psize >> PAGE_SHIFT;

	/*
	 * Sanity check that the existing mapping is correct versus the static
	 * protections. static_protections() guards against !PRESENT, so no
	 * extra conditional required here.
	 */
	chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
				      psize, CPA_CONFLICT);

	if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
		/*
		 * Split the large page and tell the split code to
		 * enforce static protections.
		 */
		cpa->force_static_prot = 1;
		return 1;
	}

	/*
	 * Optimization: If the requested pgprot is the same as the current
	 * pgprot, then the large page can be preserved and no updates are
	 * required independent of alignment and length of the requested
	 * range. The above already established that the current pgprot is
	 * correct, which in consequence makes the requested pgprot correct
	 * as well if it is the same. The static protection scan below will
	 * not come to a different conclusion.
	 */
	if (pgprot_val(req_prot) == pgprot_val(old_prot)) {
		cpa_inc_lp_sameprot(level);
		return 0;
	}

	/*
	 * If the requested range does not cover the full page, split it up
	 */
	if (address != lpaddr || cpa->numpages != numpages)
		return 1;

	/*
	 * Check whether the requested pgprot is conflicting with a static
	 * protection requirement in the large page.
	 */
	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
				      psize, CPA_DETECT);

	new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages,
			      nx, rw);

	/*
	 * If there is a conflict, split the large page.
	 *
	 * There used to be a 4k wise evaluation trying really hard to
	 * preserve the large pages, but experimentation has shown that this
	 * does not help at all. There might be corner cases which would
	 * preserve one large page occasionally, but it's really not worth the
	 * extra code and cycles for the common case.
	 */
	if (pgprot_val(req_prot) != pgprot_val(new_prot))
		return 1;

	/* All checks passed. Update the large page mapping. */
	new_pte = pfn_pte(old_pfn, new_prot);
	__set_pmd_pte(kpte, address, new_pte);
	cpa->flags |= CPA_FLUSHTLB;
	cpa_inc_lp_preserved(level);
	return 0;
}

static int should_split_large_page(pte_t *kpte, unsigned long address,
				   struct cpa_data *cpa)
{
	int do_split;

	if (cpa->force_split)
		return 1;

	spin_lock(&pgd_lock);
	do_split = __should_split_large_page(kpte, address, cpa);
	spin_unlock(&pgd_lock);

	return do_split;
}
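
/*
 * For illustration only: the return value convention used by the caller
 * in __change_page_attr():
 *
 *	> 0	the large page must be split
 *	  0	the large page was preserved (possibly updated in place)
 *	< 0	error, e.g. -EINVAL for an unexpected mapping level
 */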

static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
			  pgprot_t ref_prot, unsigned long address,
			  unsigned long size)
{
	unsigned int npg = PFN_DOWN(size);
	pgprot_t prot;

	/*
	 * If should_split_large_page() discovered an inconsistent mapping,
	 * remove the invalid protection in the split mapping.
	 */
	if (!cpa->force_static_prot)
		goto set;

	/* Hand in lpsize = 0 to enforce the protection mechanism */
	prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);

	if (pgprot_val(prot) == pgprot_val(ref_prot))
		goto set;

	/*
	 * If this is splitting a PMD, fix it up. PUD splits cannot be
	 * fixed trivially as that would require to rescan the newly
	 * installed PMD mappings after returning from split_large_page()
	 * so an eventual further split can allocate the necessary PTE
	 * pages. Warn for now and revisit it in case this actually
	 * happens.
	 */
	if (size == PAGE_SIZE)
		ref_prot = prot;
	else
		pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
set:
	set_pte(pte, pfn_pte(pfn, ref_prot));
}

static int
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
		   struct page *base)
{
	unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
	pte_t *pbase = (pte_t *)page_address(base);
	unsigned int i, level;
	pgprot_t ref_prot;
	bool nx, rw;
	pte_t *tmp;

	spin_lock(&pgd_lock);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
	if (tmp != kpte) {
		spin_unlock(&pgd_lock);
		return 1;
	}

	paravirt_alloc_pte(&init_mm, page_to_pfn(base));

	switch (level) {
	case PG_LEVEL_2M:
		ref_prot = pmd_pgprot(*(pmd_t *)kpte);
		/*
		 * Clear PSE (aka _PAGE_PAT) and move
		 * PAT bit to correct position.
		 */
		ref_prot = pgprot_large_2_4k(ref_prot);
		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
		lpaddr = address & PMD_MASK;
		lpinc = PAGE_SIZE;
		break;

	case PG_LEVEL_1G:
		ref_prot = pud_pgprot(*(pud_t *)kpte);
		ref_pfn = pud_pfn(*(pud_t *)kpte);
		pfninc = PMD_SIZE >> PAGE_SHIFT;
		lpaddr = address & PUD_MASK;
		lpinc = PMD_SIZE;
		/*
		 * Clear the PSE flags if the PRESENT flag is not set
		 * otherwise pmd_present() will return true even on a non
		 * present pmd.
		 */
		if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
			pgprot_val(ref_prot) &= ~_PAGE_PSE;
		break;

	default:
		spin_unlock(&pgd_lock);
		return 1;
	}

	ref_prot = pgprot_clear_protnone_bits(ref_prot);

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = ref_pfn;
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
		split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);

	if (virt_addr_valid(address)) {
		unsigned long pfn = PFN_DOWN(__pa(address));

		if (pfn_range_is_mapped(pfn, pfn + 1))
			split_page_count(level);
	}

	/*
	 * Install the new, split up pagetable.
	 *
	 * We use the standard kernel pagetable protections for the new
	 * pagetable protections, the actual ptes set above control the
	 * primary protection behavior:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

	/*
	 * Do a global flush tlb after splitting the large page
	 * and before we do the actual change page attribute in the PTE.
	 *
	 * Without this, we violate the TLB application note, which says:
	 * "The TLBs may contain both ordinary and large-page
	 *  translations for a 4-KByte range of linear addresses. This
	 *  may occur if software modifies the paging structures so that
	 *  the page size used for the address range changes. If the two
	 *  translations differ with respect to page frame or attributes
	 *  (e.g., permissions), processor behavior is undefined and may
	 *  be implementation-specific."
	 *
	 * We do this global tlb flush inside the cpa_lock, so that we
	 * don't allow any other cpu with stale tlb entries to change, in
	 * parallel, the page attribute of a page that falls into the
	 * just split large page entry.
	 */
	flush_tlb_all();
	spin_unlock(&pgd_lock);

	return 0;
}

static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
			    unsigned long address)
{
	struct page *base;

	if (!debug_pagealloc_enabled())
		spin_unlock(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	if (!debug_pagealloc_enabled())
		spin_lock(&cpa_lock);
	if (!base)
		return -ENOMEM;

	if (__split_large_page(cpa, kpte, address, base))
		__free_page(base);

	return 0;
}

static bool try_to_free_pte_page(pte_t *pte)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		if (!pte_none(pte[i]))
			return false;

	free_page((unsigned long)pte);
	return true;
}

static bool try_to_free_pmd_page(pmd_t *pmd)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_none(pmd[i]))
			return false;

	free_page((unsigned long)pmd);
	return true;
}

static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, start);

	while (start < end) {
		set_pte(pte, __pte(0));

		start += PAGE_SIZE;
		pte++;
	}

	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
		pmd_clear(pmd);
		return true;
	}
	return false;
}

static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
			      unsigned long start, unsigned long end)
{
	if (unmap_pte_range(pmd, start, end))
		if (try_to_free_pmd_page(pud_pgtable(*pud)))
			pud_clear(pud);
}

static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, start);

	/*
	 * Not on a 2MB page boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		__unmap_pmd_range(pud, pmd, start, pre_end);

		start = pre_end;
		pmd++;
	}

	/*
	 * Try to unmap in 2M chunks.
	 */
	while (end - start >= PMD_SIZE) {
		if (pmd_leaf(*pmd))
			pmd_clear(pmd);
		else
			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);

		start += PMD_SIZE;
		pmd++;
	}

	/*
	 * 4K leftovers?
	 */
	if (start < end)
		return __unmap_pmd_range(pud, pmd, start, end);

	/*
	 * Try again to free the PMD page if we haven't succeeded above.
	 */
	if (!pud_none(*pud))
		if (try_to_free_pmd_page(pud_pgtable(*pud)))
			pud_clear(pud);
}

static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
{
	pud_t *pud = pud_offset(p4d, start);

	/*
	 * Not on a GB page boundary?
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		unmap_pmd_range(pud, start, pre_end);

		start = pre_end;
		pud++;
	}

	/*
	 * Try to unmap in 1G chunks.
	 */
	while (end - start >= PUD_SIZE) {

		if (pud_leaf(*pud))
			pud_clear(pud);
		else
			unmap_pmd_range(pud, start, start + PUD_SIZE);

		start += PUD_SIZE;
		pud++;
	}

	/*
	 * 2M leftovers?
	 */
	if (start < end)
		unmap_pmd_range(pud, start, end);

	/*
	 * No need to try to free the PUD page because we'll free it in
	 * populate_pgd's error path
	 */
}

static int alloc_pte_page(pmd_t *pmd)
{
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	if (!pte)
		return -1;

	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	return 0;
}

static int alloc_pmd_page(pud_t *pud)
{
	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
	if (!pmd)
		return -1;

	set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	return 0;
}

static void populate_pte(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, start);

	pgprot = pgprot_clear_protnone_bits(pgprot);

	while (num_pages-- && start < end) {
		set_pte(pte, pfn_pte(cpa->pfn, pgprot));

		start += PAGE_SIZE;
		cpa->pfn++;
		pte++;
	}
}

static long populate_pmd(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
	long cur_pages = 0;
	pmd_t *pmd;
	pgprot_t pmd_pgprot;

	/*
	 * Not on a 2M boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;

		pre_end = min_t(unsigned long, pre_end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(unsigned int, num_pages, cur_pages);

		/*
		 * Need a PTE page?
		 */
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);

		start = pre_end;
	}

	/*
	 * We mapped them all?
	 */
	if (num_pages == cur_pages)
		return cur_pages;

	pmd_pgprot = pgprot_4k_2_large(pgprot);

	while (end - start >= PMD_SIZE) {

		/*
		 * We cannot use a 1G page so allocate a PMD page if needed.
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		pmd = pmd_offset(pud, start);

		set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
						canon_pgprot(pmd_pgprot))));

		start += PMD_SIZE;
		cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
		cur_pages += PMD_SIZE >> PAGE_SHIFT;
	}

	/*
	 * Map trailing 4K pages.
	 */
	if (start < end) {
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, end, num_pages - cur_pages,
			     pmd, pgprot);
	}
	return num_pages;
}

static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
			pgprot_t pgprot)
{
	pud_t *pud;
	unsigned long end;
	long cur_pages = 0;
	pgprot_t pud_pgprot;

	end = start + (cpa->numpages << PAGE_SHIFT);

	/*
	 * Not on a Gb page boundary? => map everything up to it with
	 * smaller pages.
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long pre_end;
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;

		pre_end = min_t(unsigned long, end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(int, (int)cpa->numpages, cur_pages);

		pud = pud_offset(p4d, start);

		/*
		 * Need a PMD page?
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
					 pud, pgprot);
		if (cur_pages < 0)
			return cur_pages;

		start = pre_end;
	}

	/* We mapped them all? */
	if (cpa->numpages == cur_pages)
		return cur_pages;

	pud = pud_offset(p4d, start);
	pud_pgprot = pgprot_4k_2_large(pgprot);

	/*
	 * Map everything starting from the Gb boundary, possibly with 1G pages
	 */
	while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
		set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
						canon_pgprot(pud_pgprot))));

		start += PUD_SIZE;
		cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
		cur_pages += PUD_SIZE >> PAGE_SHIFT;
		pud++;
	}

	/* Map trailing leftover */
	if (start < end) {
		long tmp;

		pud = pud_offset(p4d, start);
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
				   pud, pgprot);
		if (tmp < 0)
			return cur_pages;

		cur_pages += tmp;
	}
	return cur_pages;
}

/*
 * Restrictions for kernel page table do not necessarily apply when mapping in
 * an alternate PGD.
 */
static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
{
	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
	pud_t *pud = NULL;	/* shut up gcc */
	p4d_t *p4d;
	pgd_t *pgd_entry;
	long ret;

	pgd_entry = cpa->pgd + pgd_index(addr);

	if (pgd_none(*pgd_entry)) {
		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
		if (!p4d)
			return -1;

		set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
	}

	/*
	 * Allocate a PUD page and hand it down for mapping.
	 */
	p4d = p4d_offset(pgd_entry, addr);
	if (p4d_none(*p4d)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			return -1;

		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}

	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);

	ret = populate_pud(cpa, addr, p4d, pgprot);
	if (ret < 0) {
		/*
		 * Leave the PUD page in place in case some other CPU or thread
		 * already found it, but remove any useless entries we just
		 * added to it.
		 */
		unmap_pud_range(p4d, addr,
				addr + (cpa->numpages << PAGE_SHIFT));
		return ret;
	}

	cpa->numpages = ret;
	return 0;
}

static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
			       int primary)
{
	if (cpa->pgd) {
		/*
		 * Right now, we only execute this code path when mapping
		 * the EFI virtual memory map regions, no other users
		 * provide a ->pgd value. This may change in the future.
		 */
		return populate_pgd(cpa, vaddr);
	}

	/*
	 * Ignore all non-primary paths.
	 */
	if (!primary) {
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
	 * to have holes.
	 * Also set numpages to '1' indicating that we processed cpa req for
	 * one virtual address page and its pfn. TBD: numpages can be set based
	 * on the initial value and the level returned by lookup_address().
	 */
	if (within(vaddr, PAGE_OFFSET,
		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
		cpa->numpages = 1;
		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
		return 0;

	} else if (__cpa_pfn_in_highmap(cpa->pfn)) {
		/* Faults in the highmap are OK, so do not warn: */
		return -EFAULT;
	} else {
		WARN(1, KERN_WARNING "CPA: called for zero pte. "
			"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
			*cpa->vaddr);

		return -EFAULT;
	}
}

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;
	bool nx, rw;

	address = __cpa_addr(cpa, cpa->curpage);
repeat:
	kpte = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
	if (!kpte)
		return __cpa_process_fault(cpa, address, primary);

	old_pte = *kpte;
	if (pte_none(old_pte))
		return __cpa_process_fault(cpa, address, primary);

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t old_prot = pte_pgprot(old_pte);
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		cpa_inc_4k_install();
		/* Hand in lpsize = 0 to enforce the protection mechanism */
		new_prot = static_protections(new_prot, address, pfn, 1, 0,
					      CPA_PROTECT);

		new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1,
				      nx, rw);

		new_prot = pgprot_clear_protnone_bits(new_prot);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pfn, new_prot);
		cpa->pfn = pfn;
		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flags |= CPA_FLUSHTLB;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = should_split_large_page(kpte, address, cpa);
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and the CPA_FLUSHTLB flag have been
	 * updated in __should_split_large_page():
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(cpa, kpte, address);
	if (!err)
		goto repeat;

	return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int primary);

/*
 * Check the directmap and "high kernel map" 'aliases'.
 */
static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
	unsigned long vaddr;
	int ret;

	if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
		return 0;

	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	vaddr = __cpa_addr(cpa, cpa->curpage);
	if (!(within(vaddr, PAGE_OFFSET,
		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

		alias_cpa = *cpa;
		alias_cpa.vaddr = &laddr;
		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
		alias_cpa.curpage = 0;

		/* Directmap always has NX set, do not modify. */
		if (__supported_pte_mask & _PAGE_NX) {
			alias_cpa.mask_clr.pgprot &= ~_PAGE_NX;
			alias_cpa.mask_set.pgprot &= ~_PAGE_NX;
		}

		cpa->force_flush_all = 1;

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
		if (ret)
			return ret;
	}

#ifdef CONFIG_X86_64
	/*
	 * If the primary call didn't touch the high mapping already
	 * and the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (!within(vaddr, (unsigned long)_text, _brk_end) &&
	    __cpa_pfn_in_highmap(cpa->pfn)) {
		unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
					       __START_KERNEL_map - phys_base;
		alias_cpa = *cpa;
		alias_cpa.vaddr = &temp_cpa_vaddr;
		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
		alias_cpa.curpage = 0;

		/*
		 * [_text, _brk_end) also covers data, do not modify NX except
		 * in cases where the highmap is the primary target.
		 */
		if (__supported_pte_mask & _PAGE_NX) {
			alias_cpa.mask_clr.pgprot &= ~_PAGE_NX;
			alias_cpa.mask_set.pgprot &= ~_PAGE_NX;
		}

		cpa->force_flush_all = 1;
		/*
		 * The high mapping range is imprecise, so ignore the
		 * return value.
		 */
		__change_page_attr_set_clr(&alias_cpa, 0);
	}
#endif

	return 0;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int primary)
{
	unsigned long numpages = cpa->numpages;
	unsigned long rempages = numpages;
	int ret = 0;

	/*
	 * No changes, easy!
	 */
	if (!(pgprot_val(cpa->mask_set) | pgprot_val(cpa->mask_clr)) &&
	    !cpa->force_split)
		return ret;

	while (rempages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = rempages;
		/* for array changes, we can't use large page */
		if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
			cpa->numpages = 1;

		if (!debug_pagealloc_enabled())
			spin_lock(&cpa_lock);
		ret = __change_page_attr(cpa, primary);
		if (!debug_pagealloc_enabled())
			spin_unlock(&cpa_lock);
		if (ret)
			goto out;

		if (primary && !(cpa->flags & CPA_NO_CHECK_ALIAS)) {
			ret = cpa_process_alias(cpa);
			if (ret)
				goto out;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > rempages || !cpa->numpages);
		rempages -= cpa->numpages;
		cpa->curpage += cpa->numpages;
	}

out:
	/* Restore the original numpages */
	cpa->numpages = numpages;
	return ret;
}

static int change_page_attr_set_clr(unsigned long *addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr,
				    int force_split, int in_flag,
				    struct page **pages)
{
	struct cpa_data cpa;
	int ret, cache;

	memset(&cpa, 0, sizeof(cpa));

	/*
	 * Check whether we are asked to set an unsupported feature.
	 * Clearing unsupported features is OK.
	 */
	mask_set = canon_pgprot(mask_set);

	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (in_flag & CPA_ARRAY) {
		int i;
		for (i = 0; i < numpages; i++) {
			if (addr[i] & ~PAGE_MASK) {
				addr[i] &= PAGE_MASK;
				WARN_ON_ONCE(1);
			}
		}
	} else if (!(in_flag & CPA_PAGES_ARRAY)) {
		/*
		 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
		 * No need to check in that case
		 */
		if (*addr & ~PAGE_MASK) {
			*addr &= PAGE_MASK;
			/*
			 * People should not be passing in unaligned addresses:
			 */
			WARN_ON_ONCE(1);
		}
	}
1875
1876 /* Must avoid aliasing mappings in the highmem code */
1877 kmap_flush_unused();
1878
1879 vm_unmap_aliases();
1880
1881 cpa.vaddr = addr;
1882 cpa.pages = pages;
1883 cpa.numpages = numpages;
1884 cpa.mask_set = mask_set;
1885 cpa.mask_clr = mask_clr;
1886 cpa.flags = in_flag;
1887 cpa.curpage = 0;
1888 cpa.force_split = force_split;
1889
1890 ret = __change_page_attr_set_clr(&cpa, 1);
1891
1892 /*
1893 * Check whether we really changed something:
1894 */
1895 if (!(cpa.flags & CPA_FLUSHTLB))
1896 goto out;
1897
1898 /*
1899 * No need to flush, when we did not set any of the caching
1900 * attributes:
1901 */
1902 cache = !!pgprot2cachemode(mask_set);
1903
1904 /*
1905 * On error; flush everything to be sure.
1906 */
1907 if (ret) {
1908 cpa_flush_all(cache);
1909 goto out;
1910 }
1911
1912 cpa_flush(&cpa, cache);
1913 out:
1914 return ret;
1915 }

static inline int change_page_attr_set(unsigned long *addr, int numpages,
				       pgprot_t mask, int array)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
		(array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
					 pgprot_t mask, int array)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
		(array ? CPA_ARRAY : 0), NULL);
}

static inline int cpa_set_pages_array(struct page **pages, int numpages,
				      pgprot_t mask)
{
	return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
		CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
					pgprot_t mask)
{
	return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
		CPA_PAGES_ARRAY, pages);
}

/*
 * __set_memory_prot is an internal helper for callers that have been passed
 * a pgprot_t value from upper layers and a reservation has already been taken.
 * If you want to set the pgprot to a specific page protection, use the
 * set_memory_xx() functions.
 */
int __set_memory_prot(unsigned long addr, int numpages, pgprot_t prot)
{
	return change_page_attr_set_clr(&addr, numpages, prot,
					__pgprot(~pgprot_val(prot)), 0, 0,
					NULL);
}
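
/*
 * Usage sketch (hypothetical caller, not from this file): with a memtype
 * reservation already held, a mapping can be pinned to an exact pgprot:
 *
 *	ret = __set_memory_prot(addr, numpages, PAGE_KERNEL_ROX);
 *
 * Since the clear mask is the complement of @prot, attribute bits absent
 * from @prot are actively cleared rather than left untouched.
 */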

int _set_memory_uc(unsigned long addr, int numpages)
{
	/*
	 * For now use UC MINUS; see the comments in ioremap().
	 * If you really need strong UC use ioremap_uc(), but note
	 * that you cannot override IO areas with set_memory_*() as
	 * these helpers cannot work with IO memory.
	 */
	return change_page_attr_set(&addr, numpages,
				    cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
				    0);
}

int set_memory_uc(unsigned long addr, int numpages)
{
	int ret;

	/*
	 * For now use UC MINUS; see the comments in ioremap().
	 */
	ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
			      _PAGE_CACHE_MODE_UC_MINUS, NULL);
	if (ret)
		goto out_err;

	ret = _set_memory_uc(addr, numpages);
	if (ret)
		goto out_free;

	return 0;

out_free:
	memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
	return ret;
}
EXPORT_SYMBOL(set_memory_uc);
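
/*
 * Usage sketch (hypothetical driver code, not from this file): a
 * successful set_memory_uc() takes a memtype reservation, so it must be
 * balanced by set_memory_wb() before the pages are freed:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);
 *
 *	if (set_memory_uc(buf, 4))
 *		goto fail;
 *	// ... uncached access ...
 *	set_memory_wb(buf, 4);
 *	free_pages(buf, 2);
 */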

int _set_memory_wc(unsigned long addr, int numpages)
{
	int ret;

	ret = change_page_attr_set(&addr, numpages,
				   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
				   0);
	if (!ret) {
		ret = change_page_attr_set_clr(&addr, numpages,
					       cachemode2pgprot(_PAGE_CACHE_MODE_WC),
					       __pgprot(_PAGE_CACHE_MASK),
					       0, 0, NULL);
	}
	return ret;
}

int set_memory_wc(unsigned long addr, int numpages)
{
	int ret;

	ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
			      _PAGE_CACHE_MODE_WC, NULL);
	if (ret)
		return ret;

	ret = _set_memory_wc(addr, numpages);
	if (ret)
		memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);

	return ret;
}
EXPORT_SYMBOL(set_memory_wc);
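
/*
 * Design note with a usage sketch (hypothetical caller, not from this
 * file): _set_memory_wc() applies UC- first and only then converts to WC,
 * mirroring the array path further below, so a mapping never jumps
 * straight from a cached type to write-combining. Callers just see the
 * combined result:
 *
 *	if (set_memory_wc((unsigned long)fb_virt, fb_pages))
 *		return -EIO;
 *	// ... streaming writes get combined ...
 *	set_memory_wb((unsigned long)fb_virt, fb_pages);
 */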

int _set_memory_wt(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages,
				    cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
}

int _set_memory_wb(unsigned long addr, int numpages)
{
	/* WB cache mode is hard wired to all cache attribute bits being 0 */
	return change_page_attr_clear(&addr, numpages,
				      __pgprot(_PAGE_CACHE_MASK), 0);
}

int set_memory_wb(unsigned long addr, int numpages)
{
	int ret;

	ret = _set_memory_wb(addr, numpages);
	if (ret)
		return ret;

	memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
	return 0;
}
EXPORT_SYMBOL(set_memory_wb);

/* Prevent speculative access to a page by marking it not-present */
#ifdef CONFIG_X86_64
int set_mce_nospec(unsigned long pfn)
{
	unsigned long decoy_addr;
	int rc;

	/* SGX pages are not in the 1:1 map */
	if (arch_is_platform_page(pfn << PAGE_SHIFT))
		return 0;
	/*
	 * We would like to just call:
	 *	set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
	 * but doing that would radically increase the odds of a
	 * speculative access to the poison page because we'd have
	 * the virtual address of the kernel 1:1 mapping sitting
	 * around in registers.
	 * Instead we get tricky. We create a non-canonical address
	 * that looks just like the one we want, but has bit 63 flipped.
	 * This relies on set_memory_XX() properly sanitizing any __pa()
	 * results with __PHYSICAL_MASK or PTE_PFN_MASK.
	 */
	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

	rc = set_memory_np(decoy_addr, 1);
	if (rc)
		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
	return rc;
}
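
/*
 * Worked example of the decoy address (arithmetic sketch only, assuming
 * the default 4-level PAGE_OFFSET of 0xffff888000000000 without KASLR):
 * for pfn 0x1234 the canonical 1:1 address would be
 *
 *	0xffff888000000000 + (0x1234 << PAGE_SHIFT) = 0xffff888001234000
 *
 * and flipping bit 63 via PAGE_OFFSET ^ BIT(63) yields the non-canonical
 *
 *	0x7fff888001234000
 *
 * which cannot be dereferenced by accident, while the __pa() masking
 * mentioned above still recovers the intended physical address.
 */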

/* Restore full speculative operation to the pfn. */
int clear_mce_nospec(unsigned long pfn)
{
	unsigned long addr = (unsigned long) pfn_to_kaddr(pfn);

	return set_memory_p(addr, 1);
}
EXPORT_SYMBOL_GPL(clear_mce_nospec);
#endif /* CONFIG_X86_64 */

int set_memory_x(unsigned long addr, int numpages)
{
	if (!(__supported_pte_mask & _PAGE_NX))
		return 0;

	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
}

int set_memory_nx(unsigned long addr, int numpages)
{
	if (!(__supported_pte_mask & _PAGE_NX))
		return 0;

	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW | _PAGE_DIRTY), 0);
}

int set_memory_rox(unsigned long addr, int numpages)
{
	pgprot_t clr = __pgprot(_PAGE_RW | _PAGE_DIRTY);

	if (__supported_pte_mask & _PAGE_NX)
		clr.pgprot |= _PAGE_NX;

	return change_page_attr_clear(&addr, numpages, clr, 0);
}
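
/*
 * Usage sketch (hypothetical module/JIT loader, not from this file):
 * generated code is typically sealed read-only and executable in a
 * single CPA operation:
 *
 *	ret = set_memory_rox((unsigned long)text_buf, text_pages);
 *	if (ret)
 *		return ret;
 *
 * One combined call needs only one TLB flush and avoids the ordering
 * pitfalls of separate set_memory_ro() and set_memory_x() calls.
 */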

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}

int set_memory_np_noalias(unsigned long addr, int numpages)
{
	return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
					__pgprot(_PAGE_PRESENT), 0,
					CPA_NO_CHECK_ALIAS, NULL);
}

int set_memory_p(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}

int set_memory_4k(unsigned long addr, int numpages)
{
	return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
					__pgprot(0), 1, 0, NULL);
}

int set_memory_nonglobal(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages,
				      __pgprot(_PAGE_GLOBAL), 0);
}

int set_memory_global(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages,
				    __pgprot(_PAGE_GLOBAL), 0);
}

/*
 * __set_memory_enc_pgtable() is used for the hypervisors that get
 * informed about "encryption" status via page tables.
 */
static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
{
	pgprot_t empty = __pgprot(0);
	struct cpa_data cpa;
	int ret;

	/* Should not be working on unaligned addresses */
	if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
		addr &= PAGE_MASK;

	memset(&cpa, 0, sizeof(cpa));
	cpa.vaddr = &addr;
	cpa.numpages = numpages;
	cpa.mask_set = enc ? pgprot_encrypted(empty) : pgprot_decrypted(empty);
	cpa.mask_clr = enc ? pgprot_decrypted(empty) : pgprot_encrypted(empty);
	cpa.pgd = init_mm.pgd;

	/* Must avoid aliasing mappings in the highmem code */
	kmap_flush_unused();
	vm_unmap_aliases();

	/* Flush the caches as needed before changing the encryption attribute. */
	if (x86_platform.guest.enc_tlb_flush_required(enc))
		cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());

	/* Notify hypervisor that we are about to set/clr encryption attribute. */
	ret = x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
	if (ret)
		goto vmm_fail;

	ret = __change_page_attr_set_clr(&cpa, 1);

	/*
	 * After changing the encryption attribute, we need to flush TLBs again
	 * in case any speculative TLB caching occurred (but no need to flush
	 * caches again). We could just use cpa_flush_all(), but in case TLB
	 * flushing gets optimized in the cpa_flush() path use the same logic
	 * as above.
	 */
	cpa_flush(&cpa, 0);

	if (ret)
		return ret;

	/* Notify hypervisor that we have successfully set/clr encryption attribute. */
	ret = x86_platform.guest.enc_status_change_finish(addr, numpages, enc);
	if (ret)
		goto vmm_fail;

	return 0;

vmm_fail:
	WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s: %d\n",
		  (void *)addr, numpages, enc ? "private" : "shared", ret);

	return ret;
}

/*
 * The lock serializes conversions between private and shared memory.
 *
 * It is taken for read on conversion. A write lock guarantees that no
 * concurrent conversions are in progress.
 */
static DECLARE_RWSEM(mem_enc_lock);

/*
 * Stop new private<->shared conversions.
 *
 * Taking the exclusive mem_enc_lock waits for in-flight conversions to
 * complete. The lock is not released to prevent new conversions from being
 * started.
 */
bool set_memory_enc_stop_conversion(void)
{
	/*
	 * In a crash scenario, sleep is not allowed. Try to take the lock.
	 * Failure indicates that there is a race with the conversion.
	 */
	if (oops_in_progress)
		return down_write_trylock(&mem_enc_lock);

	down_write(&mem_enc_lock);

	return true;
}

static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
	int ret = 0;

	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		if (!down_read_trylock(&mem_enc_lock))
			return -EBUSY;

		ret = __set_memory_enc_pgtable(addr, numpages, enc);

		up_read(&mem_enc_lock);
	}

	return ret;
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
	return __set_memory_enc_dec(addr, numpages, true);
}
EXPORT_SYMBOL_GPL(set_memory_encrypted);

int set_memory_decrypted(unsigned long addr, int numpages)
{
	return __set_memory_enc_dec(addr, numpages, false);
}
EXPORT_SYMBOL_GPL(set_memory_decrypted);
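
/*
 * Usage sketch (hypothetical CoCo guest driver, not from this file): a
 * buffer shared with the hypervisor or an untrusted device is converted
 * to shared before use and back to private before being freed:
 *
 *	if (set_memory_decrypted((unsigned long)vaddr, npages))
 *		return -EIO;
 *	// ... DMA / hypercall traffic through the shared buffer ...
 *	if (set_memory_encrypted((unsigned long)vaddr, npages))
 *		return -EIO;	// on failure, leak the pages: their
 *				// encryption state is no longer known
 */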

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

static int _set_pages_array(struct page **pages, int numpages,
			    enum page_cache_mode new_type)
{
	unsigned long start;
	unsigned long end;
	enum page_cache_mode set_type;
	int i;
	int free_idx;
	int ret;

	for (i = 0; i < numpages; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		if (memtype_reserve(start, end, new_type, NULL))
			goto err_out;
	}

	/* If WC, set to UC- first and then WC */
	set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
		_PAGE_CACHE_MODE_UC_MINUS : new_type;

	ret = cpa_set_pages_array(pages, numpages,
				  cachemode2pgprot(set_type));
	if (!ret && new_type == _PAGE_CACHE_MODE_WC)
		ret = change_page_attr_set_clr(NULL, numpages,
					       cachemode2pgprot(
						_PAGE_CACHE_MODE_WC),
					       __pgprot(_PAGE_CACHE_MASK),
					       0, CPA_PAGES_ARRAY, pages);
	if (ret)
		goto err_out;
	return 0; /* Success */
err_out:
	free_idx = i;
	for (i = 0; i < free_idx; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		memtype_free(start, end);
	}
	return -EINVAL;
}

int set_pages_array_uc(struct page **pages, int numpages)
{
	return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_UC_MINUS);
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_array_wc(struct page **pages, int numpages)
{
	return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WC);
}
EXPORT_SYMBOL(set_pages_array_wc);
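
/*
 * Usage sketch (hypothetical GPU-style caller, not from this file): the
 * array variants convert many scattered pages with one CPA pass and one
 * flush instead of a call per page:
 *
 *	struct page *pages[N];	// filled in by the allocator
 *
 *	if (set_pages_array_wc(pages, N))
 *		goto fail;
 *	// ... device streams through write-combined pages ...
 *	set_pages_array_wb(pages, N);	// also releases the memtype entries
 */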

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_array_wb(struct page **pages, int numpages)
{
	int retval;
	unsigned long start;
	unsigned long end;
	int i;

	/* WB cache mode is hard wired to all cache attribute bits being 0 */
	retval = cpa_clear_pages_array(pages, numpages,
				       __pgprot(_PAGE_CACHE_MASK));
	if (retval)
		return retval;

	for (i = 0; i < numpages; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		memtype_free(start, end);
	}

	return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.pgd = NULL,
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0),
				.flags = CPA_NO_CHECK_ALIAS };

	/*
	 * No alias checking needed for setting the present flag; otherwise
	 * we may need to break large pages for 64-bit kernel text mappings
	 * (this adds to complexity if we want to do this from atomic
	 * context especially). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 1);
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.pgd = NULL,
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.flags = CPA_NO_CHECK_ALIAS };

	/*
	 * No alias checking needed for setting the not-present flag;
	 * otherwise we may need to break large pages for 64-bit kernel text
	 * mappings (this adds to complexity if we want to do this from
	 * atomic context especially). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 1);
}

int set_direct_map_invalid_noflush(struct page *page)
{
	return __set_pages_np(page, 1);
}

int set_direct_map_default_noflush(struct page *page)
{
	return __set_pages_p(page, 1);
}

int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
{
	if (valid)
		return __set_pages_p(page, nr);

	return __set_pages_np(page, nr);
}
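
/*
 * Usage sketch (hypothetical caller, not from this file): the _noflush
 * variants leave TLB invalidation to the caller, which can batch it over
 * the whole range:
 *
 *	for (i = 0; i < nr; i++)
 *		set_direct_map_invalid_noflush(pages[i]);
 *	flush_tlb_kernel_range(start, start + nr * PAGE_SIZE);
 *
 * where start is assumed to be the direct-map address covering pages[0]
 * of a physically contiguous run.
 */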

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages for identity mappings are not used at boot time
	 * and hence no memory allocations during large page split.
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all TLBs, but that can
	 * deadlock, so flush only the current CPU. Preemption needs to be
	 * disabled around __flush_tlb_all() due to the CR3 reload in
	 * __native_flush_tlb().
	 */
	preempt_disable();
	__flush_tlb_all();
	preempt_enable();

	arch_flush_lazy_mmu_mode();
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return (pte_val(*pte) & _PAGE_PRESENT);
}
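
/*
 * Usage sketch (hypothetical caller, not from this file): with
 * DEBUG_PAGEALLOC enabled, free pages are unmapped from the direct map,
 * so code that wants to read arbitrary page contents checks first:
 *
 *	if (kernel_page_present(page))
 *		memcpy(dst, page_address(page), PAGE_SIZE);
 *	else
 *		;	// skip, or map the page temporarily
 */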

int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
				   unsigned numpages, unsigned long page_flags)
{
	int retval = -EINVAL;

	struct cpa_data cpa = {
		.vaddr = &address,
		.pfn = pfn,
		.pgd = pgd,
		.numpages = numpages,
		.mask_set = __pgprot(0),
		.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
		.flags = CPA_NO_CHECK_ALIAS,
	};

	WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");

	if (!(__supported_pte_mask & _PAGE_NX))
		goto out;

	if (!(page_flags & _PAGE_ENC))
		cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);

	cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);

	retval = __change_page_attr_set_clr(&cpa, 1);
	__flush_tlb_all();

out:
	return retval;
}

/*
 * __flush_tlb_all() flushes mappings only on the current CPU, and hence this
 * function shouldn't be used in an SMP environment. Presently, it's used only
 * during boot (way before smp_init()) by the EFI subsystem and hence is ok.
 */
int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
				     unsigned long numpages)
{
	int retval;

	/*
	 * The typical sequence for unmapping is to find a pte through
	 * lookup_address_in_pgd() (ideally, it should never return NULL
	 * because the address is already mapped) and change its protections.
	 * As pfn is the *target* of a mapping, it's not useful while
	 * unmapping.
	 */
	struct cpa_data cpa = {
		.vaddr = &address,
		.pfn = 0,
		.pgd = pgd,
		.numpages = numpages,
		.mask_set = __pgprot(0),
		.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
		.flags = CPA_NO_CHECK_ALIAS,
	};

	WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");

	retval = __change_page_attr_set_clr(&cpa, 1);
	__flush_tlb_all();

	return retval;
}

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "cpa-test.c"
#endif