xref: /linux/arch/powerpc/platforms/pseries/svm.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Secure VM platform
 *
 * Copyright 2018 IBM Corporation
 * Author: Anshuman Khandual <khandual@linux.vnet.ibm.com>
 */

#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>
#include <linux/cc_platform.h>
#include <asm/machdep.h>
#include <asm/svm.h>
#include <asm/swiotlb.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>

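/*
 * Early init for secure guests: force all DMA through a persistent SWIOTLB
 * bounce buffer and share that buffer with the hypervisor, since the
 * hypervisor cannot access regular (secure) guest memory.
 */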
static int __init init_svm(void)
{
	if (!is_secure_guest())
		return 0;

	/* Don't release the SWIOTLB buffer. */
	ppc_swiotlb_enable = 1;

	/*
	 * Since the guest memory is inaccessible to the host, devices always
	 * need to use the SWIOTLB buffer for DMA even if dma_capable() says
	 * otherwise.
	 */
	ppc_swiotlb_flags |= SWIOTLB_ANY | SWIOTLB_FORCE;

	/* Share the SWIOTLB buffer with the host. */
	swiotlb_update_mem_attributes();

	return 0;
}
machine_early_initcall(pseries, init_svm);

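/*
 * set_memory_encrypted() - stop sharing a range of pages with the hypervisor
 * by asking the ultravisor to unshare them again.
 *
 * @addr must be page aligned; @numpages is the length of the range. This is
 * a no-op unless this is a secure guest (CC_ATTR_MEM_ENCRYPT).
 */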
int set_memory_encrypted(unsigned long addr, int numpages)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return 0;

	if (!PAGE_ALIGNED(addr))
		return -EINVAL;

	uv_unshare_page(PHYS_PFN(__pa(addr)), numpages);

	return 0;
}

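/*
 * set_memory_decrypted() - share a range of pages with the hypervisor via the
 * ultravisor, e.g. so it can be used for bounce buffering; this is the path
 * by which swiotlb_update_mem_attributes() in init_svm() above shares the
 * SWIOTLB buffer. Same constraints as set_memory_encrypted().
 */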
int set_memory_decrypted(unsigned long addr, int numpages)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return 0;

	if (!PAGE_ALIGNED(addr))
		return -EINVAL;

	uv_share_page(PHYS_PFN(__pa(addr)), numpages);

	return 0;
}

/* There's one dispatch log per CPU. */
#define NR_DTL_PAGE (DISPATCH_LOG_BYTES * CONFIG_NR_CPUS / PAGE_SIZE)

static struct page *dtl_page_store[NR_DTL_PAGE];
static long dtl_nr_pages;

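/* Has this dispatch trace log page already been shared with the hypervisor? */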
static bool is_dtl_page_shared(struct page *page)
{
	long i;

	for (i = 0; i < dtl_nr_pages; i++)
		if (dtl_page_store[i] == page)
			return true;

	return false;
}

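/*
 * Constructor for the dispatch trace log cache: several per-CPU dispatch logs
 * can end up on the same page (DISPATCH_LOG_BYTES can be smaller than
 * PAGE_SIZE), so record each backing page in dtl_page_store and share it with
 * the hypervisor only once.
 */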
void dtl_cache_ctor(void *addr)
{
	unsigned long pfn = PHYS_PFN(__pa(addr));
	struct page *page = pfn_to_page(pfn);

	if (!is_dtl_page_shared(page)) {
		dtl_page_store[dtl_nr_pages] = page;
		dtl_nr_pages++;
		WARN_ON(dtl_nr_pages >= NR_DTL_PAGE);
		uv_share_page(pfn, 1);
	}
}
96