xref: /linux/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c (revision 114143a595895c03fbefccfd8346fc51fb4908ed)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Support for the hypercall interface exposed to protected guests by
4  * pKVM.
5  *
6  * Author: Will Deacon <will@kernel.org>
7  * Copyright (C) 2024 Google LLC
8  */
9 
10 #include <linux/arm-smccc.h>
11 #include <linux/array_size.h>
12 #include <linux/io.h>
13 #include <linux/mem_encrypt.h>
14 #include <linux/mm.h>
15 #include <linux/pgtable.h>
16 
17 #include <asm/hypervisor.h>
18 
19 static size_t pkvm_granule;
20 
/*
 * Issue @func_id once for every hypervisor granule backing the guest page
 * at @phys.
 *
 * pKVM may manage memory at a granule smaller than the guest's PAGE_SIZE,
 * so one guest page can require several hypercalls.
 *
 * Returns 0 on success, -EPERM if the hypervisor rejects any granule.
 */
static int arm_smccc_do_one_page(u32 func_id, phys_addr_t phys)
{
	struct arm_smccc_res res;
	phys_addr_t gaddr;

	for (gaddr = phys; gaddr < phys + PAGE_SIZE; gaddr += pkvm_granule) {
		arm_smccc_1_1_invoke(func_id, gaddr, 0, 0, &res);
		if (res.a0 != SMCCC_RET_SUCCESS)
			return -EPERM;
	}

	return 0;
}
37 
/*
 * Apply @func_id to each of the @numpages virtually-contiguous pages
 * starting at @start. Stops and returns the first error encountered.
 */
static int __set_memory_range(u32 func_id, unsigned long start, int numpages)
{
	int i;

	for (i = 0; i < numpages; i++) {
		void *page = (void *)start + i * PAGE_SIZE;
		int ret = arm_smccc_do_one_page(func_id, virt_to_phys(page));

		if (ret)
			return ret;
	}

	return 0;
}
54 
pkvm_set_memory_encrypted(unsigned long addr,int numpages)55 static int pkvm_set_memory_encrypted(unsigned long addr, int numpages)
56 {
57 	return __set_memory_range(ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID,
58 				  addr, numpages);
59 }
60 
pkvm_set_memory_decrypted(unsigned long addr,int numpages)61 static int pkvm_set_memory_decrypted(unsigned long addr, int numpages)
62 {
63 	return __set_memory_range(ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID,
64 				  addr, numpages);
65 }
66 
67 static const struct arm64_mem_crypt_ops pkvm_crypt_ops = {
68 	.encrypt	= pkvm_set_memory_encrypted,
69 	.decrypt	= pkvm_set_memory_decrypted,
70 };
71 
/*
 * ioremap() hook: ask the hypervisor to enable MMIO emulation for every
 * page of the region [@phys, @phys + @size) before it is mapped.
 *
 * Returns 0 on success (or when the region is not device-mapped and is
 * therefore ignored), or a negative error code from the hypercall.
 */
static int mmio_guard_ioremap_hook(phys_addr_t phys, size_t size,
				   pgprot_t *prot)
{
	const u32 func_id = ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_FUNC_ID;
	phys_addr_t end;
	pteval_t protval = pgprot_val(*prot);

	/*
	 * We only expect MMIO emulation for regions mapped with device
	 * attributes.
	 */
	if (protval != PROT_DEVICE_nGnRE && protval != PROT_DEVICE_nGnRnE)
		return 0;

	/*
	 * Round the region out to page boundaries. @phys may be unaligned,
	 * so 'end' must be derived from the original value: computing
	 * PAGE_ALIGN_DOWN(phys) + PAGE_ALIGN(size) would miss the final
	 * page whenever the sub-page offset of @phys plus @size spills
	 * into an extra page.
	 */
	end = PAGE_ALIGN(phys + size);
	phys = PAGE_ALIGN_DOWN(phys);

	while (phys < end) {
		int err = arm_smccc_do_one_page(func_id, phys);

		if (err)
			return err;

		phys += PAGE_SIZE;
	}

	return 0;
}
101 
pkvm_init_hyp_services(void)102 void pkvm_init_hyp_services(void)
103 {
104 	int i;
105 	struct arm_smccc_res res;
106 	const u32 funcs[] = {
107 		ARM_SMCCC_KVM_FUNC_HYP_MEMINFO,
108 		ARM_SMCCC_KVM_FUNC_MEM_SHARE,
109 		ARM_SMCCC_KVM_FUNC_MEM_UNSHARE,
110 	};
111 
112 	for (i = 0; i < ARRAY_SIZE(funcs); ++i) {
113 		if (!kvm_arm_hyp_service_available(funcs[i]))
114 			return;
115 	}
116 
117 	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID,
118 			     0, 0, 0, &res);
119 	if (res.a0 > PAGE_SIZE) /* Includes error codes */
120 		return;
121 
122 	pkvm_granule = res.a0;
123 	arm64_mem_crypt_ops_register(&pkvm_crypt_ops);
124 
125 	if (kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MMIO_GUARD))
126 		arm64_ioremap_prot_hook_register(&mmio_guard_ioremap_hook);
127 }
128