// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2024 Ventana Micro Systems Inc.
 */

#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <asm/kvm_nacl.h>

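/*
 * Static keys tracking availability of the SBI nested acceleration
 * (NACL) extension and its optional features, plus the per-CPU shared
 * memory registered with the SBI implementation.
 */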
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_available);
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_csr_available);
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_hfence_available);
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_sret_available);
DEFINE_STATIC_KEY_FALSE(kvm_riscv_nacl_autoswap_csr_available);
DEFINE_PER_CPU(struct kvm_riscv_nacl, kvm_riscv_nacl);

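/*
 * Queue an HFENCE request in the per-CPU NACL shared memory by claiming
 * the first HFENCE entry whose pending bit is clear. If every entry is
 * pending, ask the SBI implementation to process all entries via
 * nacl_sync_hfence() and retry a bounded number of times.
 */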
void __kvm_riscv_nacl_hfence(void *shmem,
			     unsigned long control,
			     unsigned long page_num,
			     unsigned long page_count)
{
	int i, ent = -1, try_count = 5;
	unsigned long *entp;

again:
	/* Find an HFENCE entry whose pending bit is clear */
	for (i = 0; i < SBI_NACL_SHMEM_HFENCE_ENTRY_MAX; i++) {
		entp = shmem + SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(i);
		if (lelong_to_cpu(*entp) & SBI_NACL_SHMEM_HFENCE_CONFIG_PEND)
			continue;

		ent = i;
		break;
	}

	if (ent < 0) {
		/*
		 * All entries are pending; have the SBI implementation
		 * process them, then retry at most five times.
		 */
		if (try_count--) {
			nacl_sync_hfence(-1UL);
			goto again;
		} else {
			pr_warn("KVM: No free entry in NACL shared memory\n");
			return;
		}
	}

	/* Fill in the claimed entry */
	entp = shmem + SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(ent);
	*entp = cpu_to_lelong(control);
	entp = shmem + SBI_NACL_SHMEM_HFENCE_ENTRY_PNUM(ent);
	*entp = cpu_to_lelong(page_num);
	entp = shmem + SBI_NACL_SHMEM_HFENCE_ENTRY_PCOUNT(ent);
	*entp = cpu_to_lelong(page_count);
}

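/*
 * Register this CPU's shared memory with the SBI implementation.
 * Does nothing (and reports success) when NACL is unavailable.
 */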
int kvm_riscv_nacl_enable(void)
{
	int rc;
	struct sbiret ret;
	struct kvm_riscv_nacl *nacl;

	if (!kvm_riscv_nacl_available())
		return 0;
	nacl = this_cpu_ptr(&kvm_riscv_nacl);

	ret = sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SET_SHMEM,
			nacl->shmem_phys, 0, 0, 0, 0, 0);
	rc = sbi_err_map_linux_errno(ret.error);
	if (rc)
		return rc;

	return 0;
}

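/* Unregister this CPU's shared memory from the SBI implementation. */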
void kvm_riscv_nacl_disable(void)
{
	if (!kvm_riscv_nacl_available())
		return;

	sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SET_SHMEM,
		  SBI_SHMEM_DISABLE, SBI_SHMEM_DISABLE, 0, 0, 0, 0);
}

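/* Free the shared memory of all possible CPUs. */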
void kvm_riscv_nacl_exit(void)
{
	int cpu;
	struct kvm_riscv_nacl *nacl;

	if (!kvm_riscv_nacl_available())
		return;

	/* Free per-CPU shared memory */
	for_each_possible_cpu(cpu) {
		nacl = per_cpu_ptr(&kvm_riscv_nacl, cpu);
		if (!nacl->shmem)
			continue;

		free_pages((unsigned long)nacl->shmem,
			   get_order(SBI_NACL_SHMEM_SIZE));
		nacl->shmem = NULL;
		nacl->shmem_phys = 0;
	}
}

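/* Probe an optional NACL feature; a non-zero value means it is present. */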
static long nacl_probe_feature(long feature_id)
{
	struct sbiret ret;

	if (!kvm_riscv_nacl_available())
		return 0;

	ret = sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_PROBE_FEATURE,
			feature_id, 0, 0, 0, 0, 0);
	return ret.value;
}

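/*
 * Detect the NACL extension (requires SBI v1.0 or later), probe its
 * optional features, and allocate shared memory for every possible CPU.
 */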
int kvm_riscv_nacl_init(void)
{
	int cpu;
	struct page *shmem_page;
	struct kvm_riscv_nacl *nacl;

	if (sbi_spec_version < sbi_mk_version(1, 0) ||
	    sbi_probe_extension(SBI_EXT_NACL) <= 0)
		return -ENODEV;

	/* Enable NACL support */
	static_branch_enable(&kvm_riscv_nacl_available);

	/* Probe NACL features */
	if (nacl_probe_feature(SBI_NACL_FEAT_SYNC_CSR))
		static_branch_enable(&kvm_riscv_nacl_sync_csr_available);
	if (nacl_probe_feature(SBI_NACL_FEAT_SYNC_HFENCE))
		static_branch_enable(&kvm_riscv_nacl_sync_hfence_available);
	if (nacl_probe_feature(SBI_NACL_FEAT_SYNC_SRET))
		static_branch_enable(&kvm_riscv_nacl_sync_sret_available);
	if (nacl_probe_feature(SBI_NACL_FEAT_AUTOSWAP_CSR))
		static_branch_enable(&kvm_riscv_nacl_autoswap_csr_available);

	/* Allocate per-CPU shared memory */
	for_each_possible_cpu(cpu) {
		nacl = per_cpu_ptr(&kvm_riscv_nacl, cpu);

		shmem_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(SBI_NACL_SHMEM_SIZE));
		if (!shmem_page) {
			kvm_riscv_nacl_exit();
			return -ENOMEM;
		}
		nacl->shmem = page_to_virt(shmem_page);
		nacl->shmem_phys = page_to_phys(shmem_page);
	}

	return 0;
}
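
/*
 * A minimal sketch of the expected lifecycle, assuming a caller along
 * the lines of the KVM RISC-V module setup (the actual call sites live
 * outside this file):
 *
 *	rc = kvm_riscv_nacl_init();	// once, at module load
 *	if (rc && rc != -ENODEV)	// -ENODEV just means no NACL;
 *		return rc;		// KVM can run without it
 *	...
 *	kvm_riscv_nacl_enable();	// on each CPU when enabling
 *					// virtualization
 *	...
 *	kvm_riscv_nacl_disable();	// on each CPU when disabling
 *					// virtualization
 *	kvm_riscv_nacl_exit();		// once, at module unload
 */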