xref: /linux/arch/s390/kernel/uv.c (revision 954a209f431c06b62718a49b403bd4c549f0d6fb)
// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2024
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/pagewalk.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields correspond to the ones in arch/s390/boot/uv.c */
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);

/*
 * uv_info contains both host and guest information, but it is currently
 * only expected to be used within modules, either by the KVM module or
 * by any PV guest module.
 *
 * The kernel itself writes these values once in uv_query_info()
 * and then makes some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);
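
/*
 * Usage sketch (illustration only, mirroring the calls in
 * arch_make_folio_accessible() below): a caller passes the absolute
 * address of a small folio and treats a nonzero return as "cannot pin",
 * typically falling back to exporting the page instead:
 *
 *	if (uv_pin_shared(folio_to_phys(folio)))
 *		rc = uv_convert_from_secure(folio_to_phys(folio));
 */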

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the folio.
 */
int uv_destroy_folio(struct folio *folio)
{
	int rc;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	folio_get(folio);
	rc = uv_destroy(folio_to_phys(folio));
	if (!rc)
		clear_bit(PG_arch_1, &folio->flags);
	folio_put(folio);
	return rc;
}
EXPORT_SYMBOL(uv_destroy_folio);

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_destroy_pte(pte_t pte)
{
	VM_WARN_ON(!pte_present(pte));
	return uv_destroy_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_convert_from_secure);

/*
 * The caller must already hold a reference to the folio.
 */
int uv_convert_from_secure_folio(struct folio *folio)
{
	int rc;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	folio_get(folio);
	rc = uv_convert_from_secure(folio_to_phys(folio));
	if (!rc)
		clear_bit(PG_arch_1, &folio->flags);
	folio_put(folio);
	return rc;
}
EXPORT_SYMBOL_GPL(uv_convert_from_secure_folio);

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_convert_from_secure_pte(pte_t pte)
{
	VM_WARN_ON(!pte_present(pte));
	return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Calculate the expected ref_count for a folio that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * folio cannot be a large folio, for example.
 */
static int expected_folio_refs(struct folio *folio)
{
	int res;

	res = folio_mapcount(folio);
	if (folio_test_swapcache(folio)) {
		res++;
	} else if (folio_mapping(folio)) {
		res++;
		if (folio->private)
			res++;
	}
	return res;
}

/**
 * make_folio_secure() - make a folio secure
 * @folio: the folio to make secure
 * @uvcb: the uvcb that describes the UVC to be used
 *
 * The folio @folio will be made secure if possible, @uvcb will be passed
 * as-is to the UVC.
 *
 * Return: 0 on success;
 *         -EBUSY if the folio is in writeback or has too many references;
 *         -E2BIG if the folio is large;
 *         -EAGAIN if the UVC needs to be attempted again;
 *         -ENXIO if the address is not mapped;
 *         -EINVAL if the UVC failed for other reasons.
 *
 * Context: The caller must hold exactly one extra reference on the folio
 *          (it's the same logic as split_folio()).
 */
int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (folio_test_large(folio))
		return -E2BIG;
	if (folio_test_writeback(folio))
		return -EBUSY;
	expected = expected_folio_refs(folio) + 1;
	if (!folio_ref_freeze(folio, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &folio->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	folio_ref_unfreeze(folio, expected);
	/*
	 * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}
EXPORT_SYMBOL_GPL(make_folio_secure);
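
/*
 * Usage sketch (hypothetical caller, for illustration only): -EAGAIN is
 * meant to be retried once the caller has dropped its locks, e.g.:
 *
 *	do {
 *		rc = make_folio_secure(folio, uvcb);
 *	} while (rc == -EAGAIN);
 *
 * A real caller (see gmap_make_secure()) re-acquires the extra folio
 * reference and its locks between attempts; this sketch omits that.
 */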

/*
 * To be called with the folio locked or with an extra reference! This will
 * prevent gmap_make_secure() from touching the folio concurrently. Having two
 * parallel arch_make_folio_accessible() calls is fine, as the UV calls will
 * become a no-op if the folio is already exported.
 */
int arch_make_folio_accessible(struct folio *folio)
{
	int rc = 0;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	/*
	 * PG_arch_1 is used in 2 places:
	 * 1. for storage keys of hugetlb folios and KVM
	 * 2. As an indication that this small folio might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never large folios, both variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &folio->flags))
		return 0;

	rc = uv_pin_shared(folio_to_phys(folio));
	if (!rc) {
		clear_bit(PG_arch_1, &folio->flags);
		return 0;
	}

	rc = uv_convert_from_secure(folio_to_phys(folio));
	if (!rc) {
		clear_bit(PG_arch_1, &folio->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
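
/*
 * Minimal sketch of the calling convention described above
 * (illustration only, not a caller that exists in this file):
 *
 *	folio_get(folio);	// or folio_lock(folio)
 *	rc = arch_make_folio_accessible(folio);
 *	folio_put(folio);	// or folio_unlock(folio)
 *
 * The reference (or folio lock) keeps gmap_make_secure() from
 * converting the folio back to secure while it is being exported.
 */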

static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
			  uv_info.inst_calls_list[0],
			  uv_info.inst_calls_list[1],
			  uv_info.inst_calls_list[2],
			  uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
						struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n",
			  uv_info.max_assoc_secrets + uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static ssize_t uv_query_max_retr_secrets(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_retr_secrets_attr =
	__ATTR(max_retr_secrets, 0444, uv_query_max_retr_secrets, NULL);

static ssize_t uv_query_max_assoc_secrets(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_assoc_secrets);
}

static struct kobj_attribute uv_query_max_assoc_secrets_attr =
	__ATTR(max_assoc_secrets, 0444, uv_query_max_assoc_secrets, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	&uv_query_supp_add_secret_req_ver_attr.attr,
	&uv_query_supp_add_secret_pcf_attr.attr,
	&uv_query_supp_secret_types_attr.attr,
	&uv_query_max_secrets_attr.attr,
	&uv_query_max_assoc_secrets_attr.attr,
	&uv_query_max_retr_secrets_attr.attr,
	NULL,
};

static inline struct uv_cb_query_keys uv_query_keys(void)
{
	struct uv_cb_query_keys uvcb = {
		.header.cmd = UVC_CMD_QUERY_KEYS,
		.header.len = sizeof(uvcb)
	};

	uv_call(0, (uint64_t)&uvcb);
	return uvcb;
}

static inline ssize_t emit_hash(struct uv_key_hash *hash, char *buf, int at)
{
	return sysfs_emit_at(buf, at, "%016llx%016llx%016llx%016llx\n",
			     hash->dword[0], hash->dword[1], hash->dword[2], hash->dword[3]);
}

static ssize_t uv_keys_host_key(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();

	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_HK], buf, 0);
}

static struct kobj_attribute uv_keys_host_key_attr =
	__ATTR(host_key, 0444, uv_keys_host_key, NULL);

static ssize_t uv_keys_backup_host_key(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();

	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_BACK_HK], buf, 0);
}

static struct kobj_attribute uv_keys_backup_host_key_attr =
	__ATTR(backup_host_key, 0444, uv_keys_backup_host_key, NULL);

static ssize_t uv_keys_all(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();
	ssize_t len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(uvcb.key_hashes); i++)
		len += emit_hash(uvcb.key_hashes + i, buf, len);

	return len;
}

static struct kobj_attribute uv_keys_all_attr =
	__ATTR(all, 0444, uv_keys_all, NULL);

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct attribute *uv_keys_attrs[] = {
	&uv_keys_host_key_attr.attr,
	&uv_keys_backup_host_key_attr.attr,
	&uv_keys_all_attr.attr,
	NULL,
};

static struct attribute_group uv_keys_attr_group = {
	.attrs = uv_keys_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prot_virt_guest);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prot_virt_host);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kset *uv_keys_kset;
static struct kobject *uv_kobj;

static int __init uv_sysfs_dir_init(const struct attribute_group *grp,
				    struct kset **uv_dir_kset, const char *name)
{
	struct kset *kset;
	int rc;

	kset = kset_create_and_add(name, NULL, uv_kobj);
	if (!kset)
		return -ENOMEM;
	*uv_dir_kset = kset;

	rc = sysfs_create_group(&kset->kobj, grp);
	if (rc)
		kset_unregister(kset);
	return rc;
}

static int __init uv_sysfs_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	rc = uv_sysfs_dir_init(&uv_query_attr_group, &uv_query_kset, "query");
	if (rc)
		goto out_ind_files;

	/* Get installed key hashes if available, ignore any errors */
	if (test_bit_inv(BIT_UVC_CMD_QUERY_KEYS, uv_info.inst_calls_list))
		uv_sysfs_dir_init(&uv_keys_attr_group, &uv_keys_kset, "keys");

	return 0;

out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_sysfs_init);
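
/*
 * For reference, the hierarchy registered above appears under
 * /sys/firmware/uv (paths shown for illustration):
 *
 *	/sys/firmware/uv/prot_virt_guest
 *	/sys/firmware/uv/prot_virt_host
 *	/sys/firmware/uv/query/<facilities, max_cpus, max_guests, ...>
 *	/sys/firmware/uv/keys/<host_key, backup_host_key, all>
 */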

/*
 * Find the secret with the given secret_id in the provided list.
 *
 * Context: might sleep.
 */
static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
			       const struct uv_secret_list *list,
			       struct uv_secret_list_item_hdr *secret)
{
	u16 i;

	for (i = 0; i < list->total_num_secrets; i++) {
		if (memcmp(secret_id, list->secrets[i].id, UV_SECRET_ID_LEN) == 0) {
			*secret = list->secrets[i].hdr;
			return 0;
		}
	}
	return -ENOENT;
}

/*
 * Do the actual search for `uv_get_secret_metadata`.
 *
 * Context: might sleep.
 */
static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
		       struct uv_secret_list *list,
		       struct uv_secret_list_item_hdr *secret)
{
	u16 start_idx = 0;
	u16 list_rc;
	int ret;

	do {
		uv_list_secrets(list, start_idx, &list_rc, NULL);
		if (list_rc != UVC_RC_EXECUTED && list_rc != UVC_RC_MORE_DATA) {
			if (list_rc == UVC_RC_INV_CMD)
				return -ENODEV;
			else
				return -EIO;
		}
		ret = find_secret_in_page(secret_id, list, secret);
		if (ret == 0)
			return ret;
		start_idx = list->next_secret_idx;
	} while (list_rc == UVC_RC_MORE_DATA && start_idx < list->next_secret_idx);

	return -ENOENT;
}

/**
 * uv_get_secret_metadata() - get secret metadata for a given secret id.
 * @secret_id: search pattern.
 * @secret: output data, containing the secret's metadata.
 *
 * Search for a secret with the given secret_id in the Ultravisor secret store.
 *
 * Context: might sleep.
 *
 * Return:
 * * %0:	- Found entry; secret->idx and secret->type are valid.
 * * %ENOENT:	- No entry found.
 * * %ENODEV:	- Not supported: UV not available or command not available.
 * * %ENOMEM:	- Buffer allocation failed.
 * * %EIO:	- Other unexpected UV error.
 */
int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
			   struct uv_secret_list_item_hdr *secret)
{
	struct uv_secret_list *buf;
	int rc;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = find_secret(secret_id, buf, secret);
	kfree(buf);
	return rc;
}
EXPORT_SYMBOL_GPL(uv_get_secret_metadata);
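
/*
 * Usage sketch (hypothetical caller, for illustration; use() is a
 * placeholder, field names as documented above):
 *
 *	struct uv_secret_list_item_hdr hdr;
 *	int rc = uv_get_secret_metadata(id, &hdr);
 *
 *	if (!rc)
 *		use(hdr);	// hdr.idx and hdr.type are now valid
 */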

/**
 * uv_retrieve_secret() - get the secret value for the secret index.
 * @secret_idx: Secret index for which the secret should be retrieved.
 * @buf: Buffer to store the retrieved secret.
 * @buf_size: Size of the buffer. The correct buffer size is reported as part of
 * the result from `uv_get_secret_metadata`.
 *
 * Calls the Retrieve Secret UVC and translates the UV return code into an errno.
 *
 * Context: might sleep.
 *
 * Return:
 * * %0:	- Entry found; buffer contains a valid secret.
 * * %ENOENT:	- No entry found or secret at the index is non-retrievable.
 * * %ENODEV:	- Not supported: UV not available or command not available.
 * * %EINVAL:	- Buffer too small for content.
 * * %EIO:	- Other unexpected UV error.
 */
int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size)
{
	struct uv_cb_retr_secr uvcb = {
		.header.len = sizeof(uvcb),
		.header.cmd = UVC_CMD_RETR_SECRET,
		.secret_idx = secret_idx,
		.buf_addr = (u64)buf,
		.buf_size = buf_size,
	};

	uv_call_sched(0, (u64)&uvcb);

	switch (uvcb.header.rc) {
	case UVC_RC_EXECUTED:
		return 0;
	case UVC_RC_INV_CMD:
		return -ENODEV;
	case UVC_RC_RETR_SECR_STORE_EMPTY:
	case UVC_RC_RETR_SECR_INV_SECRET:
	case UVC_RC_RETR_SECR_INV_IDX:
		return -ENOENT;
	case UVC_RC_RETR_SECR_BUF_SMALL:
		return -EINVAL;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(uv_retrieve_secret);
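
/*
 * Combined usage sketch (illustration only; MAX_SECRET_SIZE is a
 * placeholder, not a real constant): look up a secret by id, then
 * retrieve its value via the index from the metadata:
 *
 *	struct uv_secret_list_item_hdr hdr;
 *	u8 val[MAX_SECRET_SIZE];
 *
 *	if (!uv_get_secret_metadata(id, &hdr))
 *		rc = uv_retrieve_secret(hdr.idx, val, sizeof(val));
 */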