xref: /linux/arch/s390/kernel/uv.c (revision f45f8f0ed4c6d3a9be27ff27347408e1c1bbb364)
// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2024
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/pagewalk.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);

/*
 * uv_info contains both host and guest information, but it is currently
 * only expected to be used within modules, i.e. by the KVM module or by
 * PV guest modules.
 *
 * The kernel itself will write these values once in uv_query_info()
 * and then make some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

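/*
 * Initialize the Ultravisor. The Init UVC donates the memory region at
 * @stor_base (@stor_len bytes) to the Ultravisor as its base storage;
 * once donated, the region is owned by the Ultravisor and no longer
 * usable by the kernel.
 */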
static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (u64)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

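	/*
	 * The base storage is donated to the Ultravisor below; it must be
	 * 1 MB aligned and lie below 2 GB, which is presumably a UV
	 * requirement and is why the allocation passes SZ_1M and SZ_2G.
	 */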
	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting it.
 *
 * @paddr: Absolute host address of the page to be destroyed
 */
static int uv_destroy(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the folio.
 */
int uv_destroy_folio(struct folio *folio)
{
	int rc;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	folio_get(folio);
	rc = uv_destroy(folio_to_phys(folio));
	if (!rc)
		clear_bit(PG_arch_1, &folio->flags);
	folio_put(folio);
	return rc;
}
EXPORT_SYMBOL(uv_destroy_folio);

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_destroy_pte(pte_t pte)
{
	VM_WARN_ON(!pte_present(pte));
	return uv_destroy_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of the page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_convert_from_secure);

/*
 * The caller must already hold a reference to the folio.
 */
int uv_convert_from_secure_folio(struct folio *folio)
{
	int rc;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	folio_get(folio);
	rc = uv_convert_from_secure(folio_to_phys(folio));
	if (!rc)
		clear_bit(PG_arch_1, &folio->flags);
	folio_put(folio);
	return rc;
}
EXPORT_SYMBOL_GPL(uv_convert_from_secure_folio);

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_convert_from_secure_pte(pte_t pte)
{
	VM_WARN_ON(!pte_present(pte));
	return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
}

/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although it is considered an import-like operation, the Unpin Page UVC
 * is not an actual import, so it is not affected.
 *
 * An export is also not needed when there is only one protected VM,
 * because the page cannot belong to the wrong VM in that case (there is
 * no "other VM" it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	/*
	 * The misc feature indicates, among other things, that importing a
	 * shared page from a different protected VM will automatically also
	 * transfer its ownership.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC))
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Calculate the expected ref_count for a folio that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * folio cannot be a large folio, for example.
 */
static int expected_folio_refs(struct folio *folio)
{
	int res;

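	/*
	 * Each mapping of the folio holds one reference; the swap cache
	 * holds another, as does the page cache (plus one more for
	 * attached private data, e.g. buffer heads).
	 */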
	res = folio_mapcount(folio);
	if (folio_test_swapcache(folio)) {
		res++;
	} else if (folio_mapping(folio)) {
		res++;
		if (folio->private)
			res++;
	}
	return res;
}

/**
 * __make_folio_secure() - make a folio secure
 * @folio: the folio to make secure
 * @uvcb: the uvcb that describes the UVC to be used
 *
 * The folio @folio will be made secure if possible, @uvcb will be passed
 * as-is to the UVC.
 *
 * Return: 0 on success;
 *         -EBUSY if the folio is in writeback or has too many references;
 *         -EAGAIN if the UVC needs to be attempted again;
 *         -ENXIO if the address is not mapped;
 *         -EINVAL if the UVC failed for other reasons.
 *
 * Context: The caller must hold exactly one extra reference on the folio
 *          (it's the same logic as split_folio()), and the folio must be
 *          locked.
 */
static int __make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (folio_test_writeback(folio))
		return -EBUSY;
	expected = expected_folio_refs(folio) + 1;
	if (!folio_ref_freeze(folio, expected))
		return -EBUSY;
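	/*
	 * Mark the folio as potentially secure before issuing the UVC, so
	 * that arch_make_folio_accessible() knows it may have to export it;
	 * the bit may overindicate, see the comment there.
	 */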
	set_bit(PG_arch_1, &folio->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	folio_ref_unfreeze(folio, expected);
	/*
	 * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

static int make_folio_secure(struct mm_struct *mm, struct folio *folio, struct uv_cb_header *uvcb)
{
	int rc;

	if (!folio_trylock(folio))
		return -EAGAIN;
	if (should_export_before_import(uvcb, mm))
		uv_convert_from_secure(folio_to_phys(folio));
	rc = __make_folio_secure(folio, uvcb);
	folio_unlock(folio);

	return rc;
}

/**
 * s390_wiggle_split_folio() - try to drain extra references to a folio and optionally split.
 * @mm:    the mm containing the folio to work on
 * @folio: the folio
 * @split: whether to split a large folio
 *
 * Context: Must be called while holding an extra reference to the folio;
 *          the mm lock should not be held.
 * Return: 0 if the folio was split successfully;
 *         -EAGAIN if the folio was not split successfully but another attempt
 *                 can be made, or if @split was set to false;
 *         -EINVAL in case of other errors. See split_folio().
 */
static int s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split)
{
	int rc;

	lockdep_assert_not_held(&mm->mmap_lock);
	folio_wait_writeback(folio);
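	/*
	 * Draining the per-CPU LRU caches may release transient references
	 * held there, so that a subsequent split or retry is more likely
	 * to succeed.
	 */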
	lru_add_drain_all();
	if (split) {
		folio_lock(folio);
		rc = split_folio(folio);
		folio_unlock(folio);

		if (rc != -EBUSY)
			return rc;
	}
	return -EAGAIN;
}

int make_hva_secure(struct mm_struct *mm, unsigned long hva, struct uv_cb_header *uvcb)
{
	struct vm_area_struct *vma;
	struct folio_walk fw;
	struct folio *folio;
	int rc;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, hva);
	if (!vma) {
		mmap_read_unlock(mm);
		return -EFAULT;
	}
	folio = folio_walk_start(&fw, vma, hva, 0);
	if (!folio) {
		mmap_read_unlock(mm);
		return -ENXIO;
	}

	folio_get(folio);
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest thus never reaches secure mode.
	 * If userspace plays dirty tricks and decides to map huge pages at a
	 * later point in time, it will receive a segmentation fault or
	 * KVM_RUN will return -EFAULT.
	 */
	if (folio_test_hugetlb(folio))
		rc = -EFAULT;
	else if (folio_test_large(folio))
		rc = -E2BIG;
	else if (!pte_write(fw.pte) || (pte_val(fw.pte) & _PAGE_INVALID))
		rc = -ENXIO;
	else
		rc = make_folio_secure(mm, folio, uvcb);
	folio_walk_end(&fw, vma);
	mmap_read_unlock(mm);

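	/*
	 * A large folio (-E2BIG) needs to be split, while a folio with
	 * unexpected extra references (-EBUSY) might shed them after a
	 * drain; both cases go through s390_wiggle_split_folio(), whose
	 * -EAGAIN tells the caller to retry.
	 */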
	if (rc == -E2BIG || rc == -EBUSY)
		rc = s390_wiggle_split_folio(mm, folio, rc == -E2BIG);
	folio_put(folio);

	return rc;
}
EXPORT_SYMBOL_GPL(make_hva_secure);

/*
 * To be called with the folio locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the folio concurrently. Having two
 * parallel calls to arch_make_folio_accessible is fine, as the UV calls will
 * become a no-op if the folio is already exported.
 */
int arch_make_folio_accessible(struct folio *folio)
{
	int rc = 0;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	/*
	 * PG_arch_1 is used in 2 places:
	 * 1. for storage keys of hugetlb folios and KVM
	 * 2. As an indication that this small folio might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never large folios, both variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &folio->flags))
		return 0;

	rc = uv_pin_shared(folio_to_phys(folio));
	if (!rc) {
		clear_bit(PG_arch_1, &folio->flags);
		return 0;
	}

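	/* The folio could not be pinned as shared, so try exporting it instead */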
	rc = uv_convert_from_secure(folio_to_phys(folio));
	if (!rc) {
		clear_bit(PG_arch_1, &folio->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
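
/*
 * The uv_query_* attributes below expose the data gathered by
 * uv_query_info(); uv_sysfs_init() registers them as read-only files
 * under /sys/firmware/uv/query/.
 */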

static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
			  uv_info.inst_calls_list[0],
			  uv_info.inst_calls_list[1],
			  uv_info.inst_calls_list[2],
			  uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
						struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n",
			  uv_info.max_assoc_secrets + uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static ssize_t uv_query_max_retr_secrets(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_retr_secrets_attr =
	__ATTR(max_retr_secrets, 0444, uv_query_max_retr_secrets, NULL);

static ssize_t uv_query_max_assoc_secrets(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_assoc_secrets);
}

static struct kobj_attribute uv_query_max_assoc_secrets_attr =
	__ATTR(max_assoc_secrets, 0444, uv_query_max_assoc_secrets, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	&uv_query_supp_add_secret_req_ver_attr.attr,
	&uv_query_supp_add_secret_pcf_attr.attr,
	&uv_query_supp_secret_types_attr.attr,
	&uv_query_max_secrets_attr.attr,
	&uv_query_max_assoc_secrets_attr.attr,
	&uv_query_max_retr_secrets_attr.attr,
	NULL,
};

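/*
 * The Query Ultravisor Keys UVC fills the control block with the hashes
 * of the keys installed on this machine; they are exposed as read-only
 * files under /sys/firmware/uv/keys/ (see uv_sysfs_init()).
 */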
static inline struct uv_cb_query_keys uv_query_keys(void)
{
	struct uv_cb_query_keys uvcb = {
		.header.cmd = UVC_CMD_QUERY_KEYS,
		.header.len = sizeof(uvcb)
	};

	uv_call(0, (u64)&uvcb);
	return uvcb;
}

static inline ssize_t emit_hash(struct uv_key_hash *hash, char *buf, int at)
{
	return sysfs_emit_at(buf, at, "%016llx%016llx%016llx%016llx\n",
			     hash->dword[0], hash->dword[1], hash->dword[2], hash->dword[3]);
}

static ssize_t uv_keys_host_key(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();

	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_HK], buf, 0);
}

static struct kobj_attribute uv_keys_host_key_attr =
	__ATTR(host_key, 0444, uv_keys_host_key, NULL);

static ssize_t uv_keys_backup_host_key(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();

	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_BACK_HK], buf, 0);
}

static struct kobj_attribute uv_keys_backup_host_key_attr =
	__ATTR(backup_host_key, 0444, uv_keys_backup_host_key, NULL);

static ssize_t uv_keys_all(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();
	ssize_t len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(uvcb.key_hashes); i++)
		len += emit_hash(uvcb.key_hashes + i, buf, len);

	return len;
}

static struct kobj_attribute uv_keys_all_attr =
	__ATTR(all, 0444, uv_keys_all, NULL);

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct attribute *uv_keys_attrs[] = {
	&uv_keys_host_key_attr.attr,
	&uv_keys_backup_host_key_attr.attr,
	&uv_keys_all_attr.attr,
	NULL,
};

static struct attribute_group uv_keys_attr_group = {
	.attrs = uv_keys_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prot_virt_guest);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prot_virt_host);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kset *uv_keys_kset;
static struct kobject *uv_kobj;

static int __init uv_sysfs_dir_init(const struct attribute_group *grp,
				    struct kset **uv_dir_kset, const char *name)
{
	struct kset *kset;
	int rc;

	kset = kset_create_and_add(name, NULL, uv_kobj);
	if (!kset)
		return -ENOMEM;
	*uv_dir_kset = kset;

	rc = sysfs_create_group(&kset->kobj, grp);
	if (rc)
		kset_unregister(kset);
	return rc;
}

static int __init uv_sysfs_init(void)
{
	int rc = -ENOMEM;

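	/* Facility 158 indicates the presence of the Ultravisor-call facility */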
	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	rc = uv_sysfs_dir_init(&uv_query_attr_group, &uv_query_kset, "query");
	if (rc)
		goto out_ind_files;

	/* Get installed key hashes if available, ignore any errors */
	if (test_bit_inv(BIT_UVC_CMD_QUERY_KEYS, uv_info.inst_calls_list))
		uv_sysfs_dir_init(&uv_keys_attr_group, &uv_keys_kset, "keys");

	return 0;

out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_sysfs_init);

/*
 * Find the secret with the given secret_id in the provided list.
 *
 * Context: might sleep.
 */
static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
			       const struct uv_secret_list *list,
			       struct uv_secret_list_item_hdr *secret)
{
	u16 i;

	for (i = 0; i < list->total_num_secrets; i++) {
		if (memcmp(secret_id, list->secrets[i].id, UV_SECRET_ID_LEN) == 0) {
			*secret = list->secrets[i].hdr;
			return 0;
		}
	}
	return -ENOENT;
}

/*
 * Do the actual search for `uv_get_secret_metadata`.
 *
 * Context: might sleep.
 */
static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
		       struct uv_secret_list *list,
		       struct uv_secret_list_item_hdr *secret)
{
	u16 start_idx = 0;
	u16 list_rc;
	int ret;

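	/*
	 * The List Secrets UVC returns a bounded number of entries per
	 * call; UVC_RC_MORE_DATA means more entries follow, starting at
	 * next_secret_idx. The loop condition also guards against an
	 * index that fails to advance.
	 */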
	do {
		uv_list_secrets(list, start_idx, &list_rc, NULL);
		if (list_rc != UVC_RC_EXECUTED && list_rc != UVC_RC_MORE_DATA) {
			if (list_rc == UVC_RC_INV_CMD)
				return -ENODEV;
			else
				return -EIO;
		}
		ret = find_secret_in_page(secret_id, list, secret);
		if (ret == 0)
			return ret;
		start_idx = list->next_secret_idx;
	} while (list_rc == UVC_RC_MORE_DATA && start_idx < list->next_secret_idx);

	return -ENOENT;
}

/**
 * uv_get_secret_metadata() - get secret metadata for a given secret id.
 * @secret_id: search pattern.
 * @secret: output data, containing the secret's metadata.
 *
 * Search for a secret with the given secret_id in the Ultravisor secret store.
 *
 * Context: might sleep.
 *
 * Return:
 * * %0:	- Found entry; secret->idx and secret->type are valid.
 * * %ENOENT:	- No entry found.
 * * %ENODEV:	- Not supported: UV not available or command not available.
 * * %ENOMEM:	- Buffer allocation failed.
 * * %EIO:	- Other unexpected UV error.
 */
int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
			   struct uv_secret_list_item_hdr *secret)
{
	struct uv_secret_list *buf;
	int rc;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = find_secret(secret_id, buf, secret);
	kfree(buf);
	return rc;
}
EXPORT_SYMBOL_GPL(uv_get_secret_metadata);
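
/*
 * Illustrative usage sketch: find a secret's metadata by ID, then retrieve
 * its value by index. "id" and "val" are made-up names, and the metadata
 * header's index field is assumed to be named "index" here.
 *
 *	struct uv_secret_list_item_hdr hdr;
 *	u8 val[64];
 *	int rc;
 *
 *	rc = uv_get_secret_metadata(id, &hdr);
 *	if (!rc)
 *		rc = uv_retrieve_secret(hdr.index, val, sizeof(val));
 */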

/**
 * uv_retrieve_secret() - get the secret value for the secret index.
 * @secret_idx: Secret index for which the secret should be retrieved.
 * @buf: Buffer to store the retrieved secret.
 * @buf_size: Size of the buffer. The correct buffer size is reported as part
 * of the result from `uv_get_secret_metadata`.
 *
 * Calls the Retrieve Secret UVC and translates the UV return code into an
 * errno.
 *
 * Context: might sleep.
 *
 * Return:
 * * %0:	- Entry found; buffer contains a valid secret.
 * * %ENOENT:	- No entry found or secret at the index is non-retrievable.
 * * %ENODEV:	- Not supported: UV not available or command not available.
 * * %EINVAL:	- Buffer too small for content.
 * * %EIO:	- Other unexpected UV error.
 */
int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size)
{
	struct uv_cb_retr_secr uvcb = {
		.header.len = sizeof(uvcb),
		.header.cmd = UVC_CMD_RETR_SECRET,
		.secret_idx = secret_idx,
		.buf_addr = (u64)buf,
		.buf_size = buf_size,
	};

	uv_call_sched(0, (u64)&uvcb);

	switch (uvcb.header.rc) {
	case UVC_RC_EXECUTED:
		return 0;
	case UVC_RC_INV_CMD:
		return -ENODEV;
	case UVC_RC_RETR_SECR_STORE_EMPTY:
	case UVC_RC_RETR_SECR_INV_SECRET:
	case UVC_RC_RETR_SECR_INV_IDX:
		return -ENOENT;
	case UVC_RC_RETR_SECR_BUF_SMALL:
		return -EINVAL;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(uv_retrieve_secret);