xref: /linux/arch/s390/kernel/uv.c (revision fcab107abe1ab5be9dbe874baa722372da8f4f73)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common Ultravisor functions and initialization
4  *
5  * Copyright IBM Corp. 2019, 2024
6  */
7 #define KMSG_COMPONENT "prot_virt"
8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9 
10 #include <linux/kernel.h>
11 #include <linux/types.h>
12 #include <linux/sizes.h>
13 #include <linux/bitmap.h>
14 #include <linux/memblock.h>
15 #include <linux/pagemap.h>
16 #include <linux/swap.h>
17 #include <linux/pagewalk.h>
18 #include <linux/backing-dev.h>
19 #include <asm/facility.h>
20 #include <asm/sections.h>
21 #include <asm/uv.h>
22 
23 /* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
24 int __bootdata_preserved(prot_virt_guest);
25 EXPORT_SYMBOL(prot_virt_guest);
26 
27 /*
28  * uv_info contains both host and guest information but it's currently only
29  * expected to be used within modules if it's the KVM module or for
30  * any PV guest module.
31  *
32  * The kernel itself will write these values once in uv_query_info()
33  * and then make some of them readable via a sysfs interface.
34  */
35 struct uv_info __bootdata_preserved(uv_info);
36 EXPORT_SYMBOL(uv_info);
37 
38 int __bootdata_preserved(prot_virt_host);
39 EXPORT_SYMBOL(prot_virt_host);
40 
41 static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
42 {
43 	struct uv_cb_init uvcb = {
44 		.header.cmd = UVC_CMD_INIT_UV,
45 		.header.len = sizeof(uvcb),
46 		.stor_origin = stor_base,
47 		.stor_len = stor_len,
48 	};
49 
50 	if (uv_call(0, (uint64_t)&uvcb)) {
51 		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0%x\n",
52 		       uvcb.header.rc, uvcb.header.rrc);
53 		return -1;
54 	}
55 	return 0;
56 }
57 
58 void __init setup_uv(void)
59 {
60 	void *uv_stor_base;
61 
62 	if (!is_prot_virt_host())
63 		return;
64 
65 	uv_stor_base = memblock_alloc_try_nid(
66 		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
67 		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
68 	if (!uv_stor_base) {
69 		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
70 			uv_info.uv_base_stor_len);
71 		goto fail;
72 	}
73 
74 	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
75 		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
76 		goto fail;
77 	}
78 
79 	pr_info("Reserving %luMB as ultravisor base storage\n",
80 		uv_info.uv_base_stor_len >> 20);
81 	return;
82 fail:
83 	pr_info("Disabling support for protected virtualization");
84 	prot_virt_host = 0;
85 }
86 
87 /*
88  * Requests the Ultravisor to pin the page in the shared state. This will
89  * cause an intercept when the guest attempts to unshare the pinned page.
90  */
91 int uv_pin_shared(unsigned long paddr)
92 {
93 	struct uv_cb_cfs uvcb = {
94 		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
95 		.header.len = sizeof(uvcb),
96 		.paddr = paddr,
97 	};
98 
99 	if (uv_call(0, (u64)&uvcb))
100 		return -EINVAL;
101 	return 0;
102 }
103 EXPORT_SYMBOL_GPL(uv_pin_shared);
104 
105 /*
106  * Requests the Ultravisor to destroy a guest page and make it
107  * accessible to the host. The destroy clears the page instead of
108  * exporting.
109  *
110  * @paddr: Absolute host address of page to be destroyed
111  */
112 static int uv_destroy(unsigned long paddr)
113 {
114 	struct uv_cb_cfs uvcb = {
115 		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
116 		.header.len = sizeof(uvcb),
117 		.paddr = paddr
118 	};
119 
120 	if (uv_call(0, (u64)&uvcb)) {
121 		/*
122 		 * Older firmware uses 107/d as an indication of a non secure
123 		 * page. Let us emulate the newer variant (no-op).
124 		 */
125 		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
126 			return 0;
127 		return -EINVAL;
128 	}
129 	return 0;
130 }
131 
132 /*
133  * The caller must already hold a reference to the folio
134  */
135 int uv_destroy_folio(struct folio *folio)
136 {
137 	int rc;
138 
139 	/* Large folios cannot be secure */
140 	if (unlikely(folio_test_large(folio)))
141 		return 0;
142 
143 	folio_get(folio);
144 	rc = uv_destroy(folio_to_phys(folio));
145 	if (!rc)
146 		clear_bit(PG_arch_1, &folio->flags);
147 	folio_put(folio);
148 	return rc;
149 }
150 EXPORT_SYMBOL(uv_destroy_folio);
151 
152 /*
153  * The present PTE still indirectly holds a folio reference through the mapping.
154  */
155 int uv_destroy_pte(pte_t pte)
156 {
157 	VM_WARN_ON(!pte_present(pte));
158 	return uv_destroy_folio(pfn_folio(pte_pfn(pte)));
159 }
160 
161 /*
162  * Requests the Ultravisor to encrypt a guest page and make it
163  * accessible to the host for paging (export).
164  *
165  * @paddr: Absolute host address of page to be exported
166  */
167 int uv_convert_from_secure(unsigned long paddr)
168 {
169 	struct uv_cb_cfs uvcb = {
170 		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
171 		.header.len = sizeof(uvcb),
172 		.paddr = paddr
173 	};
174 
175 	if (uv_call(0, (u64)&uvcb))
176 		return -EINVAL;
177 	return 0;
178 }
179 EXPORT_SYMBOL_GPL(uv_convert_from_secure);
180 
181 /*
182  * The caller must already hold a reference to the folio.
183  */
184 int uv_convert_from_secure_folio(struct folio *folio)
185 {
186 	int rc;
187 
188 	/* Large folios cannot be secure */
189 	if (unlikely(folio_test_large(folio)))
190 		return 0;
191 
192 	folio_get(folio);
193 	rc = uv_convert_from_secure(folio_to_phys(folio));
194 	if (!rc)
195 		clear_bit(PG_arch_1, &folio->flags);
196 	folio_put(folio);
197 	return rc;
198 }
199 EXPORT_SYMBOL_GPL(uv_convert_from_secure_folio);
200 
201 /*
202  * The present PTE still indirectly holds a folio reference through the mapping.
203  */
204 int uv_convert_from_secure_pte(pte_t pte)
205 {
206 	VM_WARN_ON(!pte_present(pte));
207 	return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
208 }
209 
210 /**
211  * should_export_before_import - Determine whether an export is needed
212  * before an import-like operation
213  * @uvcb: the Ultravisor control block of the UVC to be performed
214  * @mm: the mm of the process
215  *
216  * Returns whether an export is needed before every import-like operation.
217  * This is needed for shared pages, which don't trigger a secure storage
218  * exception when accessed from a different guest.
219  *
220  * Although considered as one, the Unpin Page UVC is not an actual import,
221  * so it is not affected.
222  *
223  * No export is needed also when there is only one protected VM, because the
224  * page cannot belong to the wrong VM in that case (there is no "other VM"
225  * it can belong to).
226  *
227  * Return: true if an export is needed before every import, otherwise false.
228  */
229 static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
230 {
231 	/*
232 	 * The misc feature indicates, among other things, that importing a
233 	 * shared page from a different protected VM will automatically also
234 	 * transfer its ownership.
235 	 */
236 	if (uv_has_feature(BIT_UV_FEAT_MISC))
237 		return false;
238 	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
239 		return false;
240 	return atomic_read(&mm->context.protected_count) > 1;
241 }
242 
243 /*
244  * Calculate the expected ref_count for a folio that would otherwise have no
245  * further pins. This was cribbed from similar functions in other places in
246  * the kernel, but with some slight modifications. We know that a secure
247  * folio can not be a large folio, for example.
248  */
249 static int expected_folio_refs(struct folio *folio)
250 {
251 	int res;
252 
253 	res = folio_mapcount(folio);
254 	if (folio_test_swapcache(folio)) {
255 		res++;
256 	} else if (folio_mapping(folio)) {
257 		res++;
258 		if (folio->private)
259 			res++;
260 	}
261 	return res;
262 }
263 
264 /**
265  * __make_folio_secure() - make a folio secure
266  * @folio: the folio to make secure
267  * @uvcb: the uvcb that describes the UVC to be used
268  *
269  * The folio @folio will be made secure if possible, @uvcb will be passed
270  * as-is to the UVC.
271  *
272  * Return: 0 on success;
273  *         -EBUSY if the folio is in writeback or has too many references;
274  *         -EAGAIN if the UVC needs to be attempted again;
275  *         -ENXIO if the address is not mapped;
276  *         -EINVAL if the UVC failed for other reasons.
277  *
278  * Context: The caller must hold exactly one extra reference on the folio
279  *          (it's the same logic as split_folio()), and the folio must be
280  *          locked.
281  */
282 static int __make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
283 {
284 	int expected, cc = 0;
285 
286 	if (folio_test_writeback(folio))
287 		return -EBUSY;
288 	expected = expected_folio_refs(folio) + 1;
289 	if (!folio_ref_freeze(folio, expected))
290 		return -EBUSY;
291 	set_bit(PG_arch_1, &folio->flags);
292 	/*
293 	 * If the UVC does not succeed or fail immediately, we don't want to
294 	 * loop for long, or we might get stall notifications.
295 	 * On the other hand, this is a complex scenario and we are holding a lot of
296 	 * locks, so we can't easily sleep and reschedule. We try only once,
297 	 * and if the UVC returned busy or partial completion, we return
298 	 * -EAGAIN and we let the callers deal with it.
299 	 */
300 	cc = __uv_call(0, (u64)uvcb);
301 	folio_ref_unfreeze(folio, expected);
302 	/*
303 	 * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
304 	 * If busy or partially completed, return -EAGAIN.
305 	 */
306 	if (cc == UVC_CC_OK)
307 		return 0;
308 	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
309 		return -EAGAIN;
310 	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
311 }
312 
313 static int make_folio_secure(struct mm_struct *mm, struct folio *folio, struct uv_cb_header *uvcb)
314 {
315 	int rc;
316 
317 	if (!folio_trylock(folio))
318 		return -EAGAIN;
319 	if (should_export_before_import(uvcb, mm))
320 		uv_convert_from_secure(folio_to_phys(folio));
321 	rc = __make_folio_secure(folio, uvcb);
322 	folio_unlock(folio);
323 
324 	return rc;
325 }
326 
327 /**
328  * s390_wiggle_split_folio() - try to drain extra references to a folio and
329  *			       split the folio if it is large.
330  * @mm:    the mm containing the folio to work on
331  * @folio: the folio
332  *
333  * Context: Must be called while holding an extra reference to the folio;
334  *          the mm lock should not be held.
335  * Return: 0 if the operation was successful;
336  *	   -EAGAIN if splitting the large folio was not successful,
337  *		   but another attempt can be made;
338  *	   -EINVAL in case of other folio splitting errors. See split_folio().
339  */
340 static int s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio)
341 {
342 	int rc, tried_splits;
343 
344 	lockdep_assert_not_held(&mm->mmap_lock);
345 	folio_wait_writeback(folio);
346 	lru_add_drain_all();
347 
348 	if (!folio_test_large(folio))
349 		return 0;
350 
351 	for (tried_splits = 0; tried_splits < 2; tried_splits++) {
352 		struct address_space *mapping;
353 		loff_t lstart, lend;
354 		struct inode *inode;
355 
356 		folio_lock(folio);
357 		rc = split_folio(folio);
358 		if (rc != -EBUSY) {
359 			folio_unlock(folio);
360 			return rc;
361 		}
362 
363 		/*
364 		 * Splitting with -EBUSY can fail for various reasons, but we
365 		 * have to handle one case explicitly for now: some mappings
366 		 * don't allow for splitting dirty folios; writeback will
367 		 * mark them clean again, including marking all page table
368 		 * entries mapping the folio read-only, to catch future write
369 		 * attempts.
370 		 *
371 		 * While the system should be writing back dirty folios in the
372 		 * background, we obtained this folio by looking up a writable
373 		 * page table entry. On these problematic mappings, writable
374 		 * page table entries imply dirty folios, preventing the
375 		 * split in the first place.
376 		 *
377 		 * To prevent a livelock when trigger writeback manually and
378 		 * letting the caller look up the folio again in the page
379 		 * table (turning it dirty), immediately try to split again.
380 		 *
381 		 * This is only a problem for some mappings (e.g., XFS);
382 		 * mappings that do not support writeback (e.g., shmem) do not
383 		 * apply.
384 		 */
385 		if (!folio_test_dirty(folio) || folio_test_anon(folio) ||
386 		    !folio->mapping || !mapping_can_writeback(folio->mapping)) {
387 			folio_unlock(folio);
388 			break;
389 		}
390 
391 		/*
392 		 * Ideally, we'd only trigger writeback on this exact folio. But
393 		 * there is no easy way to do that, so we'll stabilize the
394 		 * mapping while we still hold the folio lock, so we can drop
395 		 * the folio lock to trigger writeback on the range currently
396 		 * covered by the folio instead.
397 		 */
398 		mapping = folio->mapping;
399 		lstart = folio_pos(folio);
400 		lend = lstart + folio_size(folio) - 1;
401 		inode = igrab(mapping->host);
402 		folio_unlock(folio);
403 
404 		if (unlikely(!inode))
405 			break;
406 
407 		filemap_write_and_wait_range(mapping, lstart, lend);
408 		iput(mapping->host);
409 	}
410 	return -EAGAIN;
411 }
412 
413 int make_hva_secure(struct mm_struct *mm, unsigned long hva, struct uv_cb_header *uvcb)
414 {
415 	struct vm_area_struct *vma;
416 	struct folio_walk fw;
417 	struct folio *folio;
418 	int rc;
419 
420 	mmap_read_lock(mm);
421 	vma = vma_lookup(mm, hva);
422 	if (!vma) {
423 		mmap_read_unlock(mm);
424 		return -EFAULT;
425 	}
426 	folio = folio_walk_start(&fw, vma, hva, 0);
427 	if (!folio) {
428 		mmap_read_unlock(mm);
429 		return -ENXIO;
430 	}
431 
432 	folio_get(folio);
433 	/*
434 	 * Secure pages cannot be huge and userspace should not combine both.
435 	 * In case userspace does it anyway this will result in an -EFAULT for
436 	 * the unpack. The guest is thus never reaching secure mode.
437 	 * If userspace plays dirty tricks and decides to map huge pages at a
438 	 * later point in time, it will receive a segmentation fault or
439 	 * KVM_RUN will return -EFAULT.
440 	 */
441 	if (folio_test_hugetlb(folio))
442 		rc = -EFAULT;
443 	else if (folio_test_large(folio))
444 		rc = -E2BIG;
445 	else if (!pte_write(fw.pte) || (pte_val(fw.pte) & _PAGE_INVALID))
446 		rc = -ENXIO;
447 	else
448 		rc = make_folio_secure(mm, folio, uvcb);
449 	folio_walk_end(&fw, vma);
450 	mmap_read_unlock(mm);
451 
452 	if (rc == -E2BIG || rc == -EBUSY) {
453 		rc = s390_wiggle_split_folio(mm, folio);
454 		if (!rc)
455 			rc = -EAGAIN;
456 	}
457 	folio_put(folio);
458 
459 	return rc;
460 }
461 EXPORT_SYMBOL_GPL(make_hva_secure);
462 
463 /*
464  * To be called with the folio locked or with an extra reference! This will
465  * prevent kvm_s390_pv_make_secure() from touching the folio concurrently.
466  * Having 2 parallel arch_make_folio_accessible is fine, as the UV calls will
467  * become a no-op if the folio is already exported.
468  */
469 int arch_make_folio_accessible(struct folio *folio)
470 {
471 	int rc = 0;
472 
473 	/* Large folios cannot be secure */
474 	if (unlikely(folio_test_large(folio)))
475 		return 0;
476 
477 	/*
478 	 * PG_arch_1 is used in 2 places:
479 	 * 1. for storage keys of hugetlb folios and KVM
480 	 * 2. As an indication that this small folio might be secure. This can
481 	 *    overindicate, e.g. we set the bit before calling
482 	 *    convert_to_secure.
483 	 * As secure pages are never large folios, both variants can co-exists.
484 	 */
485 	if (!test_bit(PG_arch_1, &folio->flags))
486 		return 0;
487 
488 	rc = uv_pin_shared(folio_to_phys(folio));
489 	if (!rc) {
490 		clear_bit(PG_arch_1, &folio->flags);
491 		return 0;
492 	}
493 
494 	rc = uv_convert_from_secure(folio_to_phys(folio));
495 	if (!rc) {
496 		clear_bit(PG_arch_1, &folio->flags);
497 		return 0;
498 	}
499 
500 	return rc;
501 }
502 EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
503 
504 static ssize_t uv_query_facilities(struct kobject *kobj,
505 				   struct kobj_attribute *attr, char *buf)
506 {
507 	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
508 			  uv_info.inst_calls_list[0],
509 			  uv_info.inst_calls_list[1],
510 			  uv_info.inst_calls_list[2],
511 			  uv_info.inst_calls_list[3]);
512 }
513 
514 static struct kobj_attribute uv_query_facilities_attr =
515 	__ATTR(facilities, 0444, uv_query_facilities, NULL);
516 
517 static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
518 					struct kobj_attribute *attr, char *buf)
519 {
520 	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
521 }
522 
523 static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
524 	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);
525 
526 static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
527 					struct kobj_attribute *attr, char *buf)
528 {
529 	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
530 }
531 
532 static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
533 	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);
534 
535 static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
536 				     struct kobj_attribute *attr, char *buf)
537 {
538 	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
539 }
540 
541 static struct kobj_attribute uv_query_dump_cpu_len_attr =
542 	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);
543 
544 static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
545 					       struct kobj_attribute *attr, char *buf)
546 {
547 	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
548 }
549 
550 static struct kobj_attribute uv_query_dump_storage_state_len_attr =
551 	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);
552 
553 static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
554 					  struct kobj_attribute *attr, char *buf)
555 {
556 	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
557 }
558 
559 static struct kobj_attribute uv_query_dump_finalize_len_attr =
560 	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);
561 
562 static ssize_t uv_query_feature_indications(struct kobject *kobj,
563 					    struct kobj_attribute *attr, char *buf)
564 {
565 	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
566 }
567 
568 static struct kobj_attribute uv_query_feature_indications_attr =
569 	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);
570 
571 static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
572 				       struct kobj_attribute *attr, char *buf)
573 {
574 	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
575 }
576 
577 static struct kobj_attribute uv_query_max_guest_cpus_attr =
578 	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);
579 
580 static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
581 				      struct kobj_attribute *attr, char *buf)
582 {
583 	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
584 }
585 
586 static struct kobj_attribute uv_query_max_guest_vms_attr =
587 	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);
588 
589 static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
590 				       struct kobj_attribute *attr, char *buf)
591 {
592 	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
593 }
594 
595 static struct kobj_attribute uv_query_max_guest_addr_attr =
596 	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);
597 
598 static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
599 					     struct kobj_attribute *attr, char *buf)
600 {
601 	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
602 }
603 
604 static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
605 	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);
606 
607 static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
608 					struct kobj_attribute *attr, char *buf)
609 {
610 	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
611 }
612 
613 static struct kobj_attribute uv_query_supp_att_pflags_attr =
614 	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);
615 
616 static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
617 						struct kobj_attribute *attr, char *buf)
618 {
619 	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
620 }
621 
622 static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
623 	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);
624 
625 static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
626 					    struct kobj_attribute *attr, char *buf)
627 {
628 	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
629 }
630 
631 static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
632 	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);
633 
634 static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
635 					  struct kobj_attribute *attr, char *buf)
636 {
637 	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
638 }
639 
640 static struct kobj_attribute uv_query_supp_secret_types_attr =
641 	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);
642 
643 static ssize_t uv_query_max_secrets(struct kobject *kobj,
644 				    struct kobj_attribute *attr, char *buf)
645 {
646 	return sysfs_emit(buf, "%d\n",
647 			  uv_info.max_assoc_secrets + uv_info.max_retr_secrets);
648 }
649 
650 static struct kobj_attribute uv_query_max_secrets_attr =
651 	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);
652 
653 static ssize_t uv_query_max_retr_secrets(struct kobject *kobj,
654 					 struct kobj_attribute *attr, char *buf)
655 {
656 	return sysfs_emit(buf, "%d\n", uv_info.max_retr_secrets);
657 }
658 
659 static struct kobj_attribute uv_query_max_retr_secrets_attr =
660 	__ATTR(max_retr_secrets, 0444, uv_query_max_retr_secrets, NULL);
661 
662 static ssize_t uv_query_max_assoc_secrets(struct kobject *kobj,
663 					  struct kobj_attribute *attr,
664 					  char *buf)
665 {
666 	return sysfs_emit(buf, "%d\n", uv_info.max_assoc_secrets);
667 }
668 
669 static struct kobj_attribute uv_query_max_assoc_secrets_attr =
670 	__ATTR(max_assoc_secrets, 0444, uv_query_max_assoc_secrets, NULL);
671 
672 static struct attribute *uv_query_attrs[] = {
673 	&uv_query_facilities_attr.attr,
674 	&uv_query_feature_indications_attr.attr,
675 	&uv_query_max_guest_cpus_attr.attr,
676 	&uv_query_max_guest_vms_attr.attr,
677 	&uv_query_max_guest_addr_attr.attr,
678 	&uv_query_supp_se_hdr_ver_attr.attr,
679 	&uv_query_supp_se_hdr_pcf_attr.attr,
680 	&uv_query_dump_storage_state_len_attr.attr,
681 	&uv_query_dump_finalize_len_attr.attr,
682 	&uv_query_dump_cpu_len_attr.attr,
683 	&uv_query_supp_att_req_hdr_ver_attr.attr,
684 	&uv_query_supp_att_pflags_attr.attr,
685 	&uv_query_supp_add_secret_req_ver_attr.attr,
686 	&uv_query_supp_add_secret_pcf_attr.attr,
687 	&uv_query_supp_secret_types_attr.attr,
688 	&uv_query_max_secrets_attr.attr,
689 	&uv_query_max_assoc_secrets_attr.attr,
690 	&uv_query_max_retr_secrets_attr.attr,
691 	NULL,
692 };
693 
694 static inline struct uv_cb_query_keys uv_query_keys(void)
695 {
696 	struct uv_cb_query_keys uvcb = {
697 		.header.cmd = UVC_CMD_QUERY_KEYS,
698 		.header.len = sizeof(uvcb)
699 	};
700 
701 	uv_call(0, (uint64_t)&uvcb);
702 	return uvcb;
703 }
704 
705 static inline ssize_t emit_hash(struct uv_key_hash *hash, char *buf, int at)
706 {
707 	return sysfs_emit_at(buf, at, "%016llx%016llx%016llx%016llx\n",
708 			    hash->dword[0], hash->dword[1], hash->dword[2], hash->dword[3]);
709 }
710 
711 static ssize_t uv_keys_host_key(struct kobject *kobj,
712 				struct kobj_attribute *attr, char *buf)
713 {
714 	struct uv_cb_query_keys uvcb = uv_query_keys();
715 
716 	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_HK], buf, 0);
717 }
718 
719 static struct kobj_attribute uv_keys_host_key_attr =
720 	__ATTR(host_key, 0444, uv_keys_host_key, NULL);
721 
722 static ssize_t uv_keys_backup_host_key(struct kobject *kobj,
723 				       struct kobj_attribute *attr, char *buf)
724 {
725 	struct uv_cb_query_keys uvcb = uv_query_keys();
726 
727 	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_BACK_HK], buf, 0);
728 }
729 
730 static struct kobj_attribute uv_keys_backup_host_key_attr =
731 	__ATTR(backup_host_key, 0444, uv_keys_backup_host_key, NULL);
732 
733 static ssize_t uv_keys_all(struct kobject *kobj,
734 			   struct kobj_attribute *attr, char *buf)
735 {
736 	struct uv_cb_query_keys uvcb = uv_query_keys();
737 	ssize_t len = 0;
738 	int i;
739 
740 	for (i = 0; i < ARRAY_SIZE(uvcb.key_hashes); i++)
741 		len += emit_hash(uvcb.key_hashes + i, buf, len);
742 
743 	return len;
744 }
745 
746 static struct kobj_attribute uv_keys_all_attr =
747 	__ATTR(all, 0444, uv_keys_all, NULL);
748 
749 static struct attribute_group uv_query_attr_group = {
750 	.attrs = uv_query_attrs,
751 };
752 
753 static struct attribute *uv_keys_attrs[] = {
754 	&uv_keys_host_key_attr.attr,
755 	&uv_keys_backup_host_key_attr.attr,
756 	&uv_keys_all_attr.attr,
757 	NULL,
758 };
759 
760 static struct attribute_group uv_keys_attr_group = {
761 	.attrs = uv_keys_attrs,
762 };
763 
764 static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
765 				     struct kobj_attribute *attr, char *buf)
766 {
767 	return sysfs_emit(buf, "%d\n", prot_virt_guest);
768 }
769 
770 static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
771 				    struct kobj_attribute *attr, char *buf)
772 {
773 	return sysfs_emit(buf, "%d\n", prot_virt_host);
774 }
775 
776 static struct kobj_attribute uv_prot_virt_guest =
777 	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);
778 
779 static struct kobj_attribute uv_prot_virt_host =
780 	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);
781 
782 static const struct attribute *uv_prot_virt_attrs[] = {
783 	&uv_prot_virt_guest.attr,
784 	&uv_prot_virt_host.attr,
785 	NULL,
786 };
787 
788 static struct kset *uv_query_kset;
789 static struct kset *uv_keys_kset;
790 static struct kobject *uv_kobj;
791 
792 static int __init uv_sysfs_dir_init(const struct attribute_group *grp,
793 				    struct kset **uv_dir_kset, const char *name)
794 {
795 	struct kset *kset;
796 	int rc;
797 
798 	kset = kset_create_and_add(name, NULL, uv_kobj);
799 	if (!kset)
800 		return -ENOMEM;
801 	*uv_dir_kset = kset;
802 
803 	rc = sysfs_create_group(&kset->kobj, grp);
804 	if (rc)
805 		kset_unregister(kset);
806 	return rc;
807 }
808 
809 static int __init uv_sysfs_init(void)
810 {
811 	int rc = -ENOMEM;
812 
813 	if (!test_facility(158))
814 		return 0;
815 
816 	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
817 	if (!uv_kobj)
818 		return -ENOMEM;
819 
820 	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
821 	if (rc)
822 		goto out_kobj;
823 
824 	rc = uv_sysfs_dir_init(&uv_query_attr_group, &uv_query_kset, "query");
825 	if (rc)
826 		goto out_ind_files;
827 
828 	/* Get installed key hashes if available, ignore any errors */
829 	if (test_bit_inv(BIT_UVC_CMD_QUERY_KEYS, uv_info.inst_calls_list))
830 		uv_sysfs_dir_init(&uv_keys_attr_group, &uv_keys_kset, "keys");
831 
832 	return 0;
833 
834 out_ind_files:
835 	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
836 out_kobj:
837 	kobject_del(uv_kobj);
838 	kobject_put(uv_kobj);
839 	return rc;
840 }
841 device_initcall(uv_sysfs_init);
842 
843 /*
844  * Locate a secret in the list by its id.
845  * @secret_id: search pattern.
846  * @list: ephemeral buffer space
847  * @secret: output data, containing the secret's metadata.
848  *
849  * Search for a secret with the given secret_id in the Ultravisor secret store.
850  *
851  * Context: might sleep.
852  */
853 static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
854 			       const struct uv_secret_list *list,
855 			       struct uv_secret_list_item_hdr *secret)
856 {
857 	u16 i;
858 
859 	for (i = 0; i < list->total_num_secrets; i++) {
860 		if (memcmp(secret_id, list->secrets[i].id, UV_SECRET_ID_LEN) == 0) {
861 			*secret = list->secrets[i].hdr;
862 			return 0;
863 		}
864 	}
865 	return -ENOENT;
866 }
867 
868 /*
869  * Do the actual search for `uv_get_secret_metadata`.
870  * @secret_id: search pattern.
871  * @list: ephemeral buffer space
872  * @secret: output data, containing the secret's metadata.
873  *
874  * Context: might sleep.
875  */
876 int uv_find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
877 		   struct uv_secret_list *list,
878 		   struct uv_secret_list_item_hdr *secret)
879 {
880 	u16 start_idx = 0;
881 	u16 list_rc;
882 	int ret;
883 
884 	do {
885 		uv_list_secrets(list, start_idx, &list_rc, NULL);
886 		if (list_rc != UVC_RC_EXECUTED && list_rc != UVC_RC_MORE_DATA) {
887 			if (list_rc == UVC_RC_INV_CMD)
888 				return -ENODEV;
889 			else
890 				return -EIO;
891 		}
892 		ret = find_secret_in_page(secret_id, list, secret);
893 		if (ret == 0)
894 			return ret;
895 		start_idx = list->next_secret_idx;
896 	} while (list_rc == UVC_RC_MORE_DATA && start_idx < list->next_secret_idx);
897 
898 	return -ENOENT;
899 }
900 EXPORT_SYMBOL_GPL(uv_find_secret);
901 
902 /**
903  * uv_retrieve_secret() - get the secret value for the secret index.
904  * @secret_idx: Secret index for which the secret should be retrieved.
905  * @buf: Buffer to store retrieved secret.
906  * @buf_size: Size of the buffer. The correct buffer size is reported as part of
907  * the result from `uv_get_secret_metadata`.
908  *
909  * Calls the Retrieve Secret UVC and translates the UV return code into an errno.
910  *
911  * Context: might sleep.
912  *
913  * Return:
914  * * %0		- Entry found; buffer contains a valid secret.
915  * * %ENOENT:	- No entry found or secret at the index is non-retrievable.
916  * * %ENODEV:	- Not supported: UV not available or command not available.
917  * * %EINVAL:	- Buffer too small for content.
918  * * %EIO:	- Other unexpected UV error.
919  */
920 int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size)
921 {
922 	struct uv_cb_retr_secr uvcb = {
923 		.header.len = sizeof(uvcb),
924 		.header.cmd = UVC_CMD_RETR_SECRET,
925 		.secret_idx = secret_idx,
926 		.buf_addr = (u64)buf,
927 		.buf_size = buf_size,
928 	};
929 
930 	uv_call_sched(0, (u64)&uvcb);
931 
932 	switch (uvcb.header.rc) {
933 	case UVC_RC_EXECUTED:
934 		return 0;
935 	case UVC_RC_INV_CMD:
936 		return -ENODEV;
937 	case UVC_RC_RETR_SECR_STORE_EMPTY:
938 	case UVC_RC_RETR_SECR_INV_SECRET:
939 	case UVC_RC_RETR_SECR_INV_IDX:
940 		return -ENOENT;
941 	case UVC_RC_RETR_SECR_BUF_SMALL:
942 		return -EINVAL;
943 	default:
944 		return -EIO;
945 	}
946 }
947 EXPORT_SYMBOL_GPL(uv_retrieve_secret);
948