// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2024
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/pagewalk.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

#if !IS_ENABLED(CONFIG_KVM)
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	return 0;
}

int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	return 0;
}
#endif

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);

/*
 * uv_info contains both host and guest information, but it is currently
 * only expected to be used by modules, namely the KVM module and
 * modules for PV guests.
 *
 * The kernel itself writes these values once in uv_query_info() and
 * then makes some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}
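
/*
 * All UV calls in this file follow the pattern shown in uv_init() above:
 * populate a command-specific control block, point its header at the
 * command code and the block length, and hand it to one of the call
 * helpers from asm/uv.h. uv_call() retries while the firmware reports
 * busy or partial completion (cc > 1), __uv_call() performs exactly one
 * call and returns the condition code, and uv_call_sched() retries like
 * uv_call() but may reschedule between attempts. A minimal illustrative
 * sketch (uv_cb_dummy and UVC_CMD_DUMMY are made-up names):
 *
 *	struct uv_cb_dummy uvcb = {
 *		.header.cmd = UVC_CMD_DUMMY,
 *		.header.len = sizeof(uvcb),
 *	};
 *
 *	if (uv_call(0, (u64)&uvcb))
 *		pr_err("rc: 0x%x rrc: 0x%x\n", uvcb.header.rc, uvcb.header.rrc);
 *
 * The firmware stores the return and reason codes in uvcb.header.rc/rrc.
 */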

void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);
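
/*
 * Illustrative sketch, not taken from an in-tree caller: a host component
 * that relies on a guest-shared page staying shared would pin it first
 * (page is a hypothetical struct page *, and the error choice is the
 * caller's):
 *
 *	if (uv_pin_shared(page_to_phys(page)))
 *		return -ENXIO;	// page was not in the shared state
 *
 * After a successful pin, a guest attempt to unshare the page causes an
 * intercept instead of silently succeeding.
 */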

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting it.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the folio.
 */
int uv_destroy_folio(struct folio *folio)
{
	int rc;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	folio_get(folio);
	rc = uv_destroy(folio_to_phys(folio));
	if (!rc)
		clear_bit(PG_arch_1, &folio->flags);
	folio_put(folio);
	return rc;
}

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_destroy_pte(pte_t pte)
{
	VM_WARN_ON(!pte_present(pte));
	return uv_destroy_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
static int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * The caller must already hold a reference to the folio.
 */
static int uv_convert_from_secure_folio(struct folio *folio)
{
	int rc;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	folio_get(folio);
	rc = uv_convert_from_secure(folio_to_phys(folio));
	if (!rc)
		clear_bit(PG_arch_1, &folio->flags);
	folio_put(folio);
	return rc;
}

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_convert_from_secure_pte(pte_t pte)
{
	VM_WARN_ON(!pte_present(pte));
	return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Calculate the expected ref_count for a folio that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * folio cannot be a large folio, for example.
 */
static int expected_folio_refs(struct folio *folio)
{
	int res;

	res = folio_mapcount(folio);
	if (folio_test_swapcache(folio)) {
		res++;
	} else if (folio_mapping(folio)) {
		res++;
		if (folio->private)
			res++;
	}
	return res;
}
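
/*
 * Worked example for expected_folio_refs(), with illustrative numbers: a
 * small folio mapped into two address spaces (mapcount 2) that sits in the
 * page cache (folio_mapping() != NULL, +1) and has private data attached
 * (folio->private, +1) should have a refcount of 4 when nothing else pins
 * it. make_folio_secure() below freezes the refcount at exactly that
 * value; any unexpected extra reference makes the freeze fail.
 */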

static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (folio_test_writeback(folio))
		return -EAGAIN;
	expected = expected_folio_refs(folio);
	if (!folio_ref_freeze(folio, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &folio->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	folio_ref_unfreeze(folio, expected);
	/*
	 * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although it is considered one, the Unpin Page UVC is not an actual
 * import, so it is not affected.
 *
 * An export is also not needed when there is only one protected VM,
 * because the page cannot belong to the wrong VM in that case (there is
 * no "other VM" it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	/*
	 * The misc feature indicates, among other things, that importing a
	 * shared page from a different protected VM will automatically also
	 * transfer its ownership.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC))
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Drain LRU caches: the local one on first invocation and the ones of all
 * CPUs on successive invocations. Returns "true" on the first invocation.
 */
static bool drain_lru(bool *drain_lru_called)
{
	/*
	 * If we have tried a local drain and the folio refcount
	 * still does not match our expected safe value, try with a
	 * system wide drain. This is needed if the pagevecs holding
	 * the page are on a different CPU.
	 */
	if (*drain_lru_called) {
		lru_add_drain_all();
		/* We give up here, don't retry immediately. */
		return false;
	}
	/*
	 * We are here if the folio refcount does not match the
	 * expected safe value. The main culprits are usually
	 * pagevecs. With lru_add_drain() we drain the pagevecs
	 * on the local CPU so that hopefully the refcount will
	 * reach the expected safe value.
	 */
	lru_add_drain();
	*drain_lru_called = true;
	/* The caller should try again immediately */
	return true;
}
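
/*
 * Sketch of the intended drain_lru() calling pattern (this mirrors what
 * gmap_make_secure() below does; try_the_operation() is a placeholder):
 * keep one flag per retry sequence, retry immediately after the cheap
 * local drain, and back off after the expensive system-wide drain:
 *
 *	bool drain_lru_called = false;
 *
 * again:
 *	rc = try_the_operation();
 *	if (rc == -EBUSY) {
 *		if (drain_lru(&drain_lru_called))
 *			goto again;
 *		rc = -EAGAIN;	// let the caller back off instead
 *	}
 */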

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in for the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool drain_lru_called = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct folio *folio;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest thus never reaches secure mode. If
	 * userspace plays dirty tricks with mapping huge pages later
	 * on this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	if (!ptep)
		goto out;
	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
		folio = page_folio(pte_page(*ptep));
		rc = -EAGAIN;
		if (folio_test_large(folio)) {
			rc = -E2BIG;
		} else if (folio_trylock(folio)) {
			if (should_export_before_import(uvcb, gmap->mm))
				uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
			rc = make_folio_secure(folio, uvcb);
			folio_unlock(folio);
		}

		/*
		 * Once we drop the PTL, the folio may get unmapped and
		 * freed immediately. We need a temporary reference.
		 */
		if (rc == -EAGAIN || rc == -E2BIG)
			folio_get(folio);
	}
	pte_unmap_unlock(ptep, ptelock);
out:
	mmap_read_unlock(gmap->mm);

	switch (rc) {
	case -E2BIG:
		folio_lock(folio);
		rc = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);

		switch (rc) {
		case 0:
			/* Splitting succeeded, try again immediately. */
			goto again;
		case -EAGAIN:
			/* Additional folio references. */
			if (drain_lru(&drain_lru_called))
				goto again;
			return -EAGAIN;
		case -EBUSY:
			/* Unexpected race. */
			return -EAGAIN;
		}
		WARN_ON_ONCE(1);
		return -ENXIO;
	case -EAGAIN:
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		folio_wait_writeback(folio);
		folio_put(folio);
		return -EAGAIN;
	case -EBUSY:
		/* Additional folio references. */
		if (drain_lru(&drain_lru_called))
			goto again;
		return -EAGAIN;
	case -ENXIO:
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);
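
/*
 * Calling convention sketch (not an in-tree caller): gmap_make_secure()
 * resolves -E2BIG, -EBUSY and -ENXIO internally, so an external caller
 * only has to retry on -EAGAIN, roughly like KVM's unpack loop does:
 *
 *	do {
 *		rc = gmap_make_secure(gmap, gaddr, &uvcb);
 *		if (rc == -EAGAIN)
 *			cond_resched();
 *	} while (rc == -EAGAIN);
 */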

int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
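
/*
 * gmap_convert_to_secure() is the template for other import-like UVCs:
 * build the command-specific control block and let gmap_make_secure()
 * drive the page through the state change. For illustration, an unpack
 * request is built roughly the same way (this mirrors KVM's unpack_one();
 * see struct uv_cb_unp in asm/uv.h for the exact layout):
 *
 *	struct uv_cb_unp uvcb = {
 *		.header.cmd = UVC_CMD_UNPACK_IMG,
 *		.header.len = sizeof(uvcb),
 *		.guest_handle = gmap->guest_handle,
 *		.gaddr = gaddr,
 *		.tweak[0] = tweak,
 *		.tweak[1] = offset,
 *	};
 *
 *	rc = gmap_make_secure(gmap, gaddr, &uvcb);
 */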

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If that fails,
 * an attempt is made to export the page instead. If both attempts fail,
 * an appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	struct folio_walk fw;
	unsigned long uaddr;
	struct folio *folio;
	int rc;

	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Huge pages should not be able to become secure.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = 0;
	folio = folio_walk_start(&fw, vma, uaddr, 0);
	if (!folio)
		goto out;
	/*
	 * See gmap_make_secure(): large folios cannot be secure. Small
	 * folio implies FW_LEVEL_PTE.
	 */
	if (folio_test_large(folio) || !pte_write(fw.pte))
		goto out_walk_end;
	rc = uv_destroy_folio(folio);
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_from_secure_folio(folio);
out_walk_end:
	folio_walk_end(&fw, vma);
out:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the folio locked or with an extra reference! This will
 * prevent gmap_make_secure() from touching the folio concurrently. Having two
 * parallel arch_make_folio_accessible() calls is fine, as the UV calls will
 * become a no-op if the folio is already exported.
 */
int arch_make_folio_accessible(struct folio *folio)
{
	int rc = 0;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	/*
	 * PG_arch_1 is used in 2 places:
	 * 1. For storage keys of hugetlb folios and KVM.
	 * 2. As an indication that this small folio might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never large folios, both variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &folio->flags))
		return 0;

	rc = uv_pin_shared(folio_to_phys(folio));
	if (!rc) {
		clear_bit(PG_arch_1, &folio->flags);
		return 0;
	}

	rc = uv_convert_from_secure(folio_to_phys(folio));
	if (!rc) {
		clear_bit(PG_arch_1, &folio->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
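
/*
 * Illustrative usage sketch (hypothetical caller, roughly mirroring what
 * the common mm code does when pinning pages and when starting writeback):
 * with a reference held, make the folio accessible before the host
 * touches its content:
 *
 *	folio_get(folio);
 *	rc = arch_make_folio_accessible(folio);
 *	folio_put(folio);
 *	if (rc)
 *		return rc;	// do not access the folio from the host
 */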

static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
			  uv_info.inst_calls_list[0],
			  uv_info.inst_calls_list[1],
			  uv_info.inst_calls_list[2],
			  uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
						struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n",
			  uv_info.max_assoc_secrets + uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static ssize_t uv_query_max_retr_secrets(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_retr_secrets_attr =
	__ATTR(max_retr_secrets, 0444, uv_query_max_retr_secrets, NULL);

static ssize_t uv_query_max_assoc_secrets(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_assoc_secrets);
}

static struct kobj_attribute uv_query_max_assoc_secrets_attr =
	__ATTR(max_assoc_secrets, 0444, uv_query_max_assoc_secrets, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	&uv_query_supp_add_secret_req_ver_attr.attr,
	&uv_query_supp_add_secret_pcf_attr.attr,
	&uv_query_supp_secret_types_attr.attr,
	&uv_query_max_secrets_attr.attr,
	&uv_query_max_assoc_secrets_attr.attr,
	&uv_query_max_retr_secrets_attr.attr,
	NULL,
};

static inline struct uv_cb_query_keys uv_query_keys(void)
{
	struct uv_cb_query_keys uvcb = {
		.header.cmd = UVC_CMD_QUERY_KEYS,
		.header.len = sizeof(uvcb)
	};

	uv_call(0, (uint64_t)&uvcb);
	return uvcb;
}

static inline ssize_t emit_hash(struct uv_key_hash *hash, char *buf, int at)
{
	return sysfs_emit_at(buf, at, "%016llx%016llx%016llx%016llx\n",
			     hash->dword[0], hash->dword[1], hash->dword[2], hash->dword[3]);
}

static ssize_t uv_keys_host_key(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();

	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_HK], buf, 0);
}

static struct kobj_attribute uv_keys_host_key_attr =
	__ATTR(host_key, 0444, uv_keys_host_key, NULL);

static ssize_t uv_keys_backup_host_key(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();

	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_BACK_HK], buf, 0);
}

static struct kobj_attribute uv_keys_backup_host_key_attr =
	__ATTR(backup_host_key, 0444, uv_keys_backup_host_key, NULL);

static ssize_t uv_keys_all(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();
	ssize_t len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(uvcb.key_hashes); i++)
		len += emit_hash(uvcb.key_hashes + i, buf, len);

	return len;
}

static struct kobj_attribute uv_keys_all_attr =
	__ATTR(all, 0444, uv_keys_all, NULL);

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct attribute *uv_keys_attrs[] = {
	&uv_keys_host_key_attr.attr,
	&uv_keys_backup_host_key_attr.attr,
	&uv_keys_all_attr.attr,
	NULL,
};

static struct attribute_group uv_keys_attr_group = {
	.attrs = uv_keys_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prot_virt_guest);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prot_virt_host);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kset *uv_keys_kset;
static struct kobject *uv_kobj;

static int __init uv_sysfs_dir_init(const struct attribute_group *grp,
				    struct kset **uv_dir_kset, const char *name)
{
	struct kset *kset;
	int rc;

	kset = kset_create_and_add(name, NULL, uv_kobj);
	if (!kset)
		return -ENOMEM;
	*uv_dir_kset = kset;

	rc = sysfs_create_group(&kset->kobj, grp);
	if (rc)
		kset_unregister(kset);
	return rc;
}

static int __init uv_sysfs_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	rc = uv_sysfs_dir_init(&uv_query_attr_group, &uv_query_kset, "query");
	if (rc)
		goto out_ind_files;

	/* Get installed key hashes if available, ignore any errors */
	if (test_bit_inv(BIT_UVC_CMD_QUERY_KEYS, uv_info.inst_calls_list))
		uv_sysfs_dir_init(&uv_keys_attr_group, &uv_keys_kset, "keys");

	return 0;

out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_sysfs_init);
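
/*
 * The attributes registered above appear under /sys/firmware/uv. A
 * minimal userspace sketch (illustrative only, not part of the kernel)
 * for reading one of the query files:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long feat = 0;
 *		FILE *f = fopen("/sys/firmware/uv/query/feature_indications", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fscanf(f, "%lx", &feat) != 1) {
 *			fclose(f);
 *			return 1;
 *		}
 *		fclose(f);
 *		printf("UV feature indications: 0x%lx\n", feat);
 *		return 0;
 *	}
 */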

/*
 * Find the secret with the secret_id in the provided list.
 *
 * Context: might sleep.
 */
static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
			       const struct uv_secret_list *list,
			       struct uv_secret_list_item_hdr *secret)
{
	u16 i;

	for (i = 0; i < list->total_num_secrets; i++) {
		if (memcmp(secret_id, list->secrets[i].id, UV_SECRET_ID_LEN) == 0) {
			*secret = list->secrets[i].hdr;
			return 0;
		}
	}
	return -ENOENT;
}

/*
 * Do the actual search for `uv_get_secret_metadata`.
 *
 * Context: might sleep.
 */
static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
		       struct uv_secret_list *list,
		       struct uv_secret_list_item_hdr *secret)
{
	u16 start_idx = 0;
	u16 list_rc;
	int ret;

	do {
		uv_list_secrets(list, start_idx, &list_rc, NULL);
		if (list_rc != UVC_RC_EXECUTED && list_rc != UVC_RC_MORE_DATA) {
			if (list_rc == UVC_RC_INV_CMD)
				return -ENODEV;
			return -EIO;
		}
		ret = find_secret_in_page(secret_id, list, secret);
		if (ret == 0)
			return ret;
		start_idx = list->next_secret_idx;
	} while (list_rc == UVC_RC_MORE_DATA && start_idx < list->next_secret_idx);

	return -ENOENT;
}
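
/*
 * Note on the loop in find_secret(): UVC_RC_MORE_DATA indicates that the
 * secret store did not fit into one list page; next_secret_idx then names
 * the index at which the next invocation should continue. The additional
 * "start_idx < list->next_secret_idx" check stops the loop if the index
 * ever fails to advance, so a misbehaving Ultravisor cannot make this
 * loop forever.
 */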

/**
 * uv_get_secret_metadata() - get secret metadata for a given secret id.
 * @secret_id: search pattern.
 * @secret: output data, containing the secret's metadata.
 *
 * Search for a secret with the given secret_id in the Ultravisor secret store.
 *
 * Context: might sleep.
 *
 * Return:
 * * %0		- Found entry; secret->idx and secret->type are valid.
 * * %ENOENT	- No entry found.
 * * %ENODEV	- Not supported: UV not available or command not available.
 * * %EIO	- Other unexpected UV error.
 */
int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
			   struct uv_secret_list_item_hdr *secret)
{
	struct uv_secret_list *buf;
	int rc;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = find_secret(secret_id, buf, secret);
	kfree(buf);
	return rc;
}
EXPORT_SYMBOL_GPL(uv_get_secret_metadata);

/**
 * uv_retrieve_secret() - get the secret value for the secret index.
 * @secret_idx: Secret index for which the secret should be retrieved.
 * @buf: Buffer to store retrieved secret.
 * @buf_size: Size of the buffer. The correct buffer size is reported as part of
 * the result from `uv_get_secret_metadata`.
 *
 * Calls the Retrieve Secret UVC and translates the UV return code into an errno.
 *
 * Context: might sleep.
 *
 * Return:
 * * %0		- Entry found; buffer contains a valid secret.
 * * %ENOENT	- No entry found or secret at the index is non-retrievable.
 * * %ENODEV	- Not supported: UV not available or command not available.
 * * %EINVAL	- Buffer too small for content.
 * * %EIO	- Other unexpected UV error.
 */
int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size)
{
	struct uv_cb_retr_secr uvcb = {
		.header.len = sizeof(uvcb),
		.header.cmd = UVC_CMD_RETR_SECRET,
		.secret_idx = secret_idx,
		.buf_addr = (u64)buf,
		.buf_size = buf_size,
	};

	uv_call_sched(0, (u64)&uvcb);

	switch (uvcb.header.rc) {
	case UVC_RC_EXECUTED:
		return 0;
	case UVC_RC_INV_CMD:
		return -ENODEV;
	case UVC_RC_RETR_SECR_STORE_EMPTY:
	case UVC_RC_RETR_SECR_INV_SECRET:
	case UVC_RC_RETR_SECR_INV_IDX:
		return -ENOENT;
	case UVC_RC_RETR_SECR_BUF_SMALL:
		return -EINVAL;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(uv_retrieve_secret);

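/*
 * Illustrative sketch combining the two secret interfaces (hypothetical
 * caller; `id` holds the 32-byte secret id to look up, and the index
 * field name follows struct uv_secret_list_item_hdr in asm/uv.h):
 *
 *	struct uv_secret_list_item_hdr hdr;
 *	u8 val[32];	// hypothetical size; real callers size this
 *	int rc;		// from the returned metadata
 *
 *	rc = uv_get_secret_metadata(id, &hdr);
 *	if (rc)
 *		return rc;
 *	rc = uv_retrieve_secret(hdr.index, val, sizeof(val));
 */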