xref: /linux/arch/s390/kernel/uv.c (revision 123760841a2e5977d4e97f86999b3784df58801d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);
#endif

/*
 * uv_info contains both host and guest information, but it is currently
 * only expected to be used within modules, by the KVM module or by
 * PV guest modules.
 *
 * The kernel itself writes these values once in uv_query_info()
 * and then makes some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

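/*
 * For orientation (an illustrative, not exhaustive, note): when creating a
 * protected VM, the KVM module sizes the memory it donates to the
 * Ultravisor from fields such as uv_info.guest_base_stor_len and
 * uv_info.guest_virt_base_stor_len.
 */
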
#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}
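
/*
 * For orientation: prot_virt_host is seeded from the "prot_virt=" kernel
 * command-line parameter parsed in early boot code. Booting with
 * prot_virt=1 on hardware with the Ultravisor facility would, as an
 * illustrative example, log something like (prefix from pr_fmt above):
 *
 *	prot_virt: Reserving 98MB as ultravisor base storage
 */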

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);
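
/*
 * Illustrative usage, mirroring the real caller arch_make_page_accessible()
 * further down in this file:
 *
 *	rc = uv_pin_shared(page_to_phys(page));
 *	if (!rc)
 *		clear_bit(PG_arch_1, &page->flags);
 */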

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_destroy_owned_page(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_destroy_page(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_convert_owned_from_secure(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_convert_from_secure(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}
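
/*
 * For orientation: clearing PG_arch_1 in the two *_owned_* helpers above
 * keeps the "this page might be secure" hint accurate; see
 * arch_make_page_accessible() below for where that bit is consumed.
 */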

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know, for example,
 * that a secure page cannot be a huge page.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}
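
/*
 * Worked example, following directly from the logic above: an anonymous
 * page mapped by one process and present in the swap cache has an expected
 * refcount of 2 (one for the mapping, one for the swap cache). A page-cache
 * page mapped once and carrying private data (e.g. buffer heads) has an
 * expected refcount of 3 (mapping + page cache + private data).
 */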

static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding
	 * a lot of locks, so we can't easily sleep and reschedule. We try
	 * only once, and if the UVC returned busy or partial completion,
	 * we return -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/*
	 * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}
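
/*
 * For orientation: the uvcb passed in here is the header of a larger,
 * command-specific control block, e.g. the struct uv_cb_cts built in
 * gmap_convert_to_secure() below.
 */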

/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * An export is also not needed when there is only one protected VM,
 * because the page cannot belong to the wrong VM in that case (there is
 * no "other VM" it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	/*
	 * The misc feature indicates, among other things, that importing a
	 * shared page from a different protected VM will automatically also
	 * transfer its ownership.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC))
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}
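
/*
 * Example, following directly from the checks above: with two protected
 * VMs running and without the misc UV feature, an import-like UVC such as
 * Convert To Secure Storage makes this helper return true, so the page is
 * exported first; an Unpin Page Shared UVC always makes it return false.
 */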

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it is brought in for the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest thus never reaches secure mode. If
	 * userspace plays dirty tricks with mapping huge pages later on,
	 * this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	if (!ptep)
		goto out;
	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
		page = pte_page(*ptep);
		rc = -EAGAIN;
		if (trylock_page(page)) {
			if (should_export_before_import(uvcb, gmap->mm))
				uv_convert_from_secure(page_to_phys(page));
			rc = make_page_secure(page, uvcb);
			unlock_page(page);
		}
	}
	pte_unmap_unlock(ptep, ptelock);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
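
/*
 * Illustrative caller sketch (hypothetical, modeled on the -EAGAIN contract
 * of gmap_make_secure() documented above; not a verbatim copy of any
 * in-tree caller):
 *
 *	do {
 *		rc = gmap_convert_to_secure(gmap, gaddr);
 *	} while (rc == -EAGAIN && !fatal_signal_pending(current));
 */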

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Return: 0 in case of success, otherwise a negative error code.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long uaddr;
	struct page *page;
	int rc;

	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Huge pages should not be able to become secure
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = 0;
	/* we take an extra reference here */
	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	rc = uv_destroy_owned_page(page_to_phys(page));
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_owned_from_secure(page_to_phys(page));
	put_page(page);
out:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having two
 * parallel calls to make_page_accessible is fine, as the UV calls will
 * become no-ops if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. as an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
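
/*
 * For orientation (an assumption about callers, not enforced here): common
 * mm code such as the get_user_pages and writeback paths is expected to
 * call arch_make_page_accessible() before the host accesses page content.
 */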

#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
			  uv_info.inst_calls_list[0],
			  uv_info.inst_calls_list[1],
			  uv_info.inst_calls_list[2],
			  uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
						struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	&uv_query_supp_add_secret_req_ver_attr.attr,
	&uv_query_supp_add_secret_pcf_attr.attr,
	&uv_query_supp_secret_types_attr.attr,
	&uv_query_max_secrets_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	val = prot_virt_guest;
#endif
	return sysfs_emit(buf, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	int val = 0;

#if IS_ENABLED(CONFIG_KVM)
	val = prot_virt_host;
#endif

	return sysfs_emit(buf, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset) {
		rc = -ENOMEM;
		goto out_ind_files;
	}

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
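
/*
 * Resulting sysfs layout (illustrative values): the kobject and kset
 * created in uv_info_init() appear under /sys/firmware/uv, e.g.:
 *
 *	$ cat /sys/firmware/uv/prot_virt_host
 *	1
 *	$ cat /sys/firmware/uv/query/max_cpus
 *	248
 *
 * Note that the dump CPU length attribute is exposed under its full
 * variable-style name, "uv_query_dump_cpu_len", unlike its siblings.
 */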
#endif