// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);
#endif

/*
 * uv_info contains both host and guest information, but within modules it
 * is currently only expected to be used by the KVM module or by PV guest
 * modules.
 *
 * The kernel itself writes these values once in uv_query_info() and then
 * makes some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

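/*
 * Set up the Ultravisor base storage by issuing the Init Ultravisor UVC
 * with the given storage origin and length. Returns 0 on success and -1
 * if the UV call fails, in which case the rc/rrc pair from the control
 * block is logged.
 */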
static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

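/*
 * Allocate and donate the Ultravisor base storage at boot time. The
 * memblock allocation is 1MB-aligned and placed below 2GB (presumably a
 * placement requirement of the Ultravisor). If either the allocation or
 * the Init UVC fails, host support for protected virtualization is
 * disabled.
 */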
void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the folio.
 */
int uv_destroy_folio(struct folio *folio)
{
	int rc;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	folio_get(folio);
	rc = uv_destroy(folio_to_phys(folio));
	if (!rc)
		clear_bit(PG_arch_1, &folio->flags);
	folio_put(folio);
	return rc;
}

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_destroy_pte(pte_t pte)
{
	VM_WARN_ON(!pte_present(pte));
	return uv_destroy_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
static int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * The caller must already hold a reference to the folio.
 */
static int uv_convert_from_secure_folio(struct folio *folio)
{
	int rc;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	folio_get(folio);
	rc = uv_convert_from_secure(folio_to_phys(folio));
	if (!rc)
		clear_bit(PG_arch_1, &folio->flags);
	folio_put(folio);
	return rc;
}

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_convert_from_secure_pte(pte_t pte)
{
	VM_WARN_ON(!pte_present(pte));
	return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Calculate the expected ref_count for a folio that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * folio cannot be a large folio, for example.
 */
static int expected_folio_refs(struct folio *folio)
{
	int res;

	res = folio_mapcount(folio);
	if (folio_test_swapcache(folio)) {
		res++;
	} else if (folio_mapping(folio)) {
		res++;
		if (folio->private)
			res++;
	}
	return res;
}

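/*
 * Freeze the folio refcount at its expected value and issue the
 * import-like UVC described by @uvcb; freezing prevents anybody else
 * from taking a reference while the Ultravisor changes the page state.
 *
 * Returns 0 on success, -EAGAIN if the folio is under writeback or the
 * UVC returned busy or partial completion, -EBUSY if the refcount could
 * not be frozen, -ENXIO if the folio was not mapped, and -EINVAL for
 * other errors.
 */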
static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (folio_test_writeback(folio))
		return -EAGAIN;
	expected = expected_folio_refs(folio);
	if (!folio_ref_freeze(folio, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &folio->flags);
	/*
	 * If the UVC neither succeeds nor fails immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	folio_ref_unfreeze(folio, expected);
	/*
	 * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although it is treated as one here, the Unpin Page UVC is not an actual
 * import, so it is not affected.
 *
 * An export is also not needed when there is only one protected VM, because
 * the page cannot belong to the wrong VM in that case (there is no "other
 * VM" it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	/*
	 * The misc feature indicates, among other things, that importing a
	 * shared page from a different protected VM will automatically also
	 * transfer its ownership.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC))
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Drain LRU caches: the local one on first invocation and the ones of all
 * CPUs on successive invocations. Returns "true" on the first invocation.
 */
static bool drain_lru(bool *drain_lru_called)
{
	/*
	 * If we have tried a local drain and the folio refcount
	 * still does not match our expected safe value, try with a
	 * system wide drain. This is needed if the pagevecs holding
	 * the page are on a different CPU.
	 */
	if (*drain_lru_called) {
		lru_add_drain_all();
		/* We give up here, don't retry immediately. */
		return false;
	}
	/*
	 * We are here if the folio refcount does not match the
	 * expected safe value. The main culprits are usually
	 * pagevecs. With lru_add_drain() we drain the pagevecs
	 * on the local CPU so that hopefully the refcount will
	 * reach the expected safe value.
	 */
	lru_add_drain();
	*drain_lru_called = true;
	/* The caller should try again immediately */
	return true;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool drain_lru_called = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct folio *folio;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest is thus never reaching secure mode. If
	 * userspace is playing dirty tricks with mapping huge pages later
	 * on this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	if (!ptep)
		goto out;
	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
		folio = page_folio(pte_page(*ptep));
		rc = -EAGAIN;
		if (folio_test_large(folio)) {
			rc = -E2BIG;
		} else if (folio_trylock(folio)) {
			if (should_export_before_import(uvcb, gmap->mm))
				uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
			rc = make_folio_secure(folio, uvcb);
			folio_unlock(folio);
		}

		/*
		 * Once we drop the PTL, the folio may get unmapped and
		 * freed immediately. We need a temporary reference.
		 */
		if (rc == -EAGAIN || rc == -E2BIG)
			folio_get(folio);
	}
	pte_unmap_unlock(ptep, ptelock);
out:
	mmap_read_unlock(gmap->mm);

	switch (rc) {
	case -E2BIG:
		folio_lock(folio);
		rc = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);

		switch (rc) {
		case 0:
			/* Splitting succeeded, try again immediately. */
			goto again;
		case -EAGAIN:
			/* Additional folio references. */
			if (drain_lru(&drain_lru_called))
				goto again;
			return -EAGAIN;
		case -EBUSY:
			/* Unexpected race. */
			return -EAGAIN;
		}
		WARN_ON_ONCE(1);
		return -ENXIO;
	case -EAGAIN:
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		folio_wait_writeback(folio);
		folio_put(folio);
		return -EAGAIN;
	case -EBUSY:
		/* Additional folio references. */
		if (drain_lru(&drain_lru_called))
			goto again;
		return -EAGAIN;
	case -ENXIO:
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

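/**
 * gmap_convert_to_secure - Convert a guest page to secure.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address of the page to be converted
 *
 * Convenience wrapper around gmap_make_secure() that builds the
 * Convert to Secure Storage control block for the given guest address.
 */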
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long uaddr;
	struct folio *folio;
	struct page *page;
	int rc;

	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Huge pages should not be able to become secure.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = 0;
	/* we take an extra reference here */
	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	folio = page_folio(page);
	rc = uv_destroy_folio(folio);
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_from_secure_folio(folio);
	folio_put(folio);
out:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the folio locked or with an extra reference! This will
 * prevent gmap_make_secure() from touching the folio concurrently. Having two
 * parallel arch_make_folio_accessible() calls is fine, as the UV calls will
 * become a no-op if the folio is already exported.
 */
int arch_make_folio_accessible(struct folio *folio)
{
	int rc = 0;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	/*
	 * PG_arch_1 is used in 2 places:
	 * 1. for storage keys of hugetlb folios and KVM
	 * 2. As an indication that this small folio might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never large folios, both variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &folio->flags))
		return 0;

	rc = uv_pin_shared(folio_to_phys(folio));
	if (!rc) {
		clear_bit(PG_arch_1, &folio->flags);
		return 0;
	}

	rc = uv_convert_from_secure(folio_to_phys(folio));
	if (!rc) {
		clear_bit(PG_arch_1, &folio->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_folio_accessible);

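/* Page-based convenience wrapper around arch_make_folio_accessible(). */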
int arch_make_page_accessible(struct page *page)
{
	return arch_make_folio_accessible(page_folio(page));
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
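/*
 * The attributes below expose selected uv_info fields read-only under
 * /sys/firmware/uv/query/; most of them are reported as raw hex values.
 */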
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
			  uv_info.inst_calls_list[0],
			  uv_info.inst_calls_list[1],
			  uv_info.inst_calls_list[2],
			  uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
						struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	&uv_query_supp_add_secret_req_ver_attr.attr,
	&uv_query_supp_add_secret_pcf_attr.attr,
	&uv_query_supp_secret_types_attr.attr,
	&uv_query_max_secrets_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

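/*
 * Top-level attributes that tell userspace whether this kernel runs as a
 * protected virtualization guest and/or can act as a protected
 * virtualization host; each reads as 0 when the respective support is not
 * compiled in.
 */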
static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	val = prot_virt_guest;
#endif
	return sysfs_emit(buf, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	int val = 0;

#if IS_ENABLED(CONFIG_KVM)
	val = prot_virt_host;
#endif

	return sysfs_emit(buf, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

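/*
 * Create the /sys/firmware/uv hierarchy: the prot_virt_guest and
 * prot_virt_host attributes at the top level and the "query" kset that
 * holds the uv_info attributes. Nothing is registered if the
 * Ultravisor-call facility (158) is not available.
 */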
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset) {
		rc = -ENOMEM;
		goto out_ind_files;
	}

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif