// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/pagewalk.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

#if !IS_ENABLED(CONFIG_KVM)
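/*
 * When KVM is not built in, there are no guest mappings to operate on;
 * these no-op stubs only exist to satisfy the references from the common
 * UV code below.
 */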
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
        return 0;
}

int gmap_fault(struct gmap *gmap, unsigned long gaddr,
               unsigned int fault_flags)
{
        return 0;
}
#endif

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);

/*
 * uv_info contains both host and guest information, but it is currently
 * only expected to be used within modules, namely by the KVM module and
 * by PV guest modules.
 *
 * The kernel itself will write these values once in uv_query_info()
 * and then make some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

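/*
 * Donate the storage at stor_base/stor_len to the Ultravisor via the
 * Init UV call. On failure, the rc/rrc pair returned by the Ultravisor
 * is logged.
 */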
static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
        struct uv_cb_init uvcb = {
                .header.cmd = UVC_CMD_INIT_UV,
                .header.len = sizeof(uvcb),
                .stor_origin = stor_base,
                .stor_len = stor_len,
        };

        if (uv_call(0, (uint64_t)&uvcb)) {
                pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
                       uvcb.header.rc, uvcb.header.rrc);
                return -1;
        }
        return 0;
}

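/*
 * Allocate the base storage the Ultravisor needs (1 MB aligned, below
 * 2 GB) and hand it over via uv_init(). If either step fails, protected
 * virtualization host support is disabled.
 */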
void __init setup_uv(void)
{
        void *uv_stor_base;

        if (!is_prot_virt_host())
                return;

        uv_stor_base = memblock_alloc_try_nid(
                uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
                MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
        if (!uv_stor_base) {
                pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
                        uv_info.uv_base_stor_len);
                goto fail;
        }

        if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
                memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
                goto fail;
        }

        pr_info("Reserving %luMB as ultravisor base storage\n",
                uv_info.uv_base_stor_len >> 20);
        return;
fail:
95 pr_info("Disabling support for protected virtualization");
        prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_PIN_PAGE_SHARED,
                .header.len = sizeof(uvcb),
                .paddr = paddr,
        };

        if (uv_call(0, (u64)&uvcb))
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_DESTR_SEC_STOR,
                .header.len = sizeof(uvcb),
                .paddr = paddr
        };

        if (uv_call(0, (u64)&uvcb)) {
                /*
                 * Older firmware uses 107/d as an indication of a non-secure
                 * page. Let us emulate the newer variant (no-op).
                 */
                if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
                        return 0;
                return -EINVAL;
        }
        return 0;
}

/*
 * The caller must already hold a reference to the folio.
 */
int uv_destroy_folio(struct folio *folio)
{
        int rc;

        /* See gmap_make_secure(): large folios cannot be secure */
        if (unlikely(folio_test_large(folio)))
                return 0;

        folio_get(folio);
        rc = uv_destroy(folio_to_phys(folio));
        if (!rc)
                clear_bit(PG_arch_1, &folio->flags);
        folio_put(folio);
        return rc;
}

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_destroy_pte(pte_t pte)
{
        VM_WARN_ON(!pte_present(pte));
        return uv_destroy_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
static int uv_convert_from_secure(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
                .header.len = sizeof(uvcb),
                .paddr = paddr
        };

        if (uv_call(0, (u64)&uvcb))
                return -EINVAL;
        return 0;
}

/*
 * The caller must already hold a reference to the folio.
 */
static int uv_convert_from_secure_folio(struct folio *folio)
{
        int rc;

        /* See gmap_make_secure(): large folios cannot be secure */
        if (unlikely(folio_test_large(folio)))
                return 0;

        folio_get(folio);
        rc = uv_convert_from_secure(folio_to_phys(folio));
        if (!rc)
                clear_bit(PG_arch_1, &folio->flags);
        folio_put(folio);
        return rc;
}

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_convert_from_secure_pte(pte_t pte)
{
        VM_WARN_ON(!pte_present(pte));
        return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Calculate the expected ref_count for a folio that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * folio cannot be a large folio, for example.
 */
static int expected_folio_refs(struct folio *folio)
{
        int res;

        res = folio_mapcount(folio);
        if (folio_test_swapcache(folio)) {
                res++;
        } else if (folio_mapping(folio)) {
                res++;
                if (folio->private)
                        res++;
        }
        return res;
}

static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
        int expected, cc = 0;

        if (folio_test_writeback(folio))
                return -EAGAIN;
        expected = expected_folio_refs(folio);
        if (!folio_ref_freeze(folio, expected))
                return -EBUSY;
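        /* Mark the folio as potentially secure; see arch_make_folio_accessible() */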
        set_bit(PG_arch_1, &folio->flags);
        /*
         * If the UVC does not succeed or fail immediately, we don't want to
         * loop for long, or we might get stall notifications.
         * On the other hand, this is a complex scenario and we are holding
         * a lot of locks, so we can't easily sleep and reschedule. We try
         * only once, and if the UVC returned busy or partial completion,
         * we return -EAGAIN and we let the callers deal with it.
         */
        cc = __uv_call(0, (u64)uvcb);
        folio_ref_unfreeze(folio, expected);
        /*
         * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
         * If busy or partially completed, return -EAGAIN.
         */
        if (cc == UVC_CC_OK)
                return 0;
        else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
                return -EAGAIN;
        return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/**
 * should_export_before_import - Determine whether an export is needed
 *				 before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * An export is also not needed when there is only one protected VM, because
 * the page cannot belong to the wrong VM in that case (there is no "other
 * VM" it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
        /*
         * The misc feature indicates, among other things, that importing a
         * shared page from a different protected VM will automatically also
         * transfer its ownership.
         */
        if (uv_has_feature(BIT_UV_FEAT_MISC))
                return false;
        if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
                return false;
        return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Drain LRU caches: the local one on first invocation and the ones of all
 * CPUs on successive invocations. Returns "true" on the first invocation.
 */
static bool drain_lru(bool *drain_lru_called)
{
        /*
         * If we have tried a local drain and the folio refcount
         * still does not match our expected safe value, try with a
         * system wide drain. This is needed if the pagevecs holding
         * the page are on a different CPU.
         */
        if (*drain_lru_called) {
                lru_add_drain_all();
                /* We give up here, don't retry immediately. */
                return false;
        }
        /*
         * We are here if the folio refcount does not match the
         * expected safe value. The main culprits are usually
         * pagevecs. With lru_add_drain() we drain the pagevecs
         * on the local CPU so that hopefully the refcount will
         * reach the expected safe value.
         */
        lru_add_drain();
        *drain_lru_called = true;
        /* The caller should try again immediately */
        return true;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
        struct vm_area_struct *vma;
        bool drain_lru_called = false;
        spinlock_t *ptelock;
        unsigned long uaddr;
        struct folio *folio;
        pte_t *ptep;
        int rc;

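        /*
         * The operation restarts here whenever it must be retried locally:
         * after successfully splitting a large folio, or after a local
         * LRU drain (see drain_lru()).
         */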
again:
        rc = -EFAULT;
        mmap_read_lock(gmap->mm);

        uaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(uaddr))
                goto out;
        vma = vma_lookup(gmap->mm, uaddr);
        if (!vma)
                goto out;
        /*
         * Secure pages cannot be huge and userspace should not combine both.
         * In case userspace does it anyway this will result in an -EFAULT for
         * the unpack. The guest thus never reaches secure mode. If
         * userspace plays dirty tricks by mapping huge pages later on, this
         * will result in a segmentation fault.
         */
        if (is_vm_hugetlb_page(vma))
                goto out;

        rc = -ENXIO;
        ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
        if (!ptep)
                goto out;
        if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
                folio = page_folio(pte_page(*ptep));
                rc = -EAGAIN;
                if (folio_test_large(folio)) {
                        rc = -E2BIG;
                } else if (folio_trylock(folio)) {
                        if (should_export_before_import(uvcb, gmap->mm))
                                uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
                        rc = make_folio_secure(folio, uvcb);
                        folio_unlock(folio);
                }

                /*
                 * Once we drop the PTL, the folio may get unmapped and
                 * freed immediately. We need a temporary reference.
                 */
                if (rc == -EAGAIN || rc == -E2BIG)
                        folio_get(folio);
        }
        pte_unmap_unlock(ptep, ptelock);
out:
        mmap_read_unlock(gmap->mm);

        switch (rc) {
        case -E2BIG:
                folio_lock(folio);
                rc = split_folio(folio);
                folio_unlock(folio);
                folio_put(folio);

                switch (rc) {
                case 0:
                        /* Splitting succeeded, try again immediately. */
                        goto again;
                case -EAGAIN:
                        /* Additional folio references. */
                        if (drain_lru(&drain_lru_called))
                                goto again;
                        return -EAGAIN;
                case -EBUSY:
                        /* Unexpected race. */
                        return -EAGAIN;
                }
                WARN_ON_ONCE(1);
                return -ENXIO;
        case -EAGAIN:
                /*
                 * If we are here because the UVC returned busy or partial
                 * completion, this is just a useless check, but it is safe.
                 */
                folio_wait_writeback(folio);
                folio_put(folio);
                return -EAGAIN;
        case -EBUSY:
                /* Additional folio references. */
                if (drain_lru(&drain_lru_called))
                        goto again;
                return -EAGAIN;
        case -ENXIO:
                if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
                        return -EFAULT;
                return -EAGAIN;
        }
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

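/*
 * Convenience wrapper: build a Convert To Secure Storage control block
 * for the given guest address and run it through gmap_make_secure().
 */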
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
        struct uv_cb_cts uvcb = {
                .header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
                .header.len = sizeof(uvcb),
                .guest_handle = gmap->guest_handle,
                .gaddr = gaddr,
        };

        return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
        struct vm_area_struct *vma;
        struct folio_walk fw;
        unsigned long uaddr;
        struct folio *folio;
        int rc;

        rc = -EFAULT;
        mmap_read_lock(gmap->mm);

        uaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(uaddr))
                goto out;
        vma = vma_lookup(gmap->mm, uaddr);
        if (!vma)
                goto out;
        /*
         * Huge pages should not be able to become secure
         */
        if (is_vm_hugetlb_page(vma))
                goto out;

        rc = 0;
        folio = folio_walk_start(&fw, vma, uaddr, 0);
        if (!folio)
                goto out;
        /*
         * See gmap_make_secure(): large folios cannot be secure. Small
         * folio implies FW_LEVEL_PTE.
         */
        if (folio_test_large(folio) || !pte_write(fw.pte))
                goto out_walk_end;
        rc = uv_destroy_folio(folio);
        /*
         * Fault handlers can race; it is possible that two CPUs will fault
         * on the same secure page. One CPU can destroy the page, reboot,
         * re-enter secure mode and import it, while the second CPU was
         * stuck at the beginning of the handler. At some point the second
         * CPU will be able to progress, and it will not be able to destroy
         * the page. In that case we do not want to terminate the process,
         * we instead try to export the page.
         */
        if (rc)
                rc = uv_convert_from_secure_folio(folio);
out_walk_end:
        folio_walk_end(&fw, vma);
out:
        mmap_read_unlock(gmap->mm);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the folio locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the folio concurrently. Having two
 * parallel calls to arch_make_folio_accessible is fine, as the UV calls will
 * become a no-op if the folio is already exported.
 */
int arch_make_folio_accessible(struct folio *folio)
{
        int rc = 0;

        /* See gmap_make_secure(): large folios cannot be secure */
        if (unlikely(folio_test_large(folio)))
                return 0;

        /*
         * PG_arch_1 is used in two places:
         * 1. for storage keys of hugetlb folios and KVM
         * 2. as an indication that this small folio might be secure. This can
         *    overindicate, e.g. we set the bit before calling
         *    convert_to_secure.
         * As secure pages are never large folios, both variants can co-exist.
         */
        if (!test_bit(PG_arch_1, &folio->flags))
                return 0;

        rc = uv_pin_shared(folio_to_phys(folio));
        if (!rc) {
                clear_bit(PG_arch_1, &folio->flags);
                return 0;
        }

        rc = uv_convert_from_secure(folio_to_phys(folio));
        if (!rc) {
                clear_bit(PG_arch_1, &folio->flags);
                return 0;
        }

        return rc;
}
EXPORT_SYMBOL_GPL(arch_make_folio_accessible);

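/*
 * The read-only attributes below are exported to userspace under
 * /sys/firmware/uv/query/ (see uv_info_init()).
 */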
static ssize_t uv_query_facilities(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
                          uv_info.inst_calls_list[0],
                          uv_info.inst_calls_list[1],
                          uv_info.inst_calls_list[2],
                          uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
        __ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
        __ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
        __ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

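/*
 * Note: unlike its siblings, this attribute is exported with the full
 * "uv_query_" prefix in its sysfs file name.
 */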
static struct kobj_attribute uv_query_dump_cpu_len_attr =
        __ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
                                               struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
        __ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
                                          struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
        __ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
                                            struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
        __ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
        __ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
                                      struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
        __ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
        __ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
                                             struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
        __ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
        __ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
                                                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
        __ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
                                            struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
        __ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
                                          struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
        __ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", uv_info.max_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
        __ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static struct attribute *uv_query_attrs[] = {
        &uv_query_facilities_attr.attr,
        &uv_query_feature_indications_attr.attr,
        &uv_query_max_guest_cpus_attr.attr,
        &uv_query_max_guest_vms_attr.attr,
        &uv_query_max_guest_addr_attr.attr,
        &uv_query_supp_se_hdr_ver_attr.attr,
        &uv_query_supp_se_hdr_pcf_attr.attr,
        &uv_query_dump_storage_state_len_attr.attr,
        &uv_query_dump_finalize_len_attr.attr,
        &uv_query_dump_cpu_len_attr.attr,
        &uv_query_supp_att_req_hdr_ver_attr.attr,
        &uv_query_supp_att_pflags_attr.attr,
        &uv_query_supp_add_secret_req_ver_attr.attr,
        &uv_query_supp_add_secret_pcf_attr.attr,
        &uv_query_supp_secret_types_attr.attr,
        &uv_query_max_secrets_attr.attr,
        NULL,
};

static struct attribute_group uv_query_attr_group = {
        .attrs = uv_query_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", prot_virt_guest);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", prot_virt_host);
}

static struct kobj_attribute uv_prot_virt_guest =
        __ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
        __ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
        &uv_prot_virt_guest.attr,
        &uv_prot_virt_host.attr,
        NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

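/*
 * Create the /sys/firmware/uv hierarchy: the prot_virt_guest and
 * prot_virt_host attributes directly under "uv", and the query
 * attributes in the "query" kset below it. Facility 158 is the
 * Ultravisor-call facility; without it there is nothing to expose.
 */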
static int __init uv_info_init(void)
{
        int rc = -ENOMEM;

        if (!test_facility(158))
                return 0;

        uv_kobj = kobject_create_and_add("uv", firmware_kobj);
        if (!uv_kobj)
                return -ENOMEM;

        rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
        if (rc)
                goto out_kobj;

        uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
        if (!uv_query_kset) {
                rc = -ENOMEM;
                goto out_ind_files;
        }

        rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
        if (!rc)
                return 0;

        kset_unregister(uv_query_kset);
out_ind_files:
        sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
        kobject_del(uv_kobj);
        kobject_put(uv_kobj);
        return rc;
}
device_initcall(uv_info_init);