xref: /linux/arch/s390/kernel/uv.c (revision 6e17c6de3ddf3073741d9c91a796ee696914d8a0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common Ultravisor functions and initialization
4  *
5  * Copyright IBM Corp. 2019, 2020
6  */
7 #define KMSG_COMPONENT "prot_virt"
8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9 
10 #include <linux/kernel.h>
11 #include <linux/types.h>
12 #include <linux/sizes.h>
13 #include <linux/bitmap.h>
14 #include <linux/memblock.h>
15 #include <linux/pagemap.h>
16 #include <linux/swap.h>
17 #include <asm/facility.h>
18 #include <asm/sections.h>
19 #include <asm/uv.h>
20 
21 /* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
22 #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
23 int __bootdata_preserved(prot_virt_guest);
24 #endif
25 
26 struct uv_info __bootdata_preserved(uv_info);
27 
28 #if IS_ENABLED(CONFIG_KVM)
29 int __bootdata_preserved(prot_virt_host);
30 EXPORT_SYMBOL(prot_virt_host);
31 EXPORT_SYMBOL(uv_info);
32 
33 static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
34 {
35 	struct uv_cb_init uvcb = {
36 		.header.cmd = UVC_CMD_INIT_UV,
37 		.header.len = sizeof(uvcb),
38 		.stor_origin = stor_base,
39 		.stor_len = stor_len,
40 	};
41 
42 	if (uv_call(0, (uint64_t)&uvcb)) {
43 		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0%x\n",
44 		       uvcb.header.rc, uvcb.header.rrc);
45 		return -1;
46 	}
47 	return 0;
48 }
49 
50 void __init setup_uv(void)
51 {
52 	void *uv_stor_base;
53 
54 	if (!is_prot_virt_host())
55 		return;
56 
57 	uv_stor_base = memblock_alloc_try_nid(
58 		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
59 		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
60 	if (!uv_stor_base) {
61 		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
62 			uv_info.uv_base_stor_len);
63 		goto fail;
64 	}
65 
66 	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
67 		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
68 		goto fail;
69 	}
70 
71 	pr_info("Reserving %luMB as ultravisor base storage\n",
72 		uv_info.uv_base_stor_len >> 20);
73 	return;
74 fail:
75 	pr_info("Disabling support for protected virtualization");
76 	prot_virt_host = 0;
77 }
78 
79 /*
80  * Requests the Ultravisor to pin the page in the shared state. This will
81  * cause an intercept when the guest attempts to unshare the pinned page.
82  */
83 static int uv_pin_shared(unsigned long paddr)
84 {
85 	struct uv_cb_cfs uvcb = {
86 		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
87 		.header.len = sizeof(uvcb),
88 		.paddr = paddr,
89 	};
90 
91 	if (uv_call(0, (u64)&uvcb))
92 		return -EINVAL;
93 	return 0;
94 }
95 
96 /*
97  * Requests the Ultravisor to destroy a guest page and make it
98  * accessible to the host. The destroy clears the page instead of
99  * exporting.
100  *
101  * @paddr: Absolute host address of page to be destroyed
102  */
103 static int uv_destroy_page(unsigned long paddr)
104 {
105 	struct uv_cb_cfs uvcb = {
106 		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
107 		.header.len = sizeof(uvcb),
108 		.paddr = paddr
109 	};
110 
111 	if (uv_call(0, (u64)&uvcb)) {
112 		/*
113 		 * Older firmware uses 107/d as an indication of a non secure
114 		 * page. Let us emulate the newer variant (no-op).
115 		 */
116 		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
117 			return 0;
118 		return -EINVAL;
119 	}
120 	return 0;
121 }
122 
123 /*
124  * The caller must already hold a reference to the page
125  */
126 int uv_destroy_owned_page(unsigned long paddr)
127 {
128 	struct page *page = phys_to_page(paddr);
129 	int rc;
130 
131 	get_page(page);
132 	rc = uv_destroy_page(paddr);
133 	if (!rc)
134 		clear_bit(PG_arch_1, &page->flags);
135 	put_page(page);
136 	return rc;
137 }
138 
139 /*
140  * Requests the Ultravisor to encrypt a guest page and make it
141  * accessible to the host for paging (export).
142  *
143  * @paddr: Absolute host address of page to be exported
144  */
145 int uv_convert_from_secure(unsigned long paddr)
146 {
147 	struct uv_cb_cfs uvcb = {
148 		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
149 		.header.len = sizeof(uvcb),
150 		.paddr = paddr
151 	};
152 
153 	if (uv_call(0, (u64)&uvcb))
154 		return -EINVAL;
155 	return 0;
156 }
157 
158 /*
159  * The caller must already hold a reference to the page
160  */
161 int uv_convert_owned_from_secure(unsigned long paddr)
162 {
163 	struct page *page = phys_to_page(paddr);
164 	int rc;
165 
166 	get_page(page);
167 	rc = uv_convert_from_secure(paddr);
168 	if (!rc)
169 		clear_bit(PG_arch_1, &page->flags);
170 	put_page(page);
171 	return rc;
172 }
173 
174 /*
175  * Calculate the expected ref_count for a page that would otherwise have no
176  * further pins. This was cribbed from similar functions in other places in
177  * the kernel, but with some slight modifications. We know that a secure
178  * page can not be a huge page for example.
179  */
180 static int expected_page_refs(struct page *page)
181 {
182 	int res;
183 
184 	res = page_mapcount(page);
185 	if (PageSwapCache(page)) {
186 		res++;
187 	} else if (page_mapping(page)) {
188 		res++;
189 		if (page_has_private(page))
190 			res++;
191 	}
192 	return res;
193 }
194 
195 static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
196 {
197 	int expected, cc = 0;
198 
199 	if (PageWriteback(page))
200 		return -EAGAIN;
201 	expected = expected_page_refs(page);
202 	if (!page_ref_freeze(page, expected))
203 		return -EBUSY;
204 	set_bit(PG_arch_1, &page->flags);
205 	/*
206 	 * If the UVC does not succeed or fail immediately, we don't want to
207 	 * loop for long, or we might get stall notifications.
208 	 * On the other hand, this is a complex scenario and we are holding a lot of
209 	 * locks, so we can't easily sleep and reschedule. We try only once,
210 	 * and if the UVC returned busy or partial completion, we return
211 	 * -EAGAIN and we let the callers deal with it.
212 	 */
213 	cc = __uv_call(0, (u64)uvcb);
214 	page_ref_unfreeze(page, expected);
215 	/*
216 	 * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
217 	 * If busy or partially completed, return -EAGAIN.
218 	 */
219 	if (cc == UVC_CC_OK)
220 		return 0;
221 	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
222 		return -EAGAIN;
223 	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
224 }
225 
226 /**
227  * should_export_before_import - Determine whether an export is needed
228  * before an import-like operation
229  * @uvcb: the Ultravisor control block of the UVC to be performed
230  * @mm: the mm of the process
231  *
232  * Returns whether an export is needed before every import-like operation.
233  * This is needed for shared pages, which don't trigger a secure storage
234  * exception when accessed from a different guest.
235  *
236  * Although considered as one, the Unpin Page UVC is not an actual import,
237  * so it is not affected.
238  *
239  * No export is needed also when there is only one protected VM, because the
240  * page cannot belong to the wrong VM in that case (there is no "other VM"
241  * it can belong to).
242  *
243  * Return: true if an export is needed before every import, otherwise false.
244  */
245 static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
246 {
247 	/*
248 	 * The misc feature indicates, among other things, that importing a
249 	 * shared page from a different protected VM will automatically also
250 	 * transfer its ownership.
251 	 */
252 	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications))
253 		return false;
254 	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
255 		return false;
256 	return atomic_read(&mm->context.protected_count) > 1;
257 }
258 
259 /*
260  * Requests the Ultravisor to make a page accessible to a guest.
261  * If it's brought in the first time, it will be cleared. If
262  * it has been exported before, it will be decrypted and integrity
263  * checked.
264  */
265 int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
266 {
267 	struct vm_area_struct *vma;
268 	bool local_drain = false;
269 	spinlock_t *ptelock;
270 	unsigned long uaddr;
271 	struct page *page;
272 	pte_t *ptep;
273 	int rc;
274 
275 again:
276 	rc = -EFAULT;
277 	mmap_read_lock(gmap->mm);
278 
279 	uaddr = __gmap_translate(gmap, gaddr);
280 	if (IS_ERR_VALUE(uaddr))
281 		goto out;
282 	vma = vma_lookup(gmap->mm, uaddr);
283 	if (!vma)
284 		goto out;
285 	/*
286 	 * Secure pages cannot be huge and userspace should not combine both.
287 	 * In case userspace does it anyway this will result in an -EFAULT for
288 	 * the unpack. The guest is thus never reaching secure mode. If
289 	 * userspace is playing dirty tricky with mapping huge pages later
290 	 * on this will result in a segmentation fault.
291 	 */
292 	if (is_vm_hugetlb_page(vma))
293 		goto out;
294 
295 	rc = -ENXIO;
296 	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
297 	if (!ptep)
298 		goto out;
299 	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
300 		page = pte_page(*ptep);
301 		rc = -EAGAIN;
302 		if (trylock_page(page)) {
303 			if (should_export_before_import(uvcb, gmap->mm))
304 				uv_convert_from_secure(page_to_phys(page));
305 			rc = make_page_secure(page, uvcb);
306 			unlock_page(page);
307 		}
308 	}
309 	pte_unmap_unlock(ptep, ptelock);
310 out:
311 	mmap_read_unlock(gmap->mm);
312 
313 	if (rc == -EAGAIN) {
314 		/*
315 		 * If we are here because the UVC returned busy or partial
316 		 * completion, this is just a useless check, but it is safe.
317 		 */
318 		wait_on_page_writeback(page);
319 	} else if (rc == -EBUSY) {
320 		/*
321 		 * If we have tried a local drain and the page refcount
322 		 * still does not match our expected safe value, try with a
323 		 * system wide drain. This is needed if the pagevecs holding
324 		 * the page are on a different CPU.
325 		 */
326 		if (local_drain) {
327 			lru_add_drain_all();
328 			/* We give up here, and let the caller try again */
329 			return -EAGAIN;
330 		}
331 		/*
332 		 * We are here if the page refcount does not match the
333 		 * expected safe value. The main culprits are usually
334 		 * pagevecs. With lru_add_drain() we drain the pagevecs
335 		 * on the local CPU so that hopefully the refcount will
336 		 * reach the expected safe value.
337 		 */
338 		lru_add_drain();
339 		local_drain = true;
340 		/* And now we try again immediately after draining */
341 		goto again;
342 	} else if (rc == -ENXIO) {
343 		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
344 			return -EFAULT;
345 		return -EAGAIN;
346 	}
347 	return rc;
348 }
349 EXPORT_SYMBOL_GPL(gmap_make_secure);
350 
351 int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
352 {
353 	struct uv_cb_cts uvcb = {
354 		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
355 		.header.len = sizeof(uvcb),
356 		.guest_handle = gmap->guest_handle,
357 		.gaddr = gaddr,
358 	};
359 
360 	return gmap_make_secure(gmap, gaddr, &uvcb);
361 }
362 EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
363 
364 /**
365  * gmap_destroy_page - Destroy a guest page.
366  * @gmap: the gmap of the guest
367  * @gaddr: the guest address to destroy
368  *
369  * An attempt will be made to destroy the given guest page. If the attempt
370  * fails, an attempt is made to export the page. If both attempts fail, an
371  * appropriate error is returned.
372  */
373 int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
374 {
375 	struct vm_area_struct *vma;
376 	unsigned long uaddr;
377 	struct page *page;
378 	int rc;
379 
380 	rc = -EFAULT;
381 	mmap_read_lock(gmap->mm);
382 
383 	uaddr = __gmap_translate(gmap, gaddr);
384 	if (IS_ERR_VALUE(uaddr))
385 		goto out;
386 	vma = vma_lookup(gmap->mm, uaddr);
387 	if (!vma)
388 		goto out;
389 	/*
390 	 * Huge pages should not be able to become secure
391 	 */
392 	if (is_vm_hugetlb_page(vma))
393 		goto out;
394 
395 	rc = 0;
396 	/* we take an extra reference here */
397 	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
398 	if (IS_ERR_OR_NULL(page))
399 		goto out;
400 	rc = uv_destroy_owned_page(page_to_phys(page));
401 	/*
402 	 * Fault handlers can race; it is possible that two CPUs will fault
403 	 * on the same secure page. One CPU can destroy the page, reboot,
404 	 * re-enter secure mode and import it, while the second CPU was
405 	 * stuck at the beginning of the handler. At some point the second
406 	 * CPU will be able to progress, and it will not be able to destroy
407 	 * the page. In that case we do not want to terminate the process,
408 	 * we instead try to export the page.
409 	 */
410 	if (rc)
411 		rc = uv_convert_owned_from_secure(page_to_phys(page));
412 	put_page(page);
413 out:
414 	mmap_read_unlock(gmap->mm);
415 	return rc;
416 }
417 EXPORT_SYMBOL_GPL(gmap_destroy_page);
418 
419 /*
420  * To be called with the page locked or with an extra reference! This will
421  * prevent gmap_make_secure from touching the page concurrently. Having 2
422  * parallel make_page_accessible is fine, as the UV calls will become a
423  * no-op if the page is already exported.
424  */
425 int arch_make_page_accessible(struct page *page)
426 {
427 	int rc = 0;
428 
429 	/* Hugepage cannot be protected, so nothing to do */
430 	if (PageHuge(page))
431 		return 0;
432 
433 	/*
434 	 * PG_arch_1 is used in 3 places:
435 	 * 1. for kernel page tables during early boot
436 	 * 2. for storage keys of huge pages and KVM
437 	 * 3. As an indication that this page might be secure. This can
438 	 *    overindicate, e.g. we set the bit before calling
439 	 *    convert_to_secure.
440 	 * As secure pages are never huge, all 3 variants can co-exists.
441 	 */
442 	if (!test_bit(PG_arch_1, &page->flags))
443 		return 0;
444 
445 	rc = uv_pin_shared(page_to_phys(page));
446 	if (!rc) {
447 		clear_bit(PG_arch_1, &page->flags);
448 		return 0;
449 	}
450 
451 	rc = uv_convert_from_secure(page_to_phys(page));
452 	if (!rc) {
453 		clear_bit(PG_arch_1, &page->flags);
454 		return 0;
455 	}
456 
457 	return rc;
458 }
459 EXPORT_SYMBOL_GPL(arch_make_page_accessible);
460 
461 #endif
462 
463 #if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
464 static ssize_t uv_query_facilities(struct kobject *kobj,
465 				   struct kobj_attribute *attr, char *page)
466 {
467 	return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
468 			uv_info.inst_calls_list[0],
469 			uv_info.inst_calls_list[1],
470 			uv_info.inst_calls_list[2],
471 			uv_info.inst_calls_list[3]);
472 }
473 
474 static struct kobj_attribute uv_query_facilities_attr =
475 	__ATTR(facilities, 0444, uv_query_facilities, NULL);
476 
477 static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
478 					struct kobj_attribute *attr, char *buf)
479 {
480 	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
481 }
482 
483 static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
484 	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);
485 
486 static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
487 					struct kobj_attribute *attr, char *buf)
488 {
489 	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
490 }
491 
492 static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
493 	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);
494 
495 static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
496 				     struct kobj_attribute *attr, char *page)
497 {
498 	return scnprintf(page, PAGE_SIZE, "%lx\n",
499 			uv_info.guest_cpu_stor_len);
500 }
501 
502 static struct kobj_attribute uv_query_dump_cpu_len_attr =
503 	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);
504 
505 static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
506 					       struct kobj_attribute *attr, char *page)
507 {
508 	return scnprintf(page, PAGE_SIZE, "%lx\n",
509 			uv_info.conf_dump_storage_state_len);
510 }
511 
512 static struct kobj_attribute uv_query_dump_storage_state_len_attr =
513 	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);
514 
515 static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
516 					  struct kobj_attribute *attr, char *page)
517 {
518 	return scnprintf(page, PAGE_SIZE, "%lx\n",
519 			uv_info.conf_dump_finalize_len);
520 }
521 
522 static struct kobj_attribute uv_query_dump_finalize_len_attr =
523 	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);
524 
525 static ssize_t uv_query_feature_indications(struct kobject *kobj,
526 					    struct kobj_attribute *attr, char *buf)
527 {
528 	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
529 }
530 
531 static struct kobj_attribute uv_query_feature_indications_attr =
532 	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);
533 
534 static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
535 				       struct kobj_attribute *attr, char *page)
536 {
537 	return scnprintf(page, PAGE_SIZE, "%d\n",
538 			uv_info.max_guest_cpu_id + 1);
539 }
540 
541 static struct kobj_attribute uv_query_max_guest_cpus_attr =
542 	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);
543 
544 static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
545 				      struct kobj_attribute *attr, char *page)
546 {
547 	return scnprintf(page, PAGE_SIZE, "%d\n",
548 			uv_info.max_num_sec_conf);
549 }
550 
551 static struct kobj_attribute uv_query_max_guest_vms_attr =
552 	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);
553 
554 static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
555 				       struct kobj_attribute *attr, char *page)
556 {
557 	return scnprintf(page, PAGE_SIZE, "%lx\n",
558 			uv_info.max_sec_stor_addr);
559 }
560 
561 static struct kobj_attribute uv_query_max_guest_addr_attr =
562 	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);
563 
564 static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
565 					     struct kobj_attribute *attr, char *page)
566 {
567 	return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_req_hdr_ver);
568 }
569 
570 static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
571 	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);
572 
573 static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
574 					struct kobj_attribute *attr, char *page)
575 {
576 	return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_pflags);
577 }
578 
579 static struct kobj_attribute uv_query_supp_att_pflags_attr =
580 	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);
581 
582 static struct attribute *uv_query_attrs[] = {
583 	&uv_query_facilities_attr.attr,
584 	&uv_query_feature_indications_attr.attr,
585 	&uv_query_max_guest_cpus_attr.attr,
586 	&uv_query_max_guest_vms_attr.attr,
587 	&uv_query_max_guest_addr_attr.attr,
588 	&uv_query_supp_se_hdr_ver_attr.attr,
589 	&uv_query_supp_se_hdr_pcf_attr.attr,
590 	&uv_query_dump_storage_state_len_attr.attr,
591 	&uv_query_dump_finalize_len_attr.attr,
592 	&uv_query_dump_cpu_len_attr.attr,
593 	&uv_query_supp_att_req_hdr_ver_attr.attr,
594 	&uv_query_supp_att_pflags_attr.attr,
595 	NULL,
596 };
597 
598 static struct attribute_group uv_query_attr_group = {
599 	.attrs = uv_query_attrs,
600 };
601 
602 static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
603 				     struct kobj_attribute *attr, char *page)
604 {
605 	int val = 0;
606 
607 #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
608 	val = prot_virt_guest;
609 #endif
610 	return scnprintf(page, PAGE_SIZE, "%d\n", val);
611 }
612 
613 static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
614 				    struct kobj_attribute *attr, char *page)
615 {
616 	int val = 0;
617 
618 #if IS_ENABLED(CONFIG_KVM)
619 	val = prot_virt_host;
620 #endif
621 
622 	return scnprintf(page, PAGE_SIZE, "%d\n", val);
623 }
624 
625 static struct kobj_attribute uv_prot_virt_guest =
626 	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);
627 
628 static struct kobj_attribute uv_prot_virt_host =
629 	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);
630 
631 static const struct attribute *uv_prot_virt_attrs[] = {
632 	&uv_prot_virt_guest.attr,
633 	&uv_prot_virt_host.attr,
634 	NULL,
635 };
636 
637 static struct kset *uv_query_kset;
638 static struct kobject *uv_kobj;
639 
640 static int __init uv_info_init(void)
641 {
642 	int rc = -ENOMEM;
643 
644 	if (!test_facility(158))
645 		return 0;
646 
647 	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
648 	if (!uv_kobj)
649 		return -ENOMEM;
650 
651 	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
652 	if (rc)
653 		goto out_kobj;
654 
655 	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
656 	if (!uv_query_kset) {
657 		rc = -ENOMEM;
658 		goto out_ind_files;
659 	}
660 
661 	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
662 	if (!rc)
663 		return 0;
664 
665 	kset_unregister(uv_query_kset);
666 out_ind_files:
667 	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
668 out_kobj:
669 	kobject_del(uv_kobj);
670 	kobject_put(uv_kobj);
671 	return rc;
672 }
673 device_initcall(uv_info_init);
674 #endif
675