/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ultravisor Interfaces
 *
 * Copyright IBM Corp. 2019, 2024
 *
 * Author(s):
 *	Vasily Gorbik <gor@linux.ibm.com>
 *	Janosch Frank <frankja@linux.ibm.com>
 */
#ifndef _ASM_S390_UV_H
#define _ASM_S390_UV_H

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/gmap.h>
#include <asm/asm.h>

#define UVC_CC_OK	0
#define UVC_CC_ERROR	1
#define UVC_CC_BUSY	2
#define UVC_CC_PARTIAL	3

#define UVC_RC_EXECUTED		0x0001
#define UVC_RC_INV_CMD		0x0002
#define UVC_RC_INV_STATE	0x0003
#define UVC_RC_INV_LEN		0x0005
#define UVC_RC_NO_RESUME	0x0007
#define UVC_RC_MORE_DATA	0x0100
#define UVC_RC_NEED_DESTROY	0x8000

#define UVC_CMD_QUI			0x0001
#define UVC_CMD_QUERY_KEYS		0x0002
#define UVC_CMD_INIT_UV			0x000f
#define UVC_CMD_CREATE_SEC_CONF		0x0100
#define UVC_CMD_DESTROY_SEC_CONF	0x0101
#define UVC_CMD_DESTROY_SEC_CONF_FAST	0x0102
#define UVC_CMD_CREATE_SEC_CPU		0x0120
#define UVC_CMD_DESTROY_SEC_CPU		0x0121
#define UVC_CMD_CONV_TO_SEC_STOR	0x0200
#define UVC_CMD_CONV_FROM_SEC_STOR	0x0201
#define UVC_CMD_DESTR_SEC_STOR		0x0202
#define UVC_CMD_SET_SEC_CONF_PARAMS	0x0300
#define UVC_CMD_UNPACK_IMG		0x0301
#define UVC_CMD_VERIFY_IMG		0x0302
#define UVC_CMD_CPU_RESET		0x0310
#define UVC_CMD_CPU_RESET_INITIAL	0x0311
#define UVC_CMD_PREPARE_RESET		0x0320
#define UVC_CMD_CPU_RESET_CLEAR		0x0321
#define UVC_CMD_CPU_SET_STATE		0x0330
#define UVC_CMD_SET_UNSHARE_ALL		0x0340
#define UVC_CMD_PIN_PAGE_SHARED		0x0341
#define UVC_CMD_UNPIN_PAGE_SHARED	0x0342
#define UVC_CMD_DUMP_INIT		0x0400
#define UVC_CMD_DUMP_CONF_STOR_STATE	0x0401
#define UVC_CMD_DUMP_CPU		0x0402
#define UVC_CMD_DUMP_COMPLETE		0x0403
#define UVC_CMD_SET_SHARED_ACCESS	0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS	0x1001
#define UVC_CMD_RETR_ATTEST		0x1020
#define UVC_CMD_ADD_SECRET		0x1031
#define UVC_CMD_LIST_SECRETS		0x1033
#define UVC_CMD_LOCK_SECRETS		0x1034
#define UVC_CMD_RETR_SECRET		0x1035

/* Bits in installed uv calls */
enum uv_cmds_inst {
	BIT_UVC_CMD_QUI = 0,
	BIT_UVC_CMD_INIT_UV = 1,
	BIT_UVC_CMD_CREATE_SEC_CONF = 2,
	BIT_UVC_CMD_DESTROY_SEC_CONF = 3,
	BIT_UVC_CMD_CREATE_SEC_CPU = 4,
	BIT_UVC_CMD_DESTROY_SEC_CPU = 5,
	BIT_UVC_CMD_CONV_TO_SEC_STOR = 6,
	BIT_UVC_CMD_CONV_FROM_SEC_STOR = 7,
	BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
	BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
	BIT_UVC_CMD_SET_SEC_PARMS = 11,
	BIT_UVC_CMD_UNPACK_IMG = 13,
	BIT_UVC_CMD_VERIFY_IMG = 14,
	BIT_UVC_CMD_CPU_RESET = 15,
	BIT_UVC_CMD_CPU_RESET_INITIAL = 16,
	BIT_UVC_CMD_CPU_SET_STATE = 17,
	BIT_UVC_CMD_PREPARE_RESET = 18,
	BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19,
	BIT_UVC_CMD_UNSHARE_ALL = 20,
	BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
	BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
	BIT_UVC_CMD_DESTROY_SEC_CONF_FAST = 23,
	BIT_UVC_CMD_DUMP_INIT = 24,
	BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25,
	BIT_UVC_CMD_DUMP_CPU = 26,
	BIT_UVC_CMD_DUMP_COMPLETE = 27,
	BIT_UVC_CMD_RETR_ATTEST = 28,
	BIT_UVC_CMD_ADD_SECRET = 29,
	BIT_UVC_CMD_LIST_SECRETS = 30,
	BIT_UVC_CMD_LOCK_SECRETS = 31,
	BIT_UVC_CMD_RETR_SECRET = 33,
	BIT_UVC_CMD_QUERY_KEYS = 34,
};

enum uv_feat_ind {
	BIT_UV_FEAT_MISC = 0,
	BIT_UV_FEAT_AIV = 1,
	BIT_UV_FEAT_AP = 4,
	BIT_UV_FEAT_AP_INTR = 5,
};

struct uv_cb_header {
	u16 len;
	u16 cmd;	/* Command Code */
	u16 rc;		/* Response Code */
	u16 rrc;	/* Return Reason Code */
} __packed __aligned(8);

/* Query Ultravisor Information */
struct uv_cb_qui {
	struct uv_cb_header header;		/* 0x0000 */
	u64 reserved08;				/* 0x0008 */
	u64 inst_calls_list[4];			/* 0x0010 */
	u64 reserved30[2];			/* 0x0030 */
	u64 uv_base_stor_len;			/* 0x0040 */
	u64 reserved48;				/* 0x0048 */
	u64 conf_base_phys_stor_len;		/* 0x0050 */
	u64 conf_base_virt_stor_len;		/* 0x0058 */
	u64 conf_virt_var_stor_len;		/* 0x0060 */
	u64 cpu_stor_len;			/* 0x0068 */
	u32 reserved70[3];			/* 0x0070 */
	u32 max_num_sec_conf;			/* 0x007c */
	u64 max_guest_stor_addr;		/* 0x0080 */
	u8  reserved88[0x9e - 0x88];		/* 0x0088 */
	u16 max_guest_cpu_id;			/* 0x009e */
	u64 uv_feature_indications;		/* 0x00a0 */
	u64 reserveda8;				/* 0x00a8 */
	u64 supp_se_hdr_versions;		/* 0x00b0 */
	u64 supp_se_hdr_pcf;			/* 0x00b8 */
	u64 reservedc0;				/* 0x00c0 */
	u64 conf_dump_storage_state_len;	/* 0x00c8 */
	u64 conf_dump_finalize_len;		/* 0x00d0 */
	u64 reservedd8;				/* 0x00d8 */
	u64 supp_att_req_hdr_ver;		/* 0x00e0 */
	u64 supp_att_pflags;			/* 0x00e8 */
	u64 reservedf0;				/* 0x00f0 */
	u64 supp_add_secret_req_ver;		/* 0x00f8 */
	u64 supp_add_secret_pcf;		/* 0x0100 */
	u64 supp_secret_types;			/* 0x0108 */
	u16 max_assoc_secrets;			/* 0x0110 */
	u16 max_retr_secrets;			/* 0x0112 */
	u8 reserved114[0x120 - 0x114];		/* 0x0114 */
} __packed __aligned(8);

struct uv_key_hash {
	u64 dword[4];
} __packed __aligned(8);

#define UVC_QUERY_KEYS_IDX_HK		0
#define UVC_QUERY_KEYS_IDX_BACK_HK	1

/* Query Ultravisor Keys */
struct uv_cb_query_keys {
	struct uv_cb_header header;		/* 0x0000 */
	u64 reserved08[3];			/* 0x0008 */
	struct uv_key_hash key_hashes[15];	/* 0x0020 */
} __packed __aligned(8);
static_assert(sizeof(struct uv_cb_query_keys) == 0x200);

/* Initialize Ultravisor */
struct uv_cb_init {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 stor_origin;
	u64 stor_len;
	u64 reserved28[4];
} __packed __aligned(8);

/* Create Guest Configuration */
struct uv_cb_cgc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 conf_base_stor_origin;
	u64 conf_virt_stor_origin;
	u8  reserved30[6];
	union {
		struct {
			u16 : 14;
			u16 ap_instr_intr : 1;
			u16 ap_allow_instr : 1;
		};
		u16 raw;
	} flags;
	u64 guest_stor_origin;
	u64 guest_stor_len;
	u64 guest_sca;
	u64 guest_asce;
	u64 reserved58[5];
} __packed __aligned(8);

/* Create Secure CPU */
struct uv_cb_csc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u64 guest_handle;
	u64 stor_origin;
	u8  reserved30[6];
	u16 num;
	u64 state_origin;
	u64 reserved40[4];
} __packed __aligned(8);

/* Convert to Secure */
struct uv_cb_cts {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;
} __packed __aligned(8);

/* Convert from Secure / Pin Page Shared */
struct uv_cb_cfs {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 paddr;
} __packed __aligned(8);

/* Set Secure Config Parameter */
struct uv_cb_ssc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 sec_header_origin;
	u32 sec_header_len;
	u32 reserved2c;
	u64 reserved30[4];
} __packed __aligned(8);

/* Unpack */
struct uv_cb_unp {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;
	u64 tweak[2];
	u64 reserved38[3];
} __packed __aligned(8);

#define PV_CPU_STATE_OPR	1
#define PV_CPU_STATE_STP	2
#define PV_CPU_STATE_CHKSTP	3
#define PV_CPU_STATE_OPR_LOAD	5

struct uv_cb_cpu_set_state {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u8  reserved20[7];
	u8  state;
	u64 reserved28[5];
};

/*
 * A common UV call struct for calls that take no payload
 * Examples:
 * Destroy cpu/config
 * Verify
 */
struct uv_cb_nodata {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 handle;
	u64 reserved20[4];
} __packed __aligned(8);

/* Destroy Configuration Fast */
struct uv_cb_destroy_fast {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 handle;
	u64 reserved20[5];
} __packed __aligned(8);

/* Set Shared Access */
struct uv_cb_share {
	struct uv_cb_header header;
	u64 reserved08[3];
	u64 paddr;
	u64 reserved28;
} __packed __aligned(8);

/* Retrieve Attestation Measurement */
struct uv_cb_attest {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08[2];		/* 0x0008 */
	u64 arcb_addr;			/* 0x0018 */
	u64 cont_token;			/* 0x0020 */
	u8  reserved28[6];		/* 0x0028 */
	u16 user_data_len;		/* 0x002e */
	u8  user_data[256];		/* 0x0030 */
	u32 reserved130[3];		/* 0x0130 */
	u32 meas_len;			/* 0x013c */
	u64 meas_addr;			/* 0x0140 */
	u8  config_uid[16];		/* 0x0148 */
	u32 reserved158;		/* 0x0158 */
	u32 add_data_len;		/* 0x015c */
	u64 add_data_addr;		/* 0x0160 */
	u64 reserved168[4];		/* 0x0168 */
} __packed __aligned(8);

struct uv_cb_dump_cpu {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u64 dump_area_origin;
	u64 reserved28[5];
} __packed __aligned(8);

struct uv_cb_dump_stor_state {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 config_handle;
	u64 dump_area_origin;
	u64 gaddr;
	u64 reserved28[4];
} __packed __aligned(8);

struct uv_cb_dump_complete {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 config_handle;
	u64 dump_area_origin;
	u64 reserved30[5];
} __packed __aligned(8);

/*
 * A common UV call struct for pv guests that contains a single address
 * Examples:
 * Add Secret
 */
struct uv_cb_guest_addr {
	struct uv_cb_header header;
	u64 reserved08[3];
	u64 addr;
	u64 reserved28[4];
} __packed __aligned(8);

#define UVC_RC_RETR_SECR_BUF_SMALL	0x0109
#define UVC_RC_RETR_SECR_STORE_EMPTY	0x010f
#define UVC_RC_RETR_SECR_INV_IDX	0x0110
#define UVC_RC_RETR_SECR_INV_SECRET	0x0111

struct uv_cb_retr_secr {
	struct uv_cb_header header;
	u64 reserved08[2];
	u16 secret_idx;
	u16 reserved1a;
	u32 buf_size;
	u64 buf_addr;
	u64 reserved28[4];
} __packed __aligned(8);

struct uv_cb_list_secrets {
	struct uv_cb_header header;
	u64 reserved08[2];
	u8  reserved18[6];
	u16 start_idx;
	u64 list_addr;
	u64 reserved28[4];
} __packed __aligned(8);

enum uv_secret_types {
	UV_SECRET_INVAL = 0x0,
	UV_SECRET_NULL = 0x1,
	UV_SECRET_ASSOCIATION = 0x2,
	UV_SECRET_PLAIN = 0x3,
	UV_SECRET_AES_128 = 0x4,
	UV_SECRET_AES_192 = 0x5,
	UV_SECRET_AES_256 = 0x6,
	UV_SECRET_AES_XTS_128 = 0x7,
	UV_SECRET_AES_XTS_256 = 0x8,
	UV_SECRET_HMAC_SHA_256 = 0x9,
	UV_SECRET_HMAC_SHA_512 = 0xa,
	/* 0x0b - 0x10 reserved */
	UV_SECRET_ECDSA_P256 = 0x11,
	UV_SECRET_ECDSA_P384 = 0x12,
	UV_SECRET_ECDSA_P521 = 0x13,
	UV_SECRET_ECDSA_ED25519 = 0x14,
	UV_SECRET_ECDSA_ED448 = 0x15,
};

/**
 * struct uv_secret_list_item_hdr - UV secret metadata.
 * @index: Index of the secret in the secret list.
 * @type: Type of the secret. See `enum uv_secret_types`.
 * @length: Length of the stored secret.
 */
struct uv_secret_list_item_hdr {
	u16 index;
	u16 type;
	u32 length;
} __packed __aligned(8);

#define UV_SECRET_ID_LEN 32
/**
 * struct uv_secret_list_item - UV secret entry.
 * @hdr: The metadata of this secret.
 * @id: The ID of this secret, not the secret itself.
 */
struct uv_secret_list_item {
	struct uv_secret_list_item_hdr hdr;
	u64 reserved08;
	u8 id[UV_SECRET_ID_LEN];
} __packed __aligned(8);

/**
 * struct uv_secret_list - UV secret-metadata list.
 * @num_secr_stored: Number of secrets stored in this list.
 * @total_num_secrets: Number of secrets stored in the UV for this guest.
 * @next_secret_idx: Index to use as the start index for the next List Secrets
 *		     call if more secrets are available, zero otherwise.
 * @secrets: Up to 85 UV-secret metadata entries.
 */
struct uv_secret_list {
	u16 num_secr_stored;
	u16 total_num_secrets;
	u16 next_secret_idx;
	u16 reserved_06;
	u64 reserved_08;
	struct uv_secret_list_item secrets[85];
} __packed __aligned(8);
static_assert(sizeof(struct uv_secret_list) == PAGE_SIZE);

static inline int __uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	asm volatile(
		"	.insn	 rrf,0xb9a40000,%[r1],%[r2],0,0\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [r1] "a" (r1), [r2] "a" (r2)
		: CC_CLOBBER_LIST("memory"));
	return CC_TRANSFORM(cc);
}

static inline int uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
	} while (cc > 1);
	return cc;
}
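
/*
 * Illustrative sketch only (uv_example_query_info() is a hypothetical
 * helper, not part of this header): issue a Query Ultravisor Information
 * UVC through uv_call().  The control block carries its own length and
 * command code; this simplified success check only accepts UVC_RC_EXECUTED.
 */
static inline int uv_example_query_info(struct uv_cb_qui *uvcb)
{
	*uvcb = (struct uv_cb_qui) {
		.header.cmd = UVC_CMD_QUI,
		.header.len = sizeof(*uvcb),
	};

	/* A non-zero cc or an unexpected response code means the query failed */
	if (uv_call(0, (u64)uvcb) || uvcb->header.rc != UVC_RC_EXECUTED)
		return -EINVAL;
	return 0;
}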

/* Variant of uv_call() that reschedules to avoid stalls on long-running busy conditions */
static inline int uv_call_sched(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
		cond_resched();
	} while (cc > 1);
	return cc;
}

/*
 * Special variant of uv_call that only transports the cpu or guest
 * handle and the command, like destroy or verify.
 */
static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
{
	struct uv_cb_nodata uvcb = {
		.header.cmd = cmd,
		.header.len = sizeof(uvcb),
		.handle = handle,
	};
	int cc;

	WARN(!handle, "No handle provided to Ultravisor call cmd %x\n", cmd);
	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	return cc ? -EINVAL : 0;
}
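
/*
 * Illustrative sketch only (uv_example_destroy_sec_cpu() is a hypothetical
 * wrapper, not part of this header): commands such as Destroy Secure CPU
 * carry nothing but a handle, so they map directly onto uv_cmd_nodata().
 */
static inline int uv_example_destroy_sec_cpu(u64 cpu_handle, u16 *rc, u16 *rrc)
{
	return uv_cmd_nodata(cpu_handle, UVC_CMD_DESTROY_SEC_CPU, rc, rrc);
}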

/**
 * uv_list_secrets() - Do a List Secrets UVC.
 *
 * @buf: Buffer to write the list into; must be one page in size.
 * @start_idx: The smallest index that should be included in the list.
 *		For the first invocation use 0.
 * @rc: Pointer to store the return code or NULL.
 * @rrc: Pointer to store the return reason code or NULL.
 *
 * This function calls the List Secrets UVC. The result is written into `buf`,
 * which needs to be at least one page of writable memory.
 * `buf` is filled with a `struct uv_secret_list`: the list-header fields
 * followed by multiple `struct uv_secret_list_item` entries.
 *
 * For `start_idx` use _0_ for the first call. If more secrets are available
 * than fit into the page, `rc` is `UVC_RC_MORE_DATA`.
 * In this case use `uv_secret_list.next_secret_idx` as `start_idx` for the
 * next call.
 *
 * Context: might sleep.
 *
 * Return: The UVC condition code.
 */
static inline int uv_list_secrets(struct uv_secret_list *buf, u16 start_idx,
				  u16 *rc, u16 *rrc)
{
	struct uv_cb_list_secrets uvcb = {
		.header.len = sizeof(uvcb),
		.header.cmd = UVC_CMD_LIST_SECRETS,
		.start_idx = start_idx,
		.list_addr = (u64)buf,
	};
	int cc = uv_call_sched(0, (u64)&uvcb);

	if (rc)
		*rc = uvcb.header.rc;
	if (rrc)
		*rrc = uvcb.header.rrc;

	return cc;
}
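
/*
 * Illustrative sketch only (uv_example_for_each_secret() and the visit()
 * callback are hypothetical, not part of this header): walk all secret
 * metadata entries by repeating the List Secrets UVC with next_secret_idx
 * while the response code is UVC_RC_MORE_DATA, as described above.
 */
static inline int uv_example_for_each_secret(struct uv_secret_list *list,
					     void (*visit)(const struct uv_secret_list_item *item))
{
	u16 rc, idx = 0;
	int cc, i;

	do {
		cc = uv_list_secrets(list, idx, &rc, NULL);
		if (cc)
			return cc;
		for (i = 0; i < list->num_secr_stored; i++)
			visit(&list->secrets[i]);
		idx = list->next_secret_idx;
	} while (rc == UVC_RC_MORE_DATA);

	return 0;
}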

struct uv_info {
	unsigned long inst_calls_list[4];
	unsigned long uv_base_stor_len;
	unsigned long guest_base_stor_len;
	unsigned long guest_virt_base_stor_len;
	unsigned long guest_virt_var_stor_len;
	unsigned long guest_cpu_stor_len;
	unsigned long max_sec_stor_addr;
	unsigned int max_num_sec_conf;
	unsigned short max_guest_cpu_id;
	unsigned long uv_feature_indications;
	unsigned long supp_se_hdr_ver;
	unsigned long supp_se_hdr_pcf;
	unsigned long conf_dump_storage_state_len;
	unsigned long conf_dump_finalize_len;
	unsigned long supp_att_req_hdr_ver;
	unsigned long supp_att_pflags;
	unsigned long supp_add_secret_req_ver;
	unsigned long supp_add_secret_pcf;
	unsigned long supp_secret_types;
	unsigned short max_assoc_secrets;
	unsigned short max_retr_secrets;
};

extern struct uv_info uv_info;

static inline bool uv_has_feature(u8 feature_bit)
{
	if (feature_bit >= sizeof(uv_info.uv_feature_indications) * 8)
		return false;
	return test_bit_inv(feature_bit, &uv_info.uv_feature_indications);
}
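
/*
 * Illustrative sketch only (uv_example_ap_intr_available() is hypothetical):
 * feature checks simply test bits of uv_info.uv_feature_indications, e.g.
 * whether AP instructions and AP interruptions are supported.
 */
static inline bool uv_example_ap_intr_available(void)
{
	return uv_has_feature(BIT_UV_FEAT_AP) &&
	       uv_has_feature(BIT_UV_FEAT_AP_INTR);
}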

extern int prot_virt_guest;

static inline int is_prot_virt_guest(void)
{
	return prot_virt_guest;
}

static inline int share(unsigned long addr, u16 cmd)
{
	struct uv_cb_share uvcb = {
		.header.cmd = cmd,
		.header.len = sizeof(uvcb),
		.paddr = addr
	};

	if (!is_prot_virt_guest())
		return -EOPNOTSUPP;
	/*
	 * Sharing is page-wise; if we encounter an address that is not page
	 * aligned, we assume something went wrong. If malloc'ed structs were
	 * passed to this function, we could leak data to the hypervisor.
	 */
	BUG_ON(addr & ~PAGE_MASK);

	if (!uv_call(0, (u64)&uvcb))
		return 0;
	pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n",
	       uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare",
	       uvcb.header.rc, uvcb.header.rrc);
	panic("System security cannot be guaranteed unless the system panics now.\n");
}

/*
 * Guest 2 request to the Ultravisor to make a page shared with the
 * hypervisor for IO.
 *
 * @addr: Real or absolute address of the page to be shared
 */
static inline int uv_set_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_SET_SHARED_ACCESS);
}

/*
 * Guest 2 request to the Ultravisor to make a page unshared.
 *
 * @addr: Real or absolute address of the page to be unshared
 */
static inline int uv_remove_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
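
/*
 * Illustrative sketch only (uv_example_share_page()/uv_example_unshare_page()
 * are hypothetical helpers): a protected guest shares a page-aligned buffer
 * with the hypervisor for I/O and unshares it again before reuse.  The UV
 * calls take real/absolute addresses, hence the __pa() conversion.
 */
static inline int uv_example_share_page(unsigned long vaddr)
{
	return uv_set_shared(__pa(vaddr));
}

static inline int uv_example_unshare_page(unsigned long vaddr)
{
	return uv_remove_shared(__pa(vaddr));
}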

int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
			   struct uv_secret_list_item_hdr *secret);
int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size);

extern int prot_virt_host;

static inline int is_prot_virt_host(void)
{
	return prot_virt_host;
}

int uv_pin_shared(unsigned long paddr);
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
int uv_destroy_folio(struct folio *folio);
int uv_destroy_pte(pte_t pte);
int uv_convert_from_secure_pte(pte_t pte);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);

void setup_uv(void);

#endif /* _ASM_S390_UV_H */