/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ultravisor Interfaces
 *
 * Copyright IBM Corp. 2019, 2022
 *
 * Author(s):
 *      Vasily Gorbik <gor@linux.ibm.com>
 *      Janosch Frank <frankja@linux.ibm.com>
 */
#ifndef _ASM_S390_UV_H
#define _ASM_S390_UV_H

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/gmap.h>

#define UVC_CC_OK 0
#define UVC_CC_ERROR 1
#define UVC_CC_BUSY 2
#define UVC_CC_PARTIAL 3

#define UVC_RC_EXECUTED 0x0001
#define UVC_RC_INV_CMD 0x0002
#define UVC_RC_INV_STATE 0x0003
#define UVC_RC_INV_LEN 0x0005
#define UVC_RC_NO_RESUME 0x0007
#define UVC_RC_NEED_DESTROY 0x8000

#define UVC_CMD_QUI 0x0001
#define UVC_CMD_INIT_UV 0x000f
#define UVC_CMD_CREATE_SEC_CONF 0x0100
#define UVC_CMD_DESTROY_SEC_CONF 0x0101
#define UVC_CMD_DESTROY_SEC_CONF_FAST 0x0102
#define UVC_CMD_CREATE_SEC_CPU 0x0120
#define UVC_CMD_DESTROY_SEC_CPU 0x0121
#define UVC_CMD_CONV_TO_SEC_STOR 0x0200
#define UVC_CMD_CONV_FROM_SEC_STOR 0x0201
#define UVC_CMD_DESTR_SEC_STOR 0x0202
#define UVC_CMD_SET_SEC_CONF_PARAMS 0x0300
#define UVC_CMD_UNPACK_IMG 0x0301
#define UVC_CMD_VERIFY_IMG 0x0302
#define UVC_CMD_CPU_RESET 0x0310
#define UVC_CMD_CPU_RESET_INITIAL 0x0311
#define UVC_CMD_PREPARE_RESET 0x0320
#define UVC_CMD_CPU_RESET_CLEAR 0x0321
#define UVC_CMD_CPU_SET_STATE 0x0330
#define UVC_CMD_SET_UNSHARE_ALL 0x0340
#define UVC_CMD_PIN_PAGE_SHARED 0x0341
#define UVC_CMD_UNPIN_PAGE_SHARED 0x0342
#define UVC_CMD_DUMP_INIT 0x0400
#define UVC_CMD_DUMP_CONF_STOR_STATE 0x0401
#define UVC_CMD_DUMP_CPU 0x0402
#define UVC_CMD_DUMP_COMPLETE 0x0403
#define UVC_CMD_SET_SHARED_ACCESS 0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS 0x1001
#define UVC_CMD_RETR_ATTEST 0x1020
#define UVC_CMD_ADD_SECRET 0x1031
#define UVC_CMD_LIST_SECRETS 0x1033
#define UVC_CMD_LOCK_SECRETS 0x1034

/* Bits in installed uv calls */
enum uv_cmds_inst {
        BIT_UVC_CMD_QUI = 0,
        BIT_UVC_CMD_INIT_UV = 1,
        BIT_UVC_CMD_CREATE_SEC_CONF = 2,
        BIT_UVC_CMD_DESTROY_SEC_CONF = 3,
        BIT_UVC_CMD_CREATE_SEC_CPU = 4,
        BIT_UVC_CMD_DESTROY_SEC_CPU = 5,
        BIT_UVC_CMD_CONV_TO_SEC_STOR = 6,
        BIT_UVC_CMD_CONV_FROM_SEC_STOR = 7,
        BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
        BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
        BIT_UVC_CMD_SET_SEC_PARMS = 11,
        BIT_UVC_CMD_UNPACK_IMG = 13,
        BIT_UVC_CMD_VERIFY_IMG = 14,
        BIT_UVC_CMD_CPU_RESET = 15,
        BIT_UVC_CMD_CPU_RESET_INITIAL = 16,
        BIT_UVC_CMD_CPU_SET_STATE = 17,
        BIT_UVC_CMD_PREPARE_RESET = 18,
        BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19,
        BIT_UVC_CMD_UNSHARE_ALL = 20,
        BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
        BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
        BIT_UVC_CMD_DESTROY_SEC_CONF_FAST = 23,
        BIT_UVC_CMD_DUMP_INIT = 24,
        BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25,
        BIT_UVC_CMD_DUMP_CPU = 26,
        BIT_UVC_CMD_DUMP_COMPLETE = 27,
        BIT_UVC_CMD_RETR_ATTEST = 28,
        BIT_UVC_CMD_ADD_SECRET = 29,
        BIT_UVC_CMD_LIST_SECRETS = 30,
        BIT_UVC_CMD_LOCK_SECRETS = 31,
};
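
/*
 * Sketch (an assumption mirroring uv_has_feature() below, not an interface
 * defined in this header): whether a particular UV call is installed can be
 * checked against the bitmap reported by the Query Ultravisor Information
 * call, e.g.
 *
 *      if (test_bit_inv(BIT_UVC_CMD_DUMP_CPU, uv_info.inst_calls_list))
 *              ...;
 */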

enum uv_feat_ind {
        BIT_UV_FEAT_MISC = 0,
        BIT_UV_FEAT_AIV = 1,
        BIT_UV_FEAT_AP = 4,
        BIT_UV_FEAT_AP_INTR = 5,
};

struct uv_cb_header {
        u16 len;
        u16 cmd;        /* Command Code */
        u16 rc;         /* Response Code */
        u16 rrc;        /* Return Reason Code */
} __packed __aligned(8);

/* Query Ultravisor Information */
struct uv_cb_qui {
        struct uv_cb_header header;             /* 0x0000 */
        u64 reserved08;                         /* 0x0008 */
        u64 inst_calls_list[4];                 /* 0x0010 */
        u64 reserved30[2];                      /* 0x0030 */
        u64 uv_base_stor_len;                   /* 0x0040 */
        u64 reserved48;                         /* 0x0048 */
        u64 conf_base_phys_stor_len;            /* 0x0050 */
        u64 conf_base_virt_stor_len;            /* 0x0058 */
        u64 conf_virt_var_stor_len;             /* 0x0060 */
        u64 cpu_stor_len;                       /* 0x0068 */
        u32 reserved70[3];                      /* 0x0070 */
        u32 max_num_sec_conf;                   /* 0x007c */
        u64 max_guest_stor_addr;                /* 0x0080 */
        u8 reserved88[0x9e - 0x88];             /* 0x0088 */
        u16 max_guest_cpu_id;                   /* 0x009e */
        u64 uv_feature_indications;             /* 0x00a0 */
        u64 reserveda8;                         /* 0x00a8 */
        u64 supp_se_hdr_versions;               /* 0x00b0 */
        u64 supp_se_hdr_pcf;                    /* 0x00b8 */
        u64 reservedc0;                         /* 0x00c0 */
        u64 conf_dump_storage_state_len;        /* 0x00c8 */
        u64 conf_dump_finalize_len;             /* 0x00d0 */
        u64 reservedd8;                         /* 0x00d8 */
        u64 supp_att_req_hdr_ver;               /* 0x00e0 */
        u64 supp_att_pflags;                    /* 0x00e8 */
        u64 reservedf0;                         /* 0x00f0 */
        u64 supp_add_secret_req_ver;            /* 0x00f8 */
        u64 supp_add_secret_pcf;                /* 0x0100 */
        u64 supp_secret_types;                  /* 0x0108 */
        u16 max_secrets;                        /* 0x0110 */
        u8 reserved112[0x120 - 0x112];          /* 0x0112 */
} __packed __aligned(8);

/* Initialize Ultravisor */
struct uv_cb_init {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 stor_origin;
        u64 stor_len;
        u64 reserved28[4];
} __packed __aligned(8);

/* Create Guest Configuration */
struct uv_cb_cgc {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 guest_handle;
        u64 conf_base_stor_origin;
        u64 conf_virt_stor_origin;
        u8 reserved30[6];
        union {
                struct {
                        u16 : 14;
                        u16 ap_instr_intr : 1;
                        u16 ap_allow_instr : 1;
                };
                u16 raw;
        } flags;
        u64 guest_stor_origin;
        u64 guest_stor_len;
        u64 guest_sca;
        u64 guest_asce;
        u64 reserved58[5];
} __packed __aligned(8);

/* Create Secure CPU */
struct uv_cb_csc {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 cpu_handle;
        u64 guest_handle;
        u64 stor_origin;
        u8 reserved30[6];
        u16 num;
        u64 state_origin;
        u64 reserved40[4];
} __packed __aligned(8);

/* Convert to Secure */
struct uv_cb_cts {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 guest_handle;
        u64 gaddr;
} __packed __aligned(8);

/* Convert from Secure / Pin Page Shared */
struct uv_cb_cfs {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 paddr;
} __packed __aligned(8);

/* Set Secure Config Parameter */
struct uv_cb_ssc {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 guest_handle;
        u64 sec_header_origin;
        u32 sec_header_len;
        u32 reserved2c;
        u64 reserved30[4];
} __packed __aligned(8);

/* Unpack */
struct uv_cb_unp {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 guest_handle;
        u64 gaddr;
        u64 tweak[2];
        u64 reserved38[3];
} __packed __aligned(8);

#define PV_CPU_STATE_OPR 1
#define PV_CPU_STATE_STP 2
#define PV_CPU_STATE_CHKSTP 3
#define PV_CPU_STATE_OPR_LOAD 5

struct uv_cb_cpu_set_state {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 cpu_handle;
        u8 reserved20[7];
        u8 state;
        u64 reserved28[5];
};
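
/*
 * Sketch (an assumption, not something declared in this header): the host
 * would typically request a vcpu state change roughly as follows, where
 * cpu_handle and cc are hypothetical caller-provided variables:
 *
 *      struct uv_cb_cpu_set_state uvcb = {
 *              .header.len = sizeof(uvcb),
 *              .header.cmd = UVC_CMD_CPU_SET_STATE,
 *              .cpu_handle = cpu_handle,
 *              .state = PV_CPU_STATE_STP,
 *      };
 *      cc = uv_call(0, (u64)&uvcb);
 */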

/*
 * A common UV call struct for calls that take no payload
 * Examples:
 * Destroy cpu/config
 * Verify
 */
struct uv_cb_nodata {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 handle;
        u64 reserved20[4];
} __packed __aligned(8);

/* Destroy Configuration Fast */
struct uv_cb_destroy_fast {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 handle;
        u64 reserved20[5];
} __packed __aligned(8);

/* Set Shared Access */
struct uv_cb_share {
        struct uv_cb_header header;
        u64 reserved08[3];
        u64 paddr;
        u64 reserved28;
} __packed __aligned(8);

/* Retrieve Attestation Measurement */
struct uv_cb_attest {
        struct uv_cb_header header;     /* 0x0000 */
        u64 reserved08[2];              /* 0x0008 */
        u64 arcb_addr;                  /* 0x0018 */
        u64 cont_token;                 /* 0x0020 */
        u8 reserved28[6];               /* 0x0028 */
        u16 user_data_len;              /* 0x002e */
        u8 user_data[256];              /* 0x0030 */
        u32 reserved130[3];             /* 0x0130 */
        u32 meas_len;                   /* 0x013c */
        u64 meas_addr;                  /* 0x0140 */
        u8 config_uid[16];              /* 0x0148 */
        u32 reserved158;                /* 0x0158 */
        u32 add_data_len;               /* 0x015c */
        u64 add_data_addr;              /* 0x0160 */
        u64 reserved168[4];             /* 0x0168 */
} __packed __aligned(8);

struct uv_cb_dump_cpu {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 cpu_handle;
        u64 dump_area_origin;
        u64 reserved28[5];
} __packed __aligned(8);

struct uv_cb_dump_stor_state {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 config_handle;
        u64 dump_area_origin;
        u64 gaddr;
        u64 reserved28[4];
} __packed __aligned(8);

struct uv_cb_dump_complete {
        struct uv_cb_header header;
        u64 reserved08[2];
        u64 config_handle;
        u64 dump_area_origin;
        u64 reserved30[5];
} __packed __aligned(8);

/*
 * A common UV call struct for pv guests that contains a single address
 * Examples:
 * Add Secret
 * List Secrets
 */
struct uv_cb_guest_addr {
        struct uv_cb_header header;
        u64 reserved08[3];
        u64 addr;
        u64 reserved28[4];
} __packed __aligned(8);
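
/*
 * Sketch (an assumption, not an interface defined here): a List Secrets
 * request could be built roughly as
 *
 *      struct uv_cb_guest_addr uvcb = {
 *              .header.len = sizeof(uvcb),
 *              .header.cmd = UVC_CMD_LIST_SECRETS,
 *              .addr = (u64)buf,
 *      };
 *      cc = uv_call_sched(0, (u64)&uvcb);
 *
 * where buf is a hypothetical, suitably aligned buffer and cc the returned
 * condition code.
 */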

static inline int __uv_call(unsigned long r1, unsigned long r2)
{
        int cc;

        asm volatile(
                " .insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
                " ipm %[cc]\n"
                " srl %[cc],28\n"
                : [cc] "=d" (cc)
                : [r1] "a" (r1), [r2] "a" (r2)
                : "memory", "cc");
        return cc;
}
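
/*
 * __uv_call() issues a single UVC (Ultravisor Call, opcode 0xb9a4),
 * passing the address of a command control block in r2, and returns the
 * condition code (UVC_CC_*). The wrappers below retry while the
 * Ultravisor reports busy or partial completion (cc > 1).
 */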

static inline int uv_call(unsigned long r1, unsigned long r2)
{
        int cc;

        do {
                cc = __uv_call(r1, r2);
        } while (cc > 1);
        return cc;
}

/* Low level uv_call that avoids stalls for long running busy conditions */
static inline int uv_call_sched(unsigned long r1, unsigned long r2)
{
        int cc;

        do {
                cc = __uv_call(r1, r2);
                cond_resched();
        } while (cc > 1);
        return cc;
}

/*
 * special variant of uv_call that only transports the cpu or guest
 * handle and the command, like destroy or verify.
 */
static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
{
        struct uv_cb_nodata uvcb = {
                .header.cmd = cmd,
                .header.len = sizeof(uvcb),
                .handle = handle,
        };
        int cc;

        WARN(!handle, "No handle provided to Ultravisor call cmd %x\n", cmd);
        cc = uv_call_sched(0, (u64)&uvcb);
        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;
        return cc ? -EINVAL : 0;
}
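
/*
 * Sketch (an assumption, not a definition from this header): destroying a
 * secure configuration or CPU then reduces to something like
 *
 *      uv_cmd_nodata(handle, UVC_CMD_DESTROY_SEC_CONF, &rc, &rrc);
 *
 * where handle, rc and rrc are hypothetical caller-provided variables.
 */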

struct uv_info {
        unsigned long inst_calls_list[4];
        unsigned long uv_base_stor_len;
        unsigned long guest_base_stor_len;
        unsigned long guest_virt_base_stor_len;
        unsigned long guest_virt_var_stor_len;
        unsigned long guest_cpu_stor_len;
        unsigned long max_sec_stor_addr;
        unsigned int max_num_sec_conf;
        unsigned short max_guest_cpu_id;
        unsigned long uv_feature_indications;
        unsigned long supp_se_hdr_ver;
        unsigned long supp_se_hdr_pcf;
        unsigned long conf_dump_storage_state_len;
        unsigned long conf_dump_finalize_len;
        unsigned long supp_att_req_hdr_ver;
        unsigned long supp_att_pflags;
        unsigned long supp_add_secret_req_ver;
        unsigned long supp_add_secret_pcf;
        unsigned long supp_secret_types;
        unsigned short max_secrets;
};

extern struct uv_info uv_info;

static inline bool uv_has_feature(u8 feature_bit)
{
        if (feature_bit >= sizeof(uv_info.uv_feature_indications) * 8)
                return false;
        return test_bit_inv(feature_bit, &uv_info.uv_feature_indications);
}
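
/*
 * Typical use (sketch, not mandated by this header): gate feature dependent
 * code paths on what the Ultravisor reports, e.g.
 *
 *      if (uv_has_feature(BIT_UV_FEAT_AP))
 *              ...;
 */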

extern int prot_virt_guest;

static inline int is_prot_virt_guest(void)
{
        return prot_virt_guest;
}

static inline int share(unsigned long addr, u16 cmd)
{
        struct uv_cb_share uvcb = {
                .header.cmd = cmd,
                .header.len = sizeof(uvcb),
                .paddr = addr
        };

        if (!is_prot_virt_guest())
                return -EOPNOTSUPP;
        /*
         * Sharing is page wise; if we encounter addresses that are
         * not page aligned, we assume something went wrong. If
         * malloced structs are passed to this function, we could leak
         * data to the hypervisor.
         */
        BUG_ON(addr & ~PAGE_MASK);

        if (!uv_call(0, (u64)&uvcb))
                return 0;
        pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n",
               uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare",
               uvcb.header.rc, uvcb.header.rrc);
        panic("System security cannot be guaranteed unless the system panics now.\n");
}

/*
 * Guest 2 request to the Ultravisor to make a page shared with the
 * hypervisor for IO.
 *
 * @addr: Real or absolute address of the page to be shared
 */
static inline int uv_set_shared(unsigned long addr)
{
        return share(addr, UVC_CMD_SET_SHARED_ACCESS);
}
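
/*
 * Sketch (an assumption about callers, not part of this interface): a
 * driver would share a page aligned buffer before handing it to the
 * hypervisor and unshare it again when done, e.g.
 *
 *      uv_set_shared(__pa(buf));
 *      ... perform the I/O on buf ...
 *      uv_remove_shared(__pa(buf));
 */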

/*
 * Guest 2 request to the Ultravisor to make a page unshared.
 *
 * @addr: Real or absolute address of the page to be unshared
 */
static inline int uv_remove_shared(unsigned long addr)
{
        return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}

extern int prot_virt_host;

static inline int is_prot_virt_host(void)
{
        return prot_virt_host;
}

int uv_pin_shared(unsigned long paddr);
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
int uv_destroy_folio(struct folio *folio);
int uv_destroy_pte(pte_t pte);
int uv_convert_from_secure_pte(pte_t pte);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);

void setup_uv(void);

#endif /* _ASM_S390_UV_H */