/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#ifndef __ASM_ENCRYPTED_STATE_H
#define __ASM_ENCRYPTED_STATE_H

#include <linux/types.h>
#include <linux/sev-guest.h>

#include <asm/insn.h>
#include <asm/sev-common.h>
#include <asm/coco.h>
#include <asm/set_memory.h>

#define GHCB_PROTOCOL_MIN	1ULL
#define GHCB_PROTOCOL_MAX	2ULL
#define GHCB_DEFAULT_USAGE	0ULL

#define VMGEXIT()		{ asm volatile("rep; vmmcall\n\r"); }

struct boot_params;

enum es_result {
	ES_OK,			/* All good */
	ES_UNSUPPORTED,		/* Requested operation not supported */
	ES_VMM_ERROR,		/* Unexpected state from the VMM */
	ES_DECODE_FAILED,	/* Instruction decoding failed */
	ES_EXCEPTION,		/* Instruction caused exception */
	ES_RETRY,		/* Retry instruction emulation */
};

struct es_fault_info {
	unsigned long vector;
	unsigned long error_code;
	unsigned long cr2;
};

struct pt_regs;

/* ES instruction emulation context */
struct es_em_ctxt {
	struct pt_regs *regs;
	struct insn insn;
	struct es_fault_info fi;
};

/*
 * AMD SEV Confidential computing blob structure. The structure is
 * defined in the OVMF UEFI firmware header:
 * https://github.com/tianocore/edk2/blob/master/OvmfPkg/Include/Guid/ConfidentialComputingSevSnpBlob.h
 */
#define CC_BLOB_SEV_HDR_MAGIC	0x45444d41
struct cc_blob_sev_info {
	u32 magic;
	u16 version;
	u16 reserved;
	u64 secrets_phys;
	u32 secrets_len;
	u32 rsvd1;
	u64 cpuid_phys;
	u32 cpuid_len;
	u32 rsvd2;
} __packed;

void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code);

static inline u64 lower_bits(u64 val, unsigned int bits)
{
	u64 mask = (1ULL << bits) - 1;

	return (val & mask);
}

struct real_mode_header;
enum stack_type;

/* Early IDT entry points for #VC handler */
extern void vc_no_ghcb(void);
extern void vc_boot_ghcb(void);
extern bool handle_vc_boot_ghcb(struct pt_regs *regs);

/* PVALIDATE return codes */
#define PVALIDATE_FAIL_SIZEMISMATCH	6

/* Software defined (when rFlags.CF = 1) */
#define PVALIDATE_FAIL_NOUPDATE		255

/* RMPUPDATE detected 4K page and 2MB page overlap. */
#define RMPUPDATE_FAIL_OVERLAP		4

/* PSMASH failed due to concurrent access by another CPU */
#define PSMASH_FAIL_INUSE		3

/* RMP page size */
#define RMP_PG_SIZE_4K			0
#define RMP_PG_SIZE_2M			1
#define RMP_TO_PG_LEVEL(level)		(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
#define PG_LEVEL_TO_RMP(level)		(((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)
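
/*
 * Illustrative sketch only, not part of this header's API: callers that
 * validate a 2MB region typically retry with RMP_PG_SIZE_4K when PVALIDATE
 * reports PVALIDATE_FAIL_SIZEMISMATCH (the requested page size does not
 * match the backing RMP entry). The helper name below is hypothetical.
 */
static inline bool pvalidate_needs_4k_retry(int rc, int rmp_psize)
{
	/* Only a 2MB request can be split into 4K retries */
	return rmp_psize == RMP_PG_SIZE_2M && rc == PVALIDATE_FAIL_SIZEMISMATCH;
}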

struct rmp_state {
	u64 gpa;
	u8 assigned;
	u8 pagesize;
	u8 immutable;
	u8 rsvd;
	u32 asid;
} __packed;

#define RMPADJUST_VMSA_PAGE_BIT		BIT(16)

/* SNP Guest message request */
struct snp_req_data {
	unsigned long req_gpa;
	unsigned long resp_gpa;
	unsigned long data_gpa;
	unsigned int data_npages;
};

#define MAX_AUTHTAG_LEN		32
#define AUTHTAG_LEN		16
#define AAD_LEN			48
#define MSG_HDR_VER		1

#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
#define SNP_REQ_RETRY_DELAY		(2*HZ)

/* See the SNP spec SNP_GUEST_REQUEST section for the structure */
enum msg_type {
	SNP_MSG_TYPE_INVALID = 0,
	SNP_MSG_CPUID_REQ,
	SNP_MSG_CPUID_RSP,
	SNP_MSG_KEY_REQ,
	SNP_MSG_KEY_RSP,
	SNP_MSG_REPORT_REQ,
	SNP_MSG_REPORT_RSP,
	SNP_MSG_EXPORT_REQ,
	SNP_MSG_EXPORT_RSP,
	SNP_MSG_IMPORT_REQ,
	SNP_MSG_IMPORT_RSP,
	SNP_MSG_ABSORB_REQ,
	SNP_MSG_ABSORB_RSP,
	SNP_MSG_VMRK_REQ,
	SNP_MSG_VMRK_RSP,

	SNP_MSG_TSC_INFO_REQ = 17,
	SNP_MSG_TSC_INFO_RSP,

	SNP_MSG_TYPE_MAX
};

enum aead_algo {
	SNP_AEAD_INVALID,
	SNP_AEAD_AES_256_GCM,
};

struct snp_guest_msg_hdr {
	u8 authtag[MAX_AUTHTAG_LEN];
	u64 msg_seqno;
	u8 rsvd1[8];
	u8 algo;
	u8 hdr_version;
	u16 hdr_sz;
	u8 msg_type;
	u8 msg_version;
	u16 msg_sz;
	u32 rsvd2;
	u8 msg_vmpck;
	u8 rsvd3[35];
} __packed;

struct snp_guest_msg {
	struct snp_guest_msg_hdr hdr;
	u8 payload[PAGE_SIZE - sizeof(struct snp_guest_msg_hdr)];
} __packed;

#define SNP_TSC_INFO_REQ_SZ	128

struct snp_tsc_info_req {
	u8 rsvd[SNP_TSC_INFO_REQ_SZ];
} __packed;

struct snp_tsc_info_resp {
	u32 status;
	u32 rsvd1;
	u64 tsc_scale;
	u64 tsc_offset;
	u32 tsc_factor;
	u8 rsvd2[100];
} __packed;

struct snp_guest_req {
	void *req_buf;
	size_t req_sz;

	void *resp_buf;
	size_t resp_sz;

	u64 exit_code;
	unsigned int vmpck_id;
	u8 msg_version;
	u8 msg_type;
};

/*
 * The secrets page contains 96 bytes of reserved fields that can be used by
 * the guest OS. The guest OS uses this area to save the message sequence
 * number for each VMPCK.
 *
 * See the GHCB spec section Secret page layout for the format of this area.
 */
struct secrets_os_area {
	u32 msg_seqno_0;
	u32 msg_seqno_1;
	u32 msg_seqno_2;
	u32 msg_seqno_3;
	u64 ap_jump_table_pa;
	u8 rsvd[40];
	u8 guest_usage[32];
} __packed;

#define VMPCK_KEY_LEN		32

/* See the SNP spec version 0.9 for the secrets page format */
struct snp_secrets_page {
	u32 version;
	u32 imien	: 1,
	    rsvd1	: 31;
	u32 fms;
	u32 rsvd2;
	u8 gosvw[16];
	u8 vmpck0[VMPCK_KEY_LEN];
	u8 vmpck1[VMPCK_KEY_LEN];
	u8 vmpck2[VMPCK_KEY_LEN];
	u8 vmpck3[VMPCK_KEY_LEN];
	struct secrets_os_area os_area;

	u8 vmsa_tweak_bitmap[64];

	/* SVSM fields */
	u64 svsm_base;
	u64 svsm_size;
	u64 svsm_caa;
	u32 svsm_max_version;
	u8 svsm_guest_vmpl;
	u8 rsvd3[3];

	/* Remainder of page */
	u8 rsvd4[3744];
} __packed;
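
/*
 * Illustrative sketch only, not part of this header's API: selecting the
 * VMPCK key for a given VMPCK ID out of the secrets page layout above. The
 * matching sequence number lives in os_area.msg_seqno_<id>. The helper name
 * below is hypothetical; the real lookup lives in the SEV guest code.
 */
static inline u8 *snp_secrets_get_vmpck_example(struct snp_secrets_page *secrets,
						int vmpck_id)
{
	switch (vmpck_id) {
	case 0: return secrets->vmpck0;
	case 1: return secrets->vmpck1;
	case 2: return secrets->vmpck2;
	case 3: return secrets->vmpck3;
	default: return NULL;
	}
}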

struct snp_msg_desc {
	/* request and response are in unencrypted memory */
	struct snp_guest_msg *request, *response;

	/*
	 * Avoid information leakage by double-buffering shared messages
	 * in fields that are in regular encrypted memory.
	 */
	struct snp_guest_msg secret_request, secret_response;

	struct snp_secrets_page *secrets;
	struct snp_req_data input;

	void *certs_data;

	struct aesgcm_ctx *ctx;

	u32 *os_area_msg_seqno;
	u8 *vmpck;
	int vmpck_id;
};

/*
 * The SVSM Calling Area (CA) related structures.
 */
struct svsm_ca {
	u8 call_pending;
	u8 mem_available;
	u8 rsvd1[6];

	u8 svsm_buffer[PAGE_SIZE - 8];
};

#define SVSM_SUCCESS				0
#define SVSM_ERR_INCOMPLETE			0x80000000
#define SVSM_ERR_UNSUPPORTED_PROTOCOL		0x80000001
#define SVSM_ERR_UNSUPPORTED_CALL		0x80000002
#define SVSM_ERR_INVALID_ADDRESS		0x80000003
#define SVSM_ERR_INVALID_FORMAT			0x80000004
#define SVSM_ERR_INVALID_PARAMETER		0x80000005
#define SVSM_ERR_INVALID_REQUEST		0x80000006
#define SVSM_ERR_BUSY				0x80000007
#define SVSM_PVALIDATE_FAIL_SIZEMISMATCH	0x80001006

/*
 * The SVSM PVALIDATE related structures
 */
struct svsm_pvalidate_entry {
	u64 page_size		: 2,
	    action		: 1,
	    ignore_cf		: 1,
	    rsvd		: 8,
	    pfn			: 52;
};

struct svsm_pvalidate_call {
	u16 num_entries;
	u16 cur_index;

	u8 rsvd1[4];

	struct svsm_pvalidate_entry entry[];
};

#define SVSM_PVALIDATE_MAX_COUNT	((sizeof_field(struct svsm_ca, svsm_buffer) -		\
					  offsetof(struct svsm_pvalidate_call, entry)) /	\
					 sizeof(struct svsm_pvalidate_entry))

/*
 * The SVSM Attestation related structures
 */
struct svsm_loc_entry {
	u64 pa;
	u32 len;
	u8 rsvd[4];
};

struct svsm_attest_call {
	struct svsm_loc_entry report_buf;
	struct svsm_loc_entry nonce;
	struct svsm_loc_entry manifest_buf;
	struct svsm_loc_entry certificates_buf;

	/* For attesting a single service */
	u8 service_guid[16];
	u32 service_manifest_ver;
	u8 rsvd[4];
};
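
/*
 * Illustrative sketch only, not part of this header's API: filling the SVSM
 * PVALIDATE request buffer (typically placed in the CA's svsm_buffer) for a
 * run of contiguous 4K pages, bounded by SVSM_PVALIDATE_MAX_COUNT. Function
 * and parameter names are hypothetical.
 */
static inline void svsm_pvalidate_fill_example(struct svsm_pvalidate_call *pc,
					       unsigned long first_pfn,
					       unsigned int npages, bool validate)
{
	unsigned int i;

	if (npages > SVSM_PVALIDATE_MAX_COUNT)
		npages = SVSM_PVALIDATE_MAX_COUNT;

	pc->num_entries	= npages;
	pc->cur_index	= 0;

	for (i = 0; i < npages; i++) {
		pc->entry[i].page_size	= RMP_PG_SIZE_4K;
		pc->entry[i].action	= validate;	/* 1 = validate, 0 = rescind */
		pc->entry[i].ignore_cf	= 0;
		pc->entry[i].pfn	= first_pfn + i;
	}
}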

/* PTE descriptor used for the prepare_pte_enc() operations. */
struct pte_enc_desc {
	pte_t *kpte;
	int pte_level;
	bool encrypt;
	/* pfn of the kpte above */
	unsigned long pfn;
	/* physical address of @pfn */
	unsigned long pa;
	/* virtual address of @pfn */
	void *va;
	/* memory covered by the pte */
	unsigned long size;
	pgprot_t new_pgprot;
};

/*
 * SVSM protocol structure
 */
struct svsm_call {
	struct svsm_ca *caa;
	u64 rax;
	u64 rcx;
	u64 rdx;
	u64 r8;
	u64 r9;
	u64 rax_out;
	u64 rcx_out;
	u64 rdx_out;
	u64 r8_out;
	u64 r9_out;
};

#define SVSM_CORE_CALL(x)		((0ULL << 32) | (x))
#define SVSM_CORE_REMAP_CA		0
#define SVSM_CORE_PVALIDATE		1
#define SVSM_CORE_CREATE_VCPU		2
#define SVSM_CORE_DELETE_VCPU		3

#define SVSM_ATTEST_CALL(x)		((1ULL << 32) | (x))
#define SVSM_ATTEST_SERVICES		0
#define SVSM_ATTEST_SINGLE_SERVICE	1
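
/*
 * Illustrative sketch only, not part of this header's API: preparing a
 * struct svsm_call for the Core protocol PVALIDATE request. @call is assumed
 * to be zero-initialized by the caller and @pc_pa is the physical address of
 * a struct svsm_pvalidate_call as built above; the helper that actually
 * issues the call lives in the SEV guest code. Names here are hypothetical.
 */
static inline void svsm_call_prepare_pvalidate_example(struct svsm_call *call,
							struct svsm_ca *caa, u64 pc_pa)
{
	call->caa = caa;
	call->rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
	call->rcx = pc_pa;
}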

#ifdef CONFIG_AMD_MEM_ENCRYPT

extern u8 snp_vmpl;

extern void __sev_es_ist_enter(struct pt_regs *regs);
extern void __sev_es_ist_exit(void);
static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
{
	if (cc_vendor == CC_VENDOR_AMD &&
	    cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		__sev_es_ist_enter(regs);
}
static __always_inline void sev_es_ist_exit(void)
{
	if (cc_vendor == CC_VENDOR_AMD &&
	    cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		__sev_es_ist_exit();
}
extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
extern void __sev_es_nmi_complete(void);
static __always_inline void sev_es_nmi_complete(void)
{
	if (cc_vendor == CC_VENDOR_AMD &&
	    cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		__sev_es_nmi_complete();
}
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
extern void sev_enable(struct boot_params *bp);

/*
 * RMPADJUST modifies the RMP permissions of a page of a lesser-
 * privileged (numerically higher) VMPL.
 *
 * If the guest is running at a higher privilege than the privilege
 * level the instruction is targeting, the instruction will succeed;
 * otherwise, it will fail.
 */
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
{
	int rc;

	/* "rmpadjust" mnemonic support in binutils 2.36 and newer */
	asm volatile(".byte 0xF3,0x0F,0x01,0xFE\n\t"
		     : "=a"(rc)
		     : "a"(vaddr), "c"(rmp_psize), "d"(attrs)
		     : "memory", "cc");

	return rc;
}
static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
{
	bool no_rmpupdate;
	int rc;

	/* "pvalidate" mnemonic support in binutils 2.36 and newer */
	asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t"
		     CC_SET(c)
		     : CC_OUT(c) (no_rmpupdate), "=a"(rc)
		     : "a"(vaddr), "c"(rmp_psize), "d"(validate)
		     : "memory", "cc");

	if (no_rmpupdate)
		return PVALIDATE_FAIL_NOUPDATE;

	return rc;
}

struct snp_guest_request_ioctl;

void setup_ghcb(void);
void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
				  unsigned long npages);
void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
				 unsigned long npages);
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void __noreturn snp_abort(void);
void snp_dmi_setup(void);
int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call, struct svsm_attest_call *input);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 snp_get_unsupported_features(u64 status);
u64 sev_get_status(void);
void sev_show_status(void);
void snp_update_svsm_ca(void);
int prepare_pte_enc(struct pte_enc_desc *d);
void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot);
void snp_kexec_finish(void);
void snp_kexec_begin(void);

int snp_msg_init(struct snp_msg_desc *mdesc, int vmpck_id);
struct snp_msg_desc *snp_msg_alloc(void);
void snp_msg_free(struct snp_msg_desc *mdesc);
int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
			   struct snp_guest_request_ioctl *rio);

void __init snp_secure_tsc_prepare(void);
void __init snp_secure_tsc_init(void);
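
/*
 * Illustrative sketch only, not part of this header's API: the typical
 * lifetime of a message descriptor around a single encrypted guest request.
 * Callers such as the sev-guest driver follow roughly this pattern; the
 * function name and error handling here are assumptions.
 */
static inline int snp_guest_request_example(struct snp_guest_req *req,
					    struct snp_guest_request_ioctl *rio,
					    int vmpck_id)
{
	struct snp_msg_desc *mdesc;
	int ret;

	mdesc = snp_msg_alloc();
	if (!mdesc)
		return -ENOMEM;

	/* Bind the descriptor to one of the four VMPCKs (0-3) */
	ret = snp_msg_init(mdesc, vmpck_id);
	if (ret)
		goto out_free;

	ret = snp_send_guest_request(mdesc, req, rio);

out_free:
	snp_msg_free(mdesc);
	return ret;
}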

#else	/* !CONFIG_AMD_MEM_ENCRYPT */

#define snp_vmpl 0
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
static inline void sev_es_nmi_complete(void) { }
static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
static inline void sev_enable(struct boot_params *bp) { }
static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate) { return 0; }
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
static inline void setup_ghcb(void) { }
static inline void __init
early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
static inline void __init
early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_wakeup_secondary_cpu(void) { }
static inline bool snp_init(struct boot_params *bp) { return false; }
static inline void snp_abort(void) { }
static inline void snp_dmi_setup(void) { }
static inline int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call, struct svsm_attest_call *input)
{
	return -ENOTTY;
}
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
static inline u64 sev_get_status(void) { return 0; }
static inline void sev_show_status(void) { }
static inline void snp_update_svsm_ca(void) { }
static inline int prepare_pte_enc(struct pte_enc_desc *d) { return 0; }
static inline void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot) { }
static inline void snp_kexec_finish(void) { }
static inline void snp_kexec_begin(void) { }
static inline int snp_msg_init(struct snp_msg_desc *mdesc, int vmpck_id) { return -1; }
static inline struct snp_msg_desc *snp_msg_alloc(void) { return NULL; }
static inline void snp_msg_free(struct snp_msg_desc *mdesc) { }
static inline int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
					 struct snp_guest_request_ioctl *rio) { return -ENODEV; }
static inline void __init snp_secure_tsc_prepare(void) { }
static inline void __init snp_secure_tsc_init(void) { }

#endif	/* CONFIG_AMD_MEM_ENCRYPT */

#ifdef CONFIG_KVM_AMD_SEV
bool snp_probe_rmptable_info(void);
int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
void snp_dump_hva_rmpentry(unsigned long address);
int psmash(u64 pfn);
int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable);
int rmp_make_shared(u64 pfn, enum pg_level level);
void snp_leak_pages(u64 pfn, unsigned int npages);
void kdump_sev_callback(void);
void snp_fixup_e820_tables(void);
#else
static inline bool snp_probe_rmptable_info(void) { return false; }
static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
static inline void snp_dump_hva_rmpentry(unsigned long address) {}
static inline int psmash(u64 pfn) { return -ENODEV; }
static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid,
				   bool immutable)
{
	return -ENODEV;
}
static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
static inline void kdump_sev_callback(void) { }
static inline void snp_fixup_e820_tables(void) {}
#endif

#endif