/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#ifndef __ASM_ENCRYPTED_STATE_H
#define __ASM_ENCRYPTED_STATE_H

#include <linux/types.h>
#include <linux/sev-guest.h>

#include <asm/insn.h>
#include <asm/sev-common.h>
#include <asm/coco.h>
#include <asm/set_memory.h>
#include <asm/svm.h>

#define GHCB_PROTOCOL_MIN	1ULL
#define GHCB_PROTOCOL_MAX	2ULL
#define GHCB_DEFAULT_USAGE	0ULL

#define VMGEXIT()		{ asm volatile("rep; vmmcall\n\r"); }

struct boot_params;

enum es_result {
	ES_OK,			/* All good */
	ES_UNSUPPORTED,		/* Requested operation not supported */
	ES_VMM_ERROR,		/* Unexpected state from the VMM */
	ES_DECODE_FAILED,	/* Instruction decoding failed */
	ES_EXCEPTION,		/* Instruction caused exception */
	ES_RETRY,		/* Retry instruction emulation */
};

struct es_fault_info {
	unsigned long vector;
	unsigned long error_code;
	unsigned long cr2;
};

struct pt_regs;

/* ES instruction emulation context */
struct es_em_ctxt {
	struct pt_regs *regs;
	struct insn insn;
	struct es_fault_info fi;
};

/*
 * AMD SEV Confidential computing blob structure. The structure is
 * defined in the OVMF UEFI firmware header:
 * https://github.com/tianocore/edk2/blob/master/OvmfPkg/Include/Guid/ConfidentialComputingSevSnpBlob.h
 */
#define CC_BLOB_SEV_HDR_MAGIC	0x45444d41
struct cc_blob_sev_info {
	u32 magic;
	u16 version;
	u16 reserved;
	u64 secrets_phys;
	u32 secrets_len;
	u32 rsvd1;
	u64 cpuid_phys;
	u32 cpuid_len;
	u32 rsvd2;
} __packed;

void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code);

static inline u64 lower_bits(u64 val, unsigned int bits)
{
	u64 mask = (1ULL << bits) - 1;

	return (val & mask);
}

struct real_mode_header;
enum stack_type;

/* Early IDT entry points for #VC handler */
extern void vc_no_ghcb(void);
extern void vc_boot_ghcb(void);
extern bool handle_vc_boot_ghcb(struct pt_regs *regs);

/*
 * Individual entries of the SNP CPUID table, as defined by the SNP
 * Firmware ABI, Revision 0.9, Section 7.1, Table 14.
 */
struct snp_cpuid_fn {
	u32 eax_in;
	u32 ecx_in;
	u64 xcr0_in;
	u64 xss_in;
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
	u64 __reserved;
} __packed;

/*
 * SNP CPUID table, as defined by the SNP Firmware ABI, Revision 0.9,
 * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit
 * of 64 entries per CPUID table.
 */
#define SNP_CPUID_COUNT_MAX	64

struct snp_cpuid_table {
	u32 count;
	u32 __reserved1;
	u64 __reserved2;
	struct snp_cpuid_fn fn[SNP_CPUID_COUNT_MAX];
} __packed;

/* PVALIDATE return codes */
#define PVALIDATE_FAIL_SIZEMISMATCH	6

/* Software defined (when rFlags.CF = 1) */
#define PVALIDATE_FAIL_NOUPDATE		255

/* RMPUPDATE detected 4K page and 2MB page overlap. */
#define RMPUPDATE_FAIL_OVERLAP		4

/* PSMASH failed due to concurrent access by another CPU */
#define PSMASH_FAIL_INUSE		3

/* RMP page size */
#define RMP_PG_SIZE_4K		0
#define RMP_PG_SIZE_2M		1
#define RMP_TO_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
#define PG_LEVEL_TO_RMP(level)	(((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)

struct rmp_state {
	u64 gpa;
	u8 assigned;
	u8 pagesize;
	u8 immutable;
	u8 rsvd;
	u32 asid;
} __packed;

#define RMPADJUST_VMSA_PAGE_BIT		BIT(16)

/* SNP Guest message request */
struct snp_req_data {
	unsigned long req_gpa;
	unsigned long resp_gpa;
	unsigned long data_gpa;
	unsigned int data_npages;
};

#define MAX_AUTHTAG_LEN		32
#define AUTHTAG_LEN		16
#define AAD_LEN			48
#define MSG_HDR_VER		1

#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
#define SNP_REQ_RETRY_DELAY		(2*HZ)

/* See SNP spec SNP_GUEST_REQUEST section for the structure */
enum msg_type {
	SNP_MSG_TYPE_INVALID = 0,
	SNP_MSG_CPUID_REQ,
	SNP_MSG_CPUID_RSP,
	SNP_MSG_KEY_REQ,
	SNP_MSG_KEY_RSP,
	SNP_MSG_REPORT_REQ,
	SNP_MSG_REPORT_RSP,
	SNP_MSG_EXPORT_REQ,
	SNP_MSG_EXPORT_RSP,
	SNP_MSG_IMPORT_REQ,
	SNP_MSG_IMPORT_RSP,
	SNP_MSG_ABSORB_REQ,
	SNP_MSG_ABSORB_RSP,
	SNP_MSG_VMRK_REQ,
	SNP_MSG_VMRK_RSP,

	SNP_MSG_TSC_INFO_REQ = 17,
	SNP_MSG_TSC_INFO_RSP,

	SNP_MSG_TYPE_MAX
};

enum aead_algo {
	SNP_AEAD_INVALID,
	SNP_AEAD_AES_256_GCM,
};

struct snp_guest_msg_hdr {
	u8 authtag[MAX_AUTHTAG_LEN];
	u64 msg_seqno;
	u8 rsvd1[8];
	u8 algo;
	u8 hdr_version;
	u16 hdr_sz;
	u8 msg_type;
	u8 msg_version;
	u16 msg_sz;
	u32 rsvd2;
	u8 msg_vmpck;
	u8 rsvd3[35];
} __packed;

struct snp_guest_msg {
	struct snp_guest_msg_hdr hdr;
	u8 payload[PAGE_SIZE - sizeof(struct snp_guest_msg_hdr)];
} __packed;

#define SNP_TSC_INFO_REQ_SZ	128

struct snp_tsc_info_req {
	u8 rsvd[SNP_TSC_INFO_REQ_SZ];
} __packed;

struct snp_tsc_info_resp {
	u32 status;
	u32 rsvd1;
	u64 tsc_scale;
	u64 tsc_offset;
	u32 tsc_factor;
	u8 rsvd2[100];
} __packed;

/*
 * Obtain the mean TSC frequency by decreasing the nominal TSC frequency with
 * TSC_FACTOR as documented in the SNP Firmware ABI specification:
 *
 *	GUEST_TSC_FREQ * (1 - (TSC_FACTOR * 0.00001))
 *
 * which is equivalent to:
 *
 *	GUEST_TSC_FREQ -= (GUEST_TSC_FREQ * TSC_FACTOR) / 100000;
 */
#define SNP_SCALE_TSC_FREQ(freq, factor) ((freq) - (freq) * (factor) / 100000)
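
/*
 * Worked example (illustrative only, values made up): with a nominal
 * GUEST_TSC_FREQ of 2000000 kHz and a TSC_FACTOR of 1000 (a 1% decrease),
 *
 *	SNP_SCALE_TSC_FREQ(2000000, 1000)
 *		== 2000000 - (2000000 * 1000) / 100000
 *		== 2000000 - 20000
 *		== 1980000 kHz
 */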

struct snp_guest_req {
	void *req_buf;
	size_t req_sz;

	void *resp_buf;
	size_t resp_sz;

	u64 exit_code;
	unsigned int vmpck_id;
	u8 msg_version;
	u8 msg_type;

	struct snp_req_data input;
	void *certs_data;
};

/*
 * The secrets page contains 96 bytes of reserved space that can be used by
 * the guest OS. The guest OS uses the area to save the message sequence
 * number for each VMPCK.
 *
 * See the GHCB spec section "Secret page layout" for the format of this area.
 */
struct secrets_os_area {
	u32 msg_seqno_0;
	u32 msg_seqno_1;
	u32 msg_seqno_2;
	u32 msg_seqno_3;
	u64 ap_jump_table_pa;
	u8 rsvd[40];
	u8 guest_usage[32];
} __packed;
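
/*
 * Illustrative sketch (not part of this header's API): the four msg_seqno_*
 * fields are consecutive u32s, so a messaging driver can pick the sequence
 * counter belonging to a given VMPCK by indexing from msg_seqno_0, e.g.:
 *
 *	u32 *seqno = &secrets->os_area.msg_seqno_0 + vmpck_id;
 *
 * where "secrets" points at the mapped secrets page and "vmpck_id" is 0-3.
 */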

#define VMPCK_KEY_LEN		32

/* See the SNP spec version 0.9 for secrets page format */
struct snp_secrets_page {
	u32 version;
	u32 imien	: 1,
	    rsvd1	: 31;
	u32 fms;
	u32 rsvd2;
	u8 gosvw[16];
	u8 vmpck0[VMPCK_KEY_LEN];
	u8 vmpck1[VMPCK_KEY_LEN];
	u8 vmpck2[VMPCK_KEY_LEN];
	u8 vmpck3[VMPCK_KEY_LEN];
	struct secrets_os_area os_area;

	u8 vmsa_tweak_bitmap[64];

	/* SVSM fields */
	u64 svsm_base;
	u64 svsm_size;
	u64 svsm_caa;
	u32 svsm_max_version;
	u8 svsm_guest_vmpl;
	u8 rsvd3[3];

	/* The percentage decrease from nominal to mean TSC frequency. */
	u32 tsc_factor;

	/* Remainder of page */
	u8 rsvd4[3740];
} __packed;

struct snp_msg_desc {
	/* request and response are in unencrypted memory */
	struct snp_guest_msg *request, *response;

	/*
	 * Avoid information leakage by double-buffering shared messages
	 * in fields that are in regular encrypted memory.
	 */
	struct snp_guest_msg secret_request, secret_response;

	struct snp_secrets_page *secrets;

	struct aesgcm_ctx *ctx;

	u32 *os_area_msg_seqno;
	u8 *vmpck;
	int vmpck_id;
};

/*
 * The SVSM Calling Area (CA) related structures.
 */
struct svsm_ca {
	u8 call_pending;
	u8 mem_available;
	u8 rsvd1[6];

	u8 svsm_buffer[PAGE_SIZE - 8];
};

#define SVSM_SUCCESS				0
#define SVSM_ERR_INCOMPLETE			0x80000000
#define SVSM_ERR_UNSUPPORTED_PROTOCOL		0x80000001
#define SVSM_ERR_UNSUPPORTED_CALL		0x80000002
#define SVSM_ERR_INVALID_ADDRESS		0x80000003
#define SVSM_ERR_INVALID_FORMAT			0x80000004
#define SVSM_ERR_INVALID_PARAMETER		0x80000005
#define SVSM_ERR_INVALID_REQUEST		0x80000006
#define SVSM_ERR_BUSY				0x80000007
#define SVSM_PVALIDATE_FAIL_SIZEMISMATCH	0x80001006

/*
 * The SVSM PVALIDATE related structures
 */
struct svsm_pvalidate_entry {
	u64 page_size	: 2,
	    action	: 1,
	    ignore_cf	: 1,
	    rsvd	: 8,
	    pfn		: 52;
};

struct svsm_pvalidate_call {
	u16 num_entries;
	u16 cur_index;

	u8 rsvd1[4];

	struct svsm_pvalidate_entry entry[];
};

#define SVSM_PVALIDATE_MAX_COUNT ((sizeof_field(struct svsm_ca, svsm_buffer) -	\
				   offsetof(struct svsm_pvalidate_call, entry)) / \
				  sizeof(struct svsm_pvalidate_entry))

/*
 * The SVSM Attestation related structures
 */
struct svsm_loc_entry {
	u64 pa;
	u32 len;
	u8 rsvd[4];
};

struct svsm_attest_call {
	struct svsm_loc_entry report_buf;
	struct svsm_loc_entry nonce;
	struct svsm_loc_entry manifest_buf;
	struct svsm_loc_entry certificates_buf;

	/* For attesting a single service */
	u8 service_guid[16];
	u32 service_manifest_ver;
	u8 rsvd[4];
};

/* PTE descriptor used for the prepare_pte_enc() operations. */
struct pte_enc_desc {
	pte_t *kpte;
	int pte_level;
	bool encrypt;
	/* pfn of the kpte above */
	unsigned long pfn;
	/* physical address of @pfn */
	unsigned long pa;
	/* virtual address of @pfn */
	void *va;
	/* memory covered by the pte */
	unsigned long size;
	pgprot_t new_pgprot;
};

/*
 * SVSM protocol structure
 */
struct svsm_call {
	struct svsm_ca *caa;
	u64 rax;
	u64 rcx;
	u64 rdx;
	u64 r8;
	u64 r9;
	u64 rax_out;
	u64 rcx_out;
	u64 rdx_out;
	u64 r8_out;
	u64 r9_out;
};

#define SVSM_CORE_CALL(x)		((0ULL << 32) | (x))
#define SVSM_CORE_REMAP_CA		0
#define SVSM_CORE_PVALIDATE		1
#define SVSM_CORE_CREATE_VCPU		2
#define SVSM_CORE_DELETE_VCPU		3

#define SVSM_ATTEST_CALL(x)		((1ULL << 32) | (x))
#define SVSM_ATTEST_SERVICES		0
#define SVSM_ATTEST_SINGLE_SERVICE	1

#define SVSM_VTPM_CALL(x)		((2ULL << 32) | (x))
#define SVSM_VTPM_QUERY			0
#define SVSM_VTPM_CMD			1
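
/*
 * Illustrative sketch (not a definitive recipe): the value placed in
 * struct svsm_call::rax encodes the SVSM protocol number in the upper 32
 * bits and the call number in the lower 32 bits, e.g.
 * SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES) == (1ULL << 32) | 0, and results
 * come back through the *_out fields. For the attestation protocol, a
 * caller might fill the structures like this and hand them to
 * snp_issue_svsm_attest_req(), declared below (buffer addresses and
 * lengths are placeholders):
 *
 *	struct svsm_attest_call ac = {};
 *	struct svsm_call call = {};
 *	int ret;
 *
 *	ac.report_buf.pa    = report_pa;
 *	ac.report_buf.len   = report_len;
 *	ac.nonce.pa         = nonce_pa;
 *	ac.nonce.len        = nonce_len;
 *	ac.manifest_buf.pa  = manifest_pa;
 *	ac.manifest_buf.len = manifest_len;
 *
 *	ret = snp_issue_svsm_attest_req(SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES),
 *					&call, &ac);
 */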

#ifdef CONFIG_AMD_MEM_ENCRYPT

extern u8 snp_vmpl;

extern void __sev_es_ist_enter(struct pt_regs *regs);
extern void __sev_es_ist_exit(void);
static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
{
	if (cc_vendor == CC_VENDOR_AMD &&
	    cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		__sev_es_ist_enter(regs);
}
static __always_inline void sev_es_ist_exit(void)
{
	if (cc_vendor == CC_VENDOR_AMD &&
	    cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		__sev_es_ist_exit();
}
extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
extern void __sev_es_nmi_complete(void);
static __always_inline void sev_es_nmi_complete(void)
{
	if (cc_vendor == CC_VENDOR_AMD &&
	    cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		__sev_es_nmi_complete();
}
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
extern void sev_enable(struct boot_params *bp);

/*
 * RMPADJUST modifies the RMP permissions of a page of a lesser-
 * privileged (numerically higher) VMPL.
 *
 * If the guest is running at a higher privilege level than the one the
 * instruction is targeting, the instruction will succeed; otherwise, it
 * will fail.
 */
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
{
	int rc;

	/* "rmpadjust" mnemonic support in binutils 2.36 and newer */
	asm volatile(".byte 0xF3,0x0F,0x01,0xFE\n\t"
		     : "=a"(rc)
		     : "a"(vaddr), "c"(rmp_psize), "d"(attrs)
		     : "memory", "cc");

	return rc;
}
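
/*
 * Illustrative sketch (assumptions: running at VMPL0, "vmsa_page" is a
 * placeholder for a suitably allocated page): a more privileged VMPL can
 * use rmpadjust() to adjust a page's permissions for a lesser-privileged
 * VMPL and, with RMPADJUST_VMSA_PAGE_BIT, mark it as that VMPL's VMSA.
 * The attribute layout (target VMPL in the low byte, permission/VMSA bits
 * above it) is defined by the APM/GHCB spec, not by this header:
 *
 *	u64 attrs = 1;				// target VMPL1
 *	attrs |= RMPADJUST_VMSA_PAGE_BIT;	// mark the page as a VMSA
 *
 *	ret = rmpadjust((unsigned long)vmsa_page, RMP_PG_SIZE_4K, attrs);
 *	// a non-zero return value is the RMPADJUST failure code
 */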

static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
{
	bool no_rmpupdate;
	int rc;

	/* "pvalidate" mnemonic support in binutils 2.36 and newer */
	asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t"
		     CC_SET(c)
		     : CC_OUT(c) (no_rmpupdate), "=a"(rc)
		     : "a"(vaddr), "c"(rmp_psize), "d"(validate)
		     : "memory", "cc");

	if (no_rmpupdate)
		return PVALIDATE_FAIL_NOUPDATE;

	return rc;
}
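
/*
 * Illustrative sketch (simplified, no SVSM present; "vaddr"/"vaddr_end" are
 * placeholders): validate a 2M region and fall back to 4K pages when the
 * RMP holds only 4K entries for it. PVALIDATE_FAIL_NOUPDATE (rFlags.CF set,
 * i.e. the RMP was already in the requested state) is usually not treated
 * as an error; the termination reason codes come from <asm/sev-common.h>:
 *
 *	rc = pvalidate(vaddr, RMP_PG_SIZE_2M, true);
 *	if (rc == PVALIDATE_FAIL_SIZEMISMATCH) {
 *		for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
 *			rc = pvalidate(vaddr, RMP_PG_SIZE_4K, true);
 *	}
 *	if (rc && rc != PVALIDATE_FAIL_NOUPDATE)
 *		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
 */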

struct snp_guest_request_ioctl;

void setup_ghcb(void);
void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
				  unsigned long npages);
void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
				 unsigned long npages);
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void __noreturn snp_abort(void);
void snp_dmi_setup(void);
int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call, struct svsm_attest_call *input);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 snp_get_unsupported_features(u64 status);
u64 sev_get_status(void);
void sev_show_status(void);
void snp_update_svsm_ca(void);
int prepare_pte_enc(struct pte_enc_desc *d);
void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot);
void snp_kexec_finish(void);
void snp_kexec_begin(void);

int snp_msg_init(struct snp_msg_desc *mdesc, int vmpck_id);
struct snp_msg_desc *snp_msg_alloc(void);
void snp_msg_free(struct snp_msg_desc *mdesc);
int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
			   struct snp_guest_request_ioctl *rio);

int snp_svsm_vtpm_send_command(u8 *buffer);

void __init snp_secure_tsc_prepare(void);
void __init snp_secure_tsc_init(void);
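
/*
 * Illustrative sketch of the guest messaging helpers declared above
 * (heavily simplified, error handling omitted; "vmpck_id", "report_req",
 * "report_resp", "resp_len" and "rio" are caller-supplied placeholders).
 * The exit code comes from <asm/svm.h>:
 *
 *	struct snp_msg_desc *mdesc = snp_msg_alloc();
 *	struct snp_guest_req req = {};
 *	int ret;
 *
 *	ret = snp_msg_init(mdesc, vmpck_id);
 *
 *	req.msg_version = MSG_HDR_VER;
 *	req.msg_type    = SNP_MSG_REPORT_REQ;
 *	req.vmpck_id    = vmpck_id;
 *	req.req_buf     = &report_req;
 *	req.req_sz      = sizeof(report_req);
 *	req.resp_buf    = report_resp;
 *	req.resp_sz     = resp_len;
 *	req.exit_code   = SVM_VMGEXIT_GUEST_REQUEST;
 *
 *	ret = snp_send_guest_request(mdesc, &req, &rio);
 *
 *	snp_msg_free(mdesc);
 */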

static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
	ghcb->save.sw_exit_code = 0;
	__builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

void vc_forward_exception(struct es_em_ctxt *ctxt);

/* I/O parameters for CPUID-related helpers */
struct cpuid_leaf {
	u32 fn;
	u32 subfn;
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
};

int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf);

void __noreturn sev_es_terminate(unsigned int set, unsigned int reason);
enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
				   struct es_em_ctxt *ctxt,
				   u64 exit_code, u64 exit_info_1,
				   u64 exit_info_2);

extern struct ghcb *boot_ghcb;

#else /* !CONFIG_AMD_MEM_ENCRYPT */

#define snp_vmpl 0
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
static inline void sev_es_nmi_complete(void) { }
static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
static inline void sev_enable(struct boot_params *bp) { }
static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate) { return 0; }
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
static inline void setup_ghcb(void) { }
static inline void __init
early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
static inline void __init
early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_wakeup_secondary_cpu(void) { }
static inline bool snp_init(struct boot_params *bp) { return false; }
static inline void snp_abort(void) { }
static inline void snp_dmi_setup(void) { }
static inline int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call, struct svsm_attest_call *input)
{
	return -ENOTTY;
}
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
static inline u64 sev_get_status(void) { return 0; }
static inline void sev_show_status(void) { }
static inline void snp_update_svsm_ca(void) { }
static inline int prepare_pte_enc(struct pte_enc_desc *d) { return 0; }
static inline void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot) { }
static inline void snp_kexec_finish(void) { }
static inline void snp_kexec_begin(void) { }
static inline int snp_msg_init(struct snp_msg_desc *mdesc, int vmpck_id) { return -1; }
static inline struct snp_msg_desc *snp_msg_alloc(void) { return NULL; }
static inline void snp_msg_free(struct snp_msg_desc *mdesc) { }
static inline int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
					 struct snp_guest_request_ioctl *rio) { return -ENODEV; }
static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; }
static inline void __init snp_secure_tsc_prepare(void) { }
static inline void __init snp_secure_tsc_init(void) { }

#endif /* CONFIG_AMD_MEM_ENCRYPT */

#ifdef CONFIG_KVM_AMD_SEV
bool snp_probe_rmptable_info(void);
int snp_rmptable_init(void);
int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
void snp_dump_hva_rmpentry(unsigned long address);
int psmash(u64 pfn);
int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable);
int rmp_make_shared(u64 pfn, enum pg_level level);
void snp_leak_pages(u64 pfn, unsigned int npages);
void kdump_sev_callback(void);
void snp_fixup_e820_tables(void);
#else
static inline bool snp_probe_rmptable_info(void) { return false; }
static inline int snp_rmptable_init(void) { return -ENOSYS; }
static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
static inline void snp_dump_hva_rmpentry(unsigned long address) { }
static inline int psmash(u64 pfn) { return -ENODEV; }
static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid,
				   bool immutable)
{
	return -ENODEV;
}
static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
static inline void snp_leak_pages(u64 pfn, unsigned int npages) { }
static inline void kdump_sev_callback(void) { }
static inline void snp_fixup_e820_tables(void) { }
#endif /* CONFIG_KVM_AMD_SEV */

#endif /* __ASM_ENCRYPTED_STATE_H */