// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_FIRST_LEVEL		-1
#define KVM_PGTABLE_LAST_LEVEL		3

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2
#endif

#define kvm_lpa2_is_enabled()		system_supports_lpa2()

static inline u64 kvm_get_parange_max(void)
{
	if (kvm_lpa2_is_enabled() ||
	   (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
		return ID_AA64MMFR0_EL1_PARANGE_52;
	else
		return ID_AA64MMFR0_EL1_PARANGE_48;
}

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange_max = kvm_get_parange_max();
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	if (parange > parange_max)
		parange = parange_max;

	return parange;
}

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)
#define KVM_PTE_ADDR_MASK_LPA2		GENMASK(49, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_50_LPA2		GENMASK(9, 8)

#define KVM_PHYS_INVALID		(-1ULL)

#define KVM_PTE_TYPE			BIT(1)
#define KVM_PTE_TYPE_BLOCK		0
#define KVM_PTE_TYPE_PAGE		1
#define KVM_PTE_TYPE_TABLE		1

#define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO	\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW	\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 50)

#define KVM_PTE_LEAF_ATTR_HI_SW		GENMASK(58, 55)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN	GENMASK(54, 53)

#define KVM_PTE_LEAF_ATTR_HI_S1_GP	BIT(50)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(9, 2)
#define KVM_MAX_OWNER_ID		1

/*
 * Used to indicate a pte for which a 'break-before-make' sequence is in
 * progress.
 */
#define KVM_INVALID_PTE_LOCKED		BIT(10)
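
/*
 * Illustrative sketch (not kernel code): ignoring LPA2 and FWB, a minimal
 * stage-2 leaf PTE for a readable, writable, accessed, inner-shareable page
 * could be composed from the fields above roughly as follows, using
 * kvm_phys_to_pte() defined below (the MEMATTR encoding is omitted):
 *
 *	kvm_pte_t pte = kvm_phys_to_pte(pa);
 *
 *	pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_PAGE) | KVM_PTE_VALID;
 *	pte |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
 *	pte |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, KVM_PTE_LEAF_ATTR_LO_S2_SH_IS);
 *	pte |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
 */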

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa;

	if (kvm_lpa2_is_enabled()) {
		pa = pte & KVM_PTE_ADDR_MASK_LPA2;
		pa |= FIELD_GET(KVM_PTE_ADDR_51_50_LPA2, pte) << 50;
	} else {
		pa = pte & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16)
			pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
	}

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte;

	if (kvm_lpa2_is_enabled()) {
		pte = pa & KVM_PTE_ADDR_MASK_LPA2;
		pa &= GENMASK(51, 50);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_50_LPA2, pa >> 50);
	} else {
		pte = pa & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16) {
			pa &= GENMASK(51, 48);
			pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
		}
	}

	return pte;
}

static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}

static inline u64 kvm_granule_shift(s8 level)
{
	/* Assumes KVM_PGTABLE_LAST_LEVEL is 3 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(s8 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(s8 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

static inline u32 kvm_supported_block_sizes(void)
{
	s8 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
	u32 r = 0;

	for (; level <= KVM_PGTABLE_LAST_LEVEL; level++)
		r |= BIT(kvm_granule_shift(level));

	return r;
}

static inline bool kvm_is_block_size_supported(u64 size)
{
	/* IS_ALIGNED(size, size) is a power-of-two check: size & (size - 1) == 0 */
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & kvm_supported_block_sizes());
}
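
/*
 * Worked example (illustrative): with 4K pages, KVM_PGTABLE_MIN_BLOCK_LEVEL
 * is 1, kvm_granule_shift(1) is 30 and kvm_granule_shift(2) is 21, so
 * kvm_supported_block_sizes() is BIT(30) | BIT(21) | BIT(12). Hence
 * kvm_is_block_size_supported(SZ_1G) and kvm_is_block_size_supported(SZ_2M)
 * return true, while kvm_is_block_size_supported(SZ_32M) returns false:
 * 32M is a power of two, but not a block size at any level.
 */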

/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @free_unlinked_table:	Free an unlinked paging structure by unlinking and
 *				dropping references.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void *(*zalloc_page)(void *arg);
	void *(*zalloc_pages_exact)(size_t size);
	void (*free_pages_exact)(void *addr, size_t size);
	void (*free_unlinked_table)(void *addr, s8 level);
	void (*get_page)(void *addr);
	void (*put_page)(void *addr);
	int (*page_count)(void *addr);
	void *(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t (*virt_to_phys)(void *addr);
	void (*dcache_clean_inval_poc)(void *addr, size_t size);
	void (*icache_inval_pou)(void *addr, size_t size);
};

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB	= BIT(0),
	KVM_PGTABLE_S2_IDMAP	= BIT(1),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_UX:	Unprivileged execute permission.
 * @KVM_PGTABLE_PROT_PX:	Privileged execute permission.
 * @KVM_PGTABLE_PROT_X:		Privileged and unprivileged execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_NORMAL_NC:	Normal noncacheable attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_PX		= BIT(0),
	KVM_PGTABLE_PROT_UX		= BIT(1),
	KVM_PGTABLE_PROT_X		= KVM_PGTABLE_PROT_PX |
					  KVM_PGTABLE_PROT_UX,
	KVM_PGTABLE_PROT_W		= BIT(2),
	KVM_PGTABLE_PROT_R		= BIT(3),

	KVM_PGTABLE_PROT_DEVICE		= BIT(4),
	KVM_PGTABLE_PROT_NORMAL_NC	= BIT(5),

	KVM_PGTABLE_PROT_SW0		= BIT(55),
	KVM_PGTABLE_PROT_SW1		= BIT(56),
	KVM_PGTABLE_PROT_SW2		= BIT(57),
	KVM_PGTABLE_PROT_SW3		= BIT(58),
};

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);
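
/*
 * Illustrative sketch of a host-side struct kvm_pgtable_mm_ops backed by the
 * page allocator. This mirrors the shape of the in-tree callbacks but is not
 * the in-tree definition; error handling, the @arg memcache and the remaining
 * callbacks are omitted:
 *
 *	static void *example_zalloc_page(void *arg)
 *	{
 *		return (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
 *	}
 *
 *	static void example_get_page(void *addr)
 *	{
 *		get_page(virt_to_page(addr));
 *	}
 *
 *	static void example_put_page(void *addr)
 *	{
 *		put_page(virt_to_page(addr));
 *	}
 *
 *	static struct kvm_pgtable_mm_ops example_mm_ops = {
 *		.zalloc_page	= example_zalloc_page,
 *		.get_page	= example_get_page,
 *		.put_page	= example_put_page,
 *	};
 */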

/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
 *					invoked from a fault handler.
 * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries
 *					without break-before-make's
 *					TLB invalidation.
 * @KVM_PGTABLE_WALK_SKIP_CMO:		Visit and update table entries
 *					without the cache maintenance
 *					operations (CMOs) that would
 *					otherwise be required.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF		= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE	= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST	= BIT(2),
	KVM_PGTABLE_WALK_SHARED		= BIT(3),
	KVM_PGTABLE_WALK_HANDLE_FAULT	= BIT(4),
	KVM_PGTABLE_WALK_SKIP_BBM_TLBI	= BIT(5),
	KVM_PGTABLE_WALK_SKIP_CMO	= BIT(6),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					start;
	u64					addr;
	u64					end;
	s8					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
{
	return rcu_dereference_raw(pteref);
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif
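
/*
 * Illustrative sketch: a walker that counts valid leaf entries in a range via
 * kvm_pgtable_walk(), declared further down ('nr_valid' is a placeholder).
 * KVM_PGTABLE_WALK_LEAF also visits invalid entries, hence the
 * kvm_pte_valid() check:
 *
 *	static int count_valid_cb(const struct kvm_pgtable_visit_ctx *ctx,
 *				  enum kvm_pgtable_walk_flags visit)
 *	{
 *		if (kvm_pte_valid(ctx->old))
 *			++*(u64 *)ctx->arg;
 *		return 0;
 *	}
 *
 *	u64 nr_valid = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= count_valid_cb,
 *		.arg	= &nr_valid,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 *	int ret = kvm_pgtable_walk(pgt, addr, size, &walker);
 */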

/**
 * struct kvm_pgtable - KVM page-table.
 * @pkvm_mappings:	Tree of pKVM mappings; used by protected-guest
 *			page-tables in place of the fields below.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
struct kvm_pgtable {
	union {
		struct rb_root_cached			pkvm_mappings;
		struct {
			u32				ia_bits;
			s8				start_level;
			kvm_pteref_t			pgd;
			struct kvm_pgtable_mm_ops	*mm_ops;

			/* Stage-2 only */
			enum kvm_pgtable_stage2_flags	flags;
			kvm_pgtable_force_pte_cb_t	force_pte_cb;
		};
	};
	struct kvm_s2_mmu				*mmu;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);
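
/*
 * Illustrative sketch ('hyp_va_bits', 'mm_ops', 'va' and 'pa' are
 * placeholders): initialise a hypervisor stage-1 table and install a normal,
 * read-write mapping of a single page:
 *
 *	struct kvm_pgtable pgt;
 *	int ret;
 *
 *	ret = kvm_pgtable_hyp_init(&pgt, hyp_va_bits, &mm_ops);
 *	if (!ret)
 *		ret = kvm_pgtable_hyp_map(&pgt, va, PAGE_SIZE, pa, PAGE_HYP);
 */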

/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2.
 * @mmfr0:	Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. The HA flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD.
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation.
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb: Function that returns true if page level mappings must
 *		be used instead of block mappings.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb);

static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
					  struct kvm_pgtable_mm_ops *mm_ops)
{
	return __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL);
}

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_destroy_range() - Destroy the unlinked range of addresses.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address of the range to destroy.
 * @size:	Size of the range.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
				      u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_destroy_pgd() - Destroy the PGD of guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * It is assumed that the rest of the page-table is freed before this operation.
 */
void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
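
/*
 * Illustrative sketch of the usual stage-2 lifecycle, assuming 'mmu',
 * 'mm_ops' and the sanitised ID register values are already in hand:
 *
 *	u64 vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
 *	size_t pgd_sz = kvm_pgtable_stage2_pgd_size(vtcr);
 *
 *	ret = kvm_pgtable_stage2_init(&pgt, mmu, &mm_ops);
 *	...
 *	kvm_pgtable_stage2_destroy(&pgt);
 */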

/**
 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);

/**
 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @phys:	Physical address of the memory to map.
 * @level:	Starting level of the stage-2 paging structure to be created.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @force_pte:	Force mappings to PAGE_SIZE granularity.
 *
 * Returns an unlinked page-table tree. This new page-table tree is
 * not reachable (i.e., it is unlinked) from the root pgd and it's
 * therefore unreachable by the hardware page-table walker. No TLB
 * invalidation or CMOs are performed.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Return: The fully populated (unlinked) stage-2 paging structure, or
 * an ERR_PTR(error) on failure.
 */
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, s8 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (e.g. a shared walk).
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it's trying to recreate the exact same mapping or only change the access
 * permissions. Instead, the vCPU will exit one more time from the guest if
 * still needed and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
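
/*
 * Illustrative sketch: install a single read-write-executable page at IPA
 * 'gpa' backed by host physical address 'pa', allocating any intermediate
 * tables from the pre-topped-up memcache 'mc' (all names are placeholders):
 *
 *	ret = kvm_pgtable_stage2_map(pgt, gpa, PAGE_SIZE, pa,
 *				     KVM_PGTABLE_PROT_RWX, mc, 0);
 *
 * A fault handler would typically pass KVM_PGTABLE_WALK_HANDLE_FAULT
 * (possibly with KVM_PGTABLE_WALK_SHARED) instead of 0 for the final
 * argument.
 */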

/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *				    without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @flags:	Flags to control the page-table walk (e.g. a shared walk).
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 */
void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				enum kvm_pgtable_walk_flags flags);
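
/*
 * Illustrative sketch: write-protect a range (e.g. for dirty logging) and
 * then publish the change with a ranged TLB invalidation, using
 * kvm_tlb_flush_vmid_range() declared at the end of this header:
 *
 *	ret = kvm_pgtable_stage2_wrprotect(pgt, addr, size);
 *	if (!ret)
 *		kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);
 */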

/**
 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
 *					   flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @size:	Size of the address range to visit.
 * @mkold:	True if the access flag should be cleared.
 *
 * The offset of @addr within a page is ignored.
 *
 * Tests and conditionally clears the access flag for every valid, leaf
 * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: True if any of the visited PTEs had the access flag set.
 */
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 * @flags:	Flags to control the page-table walk (e.g. a shared walk).
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot,
				   enum kvm_pgtable_walk_flags flags);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point of
 *				Coherency for guest stage-2 address range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
 *				to PAGE_SIZE guest pages.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to split.
 * @size:	Size of the range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 *
 * The function tries to split any level 1 or 2 entry that overlaps
 * with the input range (given by @addr and @size).
 *
 * Return: 0 on success, negative error code on failure. Note that
 * kvm_pgtable_stage2_split() is best effort: it tries to break as many
 * blocks in the input range as allowed by the capacity of @mc.
 */
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc);
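
/*
 * Illustrative sketch: eagerly split blocks in a range, feeding the split
 * from a caller-sized memory cache ('nr_pages' is a placeholder):
 *
 *	struct kvm_mmu_memory_cache mc = { .gfp_zero = __GFP_ZERO };
 *
 *	ret = kvm_mmu_topup_memory_cache(&mc, nr_pages);
 *	if (!ret)
 *		ret = kvm_pgtable_stage2_split(pgt, addr, size, &mc);
 *	kvm_mmu_free_memory_cache(&mc);
 */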

/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, s8 *level);

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);

/**
 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries.
 * @mmu:	Stage-2 KVM MMU struct.
 * @addr:	The base intermediate physical address from which to invalidate.
 * @size:	Size of the range from the base to invalidate.
 */
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
			      phys_addr_t addr, size_t size);

#endif	/* __ARM64_KVM_PGTABLE_H__ */