// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_FIRST_LEVEL		-1
#define KVM_PGTABLE_LAST_LEVEL		3

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2
#endif

#define kvm_lpa2_is_enabled()		system_supports_lpa2()

static inline u64 kvm_get_parange_max(void)
{
	if (kvm_lpa2_is_enabled() ||
	    (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
		return ID_AA64MMFR0_EL1_PARANGE_52;
	else
		return ID_AA64MMFR0_EL1_PARANGE_48;
}

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange_max = kvm_get_parange_max();
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);

	if (parange > parange_max)
		parange = parange_max;

	return parange;
}

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)
#define KVM_PTE_ADDR_MASK_LPA2		GENMASK(49, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_50_LPA2		GENMASK(9, 8)

#define KVM_PHYS_INVALID		(-1ULL)

#define KVM_PTE_TYPE			BIT(1)
#define KVM_PTE_TYPE_BLOCK		0
#define KVM_PTE_TYPE_PAGE		1
#define KVM_PTE_TYPE_TABLE		1

#define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO	\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW	\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 50)

#define KVM_PTE_LEAF_ATTR_HI_SW		GENMASK(58, 55)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)
#define KVM_PTE_LEAF_ATTR_HI_S1_UXN	BIT(54)
#define KVM_PTE_LEAF_ATTR_HI_S1_PXN	BIT(53)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN	GENMASK(54, 53)

#define KVM_PTE_LEAF_ATTR_HI_S1_GP	BIT(50)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(9, 2)
#define KVM_MAX_OWNER_ID		1

/*
 * Used to indicate a pte for which a 'break-before-make' sequence is in
 * progress.
 */
#define KVM_INVALID_PTE_LOCKED		BIT(10)
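
/*
 * Example: a minimal sketch of how the invalid-PTE annotations above
 * compose, assuming a hypothetical owner identifier 'id' (at most
 * KVM_MAX_OWNER_ID). An annotated PTE keeps KVM_PTE_VALID clear, so the
 * hardware walker faults on it while software can still recover the
 * metadata:
 *
 *	kvm_pte_t pte = FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, id);
 *	u8 owner = FIELD_GET(KVM_INVALID_PTE_OWNER_MASK, pte);
 *	bool bbm_in_progress = pte & KVM_INVALID_PTE_LOCKED;
 */
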
static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa;

	if (kvm_lpa2_is_enabled()) {
		pa = pte & KVM_PTE_ADDR_MASK_LPA2;
		pa |= FIELD_GET(KVM_PTE_ADDR_51_50_LPA2, pte) << 50;
	} else {
		pa = pte & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16)
			pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
	}

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte;

	if (kvm_lpa2_is_enabled()) {
		pte = pa & KVM_PTE_ADDR_MASK_LPA2;
		pa &= GENMASK(51, 50);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_50_LPA2, pa >> 50);
	} else {
		pte = pa & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16) {
			pa &= GENMASK(51, 48);
			pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
		}
	}

	return pte;
}

static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}

static inline u64 kvm_granule_shift(s8 level)
{
	/* Assumes KVM_PGTABLE_LAST_LEVEL is 3 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(s8 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(s8 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

static inline u32 kvm_supported_block_sizes(void)
{
	s8 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
	u32 r = 0;

	for (; level <= KVM_PGTABLE_LAST_LEVEL; level++)
		r |= BIT(kvm_granule_shift(level));

	return r;
}

static inline bool kvm_is_block_size_supported(u64 size)
{
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & kvm_supported_block_sizes());
}
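
/*
 * Example: a minimal sketch of the address helpers above, assuming a
 * page-aligned 'pa' within the supported PA range. With LPA2, bits
 * [51:50] of the address travel in KVM_PTE_ADDR_51_50_LPA2; otherwise,
 * 64K pages carry bits [51:48] in KVM_PTE_ADDR_51_48. A level-2 block
 * is supported for every granule size:
 *
 *	kvm_pte_t pte = kvm_phys_to_pte(pa);
 *
 *	WARN_ON(kvm_pte_to_phys(pte) != pa);
 *	WARN_ON(!kvm_is_block_size_supported(kvm_granule_size(2)));
 */
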
/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @free_unlinked_table:	Free an unlinked paging structure by unlinking and
 *				dropping references.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*free_unlinked_table)(void *addr, s8 level);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 * @KVM_PGTABLE_S2_AS_S1:	Final memory attributes are that of Stage-1.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_IDMAP			= BIT(0),
	KVM_PGTABLE_S2_AS_S1			= BIT(1),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_UX:	Unprivileged execute permission.
 * @KVM_PGTABLE_PROT_PX:	Privileged execute permission.
 * @KVM_PGTABLE_PROT_X:		Privileged and unprivileged execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_NORMAL_NC:	Normal noncacheable attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_PX			= BIT(0),
	KVM_PGTABLE_PROT_UX			= BIT(1),
	KVM_PGTABLE_PROT_X			= KVM_PGTABLE_PROT_PX |
						  KVM_PGTABLE_PROT_UX,
	KVM_PGTABLE_PROT_W			= BIT(2),
	KVM_PGTABLE_PROT_R			= BIT(3),

	KVM_PGTABLE_PROT_DEVICE			= BIT(4),
	KVM_PGTABLE_PROT_NORMAL_NC		= BIT(5),

	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);
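
/*
 * Example: a minimal force_pte_cb sketch (hypothetical, not a callback
 * shipped by the kernel) that refuses block mappings for device memory,
 * so MMIO ranges are always mapped with PAGE_SIZE granularity:
 *
 *	static bool force_pte_for_device(u64 addr, u64 end,
 *					 enum kvm_pgtable_prot prot)
 *	{
 *		return prot & KVM_PGTABLE_PROT_DEVICE;
 *	}
 *
 * Such a callback can be passed to __kvm_pgtable_stage2_init(), declared
 * further down in this header.
 */
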
/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_IGNORE_EAGAIN:	Don't terminate the walk early if
 *					the walker returns -EAGAIN.
 * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries
 *					without break-before-make's
 *					TLB invalidation.
 * @KVM_PGTABLE_WALK_SKIP_CMO:		Visit and update table entries
 *					without the cache maintenance
 *					operations that would otherwise
 *					be required.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
	KVM_PGTABLE_WALK_SHARED			= BIT(3),
	KVM_PGTABLE_WALK_IGNORE_EAGAIN		= BIT(4),
	KVM_PGTABLE_WALK_SKIP_BBM_TLBI		= BIT(5),
	KVM_PGTABLE_WALK_SKIP_CMO		= BIT(6),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					start;
	u64					addr;
	u64					end;
	s8					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
{
	return rcu_dereference_raw(pteref);
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif
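
/*
 * Example: a minimal walker sketch, assuming a hypothetical visitor
 * 'count_valid_cb' and counter 'nr_valid'. A shared walk takes the RCU
 * read lock in kvm_pgtable_walk_begin() on the kernel side and is
 * rejected with -EPERM at hyp, where RCU is unavailable:
 *
 *	static int count_valid_cb(const struct kvm_pgtable_visit_ctx *ctx,
 *				  enum kvm_pgtable_walk_flags visit)
 *	{
 *		if (kvm_pte_valid(ctx->old))
 *			++*(u64 *)ctx->arg;
 *
 *		return 0;
 *	}
 *
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= count_valid_cb,
 *		.arg	= &nr_valid,
 *		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_SHARED,
 *	};
 */
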
/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @pkvm_mappings:	Tree of pKVM mappings, used in place of @pgd when the
 *			stage-2 page-tables are managed by the protected
 *			hypervisor.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
struct kvm_pgtable {
	union {
		struct rb_root_cached			pkvm_mappings;
		struct {
			u32				ia_bits;
			s8				start_level;
			kvm_pteref_t			pgd;
			struct kvm_pgtable_mm_ops	*mm_ops;

			/* Stage-2 only */
			enum kvm_pgtable_stage2_flags	flags;
			kvm_pgtable_force_pte_cb_t	force_pte_cb;
		};
	};
	struct kvm_s2_mmu				*mmu;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);
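
/*
 * Example: a minimal hypervisor stage-1 sketch, assuming a hypothetical
 * 'my_mm_ops' and a page-aligned physical address 'pa' to identity-map;
 * 48 VA bits is an arbitrary choice for illustration:
 *
 *	struct kvm_pgtable pgt;
 *	int ret;
 *
 *	ret = kvm_pgtable_hyp_init(&pgt, 48, &my_mm_ops);
 *	if (!ret)
 *		ret = kvm_pgtable_hyp_map(&pgt, pa, PAGE_SIZE, pa, PAGE_HYP);
 */
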
/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2.
 * @mmfr0:	Sanitised value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitised value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. The HA flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation.
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb: Function that returns true if page level mappings must
 *		be used instead of block mappings.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb);

static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
					  struct kvm_pgtable_mm_ops *mm_ops)
{
	return __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL);
}

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_destroy_range() - Destroy the page-table structures
 *					covering a range of addresses.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address of the range to destroy.
 * @size:	Size of the range.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
				      u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_destroy_pgd() - Destroy the PGD of guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * It is assumed that the rest of the page-table is freed before this operation.
 */
void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
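
/*
 * Example: a minimal stage-2 lifecycle sketch, assuming hypothetical
 * sanitised 'mmfr0'/'mmfr1' values, a 'phys_shift' for the IPA size and
 * a populated 'my_mm_ops'. The VTCR value determines the size of the
 * PGD allocation backing the table:
 *
 *	u64 vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
 *	size_t pgd_sz = kvm_pgtable_stage2_pgd_size(vtcr);
 *	struct kvm_pgtable pgt;
 *
 *	if (!kvm_pgtable_stage2_init(&pgt, mmu, &my_mm_ops)) {
 *		...
 *		kvm_pgtable_stage2_destroy(&pgt);
 *	}
 */
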
/**
 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);

/**
 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @phys:	Physical address of the memory to map.
 * @level:	Starting level of the stage-2 paging structure to be created.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @force_pte:	Force mappings to PAGE_SIZE granularity.
 *
 * Returns an unlinked page-table tree. This new page-table tree is
 * not reachable (i.e., it is unlinked) from the root pgd and is
 * therefore unreachable by the hardware page-table walker. No TLB
 * invalidation or CMOs are performed.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Return: The fully populated (unlinked) stage-2 paging structure, or
 * an ERR_PTR(error) on failure.
 */
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, s8 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it tries to recreate the exact same mapping or only changes the access
 * permissions. Instead, the vCPU will exit one more time from the guest if
 * still needed and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
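
/*
 * Example: a minimal mapping sketch, assuming a topped-up memory cache
 * 'memcache' (hypothetical) and suitably aligned 'ipa'/'pa'. A 2MiB
 * mapping at 2MiB alignment lets the walker install a block entry where
 * the level supports it:
 *
 *	ret = kvm_pgtable_stage2_map(pgt, ipa, SZ_2M, pa,
 *				     KVM_PGTABLE_PROT_RWX, memcache, 0);
 *
 * An -EAGAIN return indicates the aborted-update case described above;
 * the faulting vCPU can simply re-enter the guest and fault again.
 */
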
/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *				    without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 */
void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				enum kvm_pgtable_walk_flags flags);
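
/*
 * Example: a minimal access-fault sketch, assuming the faulting IPA in
 * 'fault_ipa'. Setting the access flag needs no TLB invalidation, in
 * contrast to kvm_pgtable_stage2_wrprotect() above, after which the
 * caller must invalidate (e.g. with kvm_tlb_flush_vmid_range(), declared
 * at the end of this header):
 *
 *	kvm_pgtable_stage2_mkyoung(pgt, fault_ipa, KVM_PGTABLE_WALK_SHARED);
 */
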
/**
 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
 *					   flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @size:	Size of the address range to visit.
 * @mkold:	True if the access flag should be cleared.
 *
 * The offset of @addr within a page is ignored.
 *
 * Tests and conditionally clears the access flag for every valid, leaf
 * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: True if any of the visited PTEs had the access flag set.
 */
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot,
				   enum kvm_pgtable_walk_flags flags);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point of
 *				Coherency for guest stage-2 address range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
 *				to PAGE_SIZE guest pages.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to split.
 * @size:	Size of the range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 *
 * The function tries to split any level 1 or 2 entry that overlaps
 * with the input range (given by @addr and @size).
 *
 * Return: 0 on success, negative error code on failure. Note that
 * kvm_pgtable_stage2_split() is best effort: it tries to break as many
 * blocks in the input range as the capacity of @mc allows.
 */
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc);
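
/*
 * Example: a minimal eager-splitting sketch for dirty logging, assuming
 * a pre-filled 'cache' of page-table pages (hypothetical). Splitting is
 * best effort, so blocks left unsplit when the cache runs dry are simply
 * broken later by write faults:
 *
 *	(void)kvm_pgtable_stage2_split(pgt, ipa, size, &cache);
 */
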
/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, s8 *level);

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);

/**
 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
 * @mmu:	Stage-2 KVM MMU struct
 * @addr:	The base intermediate physical address from which to invalidate
 * @size:	Size of the range from the base to invalidate
 */
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
			      phys_addr_t addr, size_t size);
#endif	/* __ARM64_KVM_PGTABLE_H__ */