/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_MMU_H__
#define __ASM_LOONGARCH_KVM_MMU_H__

#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached.
 */
#define KVM_MMU_CACHE_MIN_PAGES	(CONFIG_PGTABLE_LEVELS - 1)

/*
 * _PAGE_MODIFIED is a software pte bit. On the host kernel it records that
 * the page has ever been written; on the secondary MMU it records the page's
 * writeable attribute, for fast-path fault handling.
 */
#define KVM_PAGE_WRITEABLE	_PAGE_MODIFIED

#define _KVM_FLUSH_PGTABLE	0x1
#define _KVM_HAS_PGMASK		0x2
#define kvm_pfn_pte(pfn, prot)	(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define kvm_pte_pfn(x)		((phys_addr_t)(((x) & _PFN_MASK) >> PFN_PTE_SHIFT))

typedef unsigned long kvm_pte_t;
typedef struct kvm_ptw_ctx kvm_ptw_ctx;
typedef int (*kvm_pte_ops)(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx);

struct kvm_ptw_ctx {
	kvm_pte_ops ops;
	unsigned long flag;

	/* for kvm_arch_mmu_enable_log_dirty_pt_masked use */
	unsigned long mask;
	unsigned long gfn;

	/* page walk mmu info */
	unsigned int level;
	unsigned long pgtable_shift;
	unsigned long invalid_entry;
	unsigned long *invalid_ptes;
	unsigned int *pte_shifts;
	void *opaque;

	/* free pte table page list */
	struct list_head list;
};

kvm_pte_t *kvm_pgd_alloc(void);

static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
{
	WRITE_ONCE(*ptep, val);
}

static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }
static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & __WRITEABLE; }
static inline int kvm_pte_writeable(kvm_pte_t pte) { return pte & KVM_PAGE_WRITEABLE; }

static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
{
	return pte | _PAGE_ACCESSED;
}

static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte)
{
	return pte & ~_PAGE_ACCESSED;
}

static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
{
	return pte | __WRITEABLE;
}

static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
{
	return pte & ~__WRITEABLE;
}

static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
{
	return pte | _PAGE_HUGE;
}

static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
{
	return pte & ~_PAGE_HUGE;
}

static inline kvm_pte_t kvm_pte_mkwriteable(kvm_pte_t pte)
{
	return pte | KVM_PAGE_WRITEABLE;
}

static inline int kvm_need_flush(kvm_ptw_ctx *ctx)
{
	return ctx->flag & _KVM_FLUSH_PGTABLE;
}

static inline kvm_pte_t *kvm_pgtable_offset(kvm_ptw_ctx *ctx, kvm_pte_t *table,
					phys_addr_t addr)
{
	return table + ((addr >> ctx->pgtable_shift) & (PTRS_PER_PTE - 1));
}

static inline phys_addr_t kvm_pgtable_addr_end(kvm_ptw_ctx *ctx,
				phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t boundary, size;

	size = 0x1UL << ctx->pgtable_shift;
	boundary = (addr + size) & ~(size - 1);
	/* The -1 on both sides keeps the compare correct if boundary wraps to 0 */
	return (boundary - 1 < end - 1) ? boundary : end;
}

static inline int kvm_pte_present(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
{
	if (!ctx || ctx->level == 0)
		return !!(*entry & _PAGE_PRESENT);

	return *entry != ctx->invalid_entry;
}

static inline int kvm_pte_none(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
{
	return *entry == ctx->invalid_entry;
}

/* Descend one page-table level during a walk */
static inline void kvm_ptw_enter(kvm_ptw_ctx *ctx)
{
	ctx->level--;
	ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
	ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
}

/* Return to the parent page-table level during a walk */
static inline void kvm_ptw_exit(kvm_ptw_ctx *ctx)
{
	ctx->level++;
	ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
	ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
}

#endif /* __ASM_LOONGARCH_KVM_MMU_H__ */
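/*
 * Usage sketch (illustrative only, not part of the header proper): how the
 * helpers above typically compose into a recursive page-table walk, with
 * kvm_pgtable_offset() indexing the current level, kvm_pgtable_addr_end()
 * clamping each step, and kvm_ptw_enter()/kvm_ptw_exit() tracking the level.
 * This is a minimal sketch loosely modeled on the walker in
 * arch/loongarch/kvm/mmu.c; the function name kvm_ptw_sketch and the
 * child-table lookup below are assumptions, not the kernel's exact code.
 */
static int kvm_ptw_sketch(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end,
			  kvm_ptw_ctx *ctx)
{
	int ret;
	phys_addr_t next;
	kvm_pte_t *entry, *child;

	entry = kvm_pgtable_offset(ctx, dir, addr);
	do {
		/* Clamp this step to the current level's entry boundary */
		next = kvm_pgtable_addr_end(ctx, addr, end);
		if (!kvm_pte_present(ctx, entry) || ctx->level == 0 ||
		    kvm_pte_huge(*entry)) {
			/* Leaf (or absent) entry: apply the per-walk callback */
			ret = ctx->ops(entry, addr, ctx);
			if (ret)
				return ret;
		} else {
			/* Intermediate entry holds the child table pointer */
			child = (kvm_pte_t *)*entry;
			kvm_ptw_enter(ctx);	/* level--, refresh shift/invalid */
			ret = kvm_ptw_sketch(child, addr, next, ctx);
			kvm_ptw_exit(ctx);	/* level++, restore parent state */
			if (ret)
				return ret;
		}
	} while (entry++, addr = next, addr < end);

	return 0;
}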