/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 ARM Ltd.
 */
#ifndef __ASM_MTE_H
#define __ASM_MTE_H

#include <asm/compiler.h>
#include <asm/mte-def.h>

#ifndef __ASSEMBLY__

#include <linux/bitfield.h>
#include <linux/kasan-enabled.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <asm/pgtable-types.h>

void mte_clear_page_tags(void *addr);
unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
				      unsigned long n);
unsigned long mte_copy_tags_to_user(void __user *to, void *from,
				    unsigned long n);
int mte_save_tags(struct page *page);
void mte_save_page_tags(const void *page_addr, void *tag_storage);
void mte_restore_tags(swp_entry_t entry, struct page *page);
void mte_restore_page_tags(void *page_addr, const void *tag_storage);
void mte_invalidate_tags(int type, pgoff_t offset);
void mte_invalidate_tags_area(int type);
void *mte_allocate_tag_storage(void);
void mte_free_tag_storage(char *storage);

#ifdef CONFIG_ARM64_MTE

/* track which pages have valid allocation tags */
#define PG_mte_tagged	PG_arch_2
/* simple lock to avoid multiple threads tagging the same page */
#define PG_mte_lock	PG_arch_3

static inline void set_page_mte_tagged(struct page *page)
{
	VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));

	/*
	 * Ensure that the tags written prior to this function are visible
	 * before the page flags update.
	 */
	smp_wmb();
	set_bit(PG_mte_tagged, &page->flags);
}

static inline bool page_mte_tagged(struct page *page)
{
	bool ret = test_bit(PG_mte_tagged, &page->flags);

	VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));

	/*
	 * If the page is tagged, ensure ordering with a likely subsequent
	 * read of the tags.
	 */
	if (ret)
		smp_rmb();
	return ret;
}

/*
 * Lock the page for tagging and return 'true' if the page can be tagged,
 * 'false' if already tagged. PG_mte_tagged is never cleared and therefore the
 * locking only happens once for page initialisation.
 *
 * The page MTE lock state:
 *
 *   Locked:	PG_mte_lock && !PG_mte_tagged
 *   Unlocked:	!PG_mte_lock || PG_mte_tagged
 *
 * Acquire semantics only if the page is tagged (returning 'false').
 */
static inline bool try_page_mte_tagging(struct page *page)
{
	VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));

	if (!test_and_set_bit(PG_mte_lock, &page->flags))
		return true;

	/*
	 * The tags are either being initialised or may have been initialised
	 * already. Check if the PG_mte_tagged flag has been set or wait
	 * otherwise.
	 */
	smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));

	return false;
}
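
/*
 * Illustrative usage sketch (not part of the original header): callers of
 * the helpers above typically pair the lock with the tag initialisation so
 * that tags are cleared exactly once per page, e.g.:
 *
 *	if (try_page_mte_tagging(page)) {
 *		mte_clear_page_tags(page_address(page));
 *		set_page_mte_tagged(page);
 *	}
 *
 * Losing the race is harmless: the smp_cond_load_acquire() in
 * try_page_mte_tagging() waits until the winner has published the tags via
 * set_page_mte_tagged(), pairing with its smp_wmb().
 */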

void mte_zero_clear_page_tags(void *addr);
void mte_sync_tags(pte_t pte, unsigned int nr_pages);
void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
void mte_cpu_setup(void);
void mte_suspend_enter(void);
void mte_suspend_exit(void);
long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
			 unsigned long addr, unsigned long data);
size_t mte_probe_user_range(const char __user *uaddr, size_t size);

#else /* CONFIG_ARM64_MTE */

/* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged	0

static inline void set_page_mte_tagged(struct page *page)
{
}
static inline bool page_mte_tagged(struct page *page)
{
	return false;
}
static inline bool try_page_mte_tagging(struct page *page)
{
	return false;
}
static inline void mte_zero_clear_page_tags(void *addr)
{
}
static inline void mte_sync_tags(pte_t pte, unsigned int nr_pages)
{
}
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
{
}
static inline void mte_thread_init_user(void)
{
}
static inline void mte_thread_switch(struct task_struct *next)
{
}
static inline void mte_suspend_enter(void)
{
}
static inline void mte_suspend_exit(void)
{
}
static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
	return 0;
}
static inline long get_mte_ctrl(struct task_struct *task)
{
	return 0;
}
static inline int mte_ptrace_copy_tags(struct task_struct *child,
				       long request, unsigned long addr,
				       unsigned long data)
{
	return -EIO;
}

#endif /* CONFIG_ARM64_MTE */

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_ARM64_MTE)
static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
{
	VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));

	/*
	 * Ensure that the tags written prior to this function are visible
	 * before the folio flags update.
	 */
	smp_wmb();
	set_bit(PG_mte_tagged, &folio->flags);
}

static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
	bool ret = test_bit(PG_mte_tagged, &folio->flags);

	VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));

	/*
	 * If the folio is tagged, ensure ordering with a likely subsequent
	 * read of the tags.
	 */
	if (ret)
		smp_rmb();
	return ret;
}

static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
{
	VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));

	if (!test_and_set_bit(PG_mte_lock, &folio->flags))
		return true;

	/*
	 * The tags are either being initialised or may have been initialised
	 * already. Check if the PG_mte_tagged flag has been set or wait
	 * otherwise.
	 */
	smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));

	return false;
}
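
/*
 * Illustrative usage sketch (not part of the original header): the hugetlb
 * helpers follow the same once-only locking pattern as the per-page ones,
 * except that the single folio-wide flag covers every constituent page, so
 * a caller initialises tags for the whole folio, e.g.:
 *
 *	if (folio_try_hugetlb_mte_tagging(folio)) {
 *		long i;
 *
 *		for (i = 0; i < folio_nr_pages(folio); i++)
 *			mte_clear_page_tags(page_address(folio_page(folio, i)));
 *		folio_set_hugetlb_mte_tagged(folio);
 *	}
 */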
#else
static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
{
}

static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
	return false;
}

static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
{
	return false;
}
#endif

static inline void mte_disable_tco_entry(struct task_struct *task)
{
	if (!system_supports_mte())
		return;

	/*
	 * Re-enable tag checking (TCO set on exception entry). This is only
	 * necessary if MTE is enabled in either the kernel or the userspace
	 * task in synchronous or asymmetric mode (SCTLR_EL1.TCF0 bit 0 is set
	 * for both). With MTE disabled in the kernel and disabled or
	 * asynchronous in userspace, tag check faults (including in uaccesses)
	 * are not reported, therefore there is no need to re-enable checking.
	 * This is beneficial on microarchitectures where re-enabling TCO is
	 * expensive.
	 */
	if (kasan_hw_tags_enabled() ||
	    (task->thread.sctlr_user & (1UL << SCTLR_EL1_TCF0_SHIFT)))
		asm volatile(SET_PSTATE_TCO(0));
}

#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void);

static inline void mte_check_tfsr_entry(void)
{
	if (!kasan_hw_tags_enabled())
		return;

	mte_check_tfsr_el1();
}

static inline void mte_check_tfsr_exit(void)
{
	if (!kasan_hw_tags_enabled())
		return;

	/*
	 * The asynchronous faults are sync'ed automatically with
	 * TFSR_EL1 on kernel entry but for exit an explicit dsb()
	 * is required.
	 */
	dsb(nsh);
	isb();

	mte_check_tfsr_el1();
}
#else
static inline void mte_check_tfsr_el1(void)
{
}
static inline void mte_check_tfsr_entry(void)
{
}
static inline void mte_check_tfsr_exit(void)
{
}
#endif /* CONFIG_KASAN_HW_TAGS */

#endif /* __ASSEMBLY__ */
#endif /* __ASM_MTE_H */