// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited

#include <fcntl.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

#include <linux/auxvec.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <asm/hwcap.h>

#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"

#ifndef SA_EXPOSE_TAGBITS
#define SA_EXPOSE_TAGBITS 0x00000800
#endif

#define INIT_BUFFER_SIZE 256

struct mte_fault_cxt cur_mte_cxt;
bool mtefar_support;
bool mtestonly_support;
static unsigned int mte_cur_mode;
static unsigned int mte_cur_pstate_tco;
static bool mte_cur_stonly;

void mte_default_handler(int signum, siginfo_t *si, void *uc)
{
	struct sigaction sa;
	unsigned long addr = (unsigned long)si->si_addr;
	unsigned char si_tag, si_atag;

	sigaction(signum, NULL, &sa);

	if (sa.sa_flags & SA_EXPOSE_TAGBITS) {
		si_tag = MT_FETCH_TAG(addr);
		si_atag = MT_FETCH_ATAG(addr);
		addr = MT_CLEAR_TAGS(addr);
	} else {
		si_tag = 0;
		si_atag = 0;
	}

	if (signum == SIGSEGV) {
#ifdef DEBUG
		ksft_print_msg("INFO: SIGSEGV signal at pc=%llx, fault addr=%lx, si_code=%x, si_tag=%x, si_atag=%x\n",
			       ((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code, si_tag, si_atag);
#endif
		if (si->si_code == SEGV_MTEAERR) {
			if (cur_mte_cxt.trig_si_code == si->si_code)
				cur_mte_cxt.fault_valid = true;
			else
				ksft_print_msg("Got unexpected SEGV_MTEAERR at pc=%llx, fault addr=%lx\n",
					       ((ucontext_t *)uc)->uc_mcontext.pc,
					       addr);
			return;
		}
		/* Compare the context for precise error */
		else if (si->si_code == SEGV_MTESERR) {
			if ((!mtefar_support && si_atag) || (si_atag != MT_FETCH_ATAG(cur_mte_cxt.trig_addr))) {
				ksft_print_msg("Invalid MTE synchronous exception caught for address tag! si_tag=%x, si_atag: %x\n",
					       si_tag, si_atag);
				exit(KSFT_FAIL);
			}

			if (cur_mte_cxt.trig_si_code == si->si_code &&
			    ((cur_mte_cxt.trig_range >= 0 &&
			      addr >= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
			      addr <= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
			     (cur_mte_cxt.trig_range < 0 &&
			      addr <= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
			      addr >= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)))) {
				cur_mte_cxt.fault_valid = true;
				/* Adjust the pc by 4 */
				((ucontext_t *)uc)->uc_mcontext.pc += 4;
			} else {
				ksft_print_msg("Invalid MTE synchronous exception caught!\n");
				exit(KSFT_FAIL);
			}
		} else {
			ksft_print_msg("Unknown SIGSEGV exception caught!\n");
			exit(KSFT_FAIL);
		}
	} else if (signum == SIGBUS) {
		ksft_print_msg("INFO: SIGBUS signal at pc=%llx, fault addr=%lx, si_code=%x\n",
			       ((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
		if ((cur_mte_cxt.trig_range >= 0 &&
		     addr >= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
		     addr <= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
		    (cur_mte_cxt.trig_range < 0 &&
		     addr <= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
		     addr >= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range))) {
			cur_mte_cxt.fault_valid = true;
			/* Adjust the pc by 4 */
			((ucontext_t *)uc)->uc_mcontext.pc += 4;
		}
	}
}

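/*
 * Install a SA_SIGINFO handler for @signal. For SIGSEGV the caller may
 * additionally request SA_EXPOSE_TAGBITS so that mte_default_handler() can
 * extract the MTE tag and address tag bits from si_addr.
 */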
void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *),
			 bool export_tags)
{
	struct sigaction sa;

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;

	if (export_tags && signal == SIGSEGV)
		sa.sa_flags |= SA_EXPOSE_TAGBITS;

	sigemptyset(&sa.sa_mask);
	sigaction(signal, &sa, NULL);
}

void mte_wait_after_trig(void)
{
	sched_yield();
}

void *mte_insert_tags(void *ptr, size_t size)
{
	void *tag_ptr;
	int align_size;

	if (!ptr || (unsigned long)(ptr) & MT_ALIGN_GRANULE) {
		ksft_print_msg("FAIL: Addr=%p: invalid\n", ptr);
		return NULL;
	}
	align_size = MT_ALIGN_UP(size);
	tag_ptr = mte_insert_random_tag(ptr);
	mte_set_tag_address_range(tag_ptr, align_size);
	return tag_ptr;
}

void mte_clear_tags(void *ptr, size_t size)
{
	if (!ptr || (unsigned long)(ptr) & MT_ALIGN_GRANULE) {
		ksft_print_msg("FAIL: Addr=%p: invalid\n", ptr);
		return;
	}
	size = MT_ALIGN_UP(size);
	ptr = (void *)MT_CLEAR_TAG((unsigned long)ptr);
	mte_clear_tag_address_range(ptr, size);
}

void *mte_insert_atag(void *ptr)
{
	unsigned char atag;

	atag = mtefar_support ? (random() % MT_ATAG_MASK) + 1 : 0;
	return (void *)MT_SET_ATAG((unsigned long)ptr, atag);
}

void *mte_clear_atag(void *ptr)
{
	return (void *)MT_CLEAR_ATAG((unsigned long)ptr);
}

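/*
 * Back-end for the allocation helpers below: reserve @size bytes with
 * optional extra regions of @range_before and @range_after bytes around it,
 * using malloc(), an anonymous or file-backed mmap() with PROT_MTE, or
 * mmap() followed by mprotect(PROT_MTE), and optionally colour the buffer
 * with a random MTE tag via mte_insert_tags().
 */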
static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping,
					 size_t range_before, size_t range_after,
					 bool tags, int fd)
{
	void *ptr;
	int prot_flag, map_flag;
	size_t entire_size = size + range_before + range_after;

	switch (mem_type) {
	case USE_MALLOC:
		return malloc(entire_size) + range_before;
	case USE_MMAP:
	case USE_MPROTECT:
		break;
	default:
		ksft_print_msg("FAIL: Invalid allocate request\n");
		return NULL;
	}

	prot_flag = PROT_READ | PROT_WRITE;
	if (mem_type == USE_MMAP)
		prot_flag |= PROT_MTE;

	map_flag = mapping;
	if (fd == -1)
		map_flag = MAP_ANONYMOUS | map_flag;
	if (!(mapping & MAP_SHARED))
		map_flag |= MAP_PRIVATE;
	ptr = mmap(NULL, entire_size, prot_flag, map_flag, fd, 0);
	if (ptr == MAP_FAILED) {
		ksft_perror("mmap()");
		return NULL;
	}
	if (mem_type == USE_MPROTECT) {
		if (mprotect(ptr, entire_size, prot_flag | PROT_MTE)) {
			ksft_perror("mprotect(PROT_MTE)");
			/* Unmap the whole mapping, including the extra ranges */
			munmap(ptr, entire_size);
			return NULL;
		}
	}
	if (tags)
		ptr = mte_insert_tags(ptr + range_before, size);
	return ptr;
}

void *mte_allocate_memory_tag_range(size_t size, int mem_type, int mapping,
				    size_t range_before, size_t range_after)
{
	return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
					   range_after, true, -1);
}

void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags)
{
	return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, -1);
}

void *mte_allocate_file_memory(size_t size, int mem_type, int mapping, bool tags, int fd)
{
	int index;
	char buffer[INIT_BUFFER_SIZE];

	if (mem_type != USE_MPROTECT && mem_type != USE_MMAP) {
		ksft_print_msg("FAIL: Invalid mmap file request\n");
		return NULL;
	}
	/* Initialize the file for mappable size */
	lseek(fd, 0, SEEK_SET);
	for (index = INIT_BUFFER_SIZE; index < size; index += INIT_BUFFER_SIZE) {
		if (write(fd, buffer, INIT_BUFFER_SIZE) != INIT_BUFFER_SIZE) {
			ksft_perror("initialising buffer");
			return NULL;
		}
	}
	index -= INIT_BUFFER_SIZE;
	if (write(fd, buffer, size - index) != size - index) {
		ksft_perror("initialising buffer");
		return NULL;
	}
	return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, fd);
}

void *mte_allocate_file_memory_tag_range(size_t size, int mem_type, int mapping,
					 size_t range_before, size_t range_after, int fd)
{
	int index;
	char buffer[INIT_BUFFER_SIZE];
	int map_size = size + range_before + range_after;

	if (mem_type != USE_MPROTECT && mem_type != USE_MMAP) {
		ksft_print_msg("FAIL: Invalid mmap file request\n");
		return NULL;
	}
	/* Initialize the file for mappable size */
	lseek(fd, 0, SEEK_SET);
	for (index = INIT_BUFFER_SIZE; index < map_size; index += INIT_BUFFER_SIZE)
		if (write(fd, buffer, INIT_BUFFER_SIZE) != INIT_BUFFER_SIZE) {
			ksft_perror("initialising buffer");
			return NULL;
		}
	index -= INIT_BUFFER_SIZE;
	if (write(fd, buffer, map_size - index) != map_size - index) {
		ksft_perror("initialising buffer");
		return NULL;
	}
	return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
					   range_after, true, fd);
}

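/*
 * Counterpart of __mte_allocate_memory_range(): clear the MTE tags when
 * requested and release the whole region, including the extra ranges that
 * were allocated before and after the buffer.
 */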
static void __mte_free_memory_range(void *ptr, size_t size, int mem_type,
				    size_t range_before, size_t range_after, bool tags)
{
	switch (mem_type) {
	case USE_MALLOC:
		free(ptr - range_before);
		break;
	case USE_MMAP:
	case USE_MPROTECT:
		if (tags)
			mte_clear_tags(ptr, size);
		munmap(ptr - range_before, size + range_before + range_after);
		break;
	default:
		ksft_print_msg("FAIL: Invalid free request\n");
		break;
	}
}

void mte_free_memory_tag_range(void *ptr, size_t size, int mem_type,
			       size_t range_before, size_t range_after)
{
	__mte_free_memory_range(ptr, size, mem_type, range_before, range_after, true);
}

void mte_free_memory(void *ptr, size_t size, int mem_type, bool tags)
{
	__mte_free_memory_range(ptr, size, mem_type, 0, 0, tags);
}

void mte_initialize_current_context(int mode, uintptr_t ptr, ssize_t range)
{
	cur_mte_cxt.fault_valid = false;
	cur_mte_cxt.trig_addr = ptr;
	cur_mte_cxt.trig_range = range;
	if (mode == MTE_SYNC_ERR)
		cur_mte_cxt.trig_si_code = SEGV_MTESERR;
	else if (mode == MTE_ASYNC_ERR)
		cur_mte_cxt.trig_si_code = SEGV_MTEAERR;
	else
		cur_mte_cxt.trig_si_code = 0;
}

int mte_switch_mode(int mte_option, unsigned long incl_mask, bool stonly)
{
	unsigned long en = 0;

	switch (mte_option) {
	case MTE_NONE_ERR:
	case MTE_SYNC_ERR:
	case MTE_ASYNC_ERR:
		break;
	default:
		ksft_print_msg("FAIL: Invalid MTE option %x\n", mte_option);
		return -EINVAL;
	}

	if (incl_mask & ~MT_INCLUDE_TAG_MASK) {
		ksft_print_msg("FAIL: Invalid incl_mask %lx\n", incl_mask);
		return -EINVAL;
	}

	en = PR_TAGGED_ADDR_ENABLE;
	switch (mte_option) {
	case MTE_SYNC_ERR:
		en |= PR_MTE_TCF_SYNC;
		break;
	case MTE_ASYNC_ERR:
		en |= PR_MTE_TCF_ASYNC;
		break;
	case MTE_NONE_ERR:
		en |= PR_MTE_TCF_NONE;
		break;
	}

	if (mtestonly_support && stonly)
		en |= PR_MTE_STORE_ONLY;

	en |= (incl_mask << PR_MTE_TAG_SHIFT);
	/* Enable address tagging ABI, mte error reporting mode and tag inclusion mask. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, en, 0, 0, 0) != 0) {
		ksft_print_msg("FAIL: prctl PR_SET_TAGGED_ADDR_CTRL for mte mode\n");
		return -EINVAL;
	}
	return 0;
}

int mte_default_setup(void)
{
	unsigned long hwcaps2 = getauxval(AT_HWCAP2);
	unsigned long hwcaps3 = getauxval(AT_HWCAP3);
	unsigned long en = 0;
	int ret;

	/* To generate random address tag */
	srandom(time(NULL));

	if (!(hwcaps2 & HWCAP2_MTE))
		ksft_exit_skip("MTE features unavailable\n");

	mtefar_support = !!(hwcaps3 & HWCAP3_MTE_FAR);

	if (hwcaps3 & HWCAP3_MTE_STORE_ONLY)
		mtestonly_support = true;

	/* Get current mte mode */
	ret = prctl(PR_GET_TAGGED_ADDR_CTRL, en, 0, 0, 0);
	if (ret < 0) {
		ksft_print_msg("FAIL: prctl PR_GET_TAGGED_ADDR_CTRL with error = %d\n", ret);
		return KSFT_FAIL;
	}
	if (ret & PR_MTE_TCF_SYNC)
		mte_cur_mode = MTE_SYNC_ERR;
	else if (ret & PR_MTE_TCF_ASYNC)
		mte_cur_mode = MTE_ASYNC_ERR;
	else if (ret & PR_MTE_TCF_NONE)
		mte_cur_mode = MTE_NONE_ERR;

	mte_cur_stonly = (ret & PR_MTE_STORE_ONLY) ? true : false;

	mte_cur_pstate_tco = mte_get_pstate_tco();
	/* Disable PSTATE.TCO */
	mte_disable_pstate_tco();
	return 0;
}

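/*
 * Undo mte_default_setup(): restore the saved MTE error reporting mode and
 * store-only setting (with the MTE_ALLOW_NON_ZERO_TAG inclusion mask) and
 * put PSTATE.TCO back to the value captured at setup time.
 */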
void mte_restore_setup(void)
{
	mte_switch_mode(mte_cur_mode, MTE_ALLOW_NON_ZERO_TAG, mte_cur_stonly);
	if (mte_cur_pstate_tco == MT_PSTATE_TCO_EN)
		mte_enable_pstate_tco();
	else if (mte_cur_pstate_tco == MT_PSTATE_TCO_DIS)
		mte_disable_pstate_tco();
}

int create_temp_file(void)
{
	int fd;
	char filename[] = "/dev/shm/tmp_XXXXXX";

	/* Create a file in the tmpfs filesystem */
	fd = mkstemp(&filename[0]);
	if (fd == -1) {
		ksft_perror(filename);
		ksft_print_msg("FAIL: Unable to open temporary file\n");
		return 0;
	}
	unlink(&filename[0]);
	return fd;
}