1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2023 ARM Limited. 4 * Original author: Mark Brown <broonie@kernel.org> 5 */ 6 7 #define _GNU_SOURCE 8 9 #include <errno.h> 10 #include <stdbool.h> 11 #include <stddef.h> 12 #include <stdio.h> 13 #include <stdlib.h> 14 #include <string.h> 15 #include <unistd.h> 16 17 #include <sys/auxv.h> 18 #include <sys/prctl.h> 19 #include <sys/ptrace.h> 20 #include <sys/types.h> 21 #include <sys/uio.h> 22 #include <sys/wait.h> 23 24 #include <linux/kernel.h> 25 26 #include <asm/sigcontext.h> 27 #include <asm/sve_context.h> 28 #include <asm/ptrace.h> 29 30 #include "../../kselftest.h" 31 32 #include "fp-ptrace.h" 33 34 #include <linux/bits.h> 35 36 #define FPMR_LSCALE2_MASK GENMASK(37, 32) 37 #define FPMR_NSCALE_MASK GENMASK(31, 24) 38 #define FPMR_LSCALE_MASK GENMASK(22, 16) 39 #define FPMR_OSC_MASK GENMASK(15, 15) 40 #define FPMR_OSM_MASK GENMASK(14, 14) 41 42 /* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */ 43 #ifndef NT_ARM_SVE 44 #define NT_ARM_SVE 0x405 45 #endif 46 47 #ifndef NT_ARM_SSVE 48 #define NT_ARM_SSVE 0x40b 49 #endif 50 51 #ifndef NT_ARM_ZA 52 #define NT_ARM_ZA 0x40c 53 #endif 54 55 #ifndef NT_ARM_ZT 56 #define NT_ARM_ZT 0x40d 57 #endif 58 59 #ifndef NT_ARM_FPMR 60 #define NT_ARM_FPMR 0x40e 61 #endif 62 63 #define ARCH_VQ_MAX 256 64 65 /* VL 128..2048 in powers of 2 */ 66 #define MAX_NUM_VLS 5 67 68 /* 69 * FPMR bits we can set without doing feature checks to see if values 70 * are valid. 71 */ 72 #define FPMR_SAFE_BITS (FPMR_LSCALE2_MASK | FPMR_NSCALE_MASK | \ 73 FPMR_LSCALE_MASK | FPMR_OSC_MASK | FPMR_OSM_MASK) 74 75 #define NUM_FPR 32 76 __uint128_t v_in[NUM_FPR]; 77 __uint128_t v_expected[NUM_FPR]; 78 __uint128_t v_out[NUM_FPR]; 79 80 char z_in[__SVE_ZREGS_SIZE(ARCH_VQ_MAX)]; 81 char z_expected[__SVE_ZREGS_SIZE(ARCH_VQ_MAX)]; 82 char z_out[__SVE_ZREGS_SIZE(ARCH_VQ_MAX)]; 83 84 char p_in[__SVE_PREGS_SIZE(ARCH_VQ_MAX)]; 85 char p_expected[__SVE_PREGS_SIZE(ARCH_VQ_MAX)]; 86 char p_out[__SVE_PREGS_SIZE(ARCH_VQ_MAX)]; 87 88 char ffr_in[__SVE_PREG_SIZE(ARCH_VQ_MAX)]; 89 char ffr_expected[__SVE_PREG_SIZE(ARCH_VQ_MAX)]; 90 char ffr_out[__SVE_PREG_SIZE(ARCH_VQ_MAX)]; 91 92 char za_in[ZA_SIG_REGS_SIZE(ARCH_VQ_MAX)]; 93 char za_expected[ZA_SIG_REGS_SIZE(ARCH_VQ_MAX)]; 94 char za_out[ZA_SIG_REGS_SIZE(ARCH_VQ_MAX)]; 95 96 char zt_in[ZT_SIG_REG_BYTES]; 97 char zt_expected[ZT_SIG_REG_BYTES]; 98 char zt_out[ZT_SIG_REG_BYTES]; 99 100 uint64_t fpmr_in, fpmr_expected, fpmr_out; 101 102 uint64_t sve_vl_out; 103 uint64_t sme_vl_out; 104 uint64_t svcr_in, svcr_expected, svcr_out; 105 106 void load_and_save(int flags); 107 108 static bool got_alarm; 109 110 static void handle_alarm(int sig, siginfo_t *info, void *context) 111 { 112 got_alarm = true; 113 } 114 115 #ifdef CONFIG_CPU_BIG_ENDIAN 116 static __uint128_t arm64_cpu_to_le128(__uint128_t x) 117 { 118 u64 a = swab64(x); 119 u64 b = swab64(x >> 64); 120 121 return ((__uint128_t)a << 64) | b; 122 } 123 #else 124 static __uint128_t arm64_cpu_to_le128(__uint128_t x) 125 { 126 return x; 127 } 128 #endif 129 130 #define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x) 131 132 static bool sve_supported(void) 133 { 134 return getauxval(AT_HWCAP) & HWCAP_SVE; 135 } 136 137 static bool sme_supported(void) 138 { 139 return getauxval(AT_HWCAP2) & HWCAP2_SME; 140 } 141 142 static bool sme2_supported(void) 143 { 144 return getauxval(AT_HWCAP2) & HWCAP2_SME2; 145 } 146 147 static bool fa64_supported(void) 148 { 149 return getauxval(AT_HWCAP2) & HWCAP2_SME_FA64; 150 } 151 152 static bool fpmr_supported(void) 153 { 154 return getauxval(AT_HWCAP2) & HWCAP2_FPMR; 155 } 156 157 static bool compare_buffer(const char *name, void *out, 158 void *expected, size_t size) 159 { 160 void *tmp; 161 162 if (memcmp(out, expected, size) == 0) 163 return true; 164 165 ksft_print_msg("Mismatch in %s\n", name); 166 167 /* Did we just get zeros back? */ 168 tmp = malloc(size); 169 if (!tmp) { 170 ksft_print_msg("OOM allocating %lu bytes for %s\n", 171 size, name); 172 ksft_exit_fail(); 173 } 174 memset(tmp, 0, size); 175 176 if (memcmp(out, tmp, size) == 0) 177 ksft_print_msg("%s is zero\n", name); 178 179 free(tmp); 180 181 return false; 182 } 183 184 struct test_config { 185 int sve_vl_in; 186 int sve_vl_expected; 187 int sme_vl_in; 188 int sme_vl_expected; 189 int svcr_in; 190 int svcr_expected; 191 }; 192 193 struct test_definition { 194 const char *name; 195 bool sve_vl_change; 196 bool (*supported)(struct test_config *config); 197 void (*set_expected_values)(struct test_config *config); 198 void (*modify_values)(pid_t child, struct test_config *test_config); 199 }; 200 201 static int vl_in(struct test_config *config) 202 { 203 int vl; 204 205 if (config->svcr_in & SVCR_SM) 206 vl = config->sme_vl_in; 207 else 208 vl = config->sve_vl_in; 209 210 return vl; 211 } 212 213 static int vl_expected(struct test_config *config) 214 { 215 int vl; 216 217 if (config->svcr_expected & SVCR_SM) 218 vl = config->sme_vl_expected; 219 else 220 vl = config->sve_vl_expected; 221 222 return vl; 223 } 224 225 static void run_child(struct test_config *config) 226 { 227 int ret, flags; 228 229 /* Let the parent attach to us */ 230 ret = ptrace(PTRACE_TRACEME, 0, 0, 0); 231 if (ret < 0) 232 ksft_exit_fail_msg("PTRACE_TRACEME failed: %s (%d)\n", 233 strerror(errno), errno); 234 235 /* VL setup */ 236 if (sve_supported()) { 237 ret = prctl(PR_SVE_SET_VL, config->sve_vl_in); 238 if (ret != config->sve_vl_in) { 239 ksft_print_msg("Failed to set SVE VL %d: %d\n", 240 config->sve_vl_in, ret); 241 } 242 } 243 244 if (sme_supported()) { 245 ret = prctl(PR_SME_SET_VL, config->sme_vl_in); 246 if (ret != config->sme_vl_in) { 247 ksft_print_msg("Failed to set SME VL %d: %d\n", 248 config->sme_vl_in, ret); 249 } 250 } 251 252 /* Load values and wait for the parent */ 253 flags = 0; 254 if (sve_supported()) 255 flags |= HAVE_SVE; 256 if (sme_supported()) 257 flags |= HAVE_SME; 258 if (sme2_supported()) 259 flags |= HAVE_SME2; 260 if (fa64_supported()) 261 flags |= HAVE_FA64; 262 if (fpmr_supported()) 263 flags |= HAVE_FPMR; 264 265 load_and_save(flags); 266 267 exit(0); 268 } 269 270 static void read_one_child_regs(pid_t child, char *name, 271 struct iovec *iov_parent, 272 struct iovec *iov_child) 273 { 274 int len = iov_parent->iov_len; 275 int ret; 276 277 ret = process_vm_readv(child, iov_parent, 1, iov_child, 1, 0); 278 if (ret == -1) 279 ksft_print_msg("%s read failed: %s (%d)\n", 280 name, strerror(errno), errno); 281 else if (ret != len) 282 ksft_print_msg("Short read of %s: %d\n", name, ret); 283 } 284 285 static void read_child_regs(pid_t child) 286 { 287 struct iovec iov_parent, iov_child; 288 289 /* 290 * Since the child fork()ed from us the buffer addresses are 291 * the same in parent and child. 292 */ 293 iov_parent.iov_base = &v_out; 294 iov_parent.iov_len = sizeof(v_out); 295 iov_child.iov_base = &v_out; 296 iov_child.iov_len = sizeof(v_out); 297 read_one_child_regs(child, "FPSIMD", &iov_parent, &iov_child); 298 299 if (sve_supported() || sme_supported()) { 300 iov_parent.iov_base = &sve_vl_out; 301 iov_parent.iov_len = sizeof(sve_vl_out); 302 iov_child.iov_base = &sve_vl_out; 303 iov_child.iov_len = sizeof(sve_vl_out); 304 read_one_child_regs(child, "SVE VL", &iov_parent, &iov_child); 305 306 iov_parent.iov_base = &z_out; 307 iov_parent.iov_len = sizeof(z_out); 308 iov_child.iov_base = &z_out; 309 iov_child.iov_len = sizeof(z_out); 310 read_one_child_regs(child, "Z", &iov_parent, &iov_child); 311 312 iov_parent.iov_base = &p_out; 313 iov_parent.iov_len = sizeof(p_out); 314 iov_child.iov_base = &p_out; 315 iov_child.iov_len = sizeof(p_out); 316 read_one_child_regs(child, "P", &iov_parent, &iov_child); 317 318 iov_parent.iov_base = &ffr_out; 319 iov_parent.iov_len = sizeof(ffr_out); 320 iov_child.iov_base = &ffr_out; 321 iov_child.iov_len = sizeof(ffr_out); 322 read_one_child_regs(child, "FFR", &iov_parent, &iov_child); 323 } 324 325 if (sme_supported()) { 326 iov_parent.iov_base = &sme_vl_out; 327 iov_parent.iov_len = sizeof(sme_vl_out); 328 iov_child.iov_base = &sme_vl_out; 329 iov_child.iov_len = sizeof(sme_vl_out); 330 read_one_child_regs(child, "SME VL", &iov_parent, &iov_child); 331 332 iov_parent.iov_base = &svcr_out; 333 iov_parent.iov_len = sizeof(svcr_out); 334 iov_child.iov_base = &svcr_out; 335 iov_child.iov_len = sizeof(svcr_out); 336 read_one_child_regs(child, "SVCR", &iov_parent, &iov_child); 337 338 iov_parent.iov_base = &za_out; 339 iov_parent.iov_len = sizeof(za_out); 340 iov_child.iov_base = &za_out; 341 iov_child.iov_len = sizeof(za_out); 342 read_one_child_regs(child, "ZA", &iov_parent, &iov_child); 343 } 344 345 if (sme2_supported()) { 346 iov_parent.iov_base = &zt_out; 347 iov_parent.iov_len = sizeof(zt_out); 348 iov_child.iov_base = &zt_out; 349 iov_child.iov_len = sizeof(zt_out); 350 read_one_child_regs(child, "ZT", &iov_parent, &iov_child); 351 } 352 353 if (fpmr_supported()) { 354 iov_parent.iov_base = &fpmr_out; 355 iov_parent.iov_len = sizeof(fpmr_out); 356 iov_child.iov_base = &fpmr_out; 357 iov_child.iov_len = sizeof(fpmr_out); 358 read_one_child_regs(child, "FPMR", &iov_parent, &iov_child); 359 } 360 } 361 362 static bool continue_breakpoint(pid_t child, 363 enum __ptrace_request restart_type) 364 { 365 struct user_pt_regs pt_regs; 366 struct iovec iov; 367 int ret; 368 369 /* Get PC */ 370 iov.iov_base = &pt_regs; 371 iov.iov_len = sizeof(pt_regs); 372 ret = ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov); 373 if (ret < 0) { 374 ksft_print_msg("Failed to get PC: %s (%d)\n", 375 strerror(errno), errno); 376 return false; 377 } 378 379 /* Skip over the BRK */ 380 pt_regs.pc += 4; 381 ret = ptrace(PTRACE_SETREGSET, child, NT_PRSTATUS, &iov); 382 if (ret < 0) { 383 ksft_print_msg("Failed to skip BRK: %s (%d)\n", 384 strerror(errno), errno); 385 return false; 386 } 387 388 /* Restart */ 389 ret = ptrace(restart_type, child, 0, 0); 390 if (ret < 0) { 391 ksft_print_msg("Failed to restart child: %s (%d)\n", 392 strerror(errno), errno); 393 return false; 394 } 395 396 return true; 397 } 398 399 static bool check_ptrace_values_sve(pid_t child, struct test_config *config) 400 { 401 struct user_sve_header *sve; 402 struct user_fpsimd_state *fpsimd; 403 struct iovec iov; 404 int ret, vq; 405 bool pass = true; 406 407 if (!sve_supported()) 408 return true; 409 410 vq = __sve_vq_from_vl(config->sve_vl_in); 411 412 iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE); 413 iov.iov_base = malloc(iov.iov_len); 414 if (!iov.iov_base) { 415 ksft_print_msg("OOM allocating %lu byte SVE buffer\n", 416 iov.iov_len); 417 return false; 418 } 419 420 ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_SVE, &iov); 421 if (ret != 0) { 422 ksft_print_msg("Failed to read initial SVE: %s (%d)\n", 423 strerror(errno), errno); 424 pass = false; 425 goto out; 426 } 427 428 sve = iov.iov_base; 429 430 if (sve->vl != config->sve_vl_in) { 431 ksft_print_msg("Mismatch in initial SVE VL: %d != %d\n", 432 sve->vl, config->sve_vl_in); 433 pass = false; 434 } 435 436 /* If we are in streaming mode we should just read FPSIMD */ 437 if ((config->svcr_in & SVCR_SM) && (sve->flags & SVE_PT_REGS_SVE)) { 438 ksft_print_msg("NT_ARM_SVE reports SVE with PSTATE.SM\n"); 439 pass = false; 440 } 441 442 if (svcr_in & SVCR_SM) { 443 if (sve->size != sizeof(sve)) { 444 ksft_print_msg("NT_ARM_SVE reports data with PSTATE.SM\n"); 445 pass = false; 446 } 447 } else { 448 if (sve->size != SVE_PT_SIZE(vq, sve->flags)) { 449 ksft_print_msg("Mismatch in SVE header size: %d != %lu\n", 450 sve->size, SVE_PT_SIZE(vq, sve->flags)); 451 pass = false; 452 } 453 } 454 455 /* The registers might be in completely different formats! */ 456 if (sve->flags & SVE_PT_REGS_SVE) { 457 if (!compare_buffer("initial SVE Z", 458 iov.iov_base + SVE_PT_SVE_ZREG_OFFSET(vq, 0), 459 z_in, SVE_PT_SVE_ZREGS_SIZE(vq))) 460 pass = false; 461 462 if (!compare_buffer("initial SVE P", 463 iov.iov_base + SVE_PT_SVE_PREG_OFFSET(vq, 0), 464 p_in, SVE_PT_SVE_PREGS_SIZE(vq))) 465 pass = false; 466 467 if (!compare_buffer("initial SVE FFR", 468 iov.iov_base + SVE_PT_SVE_FFR_OFFSET(vq), 469 ffr_in, SVE_PT_SVE_PREG_SIZE(vq))) 470 pass = false; 471 } else { 472 fpsimd = iov.iov_base + SVE_PT_FPSIMD_OFFSET; 473 if (!compare_buffer("initial V via SVE", &fpsimd->vregs[0], 474 v_in, sizeof(v_in))) 475 pass = false; 476 } 477 478 out: 479 free(iov.iov_base); 480 return pass; 481 } 482 483 static bool check_ptrace_values_ssve(pid_t child, struct test_config *config) 484 { 485 struct user_sve_header *sve; 486 struct user_fpsimd_state *fpsimd; 487 struct iovec iov; 488 int ret, vq; 489 bool pass = true; 490 491 if (!sme_supported()) 492 return true; 493 494 vq = __sve_vq_from_vl(config->sme_vl_in); 495 496 iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE); 497 iov.iov_base = malloc(iov.iov_len); 498 if (!iov.iov_base) { 499 ksft_print_msg("OOM allocating %lu byte SSVE buffer\n", 500 iov.iov_len); 501 return false; 502 } 503 504 ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_SSVE, &iov); 505 if (ret != 0) { 506 ksft_print_msg("Failed to read initial SSVE: %s (%d)\n", 507 strerror(errno), errno); 508 pass = false; 509 goto out; 510 } 511 512 sve = iov.iov_base; 513 514 if (sve->vl != config->sme_vl_in) { 515 ksft_print_msg("Mismatch in initial SSVE VL: %d != %d\n", 516 sve->vl, config->sme_vl_in); 517 pass = false; 518 } 519 520 if ((config->svcr_in & SVCR_SM) && !(sve->flags & SVE_PT_REGS_SVE)) { 521 ksft_print_msg("NT_ARM_SSVE reports FPSIMD with PSTATE.SM\n"); 522 pass = false; 523 } 524 525 if (!(svcr_in & SVCR_SM)) { 526 if (sve->size != sizeof(sve)) { 527 ksft_print_msg("NT_ARM_SSVE reports data without PSTATE.SM\n"); 528 pass = false; 529 } 530 } else { 531 if (sve->size != SVE_PT_SIZE(vq, sve->flags)) { 532 ksft_print_msg("Mismatch in SSVE header size: %d != %lu\n", 533 sve->size, SVE_PT_SIZE(vq, sve->flags)); 534 pass = false; 535 } 536 } 537 538 /* The registers might be in completely different formats! */ 539 if (sve->flags & SVE_PT_REGS_SVE) { 540 if (!compare_buffer("initial SSVE Z", 541 iov.iov_base + SVE_PT_SVE_ZREG_OFFSET(vq, 0), 542 z_in, SVE_PT_SVE_ZREGS_SIZE(vq))) 543 pass = false; 544 545 if (!compare_buffer("initial SSVE P", 546 iov.iov_base + SVE_PT_SVE_PREG_OFFSET(vq, 0), 547 p_in, SVE_PT_SVE_PREGS_SIZE(vq))) 548 pass = false; 549 550 if (!compare_buffer("initial SSVE FFR", 551 iov.iov_base + SVE_PT_SVE_FFR_OFFSET(vq), 552 ffr_in, SVE_PT_SVE_PREG_SIZE(vq))) 553 pass = false; 554 } else { 555 fpsimd = iov.iov_base + SVE_PT_FPSIMD_OFFSET; 556 if (!compare_buffer("initial V via SSVE", 557 &fpsimd->vregs[0], v_in, sizeof(v_in))) 558 pass = false; 559 } 560 561 out: 562 free(iov.iov_base); 563 return pass; 564 } 565 566 static bool check_ptrace_values_za(pid_t child, struct test_config *config) 567 { 568 struct user_za_header *za; 569 struct iovec iov; 570 int ret, vq; 571 bool pass = true; 572 573 if (!sme_supported()) 574 return true; 575 576 vq = __sve_vq_from_vl(config->sme_vl_in); 577 578 iov.iov_len = ZA_SIG_CONTEXT_SIZE(vq); 579 iov.iov_base = malloc(iov.iov_len); 580 if (!iov.iov_base) { 581 ksft_print_msg("OOM allocating %lu byte ZA buffer\n", 582 iov.iov_len); 583 return false; 584 } 585 586 ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_ZA, &iov); 587 if (ret != 0) { 588 ksft_print_msg("Failed to read initial ZA: %s (%d)\n", 589 strerror(errno), errno); 590 pass = false; 591 goto out; 592 } 593 594 za = iov.iov_base; 595 596 if (za->vl != config->sme_vl_in) { 597 ksft_print_msg("Mismatch in initial SME VL: %d != %d\n", 598 za->vl, config->sme_vl_in); 599 pass = false; 600 } 601 602 /* If PSTATE.ZA is not set we should just read the header */ 603 if (config->svcr_in & SVCR_ZA) { 604 if (za->size != ZA_PT_SIZE(vq)) { 605 ksft_print_msg("Unexpected ZA ptrace read size: %d != %lu\n", 606 za->size, ZA_PT_SIZE(vq)); 607 pass = false; 608 } 609 610 if (!compare_buffer("initial ZA", 611 iov.iov_base + ZA_PT_ZA_OFFSET, 612 za_in, ZA_PT_ZA_SIZE(vq))) 613 pass = false; 614 } else { 615 if (za->size != sizeof(*za)) { 616 ksft_print_msg("Unexpected ZA ptrace read size: %d != %lu\n", 617 za->size, sizeof(*za)); 618 pass = false; 619 } 620 } 621 622 out: 623 free(iov.iov_base); 624 return pass; 625 } 626 627 static bool check_ptrace_values_zt(pid_t child, struct test_config *config) 628 { 629 uint8_t buf[512]; 630 struct iovec iov; 631 int ret; 632 633 if (!sme2_supported()) 634 return true; 635 636 iov.iov_base = &buf; 637 iov.iov_len = ZT_SIG_REG_BYTES; 638 ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_ZT, &iov); 639 if (ret != 0) { 640 ksft_print_msg("Failed to read initial ZT: %s (%d)\n", 641 strerror(errno), errno); 642 return false; 643 } 644 645 return compare_buffer("initial ZT", buf, zt_in, ZT_SIG_REG_BYTES); 646 } 647 648 static bool check_ptrace_values_fpmr(pid_t child, struct test_config *config) 649 { 650 uint64_t val; 651 struct iovec iov; 652 int ret; 653 654 if (!fpmr_supported()) 655 return true; 656 657 iov.iov_base = &val; 658 iov.iov_len = sizeof(val); 659 ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_FPMR, &iov); 660 if (ret != 0) { 661 ksft_print_msg("Failed to read initial FPMR: %s (%d)\n", 662 strerror(errno), errno); 663 return false; 664 } 665 666 return compare_buffer("initial FPMR", &val, &fpmr_in, sizeof(val)); 667 } 668 669 static bool check_ptrace_values(pid_t child, struct test_config *config) 670 { 671 bool pass = true; 672 struct user_fpsimd_state fpsimd; 673 struct iovec iov; 674 int ret; 675 676 iov.iov_base = &fpsimd; 677 iov.iov_len = sizeof(fpsimd); 678 ret = ptrace(PTRACE_GETREGSET, child, NT_PRFPREG, &iov); 679 if (ret == 0) { 680 if (!compare_buffer("initial V", &fpsimd.vregs, v_in, 681 sizeof(v_in))) { 682 pass = false; 683 } 684 } else { 685 ksft_print_msg("Failed to read initial V: %s (%d)\n", 686 strerror(errno), errno); 687 pass = false; 688 } 689 690 if (!check_ptrace_values_sve(child, config)) 691 pass = false; 692 693 if (!check_ptrace_values_ssve(child, config)) 694 pass = false; 695 696 if (!check_ptrace_values_za(child, config)) 697 pass = false; 698 699 if (!check_ptrace_values_zt(child, config)) 700 pass = false; 701 702 if (!check_ptrace_values_fpmr(child, config)) 703 pass = false; 704 705 return pass; 706 } 707 708 static bool run_parent(pid_t child, struct test_definition *test, 709 struct test_config *config) 710 { 711 int wait_status, ret; 712 pid_t pid; 713 bool pass; 714 715 /* Initial attach */ 716 while (1) { 717 pid = waitpid(child, &wait_status, 0); 718 if (pid < 0) { 719 if (errno == EINTR) 720 continue; 721 ksft_exit_fail_msg("waitpid() failed: %s (%d)\n", 722 strerror(errno), errno); 723 } 724 725 if (pid == child) 726 break; 727 } 728 729 if (WIFEXITED(wait_status)) { 730 ksft_print_msg("Child exited loading values with status %d\n", 731 WEXITSTATUS(wait_status)); 732 pass = false; 733 goto out; 734 } 735 736 if (WIFSIGNALED(wait_status)) { 737 ksft_print_msg("Child died from signal %d loading values\n", 738 WTERMSIG(wait_status)); 739 pass = false; 740 goto out; 741 } 742 743 /* Read initial values via ptrace */ 744 pass = check_ptrace_values(child, config); 745 746 /* Do whatever writes we want to do */ 747 if (test->modify_values) 748 test->modify_values(child, config); 749 750 if (!continue_breakpoint(child, PTRACE_CONT)) 751 goto cleanup; 752 753 while (1) { 754 pid = waitpid(child, &wait_status, 0); 755 if (pid < 0) { 756 if (errno == EINTR) 757 continue; 758 ksft_exit_fail_msg("waitpid() failed: %s (%d)\n", 759 strerror(errno), errno); 760 } 761 762 if (pid == child) 763 break; 764 } 765 766 if (WIFEXITED(wait_status)) { 767 ksft_print_msg("Child exited saving values with status %d\n", 768 WEXITSTATUS(wait_status)); 769 pass = false; 770 goto out; 771 } 772 773 if (WIFSIGNALED(wait_status)) { 774 ksft_print_msg("Child died from signal %d saving values\n", 775 WTERMSIG(wait_status)); 776 pass = false; 777 goto out; 778 } 779 780 /* See what happened as a result */ 781 read_child_regs(child); 782 783 if (!continue_breakpoint(child, PTRACE_DETACH)) 784 goto cleanup; 785 786 /* The child should exit cleanly */ 787 got_alarm = false; 788 alarm(1); 789 while (1) { 790 if (got_alarm) { 791 ksft_print_msg("Wait for child timed out\n"); 792 goto cleanup; 793 } 794 795 pid = waitpid(child, &wait_status, 0); 796 if (pid < 0) { 797 if (errno == EINTR) 798 continue; 799 ksft_exit_fail_msg("waitpid() failed: %s (%d)\n", 800 strerror(errno), errno); 801 } 802 803 if (pid == child) 804 break; 805 } 806 alarm(0); 807 808 if (got_alarm) { 809 ksft_print_msg("Timed out waiting for child\n"); 810 pass = false; 811 goto cleanup; 812 } 813 814 if (pid == child && WIFSIGNALED(wait_status)) { 815 ksft_print_msg("Child died from signal %d cleaning up\n", 816 WTERMSIG(wait_status)); 817 pass = false; 818 goto out; 819 } 820 821 if (pid == child && WIFEXITED(wait_status)) { 822 if (WEXITSTATUS(wait_status) != 0) { 823 ksft_print_msg("Child exited with error %d\n", 824 WEXITSTATUS(wait_status)); 825 pass = false; 826 } 827 } else { 828 ksft_print_msg("Child did not exit cleanly\n"); 829 pass = false; 830 goto cleanup; 831 } 832 833 goto out; 834 835 cleanup: 836 ret = kill(child, SIGKILL); 837 if (ret != 0) { 838 ksft_print_msg("kill() failed: %s (%d)\n", 839 strerror(errno), errno); 840 return false; 841 } 842 843 while (1) { 844 pid = waitpid(child, &wait_status, 0); 845 if (pid < 0) { 846 if (errno == EINTR) 847 continue; 848 ksft_exit_fail_msg("waitpid() failed: %s (%d)\n", 849 strerror(errno), errno); 850 } 851 852 if (pid == child) 853 break; 854 } 855 856 out: 857 return pass; 858 } 859 860 static void fill_random(void *buf, size_t size) 861 { 862 int i; 863 uint32_t *lbuf = buf; 864 865 /* random() returns a 32 bit number regardless of the size of long */ 866 for (i = 0; i < size / sizeof(uint32_t); i++) 867 lbuf[i] = random(); 868 } 869 870 static void fill_random_ffr(void *buf, size_t vq) 871 { 872 uint8_t *lbuf = buf; 873 int bits, i; 874 875 /* 876 * Only values with a continuous set of 0..n bits set are 877 * valid for FFR, set all bits then clear a random number of 878 * high bits. 879 */ 880 memset(buf, 0, __SVE_FFR_SIZE(vq)); 881 882 bits = random() % (__SVE_FFR_SIZE(vq) * 8); 883 for (i = 0; i < bits / 8; i++) 884 lbuf[i] = 0xff; 885 if (bits / 8 != __SVE_FFR_SIZE(vq)) 886 lbuf[i] = (1 << (bits % 8)) - 1; 887 } 888 889 static void fpsimd_to_sve(__uint128_t *v, char *z, int vl) 890 { 891 int vq = __sve_vq_from_vl(vl); 892 int i; 893 __uint128_t *p; 894 895 if (!vl) 896 return; 897 898 for (i = 0; i < __SVE_NUM_ZREGS; i++) { 899 p = (__uint128_t *)&z[__SVE_ZREG_OFFSET(vq, i)]; 900 *p = arm64_cpu_to_le128(v[i]); 901 } 902 } 903 904 static void set_initial_values(struct test_config *config) 905 { 906 int vq = __sve_vq_from_vl(vl_in(config)); 907 int sme_vq = __sve_vq_from_vl(config->sme_vl_in); 908 909 svcr_in = config->svcr_in; 910 svcr_expected = config->svcr_expected; 911 svcr_out = 0; 912 913 fill_random(&v_in, sizeof(v_in)); 914 memcpy(v_expected, v_in, sizeof(v_in)); 915 memset(v_out, 0, sizeof(v_out)); 916 917 /* Changes will be handled in the test case */ 918 if (sve_supported() || (config->svcr_in & SVCR_SM)) { 919 /* The low 128 bits of Z are shared with the V registers */ 920 fill_random(&z_in, __SVE_ZREGS_SIZE(vq)); 921 fpsimd_to_sve(v_in, z_in, vl_in(config)); 922 memcpy(z_expected, z_in, __SVE_ZREGS_SIZE(vq)); 923 memset(z_out, 0, sizeof(z_out)); 924 925 fill_random(&p_in, __SVE_PREGS_SIZE(vq)); 926 memcpy(p_expected, p_in, __SVE_PREGS_SIZE(vq)); 927 memset(p_out, 0, sizeof(p_out)); 928 929 if ((config->svcr_in & SVCR_SM) && !fa64_supported()) 930 memset(ffr_in, 0, __SVE_PREG_SIZE(vq)); 931 else 932 fill_random_ffr(&ffr_in, vq); 933 memcpy(ffr_expected, ffr_in, __SVE_PREG_SIZE(vq)); 934 memset(ffr_out, 0, __SVE_PREG_SIZE(vq)); 935 } 936 937 if (config->svcr_in & SVCR_ZA) 938 fill_random(za_in, ZA_SIG_REGS_SIZE(sme_vq)); 939 else 940 memset(za_in, 0, ZA_SIG_REGS_SIZE(sme_vq)); 941 if (config->svcr_expected & SVCR_ZA) 942 memcpy(za_expected, za_in, ZA_SIG_REGS_SIZE(sme_vq)); 943 else 944 memset(za_expected, 0, ZA_SIG_REGS_SIZE(sme_vq)); 945 if (sme_supported()) 946 memset(za_out, 0, sizeof(za_out)); 947 948 if (sme2_supported()) { 949 if (config->svcr_in & SVCR_ZA) 950 fill_random(zt_in, ZT_SIG_REG_BYTES); 951 else 952 memset(zt_in, 0, ZT_SIG_REG_BYTES); 953 if (config->svcr_expected & SVCR_ZA) 954 memcpy(zt_expected, zt_in, ZT_SIG_REG_BYTES); 955 else 956 memset(zt_expected, 0, ZT_SIG_REG_BYTES); 957 memset(zt_out, 0, sizeof(zt_out)); 958 } 959 960 if (fpmr_supported()) { 961 fill_random(&fpmr_in, sizeof(fpmr_in)); 962 fpmr_in &= FPMR_SAFE_BITS; 963 fpmr_expected = fpmr_in; 964 } else { 965 fpmr_in = 0; 966 fpmr_expected = 0; 967 fpmr_out = 0; 968 } 969 } 970 971 static bool check_memory_values(struct test_config *config) 972 { 973 bool pass = true; 974 int vq, sme_vq; 975 976 if (!compare_buffer("saved V", v_out, v_expected, sizeof(v_out))) 977 pass = false; 978 979 vq = __sve_vq_from_vl(vl_expected(config)); 980 sme_vq = __sve_vq_from_vl(config->sme_vl_expected); 981 982 if (svcr_out != svcr_expected) { 983 ksft_print_msg("Mismatch in saved SVCR %lx != %lx\n", 984 svcr_out, svcr_expected); 985 pass = false; 986 } 987 988 if (sve_vl_out != config->sve_vl_expected) { 989 ksft_print_msg("Mismatch in SVE VL: %ld != %d\n", 990 sve_vl_out, config->sve_vl_expected); 991 pass = false; 992 } 993 994 if (sme_vl_out != config->sme_vl_expected) { 995 ksft_print_msg("Mismatch in SME VL: %ld != %d\n", 996 sme_vl_out, config->sme_vl_expected); 997 pass = false; 998 } 999 1000 if (!compare_buffer("saved Z", z_out, z_expected, 1001 __SVE_ZREGS_SIZE(vq))) 1002 pass = false; 1003 1004 if (!compare_buffer("saved P", p_out, p_expected, 1005 __SVE_PREGS_SIZE(vq))) 1006 pass = false; 1007 1008 if (!compare_buffer("saved FFR", ffr_out, ffr_expected, 1009 __SVE_PREG_SIZE(vq))) 1010 pass = false; 1011 1012 if (!compare_buffer("saved ZA", za_out, za_expected, 1013 ZA_PT_ZA_SIZE(sme_vq))) 1014 pass = false; 1015 1016 if (!compare_buffer("saved ZT", zt_out, zt_expected, ZT_SIG_REG_BYTES)) 1017 pass = false; 1018 1019 if (fpmr_out != fpmr_expected) { 1020 ksft_print_msg("Mismatch in saved FPMR: %lx != %lx\n", 1021 fpmr_out, fpmr_expected); 1022 pass = false; 1023 } 1024 1025 return pass; 1026 } 1027 1028 static bool sve_sme_same(struct test_config *config) 1029 { 1030 if (config->sve_vl_in != config->sve_vl_expected) 1031 return false; 1032 1033 if (config->sme_vl_in != config->sme_vl_expected) 1034 return false; 1035 1036 if (config->svcr_in != config->svcr_expected) 1037 return false; 1038 1039 return true; 1040 } 1041 1042 static bool sve_write_supported(struct test_config *config) 1043 { 1044 if (!sve_supported() && !sme_supported()) 1045 return false; 1046 1047 if ((config->svcr_in & SVCR_ZA) != (config->svcr_expected & SVCR_ZA)) 1048 return false; 1049 1050 if (config->svcr_expected & SVCR_SM) { 1051 if (config->sve_vl_in != config->sve_vl_expected) { 1052 return false; 1053 } 1054 1055 /* Changing the SME VL disables ZA */ 1056 if ((config->svcr_expected & SVCR_ZA) && 1057 (config->sme_vl_in != config->sme_vl_expected)) { 1058 return false; 1059 } 1060 } else { 1061 if (config->sme_vl_in != config->sme_vl_expected) { 1062 return false; 1063 } 1064 } 1065 1066 return true; 1067 } 1068 1069 static void fpsimd_write_expected(struct test_config *config) 1070 { 1071 int vl; 1072 1073 fill_random(&v_expected, sizeof(v_expected)); 1074 1075 /* The SVE registers are flushed by a FPSIMD write */ 1076 vl = vl_expected(config); 1077 1078 memset(z_expected, 0, __SVE_ZREGS_SIZE(__sve_vq_from_vl(vl))); 1079 memset(p_expected, 0, __SVE_PREGS_SIZE(__sve_vq_from_vl(vl))); 1080 memset(ffr_expected, 0, __SVE_PREG_SIZE(__sve_vq_from_vl(vl))); 1081 1082 fpsimd_to_sve(v_expected, z_expected, vl); 1083 } 1084 1085 static void fpsimd_write(pid_t child, struct test_config *test_config) 1086 { 1087 struct user_fpsimd_state fpsimd; 1088 struct iovec iov; 1089 int ret; 1090 1091 memset(&fpsimd, 0, sizeof(fpsimd)); 1092 memcpy(&fpsimd.vregs, v_expected, sizeof(v_expected)); 1093 1094 iov.iov_base = &fpsimd; 1095 iov.iov_len = sizeof(fpsimd); 1096 ret = ptrace(PTRACE_SETREGSET, child, NT_PRFPREG, &iov); 1097 if (ret == -1) 1098 ksft_print_msg("FPSIMD set failed: (%s) %d\n", 1099 strerror(errno), errno); 1100 } 1101 1102 static bool fpmr_write_supported(struct test_config *config) 1103 { 1104 if (!fpmr_supported()) 1105 return false; 1106 1107 if (!sve_sme_same(config)) 1108 return false; 1109 1110 return true; 1111 } 1112 1113 static void fpmr_write_expected(struct test_config *config) 1114 { 1115 fill_random(&fpmr_expected, sizeof(fpmr_expected)); 1116 fpmr_expected &= FPMR_SAFE_BITS; 1117 } 1118 1119 static void fpmr_write(pid_t child, struct test_config *config) 1120 { 1121 struct iovec iov; 1122 int ret; 1123 1124 iov.iov_len = sizeof(fpmr_expected); 1125 iov.iov_base = &fpmr_expected; 1126 ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_FPMR, &iov); 1127 if (ret != 0) 1128 ksft_print_msg("Failed to write FPMR: %s (%d)\n", 1129 strerror(errno), errno); 1130 } 1131 1132 static void sve_write_expected(struct test_config *config) 1133 { 1134 int vl = vl_expected(config); 1135 int sme_vq = __sve_vq_from_vl(config->sme_vl_expected); 1136 1137 fill_random(z_expected, __SVE_ZREGS_SIZE(__sve_vq_from_vl(vl))); 1138 fill_random(p_expected, __SVE_PREGS_SIZE(__sve_vq_from_vl(vl))); 1139 1140 if ((svcr_expected & SVCR_SM) && !fa64_supported()) 1141 memset(ffr_expected, 0, __SVE_PREG_SIZE(sme_vq)); 1142 else 1143 fill_random_ffr(ffr_expected, __sve_vq_from_vl(vl)); 1144 1145 /* Share the low bits of Z with V */ 1146 fill_random(&v_expected, sizeof(v_expected)); 1147 fpsimd_to_sve(v_expected, z_expected, vl); 1148 1149 if (config->sme_vl_in != config->sme_vl_expected) { 1150 memset(za_expected, 0, ZA_PT_ZA_SIZE(sme_vq)); 1151 memset(zt_expected, 0, sizeof(zt_expected)); 1152 } 1153 } 1154 1155 static void sve_write(pid_t child, struct test_config *config) 1156 { 1157 struct user_sve_header *sve; 1158 struct iovec iov; 1159 int ret, vl, vq, regset; 1160 1161 vl = vl_expected(config); 1162 vq = __sve_vq_from_vl(vl); 1163 1164 iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE); 1165 iov.iov_base = malloc(iov.iov_len); 1166 if (!iov.iov_base) { 1167 ksft_print_msg("Failed allocating %lu byte SVE write buffer\n", 1168 iov.iov_len); 1169 return; 1170 } 1171 memset(iov.iov_base, 0, iov.iov_len); 1172 1173 sve = iov.iov_base; 1174 sve->size = iov.iov_len; 1175 sve->flags = SVE_PT_REGS_SVE; 1176 sve->vl = vl; 1177 1178 memcpy(iov.iov_base + SVE_PT_SVE_ZREG_OFFSET(vq, 0), 1179 z_expected, SVE_PT_SVE_ZREGS_SIZE(vq)); 1180 memcpy(iov.iov_base + SVE_PT_SVE_PREG_OFFSET(vq, 0), 1181 p_expected, SVE_PT_SVE_PREGS_SIZE(vq)); 1182 memcpy(iov.iov_base + SVE_PT_SVE_FFR_OFFSET(vq), 1183 ffr_expected, SVE_PT_SVE_PREG_SIZE(vq)); 1184 1185 if (svcr_expected & SVCR_SM) 1186 regset = NT_ARM_SSVE; 1187 else 1188 regset = NT_ARM_SVE; 1189 1190 ret = ptrace(PTRACE_SETREGSET, child, regset, &iov); 1191 if (ret != 0) 1192 ksft_print_msg("Failed to write SVE: %s (%d)\n", 1193 strerror(errno), errno); 1194 1195 free(iov.iov_base); 1196 } 1197 1198 static bool za_write_supported(struct test_config *config) 1199 { 1200 if ((config->svcr_in & SVCR_SM) != (config->svcr_expected & SVCR_SM)) 1201 return false; 1202 1203 return true; 1204 } 1205 1206 static void za_write_expected(struct test_config *config) 1207 { 1208 int sme_vq, sve_vq; 1209 1210 sme_vq = __sve_vq_from_vl(config->sme_vl_expected); 1211 1212 if (config->svcr_expected & SVCR_ZA) { 1213 fill_random(za_expected, ZA_PT_ZA_SIZE(sme_vq)); 1214 } else { 1215 memset(za_expected, 0, ZA_PT_ZA_SIZE(sme_vq)); 1216 memset(zt_expected, 0, sizeof(zt_expected)); 1217 } 1218 1219 /* Changing the SME VL flushes ZT, SVE state */ 1220 if (config->sme_vl_in != config->sme_vl_expected) { 1221 sve_vq = __sve_vq_from_vl(vl_expected(config)); 1222 memset(z_expected, 0, __SVE_ZREGS_SIZE(sve_vq)); 1223 memset(p_expected, 0, __SVE_PREGS_SIZE(sve_vq)); 1224 memset(ffr_expected, 0, __SVE_PREG_SIZE(sve_vq)); 1225 memset(zt_expected, 0, sizeof(zt_expected)); 1226 1227 fpsimd_to_sve(v_expected, z_expected, vl_expected(config)); 1228 } 1229 } 1230 1231 static void za_write(pid_t child, struct test_config *config) 1232 { 1233 struct user_za_header *za; 1234 struct iovec iov; 1235 int ret, vq; 1236 1237 vq = __sve_vq_from_vl(config->sme_vl_expected); 1238 1239 if (config->svcr_expected & SVCR_ZA) 1240 iov.iov_len = ZA_PT_SIZE(vq); 1241 else 1242 iov.iov_len = sizeof(*za); 1243 iov.iov_base = malloc(iov.iov_len); 1244 if (!iov.iov_base) { 1245 ksft_print_msg("Failed allocating %lu byte ZA write buffer\n", 1246 iov.iov_len); 1247 return; 1248 } 1249 memset(iov.iov_base, 0, iov.iov_len); 1250 1251 za = iov.iov_base; 1252 za->size = iov.iov_len; 1253 za->vl = config->sme_vl_expected; 1254 if (config->svcr_expected & SVCR_ZA) 1255 memcpy(iov.iov_base + ZA_PT_ZA_OFFSET, za_expected, 1256 ZA_PT_ZA_SIZE(vq)); 1257 1258 ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_ZA, &iov); 1259 if (ret != 0) 1260 ksft_print_msg("Failed to write ZA: %s (%d)\n", 1261 strerror(errno), errno); 1262 1263 free(iov.iov_base); 1264 } 1265 1266 static bool zt_write_supported(struct test_config *config) 1267 { 1268 if (!sme2_supported()) 1269 return false; 1270 if (config->sme_vl_in != config->sme_vl_expected) 1271 return false; 1272 if (!(config->svcr_expected & SVCR_ZA)) 1273 return false; 1274 if ((config->svcr_in & SVCR_SM) != (config->svcr_expected & SVCR_SM)) 1275 return false; 1276 1277 return true; 1278 } 1279 1280 static void zt_write_expected(struct test_config *config) 1281 { 1282 int sme_vq; 1283 1284 sme_vq = __sve_vq_from_vl(config->sme_vl_expected); 1285 1286 if (config->svcr_expected & SVCR_ZA) { 1287 fill_random(zt_expected, sizeof(zt_expected)); 1288 } else { 1289 memset(za_expected, 0, ZA_PT_ZA_SIZE(sme_vq)); 1290 memset(zt_expected, 0, sizeof(zt_expected)); 1291 } 1292 } 1293 1294 static void zt_write(pid_t child, struct test_config *config) 1295 { 1296 struct iovec iov; 1297 int ret; 1298 1299 iov.iov_len = ZT_SIG_REG_BYTES; 1300 iov.iov_base = zt_expected; 1301 ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_ZT, &iov); 1302 if (ret != 0) 1303 ksft_print_msg("Failed to write ZT: %s (%d)\n", 1304 strerror(errno), errno); 1305 } 1306 1307 /* Actually run a test */ 1308 static void run_test(struct test_definition *test, struct test_config *config) 1309 { 1310 pid_t child; 1311 char name[1024]; 1312 bool pass; 1313 1314 if (sve_supported() && sme_supported()) 1315 snprintf(name, sizeof(name), "%s, SVE %d->%d, SME %d/%x->%d/%x", 1316 test->name, 1317 config->sve_vl_in, config->sve_vl_expected, 1318 config->sme_vl_in, config->svcr_in, 1319 config->sme_vl_expected, config->svcr_expected); 1320 else if (sve_supported()) 1321 snprintf(name, sizeof(name), "%s, SVE %d->%d", test->name, 1322 config->sve_vl_in, config->sve_vl_expected); 1323 else if (sme_supported()) 1324 snprintf(name, sizeof(name), "%s, SME %d/%x->%d/%x", 1325 test->name, 1326 config->sme_vl_in, config->svcr_in, 1327 config->sme_vl_expected, config->svcr_expected); 1328 else 1329 snprintf(name, sizeof(name), "%s", test->name); 1330 1331 if (test->supported && !test->supported(config)) { 1332 ksft_test_result_skip("%s\n", name); 1333 return; 1334 } 1335 1336 set_initial_values(config); 1337 1338 if (test->set_expected_values) 1339 test->set_expected_values(config); 1340 1341 child = fork(); 1342 if (child < 0) 1343 ksft_exit_fail_msg("fork() failed: %s (%d)\n", 1344 strerror(errno), errno); 1345 /* run_child() never returns */ 1346 if (child == 0) 1347 run_child(config); 1348 1349 pass = run_parent(child, test, config); 1350 if (!check_memory_values(config)) 1351 pass = false; 1352 1353 ksft_test_result(pass, "%s\n", name); 1354 } 1355 1356 static void run_tests(struct test_definition defs[], int count, 1357 struct test_config *config) 1358 { 1359 int i; 1360 1361 for (i = 0; i < count; i++) 1362 run_test(&defs[i], config); 1363 } 1364 1365 static struct test_definition base_test_defs[] = { 1366 { 1367 .name = "No writes", 1368 .supported = sve_sme_same, 1369 }, 1370 { 1371 .name = "FPSIMD write", 1372 .supported = sve_sme_same, 1373 .set_expected_values = fpsimd_write_expected, 1374 .modify_values = fpsimd_write, 1375 }, 1376 { 1377 .name = "FPMR write", 1378 .supported = fpmr_write_supported, 1379 .set_expected_values = fpmr_write_expected, 1380 .modify_values = fpmr_write, 1381 }, 1382 }; 1383 1384 static struct test_definition sve_test_defs[] = { 1385 { 1386 .name = "SVE write", 1387 .supported = sve_write_supported, 1388 .set_expected_values = sve_write_expected, 1389 .modify_values = sve_write, 1390 }, 1391 }; 1392 1393 static struct test_definition za_test_defs[] = { 1394 { 1395 .name = "ZA write", 1396 .supported = za_write_supported, 1397 .set_expected_values = za_write_expected, 1398 .modify_values = za_write, 1399 }, 1400 }; 1401 1402 static struct test_definition zt_test_defs[] = { 1403 { 1404 .name = "ZT write", 1405 .supported = zt_write_supported, 1406 .set_expected_values = zt_write_expected, 1407 .modify_values = zt_write, 1408 }, 1409 }; 1410 1411 static int sve_vls[MAX_NUM_VLS], sme_vls[MAX_NUM_VLS]; 1412 static int sve_vl_count, sme_vl_count; 1413 1414 static void probe_vls(const char *name, int vls[], int *vl_count, int set_vl) 1415 { 1416 unsigned int vq; 1417 int vl; 1418 1419 *vl_count = 0; 1420 1421 for (vq = ARCH_VQ_MAX; vq > 0; vq /= 2) { 1422 vl = prctl(set_vl, vq * 16); 1423 if (vl == -1) 1424 ksft_exit_fail_msg("SET_VL failed: %s (%d)\n", 1425 strerror(errno), errno); 1426 1427 vl &= PR_SVE_VL_LEN_MASK; 1428 1429 if (*vl_count && (vl == vls[*vl_count - 1])) 1430 break; 1431 1432 vq = sve_vq_from_vl(vl); 1433 1434 vls[*vl_count] = vl; 1435 *vl_count += 1; 1436 } 1437 1438 if (*vl_count > 2) { 1439 /* Just use the minimum and maximum */ 1440 vls[1] = vls[*vl_count - 1]; 1441 ksft_print_msg("%d %s VLs, using %d and %d\n", 1442 *vl_count, name, vls[0], vls[1]); 1443 *vl_count = 2; 1444 } else { 1445 ksft_print_msg("%d %s VLs\n", *vl_count, name); 1446 } 1447 } 1448 1449 static struct { 1450 int svcr_in, svcr_expected; 1451 } svcr_combinations[] = { 1452 { .svcr_in = 0, .svcr_expected = 0, }, 1453 { .svcr_in = 0, .svcr_expected = SVCR_SM, }, 1454 { .svcr_in = 0, .svcr_expected = SVCR_ZA, }, 1455 /* Can't enable both SM and ZA with a single ptrace write */ 1456 1457 { .svcr_in = SVCR_SM, .svcr_expected = 0, }, 1458 { .svcr_in = SVCR_SM, .svcr_expected = SVCR_SM, }, 1459 { .svcr_in = SVCR_SM, .svcr_expected = SVCR_ZA, }, 1460 { .svcr_in = SVCR_SM, .svcr_expected = SVCR_SM | SVCR_ZA, }, 1461 1462 { .svcr_in = SVCR_ZA, .svcr_expected = 0, }, 1463 { .svcr_in = SVCR_ZA, .svcr_expected = SVCR_SM, }, 1464 { .svcr_in = SVCR_ZA, .svcr_expected = SVCR_ZA, }, 1465 { .svcr_in = SVCR_ZA, .svcr_expected = SVCR_SM | SVCR_ZA, }, 1466 1467 { .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = 0, }, 1468 { .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = SVCR_SM, }, 1469 { .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = SVCR_ZA, }, 1470 { .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = SVCR_SM | SVCR_ZA, }, 1471 }; 1472 1473 static void run_sve_tests(void) 1474 { 1475 struct test_config test_config; 1476 int i, j; 1477 1478 if (!sve_supported()) 1479 return; 1480 1481 test_config.sme_vl_in = sme_vls[0]; 1482 test_config.sme_vl_expected = sme_vls[0]; 1483 test_config.svcr_in = 0; 1484 test_config.svcr_expected = 0; 1485 1486 for (i = 0; i < sve_vl_count; i++) { 1487 test_config.sve_vl_in = sve_vls[i]; 1488 1489 for (j = 0; j < sve_vl_count; j++) { 1490 test_config.sve_vl_expected = sve_vls[j]; 1491 1492 run_tests(base_test_defs, 1493 ARRAY_SIZE(base_test_defs), 1494 &test_config); 1495 if (sve_supported()) 1496 run_tests(sve_test_defs, 1497 ARRAY_SIZE(sve_test_defs), 1498 &test_config); 1499 } 1500 } 1501 1502 } 1503 1504 static void run_sme_tests(void) 1505 { 1506 struct test_config test_config; 1507 int i, j, k; 1508 1509 if (!sme_supported()) 1510 return; 1511 1512 test_config.sve_vl_in = sve_vls[0]; 1513 test_config.sve_vl_expected = sve_vls[0]; 1514 1515 /* 1516 * Every SME VL/SVCR combination 1517 */ 1518 for (i = 0; i < sme_vl_count; i++) { 1519 test_config.sme_vl_in = sme_vls[i]; 1520 1521 for (j = 0; j < sme_vl_count; j++) { 1522 test_config.sme_vl_expected = sme_vls[j]; 1523 1524 for (k = 0; k < ARRAY_SIZE(svcr_combinations); k++) { 1525 test_config.svcr_in = svcr_combinations[k].svcr_in; 1526 test_config.svcr_expected = svcr_combinations[k].svcr_expected; 1527 1528 run_tests(base_test_defs, 1529 ARRAY_SIZE(base_test_defs), 1530 &test_config); 1531 run_tests(sve_test_defs, 1532 ARRAY_SIZE(sve_test_defs), 1533 &test_config); 1534 run_tests(za_test_defs, 1535 ARRAY_SIZE(za_test_defs), 1536 &test_config); 1537 1538 if (sme2_supported()) 1539 run_tests(zt_test_defs, 1540 ARRAY_SIZE(zt_test_defs), 1541 &test_config); 1542 } 1543 } 1544 } 1545 } 1546 1547 int main(void) 1548 { 1549 struct test_config test_config; 1550 struct sigaction sa; 1551 int tests, ret, tmp; 1552 1553 srandom(getpid()); 1554 1555 ksft_print_header(); 1556 1557 if (sve_supported()) { 1558 probe_vls("SVE", sve_vls, &sve_vl_count, PR_SVE_SET_VL); 1559 1560 tests = ARRAY_SIZE(base_test_defs) + 1561 ARRAY_SIZE(sve_test_defs); 1562 tests *= sve_vl_count * sve_vl_count; 1563 } else { 1564 /* Only run the FPSIMD tests */ 1565 sve_vl_count = 1; 1566 tests = ARRAY_SIZE(base_test_defs); 1567 } 1568 1569 if (sme_supported()) { 1570 probe_vls("SME", sme_vls, &sme_vl_count, PR_SME_SET_VL); 1571 1572 tmp = ARRAY_SIZE(base_test_defs) + ARRAY_SIZE(sve_test_defs) 1573 + ARRAY_SIZE(za_test_defs); 1574 1575 if (sme2_supported()) 1576 tmp += ARRAY_SIZE(zt_test_defs); 1577 1578 tmp *= sme_vl_count * sme_vl_count; 1579 tmp *= ARRAY_SIZE(svcr_combinations); 1580 tests += tmp; 1581 } else { 1582 sme_vl_count = 1; 1583 } 1584 1585 if (sme2_supported()) 1586 ksft_print_msg("SME2 supported\n"); 1587 1588 if (fa64_supported()) 1589 ksft_print_msg("FA64 supported\n"); 1590 1591 if (fpmr_supported()) 1592 ksft_print_msg("FPMR supported\n"); 1593 1594 ksft_set_plan(tests); 1595 1596 /* Get signal handers ready before we start any children */ 1597 memset(&sa, 0, sizeof(sa)); 1598 sa.sa_sigaction = handle_alarm; 1599 sa.sa_flags = SA_RESTART | SA_SIGINFO; 1600 sigemptyset(&sa.sa_mask); 1601 ret = sigaction(SIGALRM, &sa, NULL); 1602 if (ret < 0) 1603 ksft_print_msg("Failed to install SIGALRM handler: %s (%d)\n", 1604 strerror(errno), errno); 1605 1606 /* 1607 * Run the test set if there is no SVE or SME, with those we 1608 * have to pick a VL for each run. 1609 */ 1610 if (!sve_supported()) { 1611 test_config.sve_vl_in = 0; 1612 test_config.sve_vl_expected = 0; 1613 test_config.sme_vl_in = 0; 1614 test_config.sme_vl_expected = 0; 1615 test_config.svcr_in = 0; 1616 test_config.svcr_expected = 0; 1617 1618 run_tests(base_test_defs, ARRAY_SIZE(base_test_defs), 1619 &test_config); 1620 } 1621 1622 run_sve_tests(); 1623 run_sme_tests(); 1624 1625 ksft_finished(); 1626 } 1627