// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015-2021 ARM Limited.
 * Original author: Dave Martin <Dave.Martin@arm.com>
 */
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <asm/sigcontext.h>
#include <asm/ptrace.h>

#include "../../kselftest.h"

/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_SVE
#define NT_ARM_SVE 0x405
#endif

#ifndef NT_ARM_SSVE
#define NT_ARM_SSVE 0x40b
#endif

/*
 * The architecture defines the maximum VQ as 16, but for extensibility
 * the kernel specifies SVE_VQ_MAX as 512, which would have us running
 * a *lot* more tests than are useful.  Until the architecture is
 * extended let's limit our coverage to what is currently allowed,
 * plus one extra to ensure we cover constraining the VL as expected.
 */
#define TEST_VQ_MAX 17

struct vec_type {
	const char *name;
	unsigned long hwcap_type;
	unsigned long hwcap;
	int regset;
	int prctl_set;
};

static const struct vec_type vec_types[] = {
	{
		.name = "SVE",
		.hwcap_type = AT_HWCAP,
		.hwcap = HWCAP_SVE,
		.regset = NT_ARM_SVE,
		.prctl_set = PR_SVE_SET_VL,
	},
	{
		.name = "Streaming SVE",
		.hwcap_type = AT_HWCAP2,
		.hwcap = HWCAP2_SME,
		.regset = NT_ARM_SSVE,
		.prctl_set = PR_SME_SET_VL,
	},
};

#define VL_TESTS (((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 4)
#define FLAG_TESTS 2
#define FPSIMD_TESTS 2

#define EXPECTED_TESTS ((VL_TESTS + FLAG_TESTS + FPSIMD_TESTS) * ARRAY_SIZE(vec_types))

static void fill_buf(char *buf, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		buf[i] = random();
}

static int do_child(void)
{
	if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
		ksft_exit_fail_msg("ptrace(PTRACE_TRACEME) failed: %s (%d)\n",
				   strerror(errno), errno);

	if (raise(SIGSTOP))
		ksft_exit_fail_msg("raise(SIGSTOP) failed: %s (%d)\n",
				   strerror(errno), errno);

	return EXIT_SUCCESS;
}

static int get_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
	struct iovec iov;

	iov.iov_base = fpsimd;
	iov.iov_len = sizeof(*fpsimd);
	return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
}

static int set_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
	struct iovec iov;

	iov.iov_base = fpsimd;
	iov.iov_len = sizeof(*fpsimd);
	return ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov);
}
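/*
 * The NT_ARM_SVE/NT_ARM_SSVE regsets are variable size: the kernel
 * reports how much data it holds for the current vector length in the
 * size field of the user_sve_header it returns.  get_sve() below reads
 * into a heap buffer and grows it until the reported size fits, so
 * callers just pass a (possibly NULL) buffer pointer plus its current
 * size and get back a pointer to the header on success.
 */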
static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type,
				       void **buf, size_t *size)
{
	struct user_sve_header *sve;
	void *p;
	size_t sz = sizeof(*sve);
	struct iovec iov;

	while (1) {
		if (*size < sz) {
			p = realloc(*buf, sz);
			if (!p) {
				errno = ENOMEM;
				goto error;
			}

			*buf = p;
			*size = sz;
		}

		iov.iov_base = *buf;
		iov.iov_len = sz;
		if (ptrace(PTRACE_GETREGSET, pid, type->regset, &iov))
			goto error;

		sve = *buf;
		if (sve->size <= sz)
			break;

		sz = sve->size;
	}

	return sve;

error:
	return NULL;
}

static int set_sve(pid_t pid, const struct vec_type *type,
		   const struct user_sve_header *sve)
{
	struct iovec iov;

	iov.iov_base = (void *)sve;
	iov.iov_len = sve->size;
	return ptrace(PTRACE_SETREGSET, pid, type->regset, &iov);
}

/* Validate setting and getting the inherit flag */
static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
{
	struct user_sve_header sve;
	struct user_sve_header *new_sve = NULL;
	size_t new_sve_size = 0;
	int ret;

	/* First set the flag */
	memset(&sve, 0, sizeof(sve));
	sve.size = sizeof(sve);
	sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
	sve.flags = SVE_PT_VL_INHERIT;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
				      type->name);
		return;
	}

	/*
	 * Read back the new register state and verify that we have
	 * set the flags we expected.
	 */
	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s SVE flags\n",
				      type->name);
		return;
	}

	ksft_test_result(new_sve->flags & SVE_PT_VL_INHERIT,
			 "%s SVE_PT_VL_INHERIT set\n", type->name);

	/* Now clear */
	sve.flags &= ~SVE_PT_VL_INHERIT;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to clear %s SVE_PT_VL_INHERIT\n",
				      type->name);
		return;
	}

	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s SVE flags\n",
				      type->name);
		return;
	}

	ksft_test_result(!(new_sve->flags & SVE_PT_VL_INHERIT),
			 "%s SVE_PT_VL_INHERIT cleared\n", type->name);

	free(new_sve);
}
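/*
 * Writing a vector length the CPU does not support is expected to
 * succeed: the kernel constrains the request to a supported VL, just
 * as prctl(PR_SVE_SET_VL)/prctl(PR_SME_SET_VL) do.  The helper below
 * therefore compares the VL read back from the child against the
 * value prctl() reported for this process rather than against the
 * raw request.
 */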
/* Validate attempting to set the specified VL via ptrace */
static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
			      unsigned int vl, bool *supported)
{
	struct user_sve_header sve;
	struct user_sve_header *new_sve = NULL;
	size_t new_sve_size = 0;
	int ret, prctl_vl;

	*supported = false;

	/* Check if the VL is supported in this process */
	prctl_vl = prctl(type->prctl_set, vl);
	if (prctl_vl == -1)
		ksft_exit_fail_msg("prctl(PR_%s_SET_VL) failed: %s (%d)\n",
				   type->name, strerror(errno), errno);

	/* If the VL is not supported then a supported VL will be returned */
	*supported = (prctl_vl == vl);

	/* Set the VL by doing a set with no register payload */
	memset(&sve, 0, sizeof(sve));
	sve.size = sizeof(sve);
	sve.vl = vl;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u\n",
				      type->name, vl);
		return;
	}

	/*
	 * Read back the new register state and verify that we have the
	 * same VL that we got from prctl() on ourselves.
	 */
	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u\n",
				      type->name, vl);
		return;
	}

	ksft_test_result(new_sve->vl == prctl_vl, "Set %s VL %u\n",
			 type->name, vl);

	free(new_sve);
}

static void check_u32(unsigned int vl, const char *reg,
		      uint32_t *in, uint32_t *out, int *errors)
{
	if (*in != *out) {
		printf("# VL %d %s wrote %x read %x\n",
		       vl, reg, *in, *out);
		(*errors)++;
	}
}

/* Access the FPSIMD registers via the SVE regset */
static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type)
{
	void *svebuf;
	struct user_sve_header *sve;
	struct user_fpsimd_state *fpsimd, new_fpsimd;
	unsigned int i, j;
	unsigned char *p;
	int ret;

	svebuf = malloc(SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
	if (!svebuf) {
		ksft_test_result_fail("Failed to allocate FPSIMD buffer\n");
		return;
	}

	memset(svebuf, 0, SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
	sve = svebuf;
	sve->flags = SVE_PT_REGS_FPSIMD;
	sve->size = SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD);
	sve->vl = 16; /* We don't care what the VL is */

	/* Try to set a known FPSIMD state via PT_REGS_SVE */
	fpsimd = (struct user_fpsimd_state *)((char *)sve +
					      SVE_PT_FPSIMD_OFFSET);
	for (i = 0; i < 32; ++i) {
		p = (unsigned char *)&fpsimd->vregs[i];

		for (j = 0; j < sizeof(fpsimd->vregs[i]); ++j)
			p[j] = j;
	}

	ret = set_sve(child, type, sve);
	ksft_test_result(ret == 0, "%s FPSIMD set via SVE: %d\n",
			 type->name, ret);
	if (ret)
		goto out;

	/* Verify via the FPSIMD regset */
	if (get_fpsimd(child, &new_fpsimd)) {
		ksft_test_result_fail("get_fpsimd(): %s\n",
				      strerror(errno));
		goto out;
	}
	if (memcmp(fpsimd, &new_fpsimd, sizeof(*fpsimd)) == 0)
		ksft_test_result_pass("%s get_fpsimd() gave same state\n",
				      type->name);
	else
		ksft_test_result_fail("%s get_fpsimd() gave different state\n",
				      type->name);

out:
	free(svebuf);
}
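/*
 * For SVE_PT_REGS_SVE payloads the data following the header is laid
 * out per the sve_context macros: the Z registers, then the P
 * registers, FFR, FPSR and FPCR, with every offset and size derived
 * from the vector quadword count (VQ) via the SVE_PT_SVE_*() helpers.
 * The data set/get tests below fill those regions with random bytes
 * and check that the same bytes come back through ptrace.
 */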
/* Validate attempting to set SVE data and read SVE data */
static void ptrace_set_sve_get_sve_data(pid_t child,
					const struct vec_type *type,
					unsigned int vl)
{
	void *write_buf;
	void *read_buf = NULL;
	struct user_sve_header *write_sve;
	struct user_sve_header *read_sve;
	size_t read_sve_size = 0;
	unsigned int vq = sve_vq_from_vl(vl);
	int ret, i;
	size_t data_size;
	int errors = 0;

	data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
	write_buf = malloc(data_size);
	if (!write_buf) {
		ksft_test_result_fail("Error allocating %ld byte buffer for %s VL %u\n",
				      data_size, type->name, vl);
		return;
	}
	write_sve = write_buf;

	/* Set up some data and write it out */
	memset(write_sve, 0, data_size);
	write_sve->size = data_size;
	write_sve->vl = vl;
	write_sve->flags = SVE_PT_REGS_SVE;

	for (i = 0; i < __SVE_NUM_ZREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			 SVE_PT_SVE_ZREG_SIZE(vq));

	for (i = 0; i < __SVE_NUM_PREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			 SVE_PT_SVE_PREG_SIZE(vq));

	fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
	fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);

	/* TODO: Generate a valid FFR pattern */

	ret = set_sve(child, type, write_sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u data\n",
				      type->name, vl);
		goto out;
	}

	/* Read the data back */
	if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u data\n",
				      type->name, vl);
		goto out;
	}
	read_sve = read_buf;

	/* We might read more data if there are extensions we don't know about */
	if (read_sve->size < write_sve->size) {
		ksft_test_result_fail("%s wrote %d bytes, only read %d\n",
				      type->name, write_sve->size,
				      read_sve->size);
		goto out_read;
	}

	for (i = 0; i < __SVE_NUM_ZREGS; i++) {
		if (memcmp(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			   read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			   SVE_PT_SVE_ZREG_SIZE(vq)) != 0) {
			printf("# Mismatch in %u Z%d\n", vl, i);
			errors++;
		}
	}

	for (i = 0; i < __SVE_NUM_PREGS; i++) {
		if (memcmp(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			   read_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			   SVE_PT_SVE_PREG_SIZE(vq)) != 0) {
			printf("# Mismatch in %u P%d\n", vl, i);
			errors++;
		}
	}

	check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
		  read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
	check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
		  read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);

	ksft_test_result(errors == 0, "Set and get %s data for VL %u\n",
			 type->name, vl);

out_read:
	free(read_buf);
out:
	free(write_buf);
}
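/*
 * The architecture defines the low 128 bits of each Z register to
 * alias the corresponding FPSIMD V register, so data written via the
 * SVE regset should be visible through NT_PRFPREG and vice versa.
 * The Z register data in the regset is stored in an endianness
 * invariant layout, which is why these cross-view checks are skipped
 * on big endian.
 */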
/* Validate attempting to set SVE data and read it via the FPSIMD regset */
static void ptrace_set_sve_get_fpsimd_data(pid_t child,
					   const struct vec_type *type,
					   unsigned int vl)
{
	void *write_buf;
	struct user_sve_header *write_sve;
	unsigned int vq = sve_vq_from_vl(vl);
	struct user_fpsimd_state fpsimd_state;
	int ret, i;
	size_t data_size;
	int errors = 0;

	if (__BYTE_ORDER == __BIG_ENDIAN) {
		ksft_test_result_skip("Big endian not supported\n");
		return;
	}

	data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
	write_buf = malloc(data_size);
	if (!write_buf) {
		ksft_test_result_fail("Error allocating %ld byte buffer for %s VL %u\n",
				      data_size, type->name, vl);
		return;
	}
	write_sve = write_buf;

	/* Set up some data and write it out */
	memset(write_sve, 0, data_size);
	write_sve->size = data_size;
	write_sve->vl = vl;
	write_sve->flags = SVE_PT_REGS_SVE;

	for (i = 0; i < __SVE_NUM_ZREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			 SVE_PT_SVE_ZREG_SIZE(vq));

	fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
	fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);

	ret = set_sve(child, type, write_sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u data\n",
				      type->name, vl);
		goto out;
	}

	/* Read the data back */
	if (get_fpsimd(child, &fpsimd_state)) {
		ksft_test_result_fail("Failed to read %s VL %u FPSIMD data\n",
				      type->name, vl);
		goto out;
	}

	for (i = 0; i < __SVE_NUM_ZREGS; i++) {
		__uint128_t tmp = 0;

		/*
		 * Z regs are stored endianness invariant, this won't
		 * work for big endian
		 */
		memcpy(&tmp, write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
		       sizeof(tmp));

		if (tmp != fpsimd_state.vregs[i]) {
			printf("# Mismatch in FPSIMD for %s VL %u Z%d\n",
			       type->name, vl, i);
			errors++;
		}
	}

	check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
		  &fpsimd_state.fpsr, &errors);
	check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
		  &fpsimd_state.fpcr, &errors);

	ksft_test_result(errors == 0, "Set and get FPSIMD data for %s VL %u\n",
			 type->name, vl);

out:
	free(write_buf);
}

/* Validate attempting to set FPSIMD data and read it via the SVE regset */
static void ptrace_set_fpsimd_get_sve_data(pid_t child,
					   const struct vec_type *type,
					   unsigned int vl)
{
	void *read_buf = NULL;
	unsigned char *p;
	struct user_sve_header *read_sve;
	unsigned int vq = sve_vq_from_vl(vl);
	struct user_fpsimd_state write_fpsimd;
	int ret, i, j;
	size_t read_sve_size = 0;
	size_t expected_size;
	int errors = 0;

	if (__BYTE_ORDER == __BIG_ENDIAN) {
		ksft_test_result_skip("Big endian not supported\n");
		return;
	}

	/* Zero the whole state so FPSR/FPCR are compared from defined values */
	memset(&write_fpsimd, 0, sizeof(write_fpsimd));

	for (i = 0; i < 32; ++i) {
		p = (unsigned char *)&write_fpsimd.vregs[i];

		for (j = 0; j < sizeof(write_fpsimd.vregs[i]); ++j)
			p[j] = j;
	}

	ret = set_fpsimd(child, &write_fpsimd);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set FPSIMD state: %d\n",
				      ret);
		return;
	}

	if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u data\n",
				      type->name, vl);
		return;
	}
	read_sve = read_buf;

	if (read_sve->vl != vl) {
		ksft_test_result_fail("Child VL != expected VL: %u != %u\n",
				      read_sve->vl, vl);
		goto out;
	}

	/* The kernel may return either SVE or FPSIMD format */
	switch (read_sve->flags & SVE_PT_REGS_MASK) {
	case SVE_PT_REGS_FPSIMD:
		expected_size = SVE_PT_FPSIMD_SIZE(vq, SVE_PT_REGS_FPSIMD);
		if (read_sve_size < expected_size) {
			ksft_test_result_fail("Read %ld bytes, expected %ld\n",
					      read_sve_size, expected_size);
			goto out;
		}

		ret = memcmp(&write_fpsimd, read_buf + SVE_PT_FPSIMD_OFFSET,
			     sizeof(write_fpsimd));
		if (ret != 0) {
			ksft_print_msg("Read FPSIMD data mismatch\n");
			errors++;
		}
		break;

	case SVE_PT_REGS_SVE:
		expected_size = SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
		if (read_sve_size < expected_size) {
			ksft_test_result_fail("Read %ld bytes, expected %ld\n",
					      read_sve_size, expected_size);
			goto out;
		}

		for (i = 0; i < __SVE_NUM_ZREGS; i++) {
			__uint128_t tmp = 0;

			/*
			 * Z regs are stored endianness invariant, this won't
			 * work for big endian
			 */
			memcpy(&tmp, read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			       sizeof(tmp));

			if (tmp != write_fpsimd.vregs[i]) {
				ksft_print_msg("Mismatch in FPSIMD for %s VL %u Z%d/V%d\n",
					       type->name, vl, i, i);
				errors++;
			}
		}

		check_u32(vl, "FPSR", &write_fpsimd.fpsr,
			  read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
		check_u32(vl, "FPCR", &write_fpsimd.fpcr,
			  read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);
		break;
	default:
		ksft_print_msg("Unexpected regs type %d\n",
			       read_sve->flags & SVE_PT_REGS_MASK);
		errors++;
		break;
	}

	ksft_test_result(errors == 0, "Set FPSIMD, read via SVE for %s VL %u\n",
			 type->name, vl);

out:
	free(read_buf);
}
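/*
 * Parent side of the test: do_child() has already called
 * PTRACE_TRACEME and raised SIGSTOP, so do_parent() waits until it
 * sees that SIGSTOP (identified via PTRACE_GETSIGINFO as a SI_TKILL
 * from the child itself) before walking each vector type and VL and
 * running the regset tests against the stopped child.
 */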
static int do_parent(pid_t child)
{
	int ret = EXIT_FAILURE;
	pid_t pid;
	int status, i;
	siginfo_t si;
	unsigned int vq, vl;
	bool vl_supported;

	ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);

	/* Attach to the child */
	while (1) {
		int sig;

		pid = wait(&status);
		if (pid == -1) {
			perror("wait");
			goto error;
		}

		/*
		 * This should never happen but it's hard to flag in
		 * the framework.
		 */
		if (pid != child)
			continue;

		if (WIFEXITED(status) || WIFSIGNALED(status))
			ksft_exit_fail_msg("Child died unexpectedly\n");

		if (!WIFSTOPPED(status))
			goto error;

		sig = WSTOPSIG(status);

		if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
			if (errno == ESRCH)
				goto disappeared;

			if (errno == EINVAL) {
				sig = 0; /* bust group-stop */
				goto cont;
			}

			ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
					      strerror(errno));
			goto error;
		}

		if (sig == SIGSTOP && si.si_code == SI_TKILL &&
		    si.si_pid == pid)
			break;

	cont:
		if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
			if (errno == ESRCH)
				goto disappeared;

			ksft_test_result_fail("PTRACE_CONT: %s\n",
					      strerror(errno));
			goto error;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vec_types); i++) {
		/* FPSIMD via SVE regset */
		if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
			ptrace_sve_fpsimd(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s FPSIMD set via SVE\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s FPSIMD read\n",
					      vec_types[i].name);
		}

		/* prctl() flags */
		if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
			ptrace_set_get_inherit(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s SVE_PT_VL_INHERIT set\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s SVE_PT_VL_INHERIT cleared\n",
					      vec_types[i].name);
		}

		/* Step through every possible VQ */
		for (vq = SVE_VQ_MIN; vq <= TEST_VQ_MAX; vq++) {
			vl = sve_vl_from_vq(vq);

			/* First, try to set this vector length */
			if (getauxval(vec_types[i].hwcap_type) &
			    vec_types[i].hwcap) {
				ptrace_set_get_vl(child, &vec_types[i], vl,
						  &vl_supported);
			} else {
				ksft_test_result_skip("%s get/set VL %d\n",
						      vec_types[i].name, vl);
				vl_supported = false;
			}

			/* If the VL is supported validate data set/get */
			if (vl_supported) {
				ptrace_set_sve_get_sve_data(child, &vec_types[i], vl);
				ptrace_set_sve_get_fpsimd_data(child, &vec_types[i], vl);
				ptrace_set_fpsimd_get_sve_data(child, &vec_types[i], vl);
			} else {
				ksft_test_result_skip("%s set SVE get SVE for VL %d\n",
						      vec_types[i].name, vl);
				ksft_test_result_skip("%s set SVE get FPSIMD for VL %d\n",
						      vec_types[i].name, vl);
				ksft_test_result_skip("%s set FPSIMD get SVE for VL %d\n",
						      vec_types[i].name, vl);
			}
		}
	}

	ret = EXIT_SUCCESS;

error:
	kill(child, SIGKILL);

disappeared:
	return ret;
}

int main(void)
{
	int ret = EXIT_SUCCESS;
	pid_t child;

	srandom(getpid());

	ksft_print_header();
	ksft_set_plan(EXPECTED_TESTS);

	if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
		ksft_exit_skip("SVE not available\n");

	child = fork();
	if (!child)
		return do_child();

	if (do_parent(child))
		ret = EXIT_FAILURE;

	ksft_print_cnts();

	return ret;
}