// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015-2021 ARM Limited.
 * Original author: Dave Martin <Dave.Martin@arm.com>
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <asm/sigcontext.h>
#include <asm/ptrace.h>

#include "../../kselftest.h"

/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_SVE
#define NT_ARM_SVE 0x405
#endif

#ifndef NT_ARM_SSVE
#define NT_ARM_SSVE 0x40b
#endif

/*
 * The architecture defines the maximum VQ as 16 but for extensibility
 * the kernel specifies the SVE_VQ_MAX as 512, resulting in us running
 * a *lot* more tests than are useful if we use it.  Until the
 * architecture is extended let's limit our coverage to what is
 * currently allowed, plus one extra to ensure we cover constraining
 * the VL as expected.
 */
#define TEST_VQ_MAX 17

struct vec_type {
	const char *name;
	unsigned long hwcap_type;
	unsigned long hwcap;
	int regset;
	int prctl_set;
};

static const struct vec_type vec_types[] = {
	{
		.name = "SVE",
		.hwcap_type = AT_HWCAP,
		.hwcap = HWCAP_SVE,
		.regset = NT_ARM_SVE,
		.prctl_set = PR_SVE_SET_VL,
	},
	{
		.name = "Streaming SVE",
		.hwcap_type = AT_HWCAP2,
		.hwcap = HWCAP2_SME,
		.regset = NT_ARM_SSVE,
		.prctl_set = PR_SME_SET_VL,
	},
};

#define VL_TESTS (((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 4)
#define FLAG_TESTS 2
#define FPSIMD_TESTS 2

#define EXPECTED_TESTS ((VL_TESTS + FLAG_TESTS + FPSIMD_TESTS) * ARRAY_SIZE(vec_types))

static void fill_buf(char *buf, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		buf[i] = random();
}

static int do_child(void)
{
	if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
		ksft_exit_fail_msg("ptrace(PTRACE_TRACEME) failed: %s (%d)\n",
				   strerror(errno), errno);

	if (raise(SIGSTOP))
		ksft_exit_fail_msg("raise(SIGSTOP) failed: %s (%d)\n",
				   strerror(errno), errno);

	return EXIT_SUCCESS;
}

static int get_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
	struct iovec iov;

	iov.iov_base = fpsimd;
	iov.iov_len = sizeof(*fpsimd);
	return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
}

static int set_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
	struct iovec iov;

	iov.iov_base = fpsimd;
	iov.iov_len = sizeof(*fpsimd);
	return ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov);
}

static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type,
				       void **buf, size_t *size)
{
	struct user_sve_header *sve;
	void *p;
	size_t sz = sizeof *sve;
	struct iovec iov;

	while (1) {
		if (*size < sz) {
			p = realloc(*buf, sz);
			if (!p) {
				errno = ENOMEM;
				goto error;
			}

			*buf = p;
			*size = sz;
		}

		iov.iov_base = *buf;
		iov.iov_len = sz;
		if (ptrace(PTRACE_GETREGSET, pid, type->regset, &iov))
			goto error;

		sve = *buf;
		if (sve->size <= sz)
			break;

		sz = sve->size;
	}

	return sve;

error:
	return NULL;
}

static int set_sve(pid_t pid, const struct vec_type *type,
		   const struct user_sve_header *sve)
{
	struct iovec iov;

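	/*
	 * The regset write consumes sve->size bytes starting at the
	 * header, so callers size their payloads to match the VL and
	 * register layout flags they encode in the header.
	 */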
	iov.iov_base = (void *)sve;
	iov.iov_len = sve->size;
	return ptrace(PTRACE_SETREGSET, pid, type->regset, &iov);
}

/* Validate setting and getting the inherit flag */
static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
{
	struct user_sve_header sve;
	struct user_sve_header *new_sve = NULL;
	size_t new_sve_size = 0;
	int ret;

	/* First set the flag */
	memset(&sve, 0, sizeof(sve));
	sve.size = sizeof(sve);
	sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
	sve.flags = SVE_PT_VL_INHERIT | SVE_PT_REGS_SVE;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
				      type->name);
		return;
	}

	/*
	 * Read back the new register state and verify that we have
	 * set the flags we expected.
	 */
	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s SVE flags\n",
				      type->name);
		return;
	}

	ksft_test_result(new_sve->flags & SVE_PT_VL_INHERIT,
			 "%s SVE_PT_VL_INHERIT set\n", type->name);

	/* Now clear */
	sve.flags &= ~SVE_PT_VL_INHERIT;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to clear %s SVE_PT_VL_INHERIT\n",
				      type->name);
		return;
	}

	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s SVE flags\n",
				      type->name);
		return;
	}

	ksft_test_result(!(new_sve->flags & SVE_PT_VL_INHERIT),
			 "%s SVE_PT_VL_INHERIT cleared\n", type->name);

	free(new_sve);
}

/* Validate attempting to set the specified VL via ptrace */
static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
			      unsigned int vl, bool *supported)
{
	struct user_sve_header sve;
	struct user_sve_header *new_sve = NULL;
	size_t new_sve_size = 0;
	int ret, prctl_vl;

	*supported = false;

	/* Check if the VL is supported in this process */
	prctl_vl = prctl(type->prctl_set, vl);
	if (prctl_vl == -1)
		ksft_exit_fail_msg("prctl(PR_%s_SET_VL) failed: %s (%d)\n",
				   type->name, strerror(errno), errno);

	/* If the VL is not supported then a supported VL will be returned */
	*supported = (prctl_vl == vl);

	/* Set the VL by doing a set with no register payload */
	memset(&sve, 0, sizeof(sve));
	sve.size = sizeof(sve);
	sve.flags = SVE_PT_REGS_SVE;
	sve.vl = vl;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u\n",
				      type->name, vl);
		return;
	}

	/*
	 * Read back the new register state and verify that we have the
	 * same VL that we got from prctl() on ourselves.
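	 * If the requested VL was not supported, both the prctl() and
	 * the ptrace write should have constrained it to the same value.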
	 */
	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u\n",
				      type->name, vl);
		return;
	}

	ksft_test_result(new_sve->vl == prctl_vl, "Set %s VL %u\n",
			 type->name, vl);

	free(new_sve);
}

static void check_u32(unsigned int vl, const char *reg,
		      uint32_t *in, uint32_t *out, int *errors)
{
	if (*in != *out) {
		printf("# VL %d %s wrote %x read %x\n",
		       vl, reg, *in, *out);
		(*errors)++;
	}
}

/* Access the FPSIMD registers via the SVE regset */
static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type)
{
	void *svebuf;
	struct user_sve_header *sve;
	struct user_fpsimd_state *fpsimd, new_fpsimd;
	unsigned int i, j;
	unsigned char *p;
	int ret;

	svebuf = malloc(SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
	if (!svebuf) {
		ksft_test_result_fail("Failed to allocate FPSIMD buffer\n");
		return;
	}

	memset(svebuf, 0, SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
	sve = svebuf;
	sve->flags = SVE_PT_REGS_FPSIMD;
	sve->size = SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD);
	sve->vl = 16; /* We don't care what the VL is */

	/* Try to set a known FPSIMD state via PT_REGS_SVE */
	fpsimd = (struct user_fpsimd_state *)((char *)sve +
					      SVE_PT_FPSIMD_OFFSET);
	for (i = 0; i < 32; ++i) {
		p = (unsigned char *)&fpsimd->vregs[i];

		for (j = 0; j < sizeof(fpsimd->vregs[i]); ++j)
			p[j] = j;
	}

	/* This should only succeed for SVE */
	ret = set_sve(child, type, sve);
	ksft_test_result((type->regset == NT_ARM_SVE) == (ret == 0),
			 "%s FPSIMD set via SVE: %d\n",
			 type->name, ret);
	if (ret)
		goto out;

	/* Verify via the FPSIMD regset */
	if (get_fpsimd(child, &new_fpsimd)) {
		ksft_test_result_fail("get_fpsimd(): %s\n",
				      strerror(errno));
		goto out;
	}
	if (memcmp(fpsimd, &new_fpsimd, sizeof(*fpsimd)) == 0)
		ksft_test_result_pass("%s get_fpsimd() gave same state\n",
				      type->name);
	else
		ksft_test_result_fail("%s get_fpsimd() gave different state\n",
				      type->name);

out:
	free(svebuf);
}

/* Validate attempting to set SVE data and read SVE data */
static void ptrace_set_sve_get_sve_data(pid_t child,
					const struct vec_type *type,
					unsigned int vl)
{
	void *write_buf;
	void *read_buf = NULL;
	struct user_sve_header *write_sve;
	struct user_sve_header *read_sve;
	size_t read_sve_size = 0;
	unsigned int vq = sve_vq_from_vl(vl);
	int ret, i;
	size_t data_size;
	int errors = 0;

	data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
	write_buf = malloc(data_size);
	if (!write_buf) {
		ksft_test_result_fail("Error allocating %ld byte buffer for %s VL %u\n",
				      data_size, type->name, vl);
		return;
	}
	write_sve = write_buf;

	/* Set up some data and write it out */
	memset(write_sve, 0, data_size);
	write_sve->size = data_size;
	write_sve->vl = vl;
	write_sve->flags = SVE_PT_REGS_SVE;

	for (i = 0; i < __SVE_NUM_ZREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			 SVE_PT_SVE_ZREG_SIZE(vq));

	for (i = 0; i < __SVE_NUM_PREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			 SVE_PT_SVE_PREG_SIZE(vq));

	fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
	fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);

	/* TODO: Generate a valid FFR pattern */
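	/*
	 * FFR is left as the all-zeroes value from the memset() above:
	 * random data is not guaranteed to be a valid FFR value.
	 */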

	ret = set_sve(child, type, write_sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u data\n",
				      type->name, vl);
		goto out;
	}

	/* Read the data back */
	if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u data\n",
				      type->name, vl);
		goto out;
	}
	read_sve = read_buf;

	/* We might read more data if there are extensions we don't know about */
	if (read_sve->size < write_sve->size) {
		ksft_test_result_fail("%s wrote %d bytes, only read %d\n",
				      type->name, write_sve->size,
				      read_sve->size);
		goto out_read;
	}

	for (i = 0; i < __SVE_NUM_ZREGS; i++) {
		if (memcmp(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			   read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			   SVE_PT_SVE_ZREG_SIZE(vq)) != 0) {
			printf("# Mismatch in %u Z%d\n", vl, i);
			errors++;
		}
	}

	for (i = 0; i < __SVE_NUM_PREGS; i++) {
		if (memcmp(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			   read_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			   SVE_PT_SVE_PREG_SIZE(vq)) != 0) {
			printf("# Mismatch in %u P%d\n", vl, i);
			errors++;
		}
	}

	check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
		  read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
	check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
		  read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);

	ksft_test_result(errors == 0, "Set and get %s data for VL %u\n",
			 type->name, vl);

out_read:
	free(read_buf);
out:
	free(write_buf);
}

/* Validate attempting to set SVE data and read it via the FPSIMD regset */
static void ptrace_set_sve_get_fpsimd_data(pid_t child,
					   const struct vec_type *type,
					   unsigned int vl)
{
	void *write_buf;
	struct user_sve_header *write_sve;
	unsigned int vq = sve_vq_from_vl(vl);
	struct user_fpsimd_state fpsimd_state;
	int ret, i;
	size_t data_size;
	int errors = 0;

	if (__BYTE_ORDER == __BIG_ENDIAN) {
		ksft_test_result_skip("Big endian not supported\n");
		return;
	}

	data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
	write_buf = malloc(data_size);
	if (!write_buf) {
		ksft_test_result_fail("Error allocating %ld byte buffer for %s VL %u\n",
				      data_size, type->name, vl);
		return;
	}
	write_sve = write_buf;

	/* Set up some data and write it out */
	memset(write_sve, 0, data_size);
	write_sve->size = data_size;
	write_sve->vl = vl;
	write_sve->flags = SVE_PT_REGS_SVE;

	for (i = 0; i < __SVE_NUM_ZREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			 SVE_PT_SVE_ZREG_SIZE(vq));

	fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
	fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);

	ret = set_sve(child, type, write_sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u data\n",
				      type->name, vl);
		goto out;
	}

	/* Read the data back */
	if (get_fpsimd(child, &fpsimd_state)) {
		ksft_test_result_fail("Failed to read %s VL %u FPSIMD data\n",
				      type->name, vl);
		goto out;
	}

	for (i = 0; i < __SVE_NUM_ZREGS; i++) {
		__uint128_t tmp = 0;

		/*
		 * Z regs are stored endianness invariant, this won't
		 * work for big endian
		 */
		memcpy(&tmp, write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
		       sizeof(tmp));

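		/*
		 * The FPSIMD view exposes only the low 128 bits of each
		 * Z register, so compare just those.
		 */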
		if (tmp != fpsimd_state.vregs[i]) {
			printf("# Mismatch in FPSIMD for %s VL %u Z%d\n",
			       type->name, vl, i);
			errors++;
		}
	}

	check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
		  &fpsimd_state.fpsr, &errors);
	check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
		  &fpsimd_state.fpcr, &errors);

	ksft_test_result(errors == 0, "Set and get FPSIMD data for %s VL %u\n",
			 type->name, vl);

out:
	free(write_buf);
}

/* Validate attempting to set FPSIMD data and read it via the SVE regset */
static void ptrace_set_fpsimd_get_sve_data(pid_t child,
					   const struct vec_type *type,
					   unsigned int vl)
{
	void *read_buf = NULL;
	unsigned char *p;
	struct user_sve_header *read_sve;
	unsigned int vq = sve_vq_from_vl(vl);
	struct user_fpsimd_state write_fpsimd;
	int ret, i, j;
	size_t read_sve_size = 0;
	size_t expected_size;
	int errors = 0;

	if (__BYTE_ORDER == __BIG_ENDIAN) {
		ksft_test_result_skip("Big endian not supported\n");
		return;
	}

	/* Zero fpsr/fpcr so the comparisons below are deterministic */
	memset(&write_fpsimd, 0, sizeof(write_fpsimd));

	for (i = 0; i < 32; ++i) {
		p = (unsigned char *)&write_fpsimd.vregs[i];

		for (j = 0; j < sizeof(write_fpsimd.vregs[i]); ++j)
			p[j] = j;
	}

	ret = set_fpsimd(child, &write_fpsimd);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set FPSIMD state: %d\n",
				      ret);
		return;
	}

	if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u data\n",
				      type->name, vl);
		return;
	}
	read_sve = read_buf;

	if (read_sve->vl != vl) {
		ksft_test_result_fail("Child VL != expected VL: %u != %u\n",
				      read_sve->vl, vl);
		goto out;
	}

	/* The kernel may return either SVE or FPSIMD format */
	switch (read_sve->flags & SVE_PT_REGS_MASK) {
	case SVE_PT_REGS_FPSIMD:
		expected_size = SVE_PT_FPSIMD_SIZE(vq, SVE_PT_REGS_FPSIMD);
		if (read_sve_size < expected_size) {
			ksft_test_result_fail("Read %ld bytes, expected %ld\n",
					      read_sve_size, expected_size);
			goto out;
		}

		ret = memcmp(&write_fpsimd, read_buf + SVE_PT_FPSIMD_OFFSET,
			     sizeof(write_fpsimd));
		if (ret != 0) {
			ksft_print_msg("Read FPSIMD data mismatch\n");
			errors++;
		}
		break;

	case SVE_PT_REGS_SVE:
		expected_size = SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
		if (read_sve_size < expected_size) {
			ksft_test_result_fail("Read %ld bytes, expected %ld\n",
					      read_sve_size, expected_size);
			goto out;
		}

		for (i = 0; i < __SVE_NUM_ZREGS; i++) {
			__uint128_t tmp = 0;

			/*
			 * Z regs are stored endianness invariant, this won't
			 * work for big endian
			 */
			memcpy(&tmp, read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			       sizeof(tmp));

			if (tmp != write_fpsimd.vregs[i]) {
				ksft_print_msg("Mismatch in FPSIMD for %s VL %u Z%d/V%d\n",
					       type->name, vl, i, i);
				errors++;
			}
		}

		check_u32(vl, "FPSR", &write_fpsimd.fpsr,
			  read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
		check_u32(vl, "FPCR", &write_fpsimd.fpcr,
			  read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);
		break;
	default:
		ksft_print_msg("Unexpected regs type %d\n",
			       read_sve->flags & SVE_PT_REGS_MASK);
		errors++;
		break;
	}

	ksft_test_result(errors == 0, "Set FPSIMD, read via SVE for %s VL %u\n",
			 type->name, vl);

out:
	free(read_buf);
}

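/*
 * Drive the traced child: wait for the SIGSTOP it raises after
 * PTRACE_TRACEME (distinguished from group-stop via PTRACE_GETSIGINFO),
 * then run the regset tests for each vector type and VQ.
 */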
static int do_parent(pid_t child)
{
	int ret = EXIT_FAILURE;
	pid_t pid;
	int status, i;
	siginfo_t si;
	unsigned int vq, vl;
	bool vl_supported;

	ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);

	/* Attach to the child */
	while (1) {
		int sig;

		pid = wait(&status);
		if (pid == -1) {
			perror("wait");
			goto error;
		}

		/*
		 * This should never happen but it's hard to flag in
		 * the framework.
		 */
		if (pid != child)
			continue;

		if (WIFEXITED(status) || WIFSIGNALED(status))
			ksft_exit_fail_msg("Child died unexpectedly\n");

		if (!WIFSTOPPED(status))
			goto error;

		sig = WSTOPSIG(status);

		if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
			if (errno == ESRCH)
				goto disappeared;

			if (errno == EINVAL) {
				sig = 0; /* bust group-stop */
				goto cont;
			}

			ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
					      strerror(errno));
			goto error;
		}

		if (sig == SIGSTOP && si.si_code == SI_TKILL &&
		    si.si_pid == pid)
			break;

	cont:
		if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
			if (errno == ESRCH)
				goto disappeared;

			ksft_test_result_fail("PTRACE_CONT: %s\n",
					      strerror(errno));
			goto error;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vec_types); i++) {
		/* FPSIMD via SVE regset */
		if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
			ptrace_sve_fpsimd(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s FPSIMD set via SVE\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s FPSIMD read\n",
					      vec_types[i].name);
		}

		/* prctl() flags */
		if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
			ptrace_set_get_inherit(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s SVE_PT_VL_INHERIT set\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s SVE_PT_VL_INHERIT cleared\n",
					      vec_types[i].name);
		}

		/* Step through every possible VQ */
		for (vq = SVE_VQ_MIN; vq <= TEST_VQ_MAX; vq++) {
			vl = sve_vl_from_vq(vq);

			/* First, try to set this vector length */
			if (getauxval(vec_types[i].hwcap_type) &
			    vec_types[i].hwcap) {
				ptrace_set_get_vl(child, &vec_types[i], vl,
						  &vl_supported);
			} else {
				ksft_test_result_skip("%s get/set VL %d\n",
						      vec_types[i].name, vl);
				vl_supported = false;
			}

			/* If the VL is supported validate data set/get */
			if (vl_supported) {
				ptrace_set_sve_get_sve_data(child, &vec_types[i], vl);
				ptrace_set_sve_get_fpsimd_data(child, &vec_types[i], vl);
				ptrace_set_fpsimd_get_sve_data(child, &vec_types[i], vl);
			} else {
				ksft_test_result_skip("%s set SVE get SVE for VL %d\n",
						      vec_types[i].name, vl);
				ksft_test_result_skip("%s set SVE get FPSIMD for VL %d\n",
						      vec_types[i].name, vl);
				ksft_test_result_skip("%s set FPSIMD get SVE for VL %d\n",
						      vec_types[i].name, vl);
			}
		}
	}

	ret = EXIT_SUCCESS;

error:
	kill(child, SIGKILL);

disappeared:
	return ret;
}

int main(void)
{
	int ret = EXIT_SUCCESS;
	pid_t child;

	srandom(getpid());

	ksft_print_header();
	ksft_set_plan(EXPECTED_TESTS);

	child = fork();
	if (!child)
		return do_child();

	if (do_parent(child))
		ret = EXIT_FAILURE;

	ksft_print_cnts();

	return ret;
}