// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015-2021 ARM Limited.
 * Original author: Dave Martin <Dave.Martin@arm.com>
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <asm/sigcontext.h>
#include <asm/ptrace.h>

#include "../../kselftest.h"

/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_SVE
#define NT_ARM_SVE 0x405
#endif

#ifndef NT_ARM_SSVE
#define NT_ARM_SSVE 0x40b
#endif

/*
 * The architecture defines the maximum VQ as 16 but for extensibility
 * the kernel specifies the SVE_VQ_MAX as 512 resulting in us running
 * a *lot* more tests than are useful if we use it. Until the
 * architecture is extended let's limit our coverage to what is
 * currently allowed, plus one extra to ensure we cover constraining
 * the VL as expected.
 */
#define TEST_VQ_MAX 17

struct vec_type {
	const char *name;
	unsigned long hwcap_type;
	unsigned long hwcap;
	int regset;
	int prctl_set;
};

static const struct vec_type vec_types[] = {
	{
		.name = "SVE",
		.hwcap_type = AT_HWCAP,
		.hwcap = HWCAP_SVE,
		.regset = NT_ARM_SVE,
		.prctl_set = PR_SVE_SET_VL,
	},
	{
		.name = "Streaming SVE",
		.hwcap_type = AT_HWCAP2,
		.hwcap = HWCAP2_SME,
		.regset = NT_ARM_SSVE,
		.prctl_set = PR_SME_SET_VL,
	},
};

#define VL_TESTS (((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 4)
#define FLAG_TESTS 4
#define FPSIMD_TESTS 2

#define EXPECTED_TESTS ((VL_TESTS + FLAG_TESTS + FPSIMD_TESTS) * ARRAY_SIZE(vec_types))

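/* Fill a buffer with pseudo-random data for later readback comparison */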
static void fill_buf(char *buf, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		buf[i] = random();
}

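/*
 * Child process: opt in to being traced and stop ourselves so that the
 * parent can attach and drive the tests via ptrace.
 */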
static int do_child(void)
{
	if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
		ksft_exit_fail_msg("ptrace(PTRACE_TRACEME) failed: %s (%d)\n",
				   strerror(errno), errno);

	if (raise(SIGSTOP))
		ksft_exit_fail_msg("raise(SIGSTOP) failed: %s (%d)\n",
				   strerror(errno), errno);

	return EXIT_SUCCESS;
}

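/* Read the child's FPSIMD state via the NT_PRFPREG regset */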
static int get_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
	struct iovec iov;
	int ret;

	iov.iov_base = fpsimd;
	iov.iov_len = sizeof(*fpsimd);
	ret = ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
	if (ret == -1)
		ksft_perror("ptrace(PTRACE_GETREGSET)");
	return ret;
}

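/* Write the child's FPSIMD state via the NT_PRFPREG regset */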
static int set_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
	struct iovec iov;
	int ret;

	iov.iov_base = fpsimd;
	iov.iov_len = sizeof(*fpsimd);
	ret = ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov);
	if (ret == -1)
		ksft_perror("ptrace(PTRACE_SETREGSET)");
	return ret;
}

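/*
 * Read the child's SVE or streaming SVE regset.  The regset size depends
 * on the vector length, and the kernel reports the full size in the
 * returned header, so grow *buf and retry until the whole regset fits.
 * Returns a pointer to the header on success, NULL on failure.
 */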
static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type,
				       void **buf, size_t *size)
{
	struct user_sve_header *sve;
	void *p;
	size_t sz = sizeof(*sve);
	struct iovec iov;
	int ret;

	while (1) {
		if (*size < sz) {
			p = realloc(*buf, sz);
			if (!p) {
				errno = ENOMEM;
				goto error;
			}

			*buf = p;
			*size = sz;
		}

		iov.iov_base = *buf;
		iov.iov_len = sz;
		ret = ptrace(PTRACE_GETREGSET, pid, type->regset, &iov);
		if (ret) {
			ksft_perror("ptrace(PTRACE_GETREGSET)");
			goto error;
		}

		sve = *buf;
		if (sve->size <= sz)
			break;

		sz = sve->size;
	}

	return sve;

error:
	return NULL;
}

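/* Write the child's SVE or streaming SVE regset from a caller-built image */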
static int set_sve(pid_t pid, const struct vec_type *type,
		   const struct user_sve_header *sve)
{
	struct iovec iov;
	int ret;

	iov.iov_base = (void *)sve;
	iov.iov_len = sve->size;
	ret = ptrace(PTRACE_SETREGSET, pid, type->regset, &iov);
	if (ret == -1)
		ksft_perror("ptrace(PTRACE_SETREGSET)");
	return ret;
}

/* A read of an unsupported vector type's regset should fail */
static void read_fails(pid_t child, const struct vec_type *type)
{
	struct user_sve_header *new_sve = NULL;
	size_t new_sve_size = 0;
	void *ret;

	ret = get_sve(child, type, (void **)&new_sve, &new_sve_size);

	ksft_test_result(ret == NULL, "%s unsupported read fails\n",
			 type->name);

	free(new_sve);
}

/* A write to an unsupported vector type's regset should fail */
static void write_fails(pid_t child, const struct vec_type *type)
{
	struct user_sve_header sve;
	int ret;

	/* Just the header, no data */
	memset(&sve, 0, sizeof(sve));
	sve.size = sizeof(sve);
	sve.flags = SVE_PT_REGS_SVE;
	sve.vl = SVE_VL_MIN;
	ret = set_sve(child, type, &sve);

	ksft_test_result(ret != 0, "%s unsupported write fails\n",
			 type->name);
}

/* Validate setting and getting the inherit flag */
static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
{
	struct user_sve_header sve;
	struct user_sve_header *new_sve = NULL;
	size_t new_sve_size = 0;
	int ret;

	/* First set the flag */
	memset(&sve, 0, sizeof(sve));
	sve.size = sizeof(sve);
	sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
	sve.flags = SVE_PT_VL_INHERIT | SVE_PT_REGS_SVE;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
				      type->name);
		return;
	}

	/*
	 * Read back the new register state and verify that we have
	 * set the flags we expected.
	 */
	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s SVE flags\n",
				      type->name);
		return;
	}

	ksft_test_result(new_sve->flags & SVE_PT_VL_INHERIT,
			 "%s SVE_PT_VL_INHERIT set\n", type->name);

	/* Now clear */
	sve.flags &= ~SVE_PT_VL_INHERIT;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to clear %s SVE_PT_VL_INHERIT\n",
				      type->name);
		return;
	}

	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s SVE flags\n",
				      type->name);
		return;
	}

	ksft_test_result(!(new_sve->flags & SVE_PT_VL_INHERIT),
			 "%s SVE_PT_VL_INHERIT cleared\n", type->name);

	free(new_sve);
}

/* Validate attempting to set the specified VL via ptrace */
static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
			      unsigned int vl, bool *supported)
{
	struct user_sve_header sve;
	struct user_sve_header *new_sve = NULL;
	size_t new_sve_size = 0;
	int ret, prctl_vl;

	*supported = false;

	/* Check if the VL is supported in this process */
	prctl_vl = prctl(type->prctl_set, vl);
	if (prctl_vl == -1)
		ksft_exit_fail_msg("prctl(PR_%s_SET_VL) failed: %s (%d)\n",
				   type->name, strerror(errno), errno);

	/* If the VL is not supported then a supported VL will be returned */
	*supported = (prctl_vl == vl);

	/* Set the VL by doing a set with no register payload */
	memset(&sve, 0, sizeof(sve));
	sve.size = sizeof(sve);
	sve.flags = SVE_PT_REGS_SVE;
	sve.vl = vl;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u\n",
				      type->name, vl);
		return;
	}

	/*
	 * Read back the new register state and verify that we have the
	 * same VL that we got from prctl() on ourselves.
	 */
	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u\n",
				      type->name, vl);
		return;
	}

	ksft_test_result(new_sve->vl == prctl_vl, "Set %s VL %u\n",
			 type->name, vl);

	free(new_sve);
}

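/* Compare a written 32-bit register value with what was read back */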
static void check_u32(unsigned int vl, const char *reg,
		      uint32_t *in, uint32_t *out, int *errors)
{
	if (*in != *out) {
		printf("# VL %d %s wrote %x read %x\n",
		       vl, reg, *in, *out);
		(*errors)++;
	}
}

/* Set out of range VLs */
static void ptrace_set_vl_ranges(pid_t child, const struct vec_type *type)
{
	struct user_sve_header sve;
	int ret;

	memset(&sve, 0, sizeof(sve));
	sve.flags = SVE_PT_REGS_SVE;
	sve.size = sizeof(sve);

	ret = set_sve(child, type, &sve);
	ksft_test_result(ret != 0, "%s Set invalid VL 0\n", type->name);

	sve.vl = SVE_VL_MAX + SVE_VQ_BYTES;
	ret = set_sve(child, type, &sve);
	ksft_test_result(ret != 0, "%s Set invalid VL %d\n", type->name,
			 SVE_VL_MAX + SVE_VQ_BYTES);
}

/* Access the FPSIMD registers via the SVE regset */
static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type)
{
	void *svebuf;
	struct user_sve_header *sve;
	struct user_fpsimd_state *fpsimd, new_fpsimd;
	unsigned int i, j;
	unsigned char *p;
	int ret;

	svebuf = malloc(SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
	if (!svebuf) {
		ksft_test_result_fail("Failed to allocate FPSIMD buffer\n");
		return;
	}

	memset(svebuf, 0, SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
	sve = svebuf;
	sve->flags = SVE_PT_REGS_FPSIMD;
	sve->size = SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD);
	sve->vl = 16;	/* We don't care what the VL is */

	/* Try to set a known FPSIMD state via PT_REGS_SVE */
	fpsimd = (struct user_fpsimd_state *)((char *)sve +
					      SVE_PT_FPSIMD_OFFSET);
	for (i = 0; i < 32; ++i) {
		p = (unsigned char *)&fpsimd->vregs[i];

		for (j = 0; j < sizeof(fpsimd->vregs[i]); ++j)
			p[j] = j;
	}

	/* This should only succeed for SVE */
	ret = set_sve(child, type, sve);
	ksft_test_result((type->regset == NT_ARM_SVE) == (ret == 0),
			 "%s FPSIMD set via SVE: %d\n",
			 type->name, ret);
	if (ret)
		goto out;

	/* Verify via the FPSIMD regset */
	if (get_fpsimd(child, &new_fpsimd)) {
		ksft_test_result_fail("get_fpsimd(): %s\n",
				      strerror(errno));
		goto out;
	}
	if (memcmp(fpsimd, &new_fpsimd, sizeof(*fpsimd)) == 0)
		ksft_test_result_pass("%s get_fpsimd() gave same state\n",
				      type->name);
	else
		ksft_test_result_fail("%s get_fpsimd() gave different state\n",
				      type->name);

out:
	free(svebuf);
}

/* Validate attempting to set SVE data and read SVE data */
static void ptrace_set_sve_get_sve_data(pid_t child,
					const struct vec_type *type,
					unsigned int vl)
{
	void *write_buf;
	void *read_buf = NULL;
	struct user_sve_header *write_sve;
	struct user_sve_header *read_sve;
	size_t read_sve_size = 0;
	unsigned int vq = sve_vq_from_vl(vl);
	int ret, i;
	size_t data_size;
	int errors = 0;

	data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
	write_buf = malloc(data_size);
	if (!write_buf) {
		ksft_test_result_fail("Error allocating %ld byte buffer for %s VL %u\n",
				      data_size, type->name, vl);
		return;
	}
	write_sve = write_buf;

	/* Set up some data and write it out */
	memset(write_sve, 0, data_size);
	write_sve->size = data_size;
	write_sve->vl = vl;
	write_sve->flags = SVE_PT_REGS_SVE;

	for (i = 0; i < __SVE_NUM_ZREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			 SVE_PT_SVE_ZREG_SIZE(vq));

	for (i = 0; i < __SVE_NUM_PREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			 SVE_PT_SVE_PREG_SIZE(vq));

	fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
	fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);

	/* TODO: Generate a valid FFR pattern */

	ret = set_sve(child, type, write_sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u data\n",
				      type->name, vl);
		goto out;
	}

	/* Read the data back */
	if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u data\n",
				      type->name, vl);
		goto out;
	}
	read_sve = read_buf;

	/* We might read more data if there are extensions we don't know about */
	if (read_sve->size < write_sve->size) {
		ksft_test_result_fail("%s wrote %d bytes, only read %d\n",
				      type->name, write_sve->size,
				      read_sve->size);
		goto out_read;
	}

	for (i = 0; i < __SVE_NUM_ZREGS; i++) {
		if (memcmp(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			   read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			   SVE_PT_SVE_ZREG_SIZE(vq)) != 0) {
			printf("# Mismatch in %u Z%d\n", vl, i);
			errors++;
		}
	}

	for (i = 0; i < __SVE_NUM_PREGS; i++) {
		if (memcmp(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			   read_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			   SVE_PT_SVE_PREG_SIZE(vq)) != 0) {
			printf("# Mismatch in %u P%d\n", vl, i);
			errors++;
		}
	}

	check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
		  read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
	check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
		  read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);

	ksft_test_result(errors == 0, "Set and get %s data for VL %u\n",
			 type->name, vl);

out_read:
	free(read_buf);
out:
	free(write_buf);
}

/* Validate attempting to set SVE data and read it via the FPSIMD regset */
static void ptrace_set_sve_get_fpsimd_data(pid_t child,
					   const struct vec_type *type,
					   unsigned int vl)
{
	void *write_buf;
	struct user_sve_header *write_sve;
	unsigned int vq = sve_vq_from_vl(vl);
	struct user_fpsimd_state fpsimd_state;
	int ret, i;
	size_t data_size;
	int errors = 0;

	if (__BYTE_ORDER == __BIG_ENDIAN) {
		ksft_test_result_skip("Big endian not supported\n");
		return;
	}

	data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
	write_buf = malloc(data_size);
	if (!write_buf) {
		ksft_test_result_fail("Error allocating %ld byte buffer for %s VL %u\n",
				      data_size, type->name, vl);
		return;
	}
	write_sve = write_buf;

	/* Set up some data and write it out */
	memset(write_sve, 0, data_size);
	write_sve->size = data_size;
	write_sve->vl = vl;
	write_sve->flags = SVE_PT_REGS_SVE;

	for (i = 0; i < __SVE_NUM_ZREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			 SVE_PT_SVE_ZREG_SIZE(vq));

	fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
	fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);

	ret = set_sve(child, type, write_sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u data\n",
				      type->name, vl);
		goto out;
	}

	/* Read the data back */
	if (get_fpsimd(child, &fpsimd_state)) {
		ksft_test_result_fail("Failed to read %s VL %u FPSIMD data\n",
				      type->name, vl);
		goto out;
	}

	for (i = 0; i < __SVE_NUM_ZREGS; i++) {
		__uint128_t tmp = 0;

		/*
		 * Z regs are stored endianness-invariant; this won't
		 * work for big endian
		 */
		memcpy(&tmp, write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
		       sizeof(tmp));

		if (tmp != fpsimd_state.vregs[i]) {
			printf("# Mismatch in FPSIMD for %s VL %u Z%d\n",
			       type->name, vl, i);
			errors++;
		}
	}

	check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
		  &fpsimd_state.fpsr, &errors);
	check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
		  &fpsimd_state.fpcr, &errors);

	ksft_test_result(errors == 0, "Set and get FPSIMD data for %s VL %u\n",
			 type->name, vl);

out:
	free(write_buf);
}

/* Validate attempting to set FPSIMD data and read it via the SVE regset */
static void ptrace_set_fpsimd_get_sve_data(pid_t child,
					   const struct vec_type *type,
					   unsigned int vl)
{
	void *read_buf = NULL;
	unsigned char *p;
	struct user_sve_header *read_sve;
	unsigned int vq = sve_vq_from_vl(vl);
	struct user_fpsimd_state write_fpsimd;
	int ret, i, j;
	size_t read_sve_size = 0;
	size_t expected_size;
	int errors = 0;

	if (__BYTE_ORDER == __BIG_ENDIAN) {
		ksft_test_result_skip("Big endian not supported\n");
		return;
	}

	for (i = 0; i < 32; ++i) {
		p = (unsigned char *)&write_fpsimd.vregs[i];

		for (j = 0; j < sizeof(write_fpsimd.vregs[i]); ++j)
			p[j] = j;
	}

	ret = set_fpsimd(child, &write_fpsimd);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set FPSIMD state: %d\n",
				      ret);
		return;
	}

	if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u data\n",
				      type->name, vl);
		return;
	}
	read_sve = read_buf;

	if (read_sve->vl != vl) {
		ksft_test_result_fail("Child VL != expected VL: %u != %u\n",
				      read_sve->vl, vl);
		goto out;
	}

	/* The kernel may return either SVE or FPSIMD format */
	switch (read_sve->flags & SVE_PT_REGS_MASK) {
	case SVE_PT_REGS_FPSIMD:
		expected_size = SVE_PT_FPSIMD_SIZE(vq, SVE_PT_REGS_FPSIMD);
		if (read_sve_size < expected_size) {
			ksft_test_result_fail("Read %ld bytes, expected %ld\n",
					      read_sve_size, expected_size);
			goto out;
		}

		ret = memcmp(&write_fpsimd, read_buf + SVE_PT_FPSIMD_OFFSET,
			     sizeof(write_fpsimd));
		if (ret != 0) {
			ksft_print_msg("Read FPSIMD data mismatch\n");
			errors++;
		}
		break;

	case SVE_PT_REGS_SVE:
		expected_size = SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
		if (read_sve_size < expected_size) {
			ksft_test_result_fail("Read %ld bytes, expected %ld\n",
					      read_sve_size, expected_size);
			goto out;
		}

		for (i = 0; i < __SVE_NUM_ZREGS; i++) {
			__uint128_t tmp = 0;

			/*
			 * Z regs are stored endianness-invariant; this won't
			 * work for big endian
			 */
			memcpy(&tmp, read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			       sizeof(tmp));

			if (tmp != write_fpsimd.vregs[i]) {
				ksft_print_msg("Mismatch in FPSIMD for %s VL %u Z%d/V%d\n",
					       type->name, vl, i, i);
				errors++;
			}
		}

		check_u32(vl, "FPSR", &write_fpsimd.fpsr,
			  read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
		check_u32(vl, "FPCR", &write_fpsimd.fpcr,
			  read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);
		break;
	default:
		ksft_print_msg("Unexpected regs type %d\n",
			       read_sve->flags & SVE_PT_REGS_MASK);
		errors++;
		break;
	}

	ksft_test_result(errors == 0, "Set FPSIMD, read via SVE for %s VL %u\n",
			 type->name, vl);

out:
	free(read_buf);
}

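/*
 * Parent process: wait for the child to stop itself, then run the full
 * set of ptrace tests against it for each vector type.
 */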
static int do_parent(pid_t child)
{
	int ret = EXIT_FAILURE;
	pid_t pid;
	int status, i;
	siginfo_t si;
	unsigned int vq, vl;
	bool vl_supported;

	ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);

	/* Attach to the child */
	while (1) {
		int sig;

		pid = wait(&status);
		if (pid == -1) {
			perror("wait");
			goto error;
		}

		/*
		 * This should never happen but it's hard to flag in
		 * the framework.
		 */
		if (pid != child)
			continue;

		if (WIFEXITED(status) || WIFSIGNALED(status))
			ksft_exit_fail_msg("Child died unexpectedly\n");

		if (!WIFSTOPPED(status))
			goto error;

		sig = WSTOPSIG(status);

		if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
			if (errno == ESRCH)
				goto disappeared;

			if (errno == EINVAL) {
				sig = 0; /* bust group-stop */
				goto cont;
			}

			ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
					      strerror(errno));
			goto error;
		}

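		/*
		 * The stop we are waiting for is the child's own
		 * raise(SIGSTOP), reported with si_code SI_TKILL and
		 * si_pid set to the child itself.
		 */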
		if (sig == SIGSTOP && si.si_code == SI_TKILL &&
		    si.si_pid == pid)
			break;

	cont:
		if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
			if (errno == ESRCH)
				goto disappeared;

			ksft_test_result_fail("PTRACE_CONT: %s\n",
					      strerror(errno));
			goto error;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vec_types); i++) {
		/*
		 * If the vector type isn't supported reads and writes
		 * should fail.
		 */
		if (!(getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap)) {
			read_fails(child, &vec_types[i]);
			write_fails(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s unsupported read fails\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s unsupported write fails\n",
					      vec_types[i].name);
		}

		/* FPSIMD via SVE regset */
		if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
			ptrace_sve_fpsimd(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s FPSIMD set via SVE\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s FPSIMD read\n",
					      vec_types[i].name);
		}

		/* prctl() flags */
		if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
			ptrace_set_get_inherit(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s SVE_PT_VL_INHERIT set\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s SVE_PT_VL_INHERIT cleared\n",
					      vec_types[i].name);
		}

		/* Setting out of bounds VLs should fail */
		if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
			ptrace_set_vl_ranges(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s Set invalid VL 0\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s Set invalid VL %d\n",
					      vec_types[i].name,
					      SVE_VL_MAX + SVE_VQ_BYTES);
		}

		/* Step through every possible VQ */
		for (vq = SVE_VQ_MIN; vq <= TEST_VQ_MAX; vq++) {
			vl = sve_vl_from_vq(vq);

			/* First, try to set this vector length */
			if (getauxval(vec_types[i].hwcap_type) &
			    vec_types[i].hwcap) {
				ptrace_set_get_vl(child, &vec_types[i], vl,
						  &vl_supported);
			} else {
				ksft_test_result_skip("%s get/set VL %d\n",
						      vec_types[i].name, vl);
				vl_supported = false;
			}

			/* If the VL is supported validate data set/get */
			if (vl_supported) {
				ptrace_set_sve_get_sve_data(child, &vec_types[i], vl);
				ptrace_set_sve_get_fpsimd_data(child, &vec_types[i], vl);
				ptrace_set_fpsimd_get_sve_data(child, &vec_types[i], vl);
			} else {
				ksft_test_result_skip("%s set SVE get SVE for VL %d\n",
						      vec_types[i].name, vl);
				ksft_test_result_skip("%s set SVE get FPSIMD for VL %d\n",
						      vec_types[i].name, vl);
				ksft_test_result_skip("%s set FPSIMD get SVE for VL %d\n",
						      vec_types[i].name, vl);
			}
		}
	}

	ret = EXIT_SUCCESS;

error:
	kill(child, SIGKILL);

disappeared:
	return ret;
}

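/*
 * Fork a child to be traced; the child stops itself and the parent runs
 * all of the tests against it.
 */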
int main(void)
{
	int ret = EXIT_SUCCESS;
	pid_t child;

	srandom(getpid());

	ksft_print_header();
	ksft_set_plan(EXPECTED_TESTS);

	child = fork();
	if (!child)
		return do_child();

	if (do_parent(child))
		ret = EXIT_FAILURE;

	ksft_print_cnts();

	return ret;
}