// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015-2021 ARM Limited.
 * Original author: Dave Martin <Dave.Martin@arm.com>
 */
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <asm/sigcontext.h>
#include <asm/ptrace.h>

#include "kselftest.h"

/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_SVE
#define NT_ARM_SVE 0x405
#endif

#ifndef NT_ARM_SSVE
#define NT_ARM_SSVE 0x40b
#endif

/*
 * The architecture defines the maximum VQ as 16 but for extensibility
 * the kernel defines SVE_VQ_MAX as 512, which would result in us
 * running a *lot* more tests than are useful. Until the architecture
 * is extended let's limit our coverage to what is currently allowed,
 * plus one extra to ensure we cover constraining the VL as expected.
 */
#define TEST_VQ_MAX 17

struct vec_type {
	const char *name;
	unsigned long hwcap_type;
	unsigned long hwcap;
	int regset;
	int prctl_set;
};

static const struct vec_type vec_types[] = {
	{
		.name = "SVE",
		.hwcap_type = AT_HWCAP,
		.hwcap = HWCAP_SVE,
		.regset = NT_ARM_SVE,
		.prctl_set = PR_SVE_SET_VL,
	},
	{
		.name = "Streaming SVE",
		.hwcap_type = AT_HWCAP2,
		.hwcap = HWCAP2_SME,
		.regset = NT_ARM_SSVE,
		.prctl_set = PR_SME_SET_VL,
	},
};

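/* Expected number of test results, used to set the kselftest plan */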
#define VL_TESTS (((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 4)
#define FLAG_TESTS 4
#define FPSIMD_TESTS 2

#define EXPECTED_TESTS ((VL_TESTS + FLAG_TESTS + FPSIMD_TESTS) * ARRAY_SIZE(vec_types))

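/* Fill a buffer with pseudo-random data so round trips can be compared */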
static void fill_buf(char *buf, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		buf[i] = random();
}

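/* Child process: ask to be traced and stop so the parent can attach */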
static int do_child(void)
{
	if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
		ksft_exit_fail_msg("ptrace(PTRACE_TRACEME) failed: %s (%d)\n",
				   strerror(errno), errno);

	if (raise(SIGSTOP))
		ksft_exit_fail_msg("raise(SIGSTOP) failed: %s (%d)\n",
				   strerror(errno), errno);

	return EXIT_SUCCESS;
}

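/* Read the child's FPSIMD register state via the NT_PRFPREG regset */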
static int get_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
	struct iovec iov;
	int ret;

	iov.iov_base = fpsimd;
	iov.iov_len = sizeof(*fpsimd);
	ret = ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
	if (ret == -1)
		ksft_perror("ptrace(PTRACE_GETREGSET)");
	return ret;
}

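/* Write the child's FPSIMD register state via the NT_PRFPREG regset */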
static int set_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
	struct iovec iov;
	int ret;

	iov.iov_base = fpsimd;
	iov.iov_len = sizeof(*fpsimd);
	ret = ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov);
	if (ret == -1)
		ksft_perror("ptrace(PTRACE_SETREGSET)");
	return ret;
}

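/*
 * Read the child's SVE or streaming SVE regset, growing *buf until the
 * kernel-reported size fits.  Returns the header on success, NULL on error.
 */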
static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type,
				       void **buf, size_t *size)
{
	struct user_sve_header *sve;
	void *p;
	size_t sz = sizeof(*sve);
	struct iovec iov;
	int ret;

	while (1) {
		if (*size < sz) {
			p = realloc(*buf, sz);
			if (!p) {
				errno = ENOMEM;
				goto error;
			}

			*buf = p;
			*size = sz;
		}

		iov.iov_base = *buf;
		iov.iov_len = sz;
		ret = ptrace(PTRACE_GETREGSET, pid, type->regset, &iov);
		if (ret) {
			ksft_perror("ptrace(PTRACE_GETREGSET)");
			goto error;
		}

		sve = *buf;
		if (sve->size <= sz)
			break;

		sz = sve->size;
	}

	return sve;

error:
	return NULL;
}

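/* Write an SVE or streaming SVE regset payload to the child */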
static int set_sve(pid_t pid, const struct vec_type *type,
		   const struct user_sve_header *sve)
{
	struct iovec iov;
	int ret;

	iov.iov_base = (void *)sve;
	iov.iov_len = sve->size;
	ret = ptrace(PTRACE_SETREGSET, pid, type->regset, &iov);
	if (ret == -1)
		ksft_perror("ptrace(PTRACE_SETREGSET)");
	return ret;
}

/* A read of an unsupported vector type should fail */
static void read_fails(pid_t child, const struct vec_type *type)
{
	struct user_sve_header *new_sve = NULL;
	size_t new_sve_size = 0;
	void *ret;

	ret = get_sve(child, type, (void **)&new_sve, &new_sve_size);

	ksft_test_result(ret == NULL, "%s unsupported read fails\n",
			 type->name);

	free(new_sve);
}

/* A write of an unsupported vector type should fail */
static void write_fails(pid_t child, const struct vec_type *type)
{
	struct user_sve_header sve;
	int ret;

	/* Just the header, no data */
	memset(&sve, 0, sizeof(sve));
	sve.size = sizeof(sve);
	sve.flags = SVE_PT_REGS_SVE;
	sve.vl = SVE_VL_MIN;
	ret = set_sve(child, type, &sve);

	ksft_test_result(ret != 0, "%s unsupported write fails\n",
			 type->name);
}

/* Validate setting and getting the inherit flag */
static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
{
	struct user_sve_header sve;
	struct user_sve_header *new_sve = NULL;
	size_t new_sve_size = 0;
	int ret;

	/* First set the flag */
	memset(&sve, 0, sizeof(sve));
	sve.size = sizeof(sve);
	sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
	sve.flags = SVE_PT_VL_INHERIT | SVE_PT_REGS_SVE;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
				      type->name);
		return;
	}

	/*
	 * Read back the new register state and verify that we have
	 * set the flags we expected.
	 */
	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s SVE flags\n",
				      type->name);
		return;
	}

	ksft_test_result(new_sve->flags & SVE_PT_VL_INHERIT,
			 "%s SVE_PT_VL_INHERIT set\n", type->name);

	/* Now clear */
	sve.flags &= ~SVE_PT_VL_INHERIT;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to clear %s SVE_PT_VL_INHERIT\n",
				      type->name);
		return;
	}

	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s SVE flags\n",
				      type->name);
		return;
	}

	ksft_test_result(!(new_sve->flags & SVE_PT_VL_INHERIT),
			 "%s SVE_PT_VL_INHERIT cleared\n", type->name);

	free(new_sve);
}

/* Validate attempting to set the specified VL via ptrace */
static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
			      unsigned int vl, bool *supported)
{
	struct user_sve_header sve;
	struct user_sve_header *new_sve = NULL;
	size_t new_sve_size = 0;
	int ret, prctl_vl;

	*supported = false;

	/* Check if the VL is supported in this process */
	prctl_vl = prctl(type->prctl_set, vl);
	if (prctl_vl == -1)
		ksft_exit_fail_msg("prctl(PR_%s_SET_VL) failed: %s (%d)\n",
				   type->name, strerror(errno), errno);

	/* If the VL is not supported then a supported VL will be returned */
	*supported = (prctl_vl == vl);

	/* Set the VL by doing a set with no register payload */
	memset(&sve, 0, sizeof(sve));
	sve.size = sizeof(sve);
	sve.flags = SVE_PT_REGS_SVE;
	sve.vl = vl;
	ret = set_sve(child, type, &sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u\n",
				      type->name, vl);
		return;
	}

	/*
	 * Read back the new register state and verify that we have the
	 * same VL that we got from prctl() on ourselves.
	 */
	if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u\n",
				      type->name, vl);
		return;
	}

	ksft_test_result(new_sve->vl == prctl_vl, "Set %s VL %u\n",
			 type->name, vl);

	free(new_sve);
}

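/* Compare a 32-bit value that was written with the value read back */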
static void check_u32(unsigned int vl, const char *reg,
		      uint32_t *in, uint32_t *out, int *errors)
{
	if (*in != *out) {
		printf("# VL %d %s wrote %x read %x\n",
		       vl, reg, *in, *out);
		(*errors)++;
	}
}

/* Set out of range VLs */
static void ptrace_set_vl_ranges(pid_t child, const struct vec_type *type)
{
	struct user_sve_header sve;
	int ret;

	memset(&sve, 0, sizeof(sve));
	sve.flags = SVE_PT_REGS_SVE;
	sve.size = sizeof(sve);

	ret = set_sve(child, type, &sve);
	ksft_test_result(ret != 0, "%s Set invalid VL 0\n", type->name);

	sve.vl = SVE_VL_MAX + SVE_VQ_BYTES;
	ret = set_sve(child, type, &sve);
	ksft_test_result(ret != 0, "%s Set invalid VL %d\n", type->name,
			 SVE_VL_MAX + SVE_VQ_BYTES);
}

/* Access the FPSIMD registers via the SVE regset */
static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type)
{
	void *svebuf;
	struct user_sve_header *sve;
	struct user_fpsimd_state *fpsimd, new_fpsimd;
	unsigned int i, j;
	unsigned char *p;
	int ret;

	svebuf = malloc(SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
	if (!svebuf) {
		ksft_test_result_fail("Failed to allocate FPSIMD buffer\n");
		return;
	}

	memset(svebuf, 0, SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
	sve = svebuf;
	sve->flags = SVE_PT_REGS_FPSIMD;
	sve->size = SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD);
	sve->vl = 16; /* We don't care what the VL is */

	/* Try to set a known FPSIMD state via PT_REGS_SVE */
	fpsimd = (struct user_fpsimd_state *)((char *)sve +
					      SVE_PT_FPSIMD_OFFSET);
	for (i = 0; i < 32; ++i) {
		p = (unsigned char *)&fpsimd->vregs[i];

		for (j = 0; j < sizeof(fpsimd->vregs[i]); ++j)
			p[j] = j;
	}

	/* This should only succeed for SVE */
	ret = set_sve(child, type, sve);
	ksft_test_result((type->regset == NT_ARM_SVE) == (ret == 0),
			 "%s FPSIMD set via SVE: %d\n",
			 type->name, ret);
	if (ret)
		goto out;

	/* Verify via the FPSIMD regset */
	if (get_fpsimd(child, &new_fpsimd)) {
		ksft_test_result_fail("get_fpsimd(): %s\n",
				      strerror(errno));
		goto out;
	}
	if (memcmp(fpsimd, &new_fpsimd, sizeof(*fpsimd)) == 0)
		ksft_test_result_pass("%s get_fpsimd() gave same state\n",
				      type->name);
	else
		ksft_test_result_fail("%s get_fpsimd() gave different state\n",
				      type->name);

out:
	free(svebuf);
}

/* Write the FPSIMD registers via the SVE regset when SVE is not supported */
static void ptrace_sve_fpsimd_no_sve(pid_t child)
{
	void *svebuf;
	struct user_sve_header *sve;
	struct user_fpsimd_state *fpsimd, new_fpsimd;
	unsigned int i, j;
	unsigned char *p;
	int ret;

	svebuf = malloc(SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
	if (!svebuf) {
		ksft_test_result_fail("Failed to allocate FPSIMD buffer\n");
		return;
	}

	/* On a system without SVE the VL should be set to 0 */
	memset(svebuf, 0, SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
	sve = svebuf;
	sve->flags = SVE_PT_REGS_FPSIMD;
	sve->size = SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD);
	sve->vl = 0;

	/* Try to set a known FPSIMD state via PT_REGS_SVE */
	fpsimd = (struct user_fpsimd_state *)((char *)sve +
					      SVE_PT_FPSIMD_OFFSET);
	for (i = 0; i < 32; ++i) {
		p = (unsigned char *)&fpsimd->vregs[i];

		for (j = 0; j < sizeof(fpsimd->vregs[i]); ++j)
			p[j] = j;
	}

	ret = set_sve(child, &vec_types[0], sve);
	ksft_test_result(ret == 0, "FPSIMD write via SVE\n");
	if (ret) {
		ksft_test_result_skip("Verify FPSIMD write via SVE\n");
		goto out;
	}

	/* Verify via the FPSIMD regset */
	if (get_fpsimd(child, &new_fpsimd)) {
		ksft_test_result_skip("Verify FPSIMD write via SVE\n");
		goto out;
	}
	ksft_test_result(memcmp(fpsimd, &new_fpsimd, sizeof(*fpsimd)) == 0,
			 "Verify FPSIMD write via SVE\n");

out:
	free(svebuf);
}

/* Validate attempting to set SVE data and read SVE data */
static void ptrace_set_sve_get_sve_data(pid_t child,
					const struct vec_type *type,
					unsigned int vl)
{
	void *write_buf;
	void *read_buf = NULL;
	struct user_sve_header *write_sve;
	struct user_sve_header *read_sve;
	size_t read_sve_size = 0;
	unsigned int vq = sve_vq_from_vl(vl);
	int ret, i;
	size_t data_size;
	int errors = 0;

	data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
	write_buf = malloc(data_size);
	if (!write_buf) {
		ksft_test_result_fail("Error allocating %ld byte buffer for %s VL %u\n",
				      data_size, type->name, vl);
		return;
	}
	write_sve = write_buf;

	/* Set up some data and write it out */
	memset(write_sve, 0, data_size);
	write_sve->size = data_size;
	write_sve->vl = vl;
	write_sve->flags = SVE_PT_REGS_SVE;

	for (i = 0; i < __SVE_NUM_ZREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			 SVE_PT_SVE_ZREG_SIZE(vq));

	for (i = 0; i < __SVE_NUM_PREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			 SVE_PT_SVE_PREG_SIZE(vq));

	fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
	fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);

	/* TODO: Generate a valid FFR pattern */

	ret = set_sve(child, type, write_sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u data\n",
				      type->name, vl);
		goto out;
	}

	/* Read the data back */
	if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u data\n",
				      type->name, vl);
		goto out;
	}
	read_sve = read_buf;

	/* We might read more data if there are extensions we don't know about */
	if (read_sve->size < write_sve->size) {
		ksft_test_result_fail("%s wrote %d bytes, only read %d\n",
				      type->name, write_sve->size,
				      read_sve->size);
		goto out_read;
	}

	for (i = 0; i < __SVE_NUM_ZREGS; i++) {
		if (memcmp(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			   read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			   SVE_PT_SVE_ZREG_SIZE(vq)) != 0) {
			printf("# Mismatch in %u Z%d\n", vl, i);
			errors++;
		}
	}

	for (i = 0; i < __SVE_NUM_PREGS; i++) {
		if (memcmp(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			   read_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
			   SVE_PT_SVE_PREG_SIZE(vq)) != 0) {
			printf("# Mismatch in %u P%d\n", vl, i);
			errors++;
		}
	}

	check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
		  read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
	check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
		  read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);

	ksft_test_result(errors == 0, "Set and get %s data for VL %u\n",
			 type->name, vl);

out_read:
	free(read_buf);
out:
	free(write_buf);
}

/* Validate attempting to set SVE data and read it via the FPSIMD regset */
static void ptrace_set_sve_get_fpsimd_data(pid_t child,
					   const struct vec_type *type,
					   unsigned int vl)
{
	void *write_buf;
	struct user_sve_header *write_sve;
	unsigned int vq = sve_vq_from_vl(vl);
	struct user_fpsimd_state fpsimd_state;
	int ret, i;
	size_t data_size;
	int errors = 0;

	if (__BYTE_ORDER == __BIG_ENDIAN) {
		ksft_test_result_skip("Big endian not supported\n");
		return;
	}

	data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
	write_buf = malloc(data_size);
	if (!write_buf) {
		ksft_test_result_fail("Error allocating %ld byte buffer for %s VL %u\n",
				      data_size, type->name, vl);
		return;
	}
	write_sve = write_buf;

	/* Set up some data and write it out */
	memset(write_sve, 0, data_size);
	write_sve->size = data_size;
	write_sve->vl = vl;
	write_sve->flags = SVE_PT_REGS_SVE;

	for (i = 0; i < __SVE_NUM_ZREGS; i++)
		fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			 SVE_PT_SVE_ZREG_SIZE(vq));

	fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
	fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);

	ret = set_sve(child, type, write_sve);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set %s VL %u data\n",
				      type->name, vl);
		goto out;
	}

	/* Read the data back */
	if (get_fpsimd(child, &fpsimd_state)) {
		ksft_test_result_fail("Failed to read %s VL %u FPSIMD data\n",
				      type->name, vl);
		goto out;
	}

	for (i = 0; i < __SVE_NUM_ZREGS; i++) {
		__uint128_t tmp = 0;

		/*
		 * Z regs are stored in an endianness-invariant layout,
		 * so this comparison won't work for big endian.
		 */
		memcpy(&tmp, write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
		       sizeof(tmp));

		if (tmp != fpsimd_state.vregs[i]) {
			printf("# Mismatch in FPSIMD for %s VL %u Z%d\n",
			       type->name, vl, i);
			errors++;
		}
	}

	check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
		  &fpsimd_state.fpsr, &errors);
	check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
		  &fpsimd_state.fpcr, &errors);

	ksft_test_result(errors == 0, "Set and get FPSIMD data for %s VL %u\n",
			 type->name, vl);

out:
	free(write_buf);
}

/* Validate attempting to set FPSIMD data and read it via the SVE regset */
static void ptrace_set_fpsimd_get_sve_data(pid_t child,
					   const struct vec_type *type,
					   unsigned int vl)
{
	void *read_buf = NULL;
	unsigned char *p;
	struct user_sve_header *read_sve;
	unsigned int vq = sve_vq_from_vl(vl);
	struct user_fpsimd_state write_fpsimd;
	int ret, i, j;
	size_t read_sve_size = 0;
	size_t expected_size;
	int errors = 0;

	if (__BYTE_ORDER == __BIG_ENDIAN) {
		ksft_test_result_skip("Big endian not supported\n");
		return;
	}

	for (i = 0; i < 32; ++i) {
		p = (unsigned char *)&write_fpsimd.vregs[i];

		for (j = 0; j < sizeof(write_fpsimd.vregs[i]); ++j)
			p[j] = j;
	}

	ret = set_fpsimd(child, &write_fpsimd);
	if (ret != 0) {
		ksft_test_result_fail("Failed to set FPSIMD state: %d\n",
				      ret);
		return;
	}

	if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
		ksft_test_result_fail("Failed to read %s VL %u data\n",
				      type->name, vl);
		return;
	}
	read_sve = read_buf;

	if (read_sve->vl != vl) {
		ksft_test_result_fail("Child VL != expected VL: %u != %u\n",
				      read_sve->vl, vl);
		goto out;
	}

	/* The kernel may return either SVE or FPSIMD format */
	switch (read_sve->flags & SVE_PT_REGS_MASK) {
	case SVE_PT_REGS_FPSIMD:
		expected_size = SVE_PT_FPSIMD_SIZE(vq, SVE_PT_REGS_FPSIMD);
		if (read_sve_size < expected_size) {
			ksft_test_result_fail("Read %ld bytes, expected %ld\n",
					      read_sve_size, expected_size);
			goto out;
		}

		ret = memcmp(&write_fpsimd, read_buf + SVE_PT_FPSIMD_OFFSET,
			     sizeof(write_fpsimd));
		if (ret != 0) {
			ksft_print_msg("Read FPSIMD data mismatch\n");
			errors++;
		}
		break;

	case SVE_PT_REGS_SVE:
		expected_size = SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
		if (read_sve_size < expected_size) {
			ksft_test_result_fail("Read %ld bytes, expected %ld\n",
					      read_sve_size, expected_size);
			goto out;
		}

		for (i = 0; i < __SVE_NUM_ZREGS; i++) {
			__uint128_t tmp = 0;

			/*
			 * Z regs are stored in an endianness-invariant
			 * layout, so this comparison won't work for big
			 * endian.
			 */
			memcpy(&tmp, read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
			       sizeof(tmp));

			if (tmp != write_fpsimd.vregs[i]) {
				ksft_print_msg("Mismatch in FPSIMD for %s VL %u Z%d/V%d\n",
					       type->name, vl, i, i);
				errors++;
			}
		}

		check_u32(vl, "FPSR", &write_fpsimd.fpsr,
			  read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
		check_u32(vl, "FPCR", &write_fpsimd.fpcr,
			  read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);
		break;
	default:
		ksft_print_msg("Unexpected regs type %d\n",
			       read_sve->flags & SVE_PT_REGS_MASK);
		errors++;
		break;
	}

	ksft_test_result(errors == 0, "Set FPSIMD, read via SVE for %s VL %u\n",
			 type->name, vl);

out:
	free(read_buf);
}

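/* Parent process: attach to the child and run all of the ptrace tests */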
static int do_parent(pid_t child)
{
	int ret = EXIT_FAILURE;
	pid_t pid;
	int status, i;
	siginfo_t si;
	unsigned int vq, vl;
	bool vl_supported;

	ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);

	/* Attach to the child */
	while (1) {
		int sig;

		pid = wait(&status);
		if (pid == -1) {
			perror("wait");
			goto error;
		}

		/*
		 * This should never happen but it's hard to flag in
		 * the framework.
		 */
		if (pid != child)
			continue;

		if (WIFEXITED(status) || WIFSIGNALED(status))
			ksft_exit_fail_msg("Child died unexpectedly\n");

		if (!WIFSTOPPED(status))
			goto error;

		sig = WSTOPSIG(status);

		if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
			if (errno == ESRCH)
				goto disappeared;

			if (errno == EINVAL) {
				sig = 0; /* bust group-stop */
				goto cont;
			}

			ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
					      strerror(errno));
			goto error;
		}

		if (sig == SIGSTOP && si.si_code == SI_TKILL &&
		    si.si_pid == pid)
			break;

	cont:
		if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
			if (errno == ESRCH)
				goto disappeared;

			ksft_test_result_fail("PTRACE_CONT: %s\n",
					      strerror(errno));
			goto error;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vec_types); i++) {
		/*
		 * If the vector type isn't supported reads and writes
		 * should fail.
		 */
		if (!(getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap)) {
			read_fails(child, &vec_types[i]);
			write_fails(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s unsupported read fails\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s unsupported write fails\n",
					      vec_types[i].name);
		}

		/* FPSIMD via SVE regset */
		if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
			ptrace_sve_fpsimd(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s FPSIMD set via SVE\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s FPSIMD read\n",
					      vec_types[i].name);
		}

		/* prctl() flags */
		if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
			ptrace_set_get_inherit(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s SVE_PT_VL_INHERIT set\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s SVE_PT_VL_INHERIT cleared\n",
					      vec_types[i].name);
		}

		/* Setting out of bounds VLs should fail */
		if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
			ptrace_set_vl_ranges(child, &vec_types[i]);
		} else {
			ksft_test_result_skip("%s Set invalid VL 0\n",
					      vec_types[i].name);
			ksft_test_result_skip("%s Set invalid VL %d\n",
					      vec_types[i].name,
					      SVE_VL_MAX + SVE_VQ_BYTES);
		}

		/* Step through every possible VQ */
		for (vq = SVE_VQ_MIN; vq <= TEST_VQ_MAX; vq++) {
			vl = sve_vl_from_vq(vq);

			/* First, try to set this vector length */
			if (getauxval(vec_types[i].hwcap_type) &
			    vec_types[i].hwcap) {
				ptrace_set_get_vl(child, &vec_types[i], vl,
						  &vl_supported);
			} else {
				ksft_test_result_skip("%s get/set VL %d\n",
						      vec_types[i].name, vl);
				vl_supported = false;
			}

			/* If the VL is supported validate data set/get */
			if (vl_supported) {
				ptrace_set_sve_get_sve_data(child, &vec_types[i], vl);
				ptrace_set_sve_get_fpsimd_data(child, &vec_types[i], vl);
				ptrace_set_fpsimd_get_sve_data(child, &vec_types[i], vl);
			} else {
				ksft_test_result_skip("%s set SVE get SVE for VL %d\n",
						      vec_types[i].name, vl);
				ksft_test_result_skip("%s set SVE get FPSIMD for VL %d\n",
						      vec_types[i].name, vl);
				ksft_test_result_skip("%s set FPSIMD get SVE for VL %d\n",
						      vec_types[i].name, vl);
			}
		}
	}

	/* We support SVE writes in FPSIMD format on SME-only systems */
	if (!(getauxval(AT_HWCAP) & HWCAP_SVE) &&
	    (getauxval(AT_HWCAP2) & HWCAP2_SME)) {
		ptrace_sve_fpsimd_no_sve(child);
	} else {
		ksft_test_result_skip("FPSIMD write via SVE\n");
		ksft_test_result_skip("Verify FPSIMD write via SVE\n");
	}

	ret = EXIT_SUCCESS;

error:
	kill(child, SIGKILL);

disappeared:
	return ret;
}

int main(void)
{
	int ret = EXIT_SUCCESS;
	pid_t child;

	srandom(getpid());

	ksft_print_header();
	ksft_set_plan(EXPECTED_TESTS);

	child = fork();
	if (!child)
		return do_child();

	if (do_parent(child))
		ret = EXIT_FAILURE;

	ksft_print_cnts();

	return ret;
}