// SPDX-License-Identifier: GPL-2.0-only
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <unistd.h>
#include <errno.h>

#include <linux/ptrace.h>
#include <linux/elf.h>

#include "kselftest_harness.h"
#include "v_helpers.h"

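/*
 * SR_FS_DIRTY is the sstatus.FS field (bits 14:13) set to Dirty (0b11).
 * On XTheadVector 0.7 the fixed-point flags vxsat/vxrm are part of the
 * FP status rather than a separate vcsr, so FS has to be enabled before
 * they can be read (see the csrs in ptrace_v_early_debug below).
 * CSR_VXRM_SHIFT is the vxrm position within the standard vcsr layout:
 * vxsat is bit 0, vxrm occupies bits 2:1.
 */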
#define SR_FS_DIRTY	0x00006000UL
#define CSR_VXRM_SHIFT	1

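/*
 * Start-up handshake: a forked child spins on chld_lock until the
 * tracer clears it with PTRACE_POKEDATA. After fork() the variable sits
 * at the same virtual address in both processes, so the parent can pass
 * &chld_lock as the tracee address directly.
 */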
volatile unsigned long chld_lock;

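/*
 * The kernel allocates a task's V context lazily, on its first vector
 * instruction; until then, PTRACE_GETREGSET with NT_RISCV_VECTOR is
 * expected to fail with ENODATA.
 */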
TEST(ptrace_v_not_enabled)
{
	pid_t pid;

	if (!is_vector_supported() && !is_xtheadvector_supported())
		SKIP(return, "Vector not supported");

	chld_lock = 1;
	pid = fork();
	ASSERT_LE(0, pid)
		TH_LOG("fork: %m");

	if (pid == 0) {
		while (chld_lock == 1)
			asm volatile("" : : "g"(chld_lock) : "memory");

		asm volatile ("ebreak" : : : );
	} else {
		struct __riscv_v_regset_state *regset_data;
		unsigned long vlenb = get_vr_len();
		size_t regset_size;
		struct iovec iov;
		int status;
		int ret;

		/* attach */

		ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* unlock */

		ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));

		/* resume and wait for ebreak */

		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* try to read vector registers from the tracee */

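		/*
		 * NT_RISCV_VECTOR payload (uapi <asm/ptrace.h>):
		 *
		 *	struct __riscv_v_regset_state {
		 *		unsigned long vstart;
		 *		unsigned long vl;
		 *		unsigned long vtype;
		 *		unsigned long vcsr;
		 *		unsigned long vlenb;
		 *		char vreg[];
		 *	};
		 *
		 * i.e. a CSR header immediately followed by the 32 vector
		 * registers, each vlenb bytes wide - hence the
		 * sizeof(*regset_data) + vlenb * 32 below.
		 */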
		regset_size = sizeof(*regset_data) + vlenb * 32;
		regset_data = calloc(1, regset_size);
		ASSERT_NE(NULL, regset_data);

		iov.iov_base = regset_data;
		iov.iov_len = regset_size;

		/* V extension is available, but not yet enabled for the tracee */

		errno = 0;
		ret = ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov);
		ASSERT_EQ(ENODATA, errno);
		ASSERT_EQ(-1, ret);

		/* cleanup */

		ASSERT_EQ(0, kill(pid, SIGKILL));
	}
}

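/*
 * Enable V in the child (get_vr_len() executes a vector instruction),
 * snapshot the V CSRs into static variables, then stop on ebreak. The
 * tracer fetches the snapshot with PTRACE_PEEKDATA, reads the same
 * state with PTRACE_GETREGSET, and checks that the two views agree.
 * The snapshot variables are static so both processes see them at one
 * well-defined address.
 */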
TEST(ptrace_v_early_debug)
{
	static volatile unsigned long vstart;
	static volatile unsigned long vtype;
	static volatile unsigned long vlenb;
	static volatile unsigned long vcsr;
	static volatile unsigned long vl;
	bool xtheadvector;
	pid_t pid;

	if (!is_vector_supported() && !is_xtheadvector_supported())
		SKIP(return, "Vector not supported");

	xtheadvector = is_xtheadvector_supported();

	chld_lock = 1;
	pid = fork();
	ASSERT_LE(0, pid)
		TH_LOG("fork: %m");

	if (pid == 0) {
		unsigned long vxsat, vxrm;

		vlenb = get_vr_len();

		while (chld_lock == 1)
			asm volatile ("" : : "g"(chld_lock) : "memory");

		asm volatile (
			"csrr %[vstart], vstart\n"
			"csrr %[vtype], vtype\n"
			"csrr %[vl], vl\n"
			: [vtype] "=r"(vtype), [vstart] "=r"(vstart), [vl] "=r"(vl)
			:
			: "memory");

		/*
		 * Use the cached 'xtheadvector' instead of calling
		 * is_xtheadvector_supported() here: the syscall the helper
		 * makes would clobber the V state we are reading.
		 */
		if (xtheadvector) {
			asm volatile (
				"csrs sstatus, %[bit]\n"
				"csrr %[vxsat], vxsat\n"
				"csrr %[vxrm], vxrm\n"
				: [vxsat] "=r"(vxsat), [vxrm] "=r"(vxrm)
				: [bit] "r" (SR_FS_DIRTY)
				: "memory");
			vcsr = vxsat | (vxrm << CSR_VXRM_SHIFT);
		} else {
			asm volatile (
				"csrr %[vcsr], vcsr\n"
				: [vcsr] "=r"(vcsr)
				:
				: "memory");
		}

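		/*
		 * norvc: emit the full 4-byte ebreak rather than the
		 * compressed c.ebreak, matching the pc += 4 breakpoint
		 * skips the tracers below perform.
		 */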
		asm volatile (
			".option push\n"
			".option norvc\n"
			"ebreak\n"
			".option pop\n");
	} else {
		struct __riscv_v_regset_state *regset_data;
		unsigned long vstart_csr;
		unsigned long vlenb_csr;
		unsigned long vtype_csr;
		unsigned long vcsr_csr;
		unsigned long vl_csr;
		size_t regset_size;
		struct iovec iov;
		int status;

		/* attach */

		ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* unlock */

		ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));

		/* resume and wait for ebreak */

		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* read tracee vector csr regs using ptrace PEEKDATA */

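		/*
		 * PTRACE_PEEKDATA returns the peeked word itself, so -1 is
		 * also a legal value: clear errno beforehand and treat -1
		 * as an error only if errno got set.
		 */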
		errno = 0;
		vstart_csr = ptrace(PTRACE_PEEKDATA, pid, &vstart, NULL);
		ASSERT_FALSE((errno != 0) && (vstart_csr == -1));

		errno = 0;
		vl_csr = ptrace(PTRACE_PEEKDATA, pid, &vl, NULL);
		ASSERT_FALSE((errno != 0) && (vl_csr == -1));

		errno = 0;
		vtype_csr = ptrace(PTRACE_PEEKDATA, pid, &vtype, NULL);
		ASSERT_FALSE((errno != 0) && (vtype_csr == -1));

		errno = 0;
		vcsr_csr = ptrace(PTRACE_PEEKDATA, pid, &vcsr, NULL);
		ASSERT_FALSE((errno != 0) && (vcsr_csr == -1));

		errno = 0;
		vlenb_csr = ptrace(PTRACE_PEEKDATA, pid, &vlenb, NULL);
		ASSERT_FALSE((errno != 0) && (vlenb_csr == -1));

		/* read tracee vector csr regs using ptrace GETREGSET */

		regset_size = sizeof(*regset_data) + vlenb_csr * 32;
		regset_data = calloc(1, regset_size);
		ASSERT_NE(NULL, regset_data);

		iov.iov_base = regset_data;
		iov.iov_len = regset_size;

		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));

		/* compare both views of the V CSRs */

		EXPECT_EQ(vstart_csr, regset_data->vstart);
		EXPECT_EQ(vtype_csr, regset_data->vtype);
		EXPECT_EQ(vlenb_csr, regset_data->vlenb);
		EXPECT_EQ(vcsr_csr, regset_data->vcsr);
		EXPECT_EQ(vl_csr, regset_data->vl);

		/* cleanup */

		ASSERT_EQ(0, kill(pid, SIGKILL));
	}
}

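/*
 * The RISC-V Linux uABI treats vector state as clobbered by syscalls:
 * on return, vtype.vill is set and vl is zeroed. Snapshot the regset
 * right after vsetvli and again right after the child has issued a
 * syscall (sleep(0)), and compare.
 */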
TEST(ptrace_v_syscall_clobbering)
{
	pid_t pid;

	if (!is_vector_supported() && !is_xtheadvector_supported())
		SKIP(return, "Vector not supported");

	chld_lock = 1;
	pid = fork();
	ASSERT_LE(0, pid)
		TH_LOG("fork: %m");

	if (pid == 0) {
		unsigned long vl;

		while (chld_lock == 1)
			asm volatile("" : : "g"(chld_lock) : "memory");

		if (is_xtheadvector_supported()) {
			asm volatile (
				// 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli
				// vsetvli	t4, x0, e16, m2, d1
				".4byte		0b00000000010100000111111011010111\n"
				"mv		%[new_vl], t4\n"
				: [new_vl] "=r" (vl) : : "t4");
		} else {
			asm volatile (
				".option push\n"
				".option arch, +zve32x\n"
				"vsetvli %[new_vl], x0, e16, m2, tu, mu\n"
				".option pop\n"
				: [new_vl] "=r"(vl) : : );
		}

		while (1) {
			asm volatile (
				".option push\n"
				".option norvc\n"
				"ebreak\n"
				".option pop\n");

			/* a syscall: the kernel discards the tracee's V state */
			sleep(0);
		}
	} else {
		struct __riscv_v_regset_state *regset_data;
		unsigned long vlenb = get_vr_len();
		struct user_regs_struct regs;
		size_t regset_size;
		struct iovec iov;
		int status;

		/* attach */

		ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* unlock */

		ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));

		/* resume and wait for the 1st ebreak */

		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* read tracee vector csr regs using ptrace GETREGSET */

		regset_size = sizeof(*regset_data) + vlenb * 32;
		regset_data = calloc(1, regset_size);
		ASSERT_NE(NULL, regset_data);

		iov.iov_base = regset_data;
		iov.iov_len = regset_size;

		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));

		/* verify initial vsetvli settings */

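		/*
		 * e16/m2 encodes as vtype == 9 on V 1.0 (vsew in bits 5:3,
		 * vlmul in bits 2:0, vta/vma clear) and as vtype == 5 on
		 * XTheadVector 0.7 (vsew in bits 4:2, vlmul in bits 1:0).
		 * With SEW = 16 and LMUL = 2, VLMAX = 2 * VLEN / 16 = vlenb,
		 * and vsetvli with rs1 == x0, rd != x0 sets vl = VLMAX.
		 */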
		if (is_xtheadvector_supported())
			EXPECT_EQ(5UL, regset_data->vtype);
		else
			EXPECT_EQ(9UL, regset_data->vtype);

		EXPECT_EQ(regset_data->vlenb, regset_data->vl);
		EXPECT_EQ(vlenb, regset_data->vlenb);
		EXPECT_EQ(0UL, regset_data->vstart);
		EXPECT_EQ(0UL, regset_data->vcsr);

		/* skip 1st ebreak, then resume and wait for the 2nd ebreak */

		iov.iov_base = &regs;
		iov.iov_len = sizeof(regs);

		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov));
		regs.pc += 4;
		ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov));

		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* read tracee vector csr regs using ptrace GETREGSET */

		iov.iov_base = regset_data;
		iov.iov_len = regset_size;

		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));

		/* verify that V state is illegal after syscall */

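		/* vill is the most-significant bit of vtype: bit (__riscv_xlen - 1) */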
		EXPECT_EQ((1UL << (__riscv_xlen - 1)), regset_data->vtype);
		EXPECT_EQ(vlenb, regset_data->vlenb);
		EXPECT_EQ(0UL, regset_data->vstart);
		EXPECT_EQ(0UL, regset_data->vcsr);
		EXPECT_EQ(0UL, regset_data->vl);

		/* cleanup */

		ASSERT_EQ(0, kill(pid, SIGKILL));
	}
}

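/*
 * Each v_csr_invalid variant corrupts one aspect of an otherwise valid
 * NT_RISCV_VECTOR regset; PTRACE_SETREGSET must reject the write with
 * EINVAL. vlenb_min/vlenb_max bound the vlenb values (in bytes) for
 * which the variant applies, and spec selects V 1.0 and/or
 * XTheadVector 0.7.
 */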
FIXTURE(v_csr_invalid)
{
};

FIXTURE_SETUP(v_csr_invalid)
{
}

FIXTURE_TEARDOWN(v_csr_invalid)
{
}

#define VECTOR_1_0		BIT(0)
#define XTHEAD_VECTOR_0_7	BIT(1)

#define vector_test(x)		((x) & VECTOR_1_0)
#define xthead_test(x)		((x) & XTHEAD_VECTOR_0_7)

/* modifications of the initial vsetvli settings */
FIXTURE_VARIANT(v_csr_invalid)
{
	unsigned long vstart;
	unsigned long vl;
	unsigned long vtype;
	unsigned long vcsr;
	unsigned long vlenb_mul;
	unsigned long vlenb_min;
	unsigned long vlenb_max;
	unsigned long spec;
};

/* unexpected vlenb value */
FIXTURE_VARIANT_ADD(v_csr_invalid, new_vlenb)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x3,
	.vcsr = 0x0,
	.vlenb_mul = 0x2,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7,
};

/* invalid reserved bits in vcsr */
FIXTURE_VARIANT_ADD(v_csr_invalid, vcsr_invalid_reserved_bits)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x3,
	.vcsr = 0x1UL << 8,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7,
};

/* invalid reserved bits in vtype */
FIXTURE_VARIANT_ADD(v_csr_invalid, vtype_invalid_reserved_bits)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = (0x1UL << 8) | 0x3,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7,
};

/* set vill bit */
FIXTURE_VARIANT_ADD(v_csr_invalid, invalid_vill_bit)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = (0x1UL << (__riscv_xlen - 1)) | 0x3,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7,
};

/* reserved vsew value: vsew > 3 */
FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vsew)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x4UL << 3,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0,
};

/* XTheadVector: unsupported non-zero VEDIV value */
FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vediv)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x3UL << 5,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = XTHEAD_VECTOR_0_7,
};

/* reserved vlmul value: vlmul == 4 */
FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vlmul)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x4,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0,
};

/* invalid fractional LMUL for VLEN <= 256: LMUL = 1/8, SEW = 64 */
FIXTURE_VARIANT_ADD(v_csr_invalid, frac_lmul1)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x1d,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x20,
	.spec = VECTOR_1_0,
};

/* invalid integral LMUL for VLEN <= 16: LMUL = 2, SEW = 64 */
FIXTURE_VARIANT_ADD(v_csr_invalid, int_lmul1)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x19,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x2,
	.spec = VECTOR_1_0,
};

/* XTheadVector: invalid integral LMUL for VLEN <= 16: LMUL = 2, SEW = 64 */
FIXTURE_VARIANT_ADD(v_csr_invalid, int_lmul2)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0xd,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x2,
	.spec = XTHEAD_VECTOR_0_7,
};

/* invalid VL for VLEN <= 128: LMUL = 2, SEW = 64, VL = 8 */
FIXTURE_VARIANT_ADD(v_csr_invalid, vl1)
{
	.vstart = 0x0,
	.vl = 0x8,
	.vtype = 0x19,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x10,
	.spec = VECTOR_1_0,
};

/* XTheadVector: invalid VL for VLEN <= 128: LMUL = 2, SEW = 64, VL = 8 */
FIXTURE_VARIANT_ADD(v_csr_invalid, vl2)
{
	.vstart = 0x0,
	.vl = 0x8,
	.vtype = 0xd,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x10,
	.spec = XTHEAD_VECTOR_0_7,
};

TEST_F(v_csr_invalid, ptrace_v_invalid_values)
{
	unsigned long vlenb;
	pid_t pid;

	if (!is_vector_supported() && !is_xtheadvector_supported())
		SKIP(return, "Vectors not supported");

	if (is_vector_supported() && !vector_test(variant->spec))
		SKIP(return, "Test not supported for Vector");

	if (is_xtheadvector_supported() && !xthead_test(variant->spec))
		SKIP(return, "Test not supported for XTheadVector");

	vlenb = get_vr_len();

	if (variant->vlenb_min) {
		if (vlenb < variant->vlenb_min)
			SKIP(return, "This test does not support VLEN < %lu\n",
			     variant->vlenb_min * 8);
	}

	if (variant->vlenb_max) {
		if (vlenb > variant->vlenb_max)
			SKIP(return, "This test does not support VLEN > %lu\n",
			     variant->vlenb_max * 8);
	}

	chld_lock = 1;
	pid = fork();
	ASSERT_LE(0, pid)
		TH_LOG("fork: %m");

	if (pid == 0) {
		unsigned long vl;

		while (chld_lock == 1)
			asm volatile("" : : "g"(chld_lock) : "memory");

		if (is_xtheadvector_supported()) {
			asm volatile (
				// 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli
				// vsetvli	t4, x0, e16, m2, d1
				".4byte		0b00000000010100000111111011010111\n"
				"mv		%[new_vl], t4\n"
				: [new_vl] "=r" (vl) : : "t4");
		} else {
			asm volatile (
				".option push\n"
				".option arch, +zve32x\n"
				"vsetvli %[new_vl], x0, e16, m2, tu, mu\n"
				".option pop\n"
				: [new_vl] "=r"(vl) : : );
		}

		while (1) {
			asm volatile (
				".option push\n"
				".option norvc\n"
				"ebreak\n"
				"nop\n"
				".option pop\n");
		}
	} else {
		struct __riscv_v_regset_state *regset_data;
		size_t regset_size;
		struct iovec iov;
		int status;
		int ret;

		/* attach */

		ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* unlock */

		ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));

		/* resume and wait for the 1st ebreak */

		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* read tracee vector csr regs using ptrace GETREGSET */

		regset_size = sizeof(*regset_data) + vlenb * 32;
		regset_data = calloc(1, regset_size);
		ASSERT_NE(NULL, regset_data);

		iov.iov_base = regset_data;
		iov.iov_len = regset_size;

		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));

		/* verify initial vsetvli settings */

		if (is_xtheadvector_supported())
			EXPECT_EQ(5UL, regset_data->vtype);
		else
			EXPECT_EQ(9UL, regset_data->vtype);

		EXPECT_EQ(regset_data->vlenb, regset_data->vl);
		EXPECT_EQ(vlenb, regset_data->vlenb);
		EXPECT_EQ(0UL, regset_data->vstart);
		EXPECT_EQ(0UL, regset_data->vcsr);

		/* apply invalid settings from fixture variants */

		regset_data->vlenb *= variant->vlenb_mul;
		regset_data->vstart = variant->vstart;
		regset_data->vtype = variant->vtype;
		regset_data->vcsr = variant->vcsr;
		regset_data->vl = variant->vl;

		iov.iov_base = regset_data;
		iov.iov_len = regset_size;

		errno = 0;
		ret = ptrace(PTRACE_SETREGSET, pid, NT_RISCV_VECTOR, &iov);
		ASSERT_EQ(EINVAL, errno);
		ASSERT_EQ(-1, ret);

		/* cleanup */

		ASSERT_EQ(0, kill(pid, SIGKILL));
	}
}

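/*
 * Each v_csr_valid variant writes a legal CSR combination via
 * PTRACE_SETREGSET at breakpoint 1, then verifies it reads back
 * unchanged both while the child's V state is clean (breakpoint 2) and
 * after vmv.v.i has dirtied it (breakpoint 3).
 */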
FIXTURE(v_csr_valid)
{
};

FIXTURE_SETUP(v_csr_valid)
{
}

FIXTURE_TEARDOWN(v_csr_valid)
{
}

/* modifications of the initial vsetvli settings */
FIXTURE_VARIANT(v_csr_valid)
{
	unsigned long vstart;
	unsigned long vl;
	unsigned long vtype;
	unsigned long vcsr;
	unsigned long vlenb_mul;
	unsigned long vlenb_min;
	unsigned long vlenb_max;
	unsigned long spec;
};

/* valid for VLEN >= 128: LMUL = 1/4, SEW = 32 */
FIXTURE_VARIANT_ADD(v_csr_valid, frac_lmul1)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x16,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x10,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0,
};

/* valid for VLEN >= 16: LMUL = 2, SEW = 32 */
FIXTURE_VARIANT_ADD(v_csr_valid, int_lmul1)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x11,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x2,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0,
};

/* valid for XTheadVector VLEN >= 16: LMUL = 2, SEW = 32 */
FIXTURE_VARIANT_ADD(v_csr_valid, int_lmul2)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x9,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x2,
	.vlenb_max = 0x0,
	.spec = XTHEAD_VECTOR_0_7,
};

/* valid for VLEN >= 32: LMUL = 2, SEW = 32, VL = 2 */
FIXTURE_VARIANT_ADD(v_csr_valid, int_lmul3)
{
	.vstart = 0x0,
	.vl = 0x2,
	.vtype = 0x11,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x4,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0,
};

TEST_F(v_csr_valid, ptrace_v_valid_values)
{
	unsigned long vlenb;
	pid_t pid;

	if (!is_vector_supported() && !is_xtheadvector_supported())
		SKIP(return, "Vectors not supported");

	if (is_vector_supported() && !vector_test(variant->spec))
		SKIP(return, "Test not supported for Vector");

	if (is_xtheadvector_supported() && !xthead_test(variant->spec))
		SKIP(return, "Test not supported for XTheadVector");

	vlenb = get_vr_len();

	if (variant->vlenb_min) {
		if (vlenb < variant->vlenb_min)
			SKIP(return, "This test does not support VLEN < %lu\n",
			     variant->vlenb_min * 8);
	}

	if (variant->vlenb_max) {
		if (vlenb > variant->vlenb_max)
			SKIP(return, "This test does not support VLEN > %lu\n",
			     variant->vlenb_max * 8);
	}

	chld_lock = 1;
	pid = fork();
	ASSERT_LE(0, pid)
		TH_LOG("fork: %m");

	if (pid == 0) {
		unsigned long vl;

		while (chld_lock == 1)
			asm volatile("" : : "g"(chld_lock) : "memory");

		if (is_xtheadvector_supported()) {
			asm volatile (
				// 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli
				// vsetvli	t4, x0, e16, m2, d1
				".4byte		0b00000000010100000111111011010111\n"
				"mv		%[new_vl], t4\n"
				: [new_vl] "=r" (vl) : : "t4");
		} else {
			asm volatile (
				".option push\n"
				".option arch, +zve32x\n"
				"vsetvli %[new_vl], x0, e16, m2, tu, mu\n"
				".option pop\n"
				: [new_vl] "=r"(vl) : : );
		}

		asm volatile (
			".option push\n"
			".option norvc\n"
			".option arch, +zve32x\n"
			"ebreak\n" /* breakpoint 1: apply new V state using ptrace */
			"nop\n"
			"ebreak\n" /* breakpoint 2: V state clean - context will not be saved */
			"vmv.v.i v0, -1\n"
			"ebreak\n" /* breakpoint 3: V state dirty - context will be saved */
			".option pop\n");
	} else {
		struct __riscv_v_regset_state *regset_data;
		struct user_regs_struct regs;
		size_t regset_size;
		struct iovec iov;
		int status;

		/* attach */

		ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* unlock */

		ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));

		/* resume and wait for the 1st ebreak */

		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* read tracee vector csr regs using ptrace GETREGSET */

		regset_size = sizeof(*regset_data) + vlenb * 32;
		regset_data = calloc(1, regset_size);
		ASSERT_NE(NULL, regset_data);

		iov.iov_base = regset_data;
		iov.iov_len = regset_size;

		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));

		/* verify initial vsetvli settings */

		if (is_xtheadvector_supported())
			EXPECT_EQ(5UL, regset_data->vtype);
		else
			EXPECT_EQ(9UL, regset_data->vtype);

		EXPECT_EQ(regset_data->vlenb, regset_data->vl);
		EXPECT_EQ(vlenb, regset_data->vlenb);
		EXPECT_EQ(0UL, regset_data->vstart);
		EXPECT_EQ(0UL, regset_data->vcsr);

		/* apply valid settings from fixture variants */

		regset_data->vlenb *= variant->vlenb_mul;
		regset_data->vstart = variant->vstart;
		regset_data->vtype = variant->vtype;
		regset_data->vcsr = variant->vcsr;
		regset_data->vl = variant->vl;

		iov.iov_base = regset_data;
		iov.iov_len = regset_size;

		ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_RISCV_VECTOR, &iov));

		/* skip 1st ebreak, then resume and wait for the 2nd ebreak */

		iov.iov_base = &regs;
		iov.iov_len = sizeof(regs);

		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov));
		regs.pc += 4;
		ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov));

		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* read tracee vector csr regs using ptrace GETREGSET */

		iov.iov_base = regset_data;
		iov.iov_len = regset_size;

		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));

		/* verify vector csr regs from tracee context (clean V state) */

		EXPECT_EQ(regset_data->vstart, variant->vstart);
		EXPECT_EQ(regset_data->vtype, variant->vtype);
		EXPECT_EQ(regset_data->vcsr, variant->vcsr);
		EXPECT_EQ(regset_data->vl, variant->vl);
		EXPECT_EQ(regset_data->vlenb, vlenb);

		/* skip 2nd ebreak, then resume and wait for the 3rd ebreak */

		iov.iov_base = &regs;
		iov.iov_len = sizeof(regs);

		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov));
		regs.pc += 4;
		ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov));

		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* read tracee vector csr regs using ptrace GETREGSET */

		iov.iov_base = regset_data;
		iov.iov_len = regset_size;

		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));

		/* verify vector csr regs from tracee context (dirty V state) */

		EXPECT_EQ(regset_data->vstart, variant->vstart);
		EXPECT_EQ(regset_data->vtype, variant->vtype);
		EXPECT_EQ(regset_data->vcsr, variant->vcsr);
		EXPECT_EQ(regset_data->vl, variant->vl);
		EXPECT_EQ(regset_data->vlenb, vlenb);

		/* cleanup */

		ASSERT_EQ(0, kill(pid, SIGKILL));
	}
}

TEST_HARNESS_MAIN