xref: /linux/tools/testing/selftests/riscv/vector/validate_v_ptrace.c (revision 00afb1811fa638dacf125dd1c343b7a181624dfd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <sys/ptrace.h>
3 #include <sys/types.h>
4 #include <sys/wait.h>
5 #include <sys/uio.h>
6 #include <unistd.h>
7 #include <errno.h>
8 
9 #include <linux/ptrace.h>
10 #include <linux/elf.h>
11 
12 #include "kselftest_harness.h"
13 #include "v_helpers.h"
14 
/* sstatus.FS field set to "dirty" (bits 14:13) - written before reading the
 * T-Head vector CSRs; NOTE(review): assumes the kernel permits this csrs from
 * user mode on XTheadVector hardware - confirm on target.
 */
#define SR_FS_DIRTY	0x00006000UL
/* bit position of vxrm inside the standard vcsr register (vxsat is bit 0) */
#define CSR_VXRM_SHIFT	1

/* Busy-wait flag: the forked tracee spins while this is 1; the tracer clears
 * it in the tracee's address space via PTRACE_POKEDATA after attaching.
 */
volatile unsigned long chld_lock;
19 
TEST(ptrace_v_not_enabled)20 TEST(ptrace_v_not_enabled)
21 {
22 	pid_t pid;
23 
24 	if (!(is_vector_supported() || is_xtheadvector_supported()))
25 		SKIP(return, "Vector not supported");
26 
27 	chld_lock = 1;
28 	pid = fork();
29 	ASSERT_LE(0, pid)
30 		TH_LOG("fork: %m");
31 
32 	if (pid == 0) {
33 		while (chld_lock == 1)
34 			asm volatile("" : : "g"(chld_lock) : "memory");
35 
36 		asm volatile ("ebreak" : : : );
37 	} else {
38 		struct __riscv_v_regset_state *regset_data;
39 		unsigned long vlenb = get_vr_len();
40 		size_t regset_size;
41 		struct iovec iov;
42 		int status;
43 		int ret;
44 
45 		/* attach */
46 
47 		ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
48 		ASSERT_EQ(pid, waitpid(pid, &status, 0));
49 		ASSERT_TRUE(WIFSTOPPED(status));
50 
51 		/* unlock */
52 
53 		ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));
54 
55 		/* resume and wait for ebreak */
56 
57 		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
58 		ASSERT_EQ(pid, waitpid(pid, &status, 0));
59 		ASSERT_TRUE(WIFSTOPPED(status));
60 
61 		/* try to read vector registers from the tracee */
62 
63 		regset_size = sizeof(*regset_data) + vlenb * 32;
64 		regset_data = calloc(1, regset_size);
65 
66 		iov.iov_base = regset_data;
67 		iov.iov_len = regset_size;
68 
69 		/* V extension is available, but not yet enabled for the tracee */
70 
71 		errno = 0;
72 		ret = ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov);
73 		ASSERT_EQ(ENODATA, errno);
74 		ASSERT_EQ(-1, ret);
75 
76 		/* cleanup */
77 
78 		ASSERT_EQ(0, kill(pid, SIGKILL));
79 	}
80 }
81 
/*
 * Cross-check the vector CSRs: the tracee reads vstart/vtype/vl/vcsr with
 * csrr into static variables, the tracer fetches those variables with
 * PTRACE_PEEKDATA and compares them against what PTRACE_GETREGSET reports
 * for NT_RISCV_VECTOR. Both views must agree.
 */
TEST(ptrace_v_early_debug)
{
	/* static + volatile so the tracer can PEEKDATA them at fixed addresses */
	static volatile unsigned long vstart;
	static volatile unsigned long vtype;
	static volatile unsigned long vlenb;
	static volatile unsigned long vcsr;
	static volatile unsigned long vl;
	bool xtheadvector;
	pid_t pid;

	if (!(is_vector_supported() || is_xtheadvector_supported()))
		SKIP(return, "Vector not supported");

	/* cache the answer before fork - see the comment in the child below */
	xtheadvector = is_xtheadvector_supported();

	chld_lock = 1;
	pid = fork();
	ASSERT_LE(0, pid)
		TH_LOG("fork: %m");

	if (pid == 0) {
		unsigned long vxsat, vxrm;

		vlenb = get_vr_len();

		/* spin until the tracer clears the lock via PTRACE_POKEDATA */
		while (chld_lock == 1)
			asm volatile ("" : : "g"(chld_lock) : "memory");

		asm volatile (
			"csrr %[vstart], vstart\n"
			"csrr %[vtype], vtype\n"
			"csrr %[vl], vl\n"
			: [vtype] "=r"(vtype), [vstart] "=r"(vstart), [vl] "=r"(vl)
			:
			: "memory");

		/* no 'is_xtheadvector_supported()' here to avoid clobbering v-state by syscall */
		if (xtheadvector) {
			/*
			 * XTheadVector has no combined vcsr; read vxsat and
			 * vxrm individually, after raising sstatus.FS.
			 * NOTE(review): assumes the csrs on sstatus is legal
			 * from user mode here - confirm on target hardware.
			 */
			asm volatile (
				"csrs sstatus, %[bit]\n"
				"csrr %[vxsat], vxsat\n"
				"csrr %[vxrm], vxrm\n"
				: [vxsat] "=r"(vxsat), [vxrm] "=r"(vxrm)
				: [bit] "r" (SR_FS_DIRTY)
				: "memory");
			/* assemble the standard vcsr layout: vxrm in [2:1], vxsat in [0] */
			vcsr = vxsat | vxrm << CSR_VXRM_SHIFT;
		} else {
			asm volatile (
				"csrr %[vcsr], vcsr\n"
				: [vcsr] "=r"(vcsr)
				:
				: "memory");
		}

		/* norvc: force the 4-byte ebreak encoding */
		asm volatile (
			".option push\n"
			".option norvc\n"
			"ebreak\n"
			".option pop\n");
	} else {
		struct __riscv_v_regset_state *regset_data;
		unsigned long vstart_csr;
		unsigned long vlenb_csr;
		unsigned long vtype_csr;
		unsigned long vcsr_csr;
		unsigned long vl_csr;
		size_t regset_size;
		struct iovec iov;
		int status;

		/* attach */

		ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* unlock */

		ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));

		/* resume and wait for ebreak */

		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
		ASSERT_EQ(pid, waitpid(pid, &status, 0));
		ASSERT_TRUE(WIFSTOPPED(status));

		/* read tracee vector csr regs using ptrace PEEKDATA */

		/*
		 * PEEKDATA returns the word itself, and -1 can be valid data:
		 * an error is only the combination of ret == -1 and errno set.
		 */
		errno = 0;
		vstart_csr = ptrace(PTRACE_PEEKDATA, pid, &vstart, NULL);
		ASSERT_FALSE((errno != 0) && (vstart_csr == -1));

		errno = 0;
		vl_csr = ptrace(PTRACE_PEEKDATA, pid, &vl, NULL);
		ASSERT_FALSE((errno != 0) && (vl_csr == -1));

		errno = 0;
		vtype_csr = ptrace(PTRACE_PEEKDATA, pid, &vtype, NULL);
		ASSERT_FALSE((errno != 0) && (vtype_csr == -1));

		errno = 0;
		vcsr_csr = ptrace(PTRACE_PEEKDATA, pid, &vcsr, NULL);
		ASSERT_FALSE((errno != 0) && (vcsr_csr == -1));

		errno = 0;
		vlenb_csr = ptrace(PTRACE_PEEKDATA, pid, &vlenb, NULL);
		ASSERT_FALSE((errno != 0) && (vlenb_csr == -1));

		/* read tracee csr regs using ptrace GETREGSET */

		/* header + 32 vector registers of vlenb bytes each */
		regset_size = sizeof(*regset_data) + vlenb_csr * 32;
		regset_data = calloc(1, regset_size);

		iov.iov_base = regset_data;
		iov.iov_len = regset_size;

		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));

		/* compare */

		EXPECT_EQ(vstart_csr, regset_data->vstart);
		EXPECT_EQ(vtype_csr, regset_data->vtype);
		EXPECT_EQ(vlenb_csr, regset_data->vlenb);
		EXPECT_EQ(vcsr_csr, regset_data->vcsr);
		EXPECT_EQ(vl_csr, regset_data->vl);

		/* cleanup */

		ASSERT_EQ(0, kill(pid, SIGKILL));
	}
}
213 
TEST(ptrace_v_syscall_clobbering)214 TEST(ptrace_v_syscall_clobbering)
215 {
216 	pid_t pid;
217 
218 	if (!is_vector_supported() && !is_xtheadvector_supported())
219 		SKIP(return, "Vector not supported");
220 
221 	chld_lock = 1;
222 	pid = fork();
223 	ASSERT_LE(0, pid)
224 		TH_LOG("fork: %m");
225 
226 	if (pid == 0) {
227 		unsigned long vl;
228 
229 		while (chld_lock == 1)
230 			asm volatile("" : : "g"(chld_lock) : "memory");
231 
232 		if (is_xtheadvector_supported()) {
233 			asm volatile (
234 				// 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli
235 				// vsetvli	t4, x0, e16, m2, d1
236 				".4byte		0b00000000010100000111111011010111\n"
237 				"mv		%[new_vl], t4\n"
238 				: [new_vl] "=r" (vl) : : "t4");
239 		} else {
240 			asm volatile (
241 				".option push\n"
242 				".option arch, +zve32x\n"
243 				"vsetvli %[new_vl], x0, e16, m2, tu, mu\n"
244 				".option pop\n"
245 				: [new_vl] "=r"(vl) : : );
246 		}
247 
248 		while (1) {
249 			asm volatile (
250 				".option push\n"
251 				".option norvc\n"
252 				"ebreak\n"
253 				".option pop\n");
254 
255 			sleep(0);
256 		}
257 	} else {
258 		struct __riscv_v_regset_state *regset_data;
259 		unsigned long vlenb = get_vr_len();
260 		struct user_regs_struct regs;
261 		size_t regset_size;
262 		struct iovec iov;
263 		int status;
264 
265 		/* attach */
266 
267 		ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
268 		ASSERT_EQ(pid, waitpid(pid, &status, 0));
269 		ASSERT_TRUE(WIFSTOPPED(status));
270 
271 		/* unlock */
272 
273 		ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));
274 
275 		/* resume and wait for the 1st ebreak */
276 
277 		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
278 		ASSERT_EQ(pid, waitpid(pid, &status, 0));
279 		ASSERT_TRUE(WIFSTOPPED(status));
280 
281 		/* read tracee vector csr regs using ptrace GETREGSET */
282 
283 		regset_size = sizeof(*regset_data) + vlenb * 32;
284 		regset_data = calloc(1, regset_size);
285 
286 		iov.iov_base = regset_data;
287 		iov.iov_len = regset_size;
288 
289 		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));
290 
291 		/* verify initial vsetvli settings */
292 
293 		if (is_xtheadvector_supported()) {
294 			EXPECT_EQ(5UL, regset_data->vtype);
295 		} else {
296 			EXPECT_EQ(9UL, regset_data->vtype);
297 		}
298 
299 		EXPECT_EQ(regset_data->vlenb, regset_data->vl);
300 		EXPECT_EQ(vlenb, regset_data->vlenb);
301 		EXPECT_EQ(0UL, regset_data->vstart);
302 		EXPECT_EQ(0UL, regset_data->vcsr);
303 
304 		/* skip 1st ebreak, then resume and wait for the 2nd ebreak */
305 
306 		iov.iov_base = &regs;
307 		iov.iov_len = sizeof(regs);
308 
309 		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov));
310 		regs.pc += 4;
311 		ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov));
312 
313 		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
314 		ASSERT_EQ(pid, waitpid(pid, &status, 0));
315 		ASSERT_TRUE(WIFSTOPPED(status));
316 
317 		/* read tracee vtype using ptrace GETREGSET */
318 
319 		iov.iov_base = regset_data;
320 		iov.iov_len = regset_size;
321 
322 		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));
323 
324 		/* verify that V state is illegal after syscall */
325 
326 		EXPECT_EQ((1UL << (__riscv_xlen - 1)), regset_data->vtype);
327 		EXPECT_EQ(vlenb, regset_data->vlenb);
328 		EXPECT_EQ(0UL, regset_data->vstart);
329 		EXPECT_EQ(0UL, regset_data->vcsr);
330 		EXPECT_EQ(0UL, regset_data->vl);
331 
332 		/* cleanup */
333 
334 		ASSERT_EQ(0, kill(pid, SIGKILL));
335 	}
336 }
337 
/* No shared fixture state - each variant carries its own parameters. */
FIXTURE(v_csr_invalid)
{
};

/* Nothing to set up: every test forks and attaches to its own tracee. */
FIXTURE_SETUP(v_csr_invalid)
{
}

/* Nothing to tear down: the tracee is killed at the end of each test. */
FIXTURE_TEARDOWN(v_csr_invalid)
{
}
349 
/* Bits for FIXTURE_VARIANT ->spec: which vector flavour a variant targets. */
#define VECTOR_1_0		_BITUL(0)
#define XTHEAD_VECTOR_0_7	_BITUL(1)

/* True when the variant applies to standard Vector / XTheadVector. */
#define vector_test(x)		((x) & VECTOR_1_0)
#define xthead_test(x)		((x) & XTHEAD_VECTOR_0_7)
355 
/* modifications of the initial vsetvli settings */
FIXTURE_VARIANT(v_csr_invalid)
{
	unsigned long vstart;		/* value to write into vstart */
	unsigned long vl;		/* value to write into vl */
	unsigned long vtype;		/* value to write into vtype */
	unsigned long vcsr;		/* value to write into vcsr */
	unsigned long vlenb_mul;	/* multiplier applied to the reported vlenb */
	unsigned long vlenb_min;	/* skip if tracee vlenb < this (0 = no limit) */
	unsigned long vlenb_max;	/* skip if tracee vlenb > this (0 = no limit) */
	unsigned long spec;		/* VECTOR_1_0 and/or XTHEAD_VECTOR_0_7 */
};
368 
/* unexpected vlenb value: vlenb doubled versus the hardware value */
FIXTURE_VARIANT_ADD(v_csr_invalid, new_vlenb)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x3,
	.vcsr = 0x0,
	.vlenb_mul = 0x2,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7,
};

/* invalid reserved bits in vcsr */
FIXTURE_VARIANT_ADD(v_csr_invalid, vcsr_invalid_reserved_bits)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x3,
	.vcsr = 0x1UL << 8,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7,
};

/* invalid reserved bits in vtype */
FIXTURE_VARIANT_ADD(v_csr_invalid, vtype_invalid_reserved_bits)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = (0x1UL << 8) | 0x3,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7,
};

/* set vill bit (MSB of vtype) */
FIXTURE_VARIANT_ADD(v_csr_invalid, invalid_vill_bit)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = (0x1UL << (__riscv_xlen - 1)) | 0x3,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7,
};

/* reserved vsew value: vsew > 3 */
FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vsew)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x4UL << 3,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0,
};

/* XTheadVector: unsupported non-zero VEDIV value */
FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vediv)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x3UL << 5,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = XTHEAD_VECTOR_0_7,
};

/* reserved vlmul value: vlmul == 4 */
FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vlmul)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x4,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0,
};

/* invalid fractional LMUL for VLEN <= 256: LMUL= 1/8, SEW = 64 */
FIXTURE_VARIANT_ADD(v_csr_invalid, frac_lmul1)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x1d,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x20,
	.spec = VECTOR_1_0,
};

/* invalid integral LMUL for VLEN <= 16: LMUL= 2, SEW = 64 */
FIXTURE_VARIANT_ADD(v_csr_invalid, int_lmul1)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x19,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x2,
	.spec = VECTOR_1_0,
};

/* XTheadVector: invalid integral LMUL for VLEN <= 16: LMUL= 2, SEW = 64 */
FIXTURE_VARIANT_ADD(v_csr_invalid, int_lmul2)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0xd,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x2,
	.spec = XTHEAD_VECTOR_0_7,
};

/* invalid VL for VLEN <= 128: LMUL= 2, SEW = 64, VL = 8 */
FIXTURE_VARIANT_ADD(v_csr_invalid, vl1)
{
	.vstart = 0x0,
	.vl = 0x8,
	.vtype = 0x19,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x10,
	.spec = VECTOR_1_0,
};

/* XTheadVector: invalid VL for VLEN <= 128: LMUL= 2, SEW = 64, VL = 8 */
FIXTURE_VARIANT_ADD(v_csr_invalid, vl2)
{
	.vstart = 0x0,
	.vl = 0x8,
	.vtype = 0xd,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x0,
	.vlenb_max = 0x10,
	.spec = XTHEAD_VECTOR_0_7,
};
524 
TEST_F(v_csr_invalid,ptrace_v_invalid_values)525 TEST_F(v_csr_invalid, ptrace_v_invalid_values)
526 {
527 	unsigned long vlenb;
528 	pid_t pid;
529 
530 	if (!is_vector_supported() && !is_xtheadvector_supported())
531 		SKIP(return, "Vectors not supported");
532 
533 	if (is_vector_supported() && !vector_test(variant->spec))
534 		SKIP(return, "Test not supported for Vector");
535 
536 	if (is_xtheadvector_supported() && !xthead_test(variant->spec))
537 		SKIP(return, "Test not supported for XTheadVector");
538 
539 	vlenb = get_vr_len();
540 
541 	if (variant->vlenb_min) {
542 		if (vlenb < variant->vlenb_min)
543 			SKIP(return, "This test does not support VLEN < %lu\n",
544 			     variant->vlenb_min * 8);
545 	}
546 
547 	if (variant->vlenb_max) {
548 		if (vlenb > variant->vlenb_max)
549 			SKIP(return, "This test does not support VLEN > %lu\n",
550 			     variant->vlenb_max * 8);
551 	}
552 
553 	chld_lock = 1;
554 	pid = fork();
555 	ASSERT_LE(0, pid)
556 		TH_LOG("fork: %m");
557 
558 	if (pid == 0) {
559 		unsigned long vl;
560 
561 		while (chld_lock == 1)
562 			asm volatile("" : : "g"(chld_lock) : "memory");
563 
564 		if (is_xtheadvector_supported()) {
565 			asm volatile (
566 				// 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli
567 				// vsetvli	t4, x0, e16, m2, d1
568 				".4byte		0b00000000010100000111111011010111\n"
569 				"mv		%[new_vl], t4\n"
570 				: [new_vl] "=r" (vl) : : "t4");
571 		} else {
572 			asm volatile (
573 				".option push\n"
574 				".option arch, +zve32x\n"
575 				"vsetvli %[new_vl], x0, e16, m2, tu, mu\n"
576 				".option pop\n"
577 				: [new_vl] "=r"(vl) : : );
578 		}
579 
580 		while (1) {
581 			asm volatile (
582 				".option push\n"
583 				".option norvc\n"
584 				"ebreak\n"
585 				"nop\n"
586 				".option pop\n");
587 		}
588 	} else {
589 		struct __riscv_v_regset_state *regset_data;
590 		size_t regset_size;
591 		struct iovec iov;
592 		int status;
593 		int ret;
594 
595 		/* attach */
596 
597 		ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
598 		ASSERT_EQ(pid, waitpid(pid, &status, 0));
599 		ASSERT_TRUE(WIFSTOPPED(status));
600 
601 		/* unlock */
602 
603 		ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));
604 
605 		/* resume and wait for the 1st ebreak */
606 
607 		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
608 		ASSERT_EQ(pid, waitpid(pid, &status, 0));
609 		ASSERT_TRUE(WIFSTOPPED(status));
610 
611 		/* read tracee vector csr regs using ptrace GETREGSET */
612 
613 		regset_size = sizeof(*regset_data) + vlenb * 32;
614 		regset_data = calloc(1, regset_size);
615 
616 		iov.iov_base = regset_data;
617 		iov.iov_len = regset_size;
618 
619 		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));
620 
621 		/* verify initial vsetvli settings */
622 
623 		if (is_xtheadvector_supported()) {
624 			EXPECT_EQ(5UL, regset_data->vtype);
625 		} else {
626 			EXPECT_EQ(9UL, regset_data->vtype);
627 		}
628 
629 		EXPECT_EQ(regset_data->vlenb, regset_data->vl);
630 		EXPECT_EQ(vlenb, regset_data->vlenb);
631 		EXPECT_EQ(0UL, regset_data->vstart);
632 		EXPECT_EQ(0UL, regset_data->vcsr);
633 
634 		/* apply invalid settings from fixture variants */
635 
636 		regset_data->vlenb *= variant->vlenb_mul;
637 		regset_data->vstart = variant->vstart;
638 		regset_data->vtype = variant->vtype;
639 		regset_data->vcsr = variant->vcsr;
640 		regset_data->vl = variant->vl;
641 
642 		iov.iov_base = regset_data;
643 		iov.iov_len = regset_size;
644 
645 		errno = 0;
646 		ret = ptrace(PTRACE_SETREGSET, pid, NT_RISCV_VECTOR, &iov);
647 		ASSERT_EQ(errno, EINVAL);
648 		ASSERT_EQ(ret, -1);
649 
650 		/* cleanup */
651 
652 		ASSERT_EQ(0, kill(pid, SIGKILL));
653 	}
654 }
655 
/* No shared fixture state - each variant carries its own parameters. */
FIXTURE(v_csr_valid)
{
};

/* Nothing to set up: every test forks and attaches to its own tracee. */
FIXTURE_SETUP(v_csr_valid)
{
}

/* Nothing to tear down: the tracee is killed at the end of each test. */
FIXTURE_TEARDOWN(v_csr_valid)
{
}
667 
/* modifications of the initial vsetvli settings */
FIXTURE_VARIANT(v_csr_valid)
{
	unsigned long vstart;		/* value to write into vstart */
	unsigned long vl;		/* value to write into vl */
	unsigned long vtype;		/* value to write into vtype */
	unsigned long vcsr;		/* value to write into vcsr */
	unsigned long vlenb_mul;	/* multiplier applied to the reported vlenb */
	unsigned long vlenb_min;	/* skip if tracee vlenb < this (0 = no limit) */
	unsigned long vlenb_max;	/* skip if tracee vlenb > this (0 = no limit) */
	unsigned long spec;		/* VECTOR_1_0 and/or XTHEAD_VECTOR_0_7 */
};
680 
/* valid for VLEN >= 128: LMUL= 1/4, SEW = 32 */
FIXTURE_VARIANT_ADD(v_csr_valid, frac_lmul1)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x16,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x10,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0,
};

/* valid for VLEN >= 16: LMUL= 2, SEW = 32 */
FIXTURE_VARIANT_ADD(v_csr_valid, int_lmul1)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x11,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x2,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0,
};

/* valid for XTheadVector VLEN >= 16: LMUL= 2, SEW = 32 */
FIXTURE_VARIANT_ADD(v_csr_valid, int_lmul2)
{
	.vstart = 0x0,
	.vl = 0x0,
	.vtype = 0x9,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x2,
	.vlenb_max = 0x0,
	.spec = XTHEAD_VECTOR_0_7,
};

/* valid for VLEN >= 32: LMUL= 2, SEW = 32, VL = 2 (non-VLMAX vl) */
FIXTURE_VARIANT_ADD(v_csr_valid, int_lmul3)
{
	.vstart = 0x0,
	.vl = 0x2,
	.vtype = 0x11,
	.vcsr = 0x0,
	.vlenb_mul = 0x1,
	.vlenb_min = 0x4,
	.vlenb_max = 0x0,
	.spec = VECTOR_1_0,
};
732 
TEST_F(v_csr_valid,ptrace_v_valid_values)733 TEST_F(v_csr_valid, ptrace_v_valid_values)
734 {
735 	unsigned long vlenb;
736 	pid_t pid;
737 
738 	if (!is_vector_supported() && !is_xtheadvector_supported())
739 		SKIP(return, "Vectors not supported");
740 
741 	if (is_vector_supported() && !vector_test(variant->spec))
742 		SKIP(return, "Test not supported for Vector");
743 
744 	if (is_xtheadvector_supported() && !xthead_test(variant->spec))
745 		SKIP(return, "Test not supported for XTheadVector");
746 
747 	vlenb = get_vr_len();
748 
749 	if (variant->vlenb_min) {
750 		if (vlenb < variant->vlenb_min)
751 			SKIP(return, "This test does not support VLEN < %lu\n",
752 			     variant->vlenb_min * 8);
753 	}
754 	if (variant->vlenb_max) {
755 		if (vlenb > variant->vlenb_max)
756 			SKIP(return, "This test does not support VLEN > %lu\n",
757 			     variant->vlenb_max * 8);
758 	}
759 
760 	chld_lock = 1;
761 	pid = fork();
762 	ASSERT_LE(0, pid)
763 		TH_LOG("fork: %m");
764 
765 	if (pid == 0) {
766 		unsigned long vl;
767 
768 		while (chld_lock == 1)
769 			asm volatile("" : : "g"(chld_lock) : "memory");
770 
771 		if (is_xtheadvector_supported()) {
772 			asm volatile (
773 				// 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli
774 				// vsetvli	t4, x0, e16, m2, d1
775 				".4byte		0b00000000010100000111111011010111\n"
776 				"mv		%[new_vl], t4\n"
777 				: [new_vl] "=r" (vl) : : "t4");
778 		} else {
779 			asm volatile (
780 				".option push\n"
781 				".option arch, +zve32x\n"
782 				"vsetvli %[new_vl], x0, e16, m2, tu, mu\n"
783 				".option pop\n"
784 				: [new_vl] "=r"(vl) : : );
785 		}
786 
787 		asm volatile (
788 			".option push\n"
789 			".option norvc\n"
790 			".option arch, +zve32x\n"
791 			"ebreak\n" /* breakpoint 1: apply new V state using ptrace */
792 			"nop\n"
793 			"ebreak\n" /* breakpoint 2: V state clean - context will not be saved */
794 			"vmv.v.i v0, -1\n"
795 			"ebreak\n" /* breakpoint 3: V state dirty - context will be saved */
796 			".option pop\n");
797 	} else {
798 		struct __riscv_v_regset_state *regset_data;
799 		struct user_regs_struct regs;
800 		size_t regset_size;
801 		struct iovec iov;
802 		int status;
803 
804 		/* attach */
805 
806 		ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
807 		ASSERT_EQ(pid, waitpid(pid, &status, 0));
808 		ASSERT_TRUE(WIFSTOPPED(status));
809 
810 		/* unlock */
811 
812 		ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));
813 
814 		/* resume and wait for the 1st ebreak */
815 
816 		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
817 		ASSERT_EQ(pid, waitpid(pid, &status, 0));
818 		ASSERT_TRUE(WIFSTOPPED(status));
819 
820 		/* read tracee vector csr regs using ptrace GETREGSET */
821 
822 		regset_size = sizeof(*regset_data) + vlenb * 32;
823 		regset_data = calloc(1, regset_size);
824 
825 		iov.iov_base = regset_data;
826 		iov.iov_len = regset_size;
827 
828 		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));
829 
830 		/* verify initial vsetvli settings */
831 
832 		if (is_xtheadvector_supported()) {
833 			EXPECT_EQ(5UL, regset_data->vtype);
834 		} else {
835 			EXPECT_EQ(9UL, regset_data->vtype);
836 		}
837 
838 		EXPECT_EQ(regset_data->vlenb, regset_data->vl);
839 		EXPECT_EQ(vlenb, regset_data->vlenb);
840 		EXPECT_EQ(0UL, regset_data->vstart);
841 		EXPECT_EQ(0UL, regset_data->vcsr);
842 
843 		/* apply valid settings from fixture variants */
844 
845 		regset_data->vlenb *= variant->vlenb_mul;
846 		regset_data->vstart = variant->vstart;
847 		regset_data->vtype = variant->vtype;
848 		regset_data->vcsr = variant->vcsr;
849 		regset_data->vl = variant->vl;
850 
851 		iov.iov_base = regset_data;
852 		iov.iov_len = regset_size;
853 
854 		ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_RISCV_VECTOR, &iov));
855 
856 		/* skip 1st ebreak, then resume and wait for the 2nd ebreak */
857 
858 		iov.iov_base = &regs;
859 		iov.iov_len = sizeof(regs);
860 
861 		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov));
862 		regs.pc += 4;
863 		ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov));
864 
865 		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
866 		ASSERT_EQ(pid, waitpid(pid, &status, 0));
867 		ASSERT_TRUE(WIFSTOPPED(status));
868 
869 		/* read tracee vector csr regs using ptrace GETREGSET */
870 
871 		iov.iov_base = regset_data;
872 		iov.iov_len = regset_size;
873 
874 		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));
875 
876 		/* verify vector csr regs from tracee context */
877 
878 		EXPECT_EQ(regset_data->vstart, variant->vstart);
879 		EXPECT_EQ(regset_data->vtype, variant->vtype);
880 		EXPECT_EQ(regset_data->vcsr, variant->vcsr);
881 		EXPECT_EQ(regset_data->vl, variant->vl);
882 		EXPECT_EQ(regset_data->vlenb, vlenb);
883 
884 		/* skip 2nd ebreak, then resume and wait for the 3rd ebreak */
885 
886 		iov.iov_base = &regs;
887 		iov.iov_len = sizeof(regs);
888 
889 		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov));
890 		regs.pc += 4;
891 		ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov));
892 
893 		ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
894 		ASSERT_EQ(pid, waitpid(pid, &status, 0));
895 		ASSERT_TRUE(WIFSTOPPED(status));
896 
897 		/* read tracee vector csr regs using ptrace GETREGSET */
898 
899 		iov.iov_base = regset_data;
900 		iov.iov_len = regset_size;
901 
902 		ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));
903 
904 		/* verify vector csr regs from tracee context */
905 
906 		EXPECT_EQ(regset_data->vstart, variant->vstart);
907 		EXPECT_EQ(regset_data->vtype, variant->vtype);
908 		EXPECT_EQ(regset_data->vcsr, variant->vcsr);
909 		EXPECT_EQ(regset_data->vl, variant->vl);
910 		EXPECT_EQ(regset_data->vlenb, vlenb);
911 
912 		/* cleanup */
913 
914 		ASSERT_EQ(0, kill(pid, SIGKILL));
915 	}
916 }
917 
918 TEST_HARNESS_MAIN
919