// SPDX-License-Identifier: GPL-2.0

#define _GNU_SOURCE
#include <err.h>
#include <errno.h>
#include <pthread.h>
#include <setjmp.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <unistd.h>
#include <x86intrin.h>

#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/shm.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <sys/uio.h>

#include "../kselftest.h" /* For __cpuid_count() */

#ifndef __x86_64__
# error This test is 64-bit only
#endif
#define XSAVE_HDR_OFFSET 512
#define XSAVE_HDR_SIZE 64

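/*
 * Layout of a standard-format XSAVE area: a 512-byte legacy region
 * (x87/SSE), a 64-byte XSAVE header beginning with XSTATE_BV, and then
 * the extended state components at their CPUID-enumerated offsets.
 */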
struct xsave_buffer {
	union {
		struct {
			char legacy[XSAVE_HDR_OFFSET];
			char header[XSAVE_HDR_SIZE];
			char extended[0];
		};
		char bytes[0];
	};
};

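/*
 * XSAVE and XRSTOR take a requested-feature bitmap (RFBM) in EDX:EAX;
 * only the state components set in both RFBM and XCR0 are saved or
 * restored.
 */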
static inline void xsave(struct xsave_buffer *xbuf, uint64_t rfbm)
{
	uint32_t rfbm_lo = rfbm;
	uint32_t rfbm_hi = rfbm >> 32;

	asm volatile("xsave (%%rdi)"
		     : : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi)
		     : "memory");
}

static inline void xrstor(struct xsave_buffer *xbuf, uint64_t rfbm)
{
	uint32_t rfbm_lo = rfbm;
	uint32_t rfbm_hi = rfbm >> 32;

	asm volatile("xrstor (%%rdi)"
		     : : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi));
}

/* err() exits and will not return */
#define fatal_error(msg, ...) err(1, "[FAIL]\t" msg, ##__VA_ARGS__)

static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
		       int flags)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO | flags;
	sigemptyset(&sa.sa_mask);
	if (sigaction(sig, &sa, 0))
		fatal_error("sigaction");
}

static void clearhandler(int sig)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sigemptyset(&sa.sa_mask);
	if (sigaction(sig, &sa, 0))
		fatal_error("sigaction");
}

#define XFEATURE_XTILECFG 17
#define XFEATURE_XTILEDATA 18
#define XFEATURE_MASK_XTILECFG (1 << XFEATURE_XTILECFG)
#define XFEATURE_MASK_XTILEDATA (1 << XFEATURE_XTILEDATA)
#define XFEATURE_MASK_XTILE (XFEATURE_MASK_XTILECFG | XFEATURE_MASK_XTILEDATA)

#define CPUID_LEAF1_ECX_XSAVE_MASK (1 << 26)
#define CPUID_LEAF1_ECX_OSXSAVE_MASK (1 << 27)

static uint32_t xbuf_size;

static struct {
	uint32_t xbuf_offset;
	uint32_t size;
} xtiledata;

#define CPUID_LEAF_XSTATE 0xd
#define CPUID_SUBLEAF_XSTATE_USER 0x0
#define TILE_CPUID 0x1d
#define TILE_PALETTE_ID 0x1

static void check_cpuid_xtiledata(void)
{
	uint32_t eax, ebx, ecx, edx;

	__cpuid_count(CPUID_LEAF_XSTATE, CPUID_SUBLEAF_XSTATE_USER,
		      eax, ebx, ecx, edx);

	/*
	 * EBX enumerates the size (in bytes) required by the XSAVE
	 * instruction for an XSAVE area containing all the user state
	 * components corresponding to bits currently set in XCR0.
	 *
	 * Stash that off so it can be used to allocate buffers later.
	 */
	xbuf_size = ebx;

	__cpuid_count(CPUID_LEAF_XSTATE, XFEATURE_XTILEDATA,
		      eax, ebx, ecx, edx);
	/*
	 * eax: XTILEDATA state component size
	 * ebx: XTILEDATA state component offset in user buffer
	 */
	if (!eax || !ebx)
		fatal_error("xstate cpuid: invalid tile data size/offset: %d/%d",
			    eax, ebx);

	xtiledata.size = eax;
	xtiledata.xbuf_offset = ebx;
}

/* The helpers for managing XSAVE buffer and tile states: */

struct xsave_buffer *alloc_xbuf(void)
{
	struct xsave_buffer *xbuf;

	/* XSAVE buffer should be 64B-aligned. */
	xbuf = aligned_alloc(64, xbuf_size);
	if (!xbuf)
		fatal_error("aligned_alloc()");
	return xbuf;
}

static inline void clear_xstate_header(struct xsave_buffer *buffer)
{
	memset(&buffer->header, 0, sizeof(buffer->header));
}

static inline void set_xstatebv(struct xsave_buffer *buffer, uint64_t bv)
{
	/* XSTATE_BV is at the beginning of the header: */
	*(uint64_t *)(&buffer->header) = bv;
}

static void set_rand_tiledata(struct xsave_buffer *xbuf)
{
	int *ptr = (int *)&xbuf->bytes[xtiledata.xbuf_offset];
	int data;
	int i;

	/*
	 * Ensure that 'data' is never 0. This ensures that
	 * the registers are never in their initial configuration
	 * and thus never tracked as being in the init state.
	 */
	data = rand() | 1;

	for (i = 0; i < xtiledata.size / sizeof(int); i++, ptr++)
		*ptr = data;
}

struct xsave_buffer *stashed_xsave;

static void init_stashed_xsave(void)
{
	stashed_xsave = alloc_xbuf();
	if (!stashed_xsave)
		fatal_error("failed to allocate stashed_xsave\n");
	clear_xstate_header(stashed_xsave);
}

static void free_stashed_xsave(void)
{
	free(stashed_xsave);
}

/* See 'struct _fpx_sw_bytes' at sigcontext.h */
#define SW_BYTES_OFFSET 464
/* N.B. The struct's field name varies so read from the offset. */
#define SW_BYTES_BV_OFFSET (SW_BYTES_OFFSET + 8)
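/*
 * In 'struct _fpx_sw_bytes', the feature bitmap (xfeatures, a.k.a.
 * xstate_bv in older headers) follows two 32-bit fields (magic1 and
 * extended_size), hence the +8 above.
 */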

static inline struct _fpx_sw_bytes *get_fpx_sw_bytes(void *buffer)
{
	return (struct _fpx_sw_bytes *)(buffer + SW_BYTES_OFFSET);
}

static inline uint64_t get_fpx_sw_bytes_features(void *buffer)
{
	return *(uint64_t *)(buffer + SW_BYTES_BV_OFFSET);
}

/* Work around printf() being unsafe in signals: */
#define SIGNAL_BUF_LEN 1000
char signal_message_buffer[SIGNAL_BUF_LEN];
void sig_print(char *msg)
{
	int left = SIGNAL_BUF_LEN - strlen(signal_message_buffer) - 1;

	strncat(signal_message_buffer, msg, left);
}

static volatile bool noperm_signaled;
static int noperm_errs;
/*
 * Signal handler for when AMX is used but
 * permission has not been obtained.
 */
static void handle_noperm(int sig, siginfo_t *si, void *ctx_void)
{
	ucontext_t *ctx = (ucontext_t *)ctx_void;
	void *xbuf = ctx->uc_mcontext.fpregs;
	struct _fpx_sw_bytes *sw_bytes;
	uint64_t features;

	/* Reset the signal message buffer: */
	signal_message_buffer[0] = '\0';
	sig_print("\tAt SIGILL handler,\n");

	if (si->si_code != ILL_ILLOPC) {
		noperm_errs++;
		sig_print("[FAIL]\tInvalid signal code.\n");
	} else {
		sig_print("[OK]\tValid signal code (ILL_ILLOPC).\n");
	}

	sw_bytes = get_fpx_sw_bytes(xbuf);
	/*
	 * Without permission, the signal XSAVE buffer should not
	 * have room for AMX register state (aka. xtiledata).
	 * Check that the size does not overlap with where xtiledata
	 * will reside.
	 *
	 * This also implies that no state components *PAST*
	 * XTILEDATA (features >=19) can be present in the buffer.
	 */
	if (sw_bytes->xstate_size <= xtiledata.xbuf_offset) {
		sig_print("[OK]\tValid xstate size\n");
	} else {
		noperm_errs++;
		sig_print("[FAIL]\tInvalid xstate size\n");
	}

	features = get_fpx_sw_bytes_features(xbuf);
	/*
	 * Without permission, the XTILEDATA feature
	 * bit should not be set.
	 */
	if ((features & XFEATURE_MASK_XTILEDATA) == 0) {
		sig_print("[OK]\tValid xstate mask\n");
	} else {
		noperm_errs++;
		sig_print("[FAIL]\tInvalid xstate mask\n");
	}

	noperm_signaled = true;
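	/*
	 * "xrstor (%rdi)" encodes to three bytes (0f ae 2f), so
	 * advancing RIP by 3 steps over the faulting instruction:
	 */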
	ctx->uc_mcontext.gregs[REG_RIP] += 3; /* Skip the faulting XRSTOR */
}

/* Return true if XRSTOR is successful; otherwise, false. */
static inline bool xrstor_safe(struct xsave_buffer *xbuf, uint64_t mask)
{
	noperm_signaled = false;
	xrstor(xbuf, mask);

	/* Print any messages produced by the signal code: */
	printf("%s", signal_message_buffer);
	/*
	 * Reset the buffer to make sure any future printing
	 * only outputs new messages:
	 */
	signal_message_buffer[0] = '\0';

	if (noperm_errs)
		fatal_error("saw %d errors in noperm signal handler\n", noperm_errs);

	return !noperm_signaled;
}

/*
 * Use XRSTOR to populate the XTILEDATA registers with
 * random data.
 *
 * Return true if successful; otherwise, false.
 */
static inline bool load_rand_tiledata(struct xsave_buffer *xbuf)
{
	clear_xstate_header(xbuf);
	set_xstatebv(xbuf, XFEATURE_MASK_XTILEDATA);
	set_rand_tiledata(xbuf);
	return xrstor_safe(xbuf, XFEATURE_MASK_XTILEDATA);
}

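/*
 * XRSTOR with the XTILEDATA bit clear in XSTATE_BV (but set in RFBM)
 * places the component back in its init state.
 */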
/* Return XTILEDATA to its initial configuration. */
static inline void init_xtiledata(void)
{
	clear_xstate_header(stashed_xsave);
	xrstor_safe(stashed_xsave, XFEATURE_MASK_XTILEDATA);
}

enum expected_result { FAIL_EXPECTED, SUCCESS_EXPECTED };

/* arch_prctl() and sigaltstack() test */

#define ARCH_GET_XCOMP_SUPP 0x1021
#define ARCH_GET_XCOMP_PERM 0x1022
#define ARCH_REQ_XCOMP_PERM 0x1023

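/*
 * XTILEDATA is a dynamically-enabled feature: a process must ask for
 * it with ARCH_REQ_XCOMP_PERM before use. Without permission, touching
 * tile state raises SIGILL (handled by handle_noperm() above). Once
 * granted, the permission is inherited across fork().
 */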
static void req_xtiledata_perm(void)
{
	syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
}

static void validate_req_xcomp_perm(enum expected_result exp)
{
	unsigned long bitmask, expected_bitmask;
	long rc;

	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &bitmask);
	if (rc) {
		fatal_error("prctl(ARCH_GET_XCOMP_PERM) error: %ld", rc);
	} else if (!(bitmask & XFEATURE_MASK_XTILECFG)) {
		fatal_error("ARCH_GET_XCOMP_PERM returns XFEATURE_XTILECFG off.");
	}

	rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
	if (exp == FAIL_EXPECTED) {
		if (rc) {
			printf("[OK]\tARCH_REQ_XCOMP_PERM saw expected failure..\n");
			return;
		}

		fatal_error("ARCH_REQ_XCOMP_PERM saw unexpected success.\n");
	} else if (rc) {
		fatal_error("ARCH_REQ_XCOMP_PERM saw unexpected failure.\n");
	}

	expected_bitmask = bitmask | XFEATURE_MASK_XTILEDATA;

	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &bitmask);
	if (rc) {
		fatal_error("prctl(ARCH_GET_XCOMP_PERM) error: %ld", rc);
	} else if (bitmask != expected_bitmask) {
		fatal_error("ARCH_REQ_XCOMP_PERM set a wrong bitmask: %lx, expected: %lx.\n",
			    bitmask, expected_bitmask);
	} else {
		printf("\tARCH_REQ_XCOMP_PERM is successful.\n");
	}
}

static void validate_xcomp_perm(enum expected_result exp)
{
	bool load_success = load_rand_tiledata(stashed_xsave);

	if (exp == FAIL_EXPECTED) {
		if (load_success) {
			noperm_errs++;
			printf("[FAIL]\tLoad tiledata succeeded.\n");
		} else {
			printf("[OK]\tLoad tiledata failed.\n");
		}
	} else if (exp == SUCCESS_EXPECTED) {
		if (load_success) {
			printf("[OK]\tLoad tiledata succeeded.\n");
		} else {
			noperm_errs++;
			printf("[FAIL]\tLoad tiledata failed.\n");
		}
	}
}

#ifndef AT_MINSIGSTKSZ
# define AT_MINSIGSTKSZ 51
#endif
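/*
 * AT_MINSIGSTKSZ reports the minimum signal stack size for the current
 * xstate configuration. The tests below check that ARCH_REQ_XCOMP_PERM
 * is refused while the sigaltstack is too small to hold an
 * XTILEDATA-sized signal frame, and that a big-enough sigaltstack
 * cannot later be shrunk below that point.
 */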

static void *alloc_altstack(unsigned int size)
{
	void *altstack;

	altstack = mmap(NULL, size, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

	if (altstack == MAP_FAILED)
		fatal_error("mmap() for altstack");

	return altstack;
}

static void setup_altstack(void *addr, unsigned long size, enum expected_result exp)
{
	stack_t ss;
	int rc;

	memset(&ss, 0, sizeof(ss));
	ss.ss_size = size;
	ss.ss_sp = addr;

	rc = sigaltstack(&ss, NULL);

	if (exp == FAIL_EXPECTED) {
		if (rc) {
			printf("[OK]\tsigaltstack() failed.\n");
		} else {
			fatal_error("sigaltstack() succeeded unexpectedly.\n");
		}
	} else if (rc) {
		fatal_error("sigaltstack()");
	}
}

static void test_dynamic_sigaltstack(void)
{
	unsigned int small_size, enough_size;
	unsigned long minsigstksz;
	void *altstack;

	minsigstksz = getauxval(AT_MINSIGSTKSZ);
	printf("\tAT_MINSIGSTKSZ = %lu\n", minsigstksz);
	/*
	 * getauxval() returns 0 both on failure and when the entry is
	 * absent. AT_MINSIGSTKSZ is always nonzero when implemented,
	 * so just check for 0.
	 */
	if (minsigstksz == 0) {
		printf("no support for AT_MINSIGSTKSZ, skipping sigaltstack tests\n");
		return;
	}

	enough_size = minsigstksz * 2;

	altstack = alloc_altstack(enough_size);
	printf("\tAllocate memory for altstack (%u bytes).\n", enough_size);

	/*
	 * Try setup_altstack() with a size which can not fit
	 * XTILEDATA. ARCH_REQ_XCOMP_PERM should fail.
	 */
	small_size = minsigstksz - xtiledata.size;
	printf("\tAfter sigaltstack() with small size (%u bytes).\n", small_size);
	setup_altstack(altstack, small_size, SUCCESS_EXPECTED);
	validate_req_xcomp_perm(FAIL_EXPECTED);

	/*
	 * Try setup_altstack() with a size derived from
	 * AT_MINSIGSTKSZ. It should be more than large enough
	 * and thus ARCH_REQ_XCOMP_PERM should succeed.
	 */
	printf("\tAfter sigaltstack() with enough size (%u bytes).\n", enough_size);
	setup_altstack(altstack, enough_size, SUCCESS_EXPECTED);
	validate_req_xcomp_perm(SUCCESS_EXPECTED);

	/*
	 * Try to coerce setup_altstack() to again accept a
	 * too-small altstack. This ensures that big-enough
	 * sigaltstacks can not shrink to a too-small value
	 * once XTILEDATA permission is established.
	 */
	printf("\tThen, sigaltstack() with small size (%u bytes).\n", small_size);
	setup_altstack(altstack, small_size, FAIL_EXPECTED);
}

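/*
 * Run the ARCH_REQ_XCOMP_PERM/sigaltstack checks in forked children so
 * the "no permission" cases are exercised in processes that have not
 * yet been granted XTILEDATA permission, leaving the main test process
 * untouched until it requests permission itself later in main().
 */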
static void test_dynamic_state(void)
{
	pid_t parent, child, grandchild;

	parent = fork();
	if (parent < 0) {
		/* fork() failed */
		fatal_error("fork");
	} else if (parent > 0) {
		int status;
		/* fork() succeeded. Now in the parent. */

		wait(&status);
		if (!WIFEXITED(status) || WEXITSTATUS(status))
			fatal_error("arch_prctl test parent exit");
		return;
	}
	/* fork() succeeded. Now in the child. */

	printf("[RUN]\tCheck ARCH_REQ_XCOMP_PERM around process fork() and sigaltstack() test.\n");

	printf("\tFork a child.\n");
	child = fork();
	if (child < 0) {
		fatal_error("fork");
	} else if (child > 0) {
		int status;

		wait(&status);
		if (!WIFEXITED(status) || WEXITSTATUS(status))
			fatal_error("arch_prctl test child exit");
		_exit(0);
	}

	/*
	 * The permission request should fail without an
	 * XTILEDATA-compatible signal stack.
	 */
	printf("\tTest XCOMP_PERM at child.\n");
	validate_xcomp_perm(FAIL_EXPECTED);

	/*
	 * Set up an XTILEDATA-compatible signal stack and
	 * also obtain permission to populate XTILEDATA.
	 */
	printf("\tTest dynamic sigaltstack at child:\n");
	test_dynamic_sigaltstack();

	/* Ensure that XTILEDATA can be populated. */
	printf("\tTest XCOMP_PERM again at child.\n");
	validate_xcomp_perm(SUCCESS_EXPECTED);

	printf("\tFork a grandchild.\n");
	grandchild = fork();
	if (grandchild < 0) {
		/* fork() failed */
		fatal_error("fork");
	} else if (!grandchild) {
		/* fork() succeeded. Now in the (grand)child. */
		printf("\tTest XCOMP_PERM at grandchild.\n");

		/*
		 * Ensure that the grandchild inherited
		 * permission and a compatible sigaltstack:
		 */
		validate_xcomp_perm(SUCCESS_EXPECTED);
	} else {
		int status;
		/* fork() succeeded. Now in the parent. */

		wait(&status);
		if (!WIFEXITED(status) || WEXITSTATUS(status))
			fatal_error("fork test grandchild");
	}

	_exit(0);
}

static inline int __compare_tiledata_state(struct xsave_buffer *xbuf1, struct xsave_buffer *xbuf2)
{
	return memcmp(&xbuf1->bytes[xtiledata.xbuf_offset],
		      &xbuf2->bytes[xtiledata.xbuf_offset],
		      xtiledata.size);
}

/*
 * Save current register state and compare it to @xbuf1.
 *
 * Returns false if @xbuf1 matches the registers.
 * Returns true if @xbuf1 differs from the registers.
 */
static inline bool __validate_tiledata_regs(struct xsave_buffer *xbuf1)
{
	struct xsave_buffer *xbuf2;
	int ret;

	xbuf2 = alloc_xbuf();
	if (!xbuf2)
		fatal_error("failed to allocate XSAVE buffer\n");

	xsave(xbuf2, XFEATURE_MASK_XTILEDATA);
	ret = __compare_tiledata_state(xbuf1, xbuf2);

	free(xbuf2);

	if (ret == 0)
		return false;
	return true;
}

static inline void validate_tiledata_regs_same(struct xsave_buffer *xbuf)
{
	int ret = __validate_tiledata_regs(xbuf);

	if (ret != 0)
		fatal_error("TILEDATA registers changed");
}

static inline void validate_tiledata_regs_changed(struct xsave_buffer *xbuf)
{
	int ret = __validate_tiledata_regs(xbuf);

	if (ret == 0)
		fatal_error("TILEDATA registers did not change");
}

/* tiledata inheritance test */

static void test_fork(void)
{
	pid_t child, grandchild;

	child = fork();
	if (child < 0) {
		/* fork() failed */
		fatal_error("fork");
	} else if (child > 0) {
		/* fork() succeeded. Now in the parent. */
		int status;

		wait(&status);
		if (!WIFEXITED(status) || WEXITSTATUS(status))
			fatal_error("fork test child");
		return;
	}
	/* fork() succeeded. Now in the child. */
	printf("[RUN]\tCheck tile data inheritance.\n\tBefore fork(), load tiledata\n");

	load_rand_tiledata(stashed_xsave);

	grandchild = fork();
	if (grandchild < 0) {
		/* fork() failed */
		fatal_error("fork");
	} else if (grandchild > 0) {
		/* fork() succeeded. Still in the first child. */
		int status;

		wait(&status);
		if (!WIFEXITED(status) || WEXITSTATUS(status))
			fatal_error("fork test grandchild");
		_exit(0);
	}
	/* fork() succeeded. Now in the (grand)child. */

	/*
	 * TILEDATA registers are not preserved across fork().
	 * Ensure that their value has changed:
	 */
	validate_tiledata_regs_changed(stashed_xsave);

	_exit(0);
}

/* Context switching test */

static struct _ctxtswtest_cfg {
	unsigned int iterations;
	unsigned int num_threads;
} ctxtswtest_config;

struct futex_info {
	pthread_t thread;
	int nr;
	pthread_mutex_t mutex;
	struct futex_info *next;
};

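/*
 * Each thread owns one mutex in a ring. A thread's mutex is locked at
 * creation time; the thread blocks on it at the top of every iteration
 * and unlocks its successor's mutex at the bottom. With all threads
 * pinned to CPU 0, exactly one thread runs at a time and every handoff
 * forces a context switch of the tile state.
 */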
static void *check_tiledata(void *info)
{
	struct futex_info *finfo = (struct futex_info *)info;
	struct xsave_buffer *xbuf;
	int i;

	xbuf = alloc_xbuf();
	if (!xbuf)
		fatal_error("unable to allocate XSAVE buffer");

	/*
	 * Load random data into 'xbuf' and then restore
	 * it to the tile registers themselves.
	 */
	load_rand_tiledata(xbuf);
	for (i = 0; i < ctxtswtest_config.iterations; i++) {
		pthread_mutex_lock(&finfo->mutex);

		/*
		 * Ensure the register values have not
		 * diverged from those recorded in 'xbuf'.
		 */
		validate_tiledata_regs_same(xbuf);

		/* Load new, random values into xbuf and registers */
		load_rand_tiledata(xbuf);

		/*
		 * The last thread's last unlock will be for
		 * thread 0's mutex. However, thread 0 will
		 * have already exited the loop and the mutex
		 * will already be unlocked.
		 *
		 * Because this is not an ERRORCHECK mutex,
		 * that inconsistency will be silently ignored.
		 */
		pthread_mutex_unlock(&finfo->next->mutex);
	}

	free(xbuf);
	/*
	 * Return this thread's finfo, which is
	 * a unique value for this thread.
	 */
	return finfo;
}

static int create_threads(int num, struct futex_info *finfo)
{
	int i;

	for (i = 0; i < num; i++) {
		int next_nr;

		finfo[i].nr = i;
		/*
		 * Thread 'i' will wait on this mutex to
		 * be unlocked. Lock it immediately after
		 * initialization:
		 */
		pthread_mutex_init(&finfo[i].mutex, NULL);
		pthread_mutex_lock(&finfo[i].mutex);

		next_nr = (i + 1) % num;
		finfo[i].next = &finfo[next_nr];

		if (pthread_create(&finfo[i].thread, NULL, check_tiledata, &finfo[i]))
			fatal_error("pthread_create()");
	}
	return 0;
}

static void affinitize_cpu0(void)
{
	cpu_set_t cpuset;

	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);

	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
		fatal_error("sched_setaffinity to CPU 0");
}

static void test_context_switch(void)
{
	struct futex_info *finfo;
	int i;

	/* Affinitize to one CPU to force context switches */
	affinitize_cpu0();

	req_xtiledata_perm();

	printf("[RUN]\tCheck tiledata context switches, %d iterations, %d threads.\n",
	       ctxtswtest_config.iterations,
	       ctxtswtest_config.num_threads);

	finfo = malloc(sizeof(*finfo) * ctxtswtest_config.num_threads);
	if (!finfo)
		fatal_error("malloc()");

	create_threads(ctxtswtest_config.num_threads, finfo);

	/*
	 * This thread wakes up thread 0
	 * Thread 0 will wake up 1
	 * Thread 1 will wake up 2
	 * ...
	 * the last thread will wake up 0
	 *
	 * ... this will repeat for the configured
	 * number of iterations.
	 */
	pthread_mutex_unlock(&finfo[0].mutex);

	/* Wait for all the threads to finish: */
	for (i = 0; i < ctxtswtest_config.num_threads; i++) {
		void *thread_retval;
		int rc;

		rc = pthread_join(finfo[i].thread, &thread_retval);

		if (rc)
			fatal_error("pthread_join() failed for thread %d err: %d\n",
				    i, rc);

		if (thread_retval != &finfo[i])
			fatal_error("unexpected thread retval for thread %d: %p\n",
				    i, thread_retval);
	}

	printf("[OK]\tNo incorrect case was found.\n");

	free(finfo);
}

/* Ptrace test */

/*
 * Make sure the ptracee has the expanded kernel buffer on the first
 * use. Then, initialize the state before performing the state
 * injection from the ptracer.
 */
static inline void ptracee_firstuse_tiledata(void)
{
	load_rand_tiledata(stashed_xsave);
	init_xtiledata();
}

/*
 * Ptracer injects the randomized tile data state. It also reads
 * before and after that, which will execute the kernel's state copy
 * functions. So, the tester is advised to double-check any emitted
 * kernel messages.
 */
static void ptracer_inject_tiledata(pid_t target)
{
	struct xsave_buffer *xbuf;
	struct iovec iov;

	xbuf = alloc_xbuf();
	if (!xbuf)
		fatal_error("unable to allocate XSAVE buffer");

	printf("\tRead the init'ed tiledata via ptrace().\n");

	iov.iov_base = xbuf;
	iov.iov_len = xbuf_size;

	memset(stashed_xsave, 0, xbuf_size);

	if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
		fatal_error("PTRACE_GETREGSET");

	if (!__compare_tiledata_state(stashed_xsave, xbuf))
		printf("[OK]\tThe init'ed tiledata was read from ptracee.\n");
	else
		printf("[FAIL]\tThe init'ed tiledata was not read from ptracee.\n");

	printf("\tInject tiledata via ptrace().\n");

	load_rand_tiledata(xbuf);

	memcpy(&stashed_xsave->bytes[xtiledata.xbuf_offset],
	       &xbuf->bytes[xtiledata.xbuf_offset],
	       xtiledata.size);

	if (ptrace(PTRACE_SETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
		fatal_error("PTRACE_SETREGSET");

	if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
		fatal_error("PTRACE_GETREGSET");

	if (!__compare_tiledata_state(stashed_xsave, xbuf))
		printf("[OK]\tTiledata was correctly written to ptracee.\n");
	else
		printf("[FAIL]\tTiledata was not correctly written to ptracee.\n");
}

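/*
 * The child makes itself traceable with PTRACE_TRACEME, touches and
 * then re-initializes its tile state, and stops itself with SIGTRAP.
 * The parent waits for that stop, injects tile data through the
 * NT_X86_XSTATE regset, then detaches and reaps the child.
 */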
static void test_ptrace(void)
{
	pid_t child;
	int status;

	child = fork();
	if (child < 0) {
		err(1, "fork");
	} else if (!child) {
		if (ptrace(PTRACE_TRACEME, 0, NULL, NULL))
			err(1, "PTRACE_TRACEME");

		ptracee_firstuse_tiledata();

		raise(SIGTRAP);
		_exit(0);
	}

	do {
		wait(&status);
	} while (WSTOPSIG(status) != SIGTRAP);

	ptracer_inject_tiledata(child);

	ptrace(PTRACE_DETACH, child, NULL, NULL);
	wait(&status);
	if (!WIFEXITED(status) || WEXITSTATUS(status))
		err(1, "ptrace test");
}

int main(void)
{
	unsigned long features;
	long rc;

	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_SUPP, &features);
	if (rc || (features & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE) {
		ksft_print_msg("no AMX support\n");
		return KSFT_SKIP;
	}

	check_cpuid_xtiledata();

	init_stashed_xsave();
	sethandler(SIGILL, handle_noperm, 0);

	test_dynamic_state();

	/* Request permission for the following tests */
	req_xtiledata_perm();

	test_fork();

	ctxtswtest_config.iterations = 10;
	ctxtswtest_config.num_threads = 5;
	test_context_switch();

	test_ptrace();

	clearhandler(SIGILL);
	free_stashed_xsave();

	return 0;
}