// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2016 Gvozden Neskovic <neskovic@compeng.uni-frankfurt.de>.
 */

/*
 * USER API:
 *
 * Kernel FPU methods:
 *     kfpu_allowed()
 *     kfpu_begin()
 *     kfpu_end()
 *     kfpu_init()
 *     kfpu_fini()
 *
 * SIMD support:
 *
 * The following functions should be called to determine whether a CPU
 * feature is supported. All functions are usable in kernel and user space.
 * If a SIMD algorithm uses more than one instruction set, all relevant
 * feature test functions should be called (see the usage sketch following
 * this comment).
 *
 * Supported features:
 *     zfs_sse_available()
 *     zfs_sse2_available()
 *     zfs_sse3_available()
 *     zfs_ssse3_available()
 *     zfs_sse4_1_available()
 *     zfs_sse4_2_available()
 *
 *     zfs_avx_available()
 *     zfs_avx2_available()
 *
 *     zfs_bmi1_available()
 *     zfs_bmi2_available()
 *
 *     zfs_shani_available()
 *
 *     zfs_avx512f_available()
 *     zfs_avx512cd_available()
 *     zfs_avx512er_available()
 *     zfs_avx512pf_available()
 *     zfs_avx512bw_available()
 *     zfs_avx512dq_available()
 *     zfs_avx512vl_available()
 *     zfs_avx512ifma_available()
 *     zfs_avx512vbmi_available()
 *
 * NOTE(AVX-512VL): If using AVX-512 instructions with 128-bit registers,
 * also add zfs_avx512vl_available() to the feature check.
 */
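
/*
 * Usage sketch (illustrative only; the caller and code paths below are
 * hypothetical, not part of this header): a consumer first checks that
 * kernel FPU use is allowed and that the required instruction sets are
 * present, then brackets the SIMD code path with kfpu_begin()/kfpu_end():
 *
 *     if (kfpu_allowed() && zfs_avx_available() && zfs_avx2_available()) {
 *             kfpu_begin();
 *             ...run the AVX/AVX2 code path...
 *             kfpu_end();
 *     } else {
 *             ...fall back to a scalar implementation...
 *     }
 *
 * kfpu_init() is expected to be called once before the first kfpu_begin()
 * and kfpu_fini() once after the last kfpu_end(), e.g. at module load and
 * unload respectively.
 */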

#ifndef _LINUX_SIMD_X86_H
#define _LINUX_SIMD_X86_H

/* only for __x86 */
#if defined(__x86)

#include <sys/types.h>
#include <asm/cpufeature.h>

/*
 * Disable the WARN_ON_FPU() macro to prevent additional dependencies
 * when providing the kfpu_* functions. Relevant warnings are included
 * as appropriate and are unconditionally enabled.
 */
#if defined(CONFIG_X86_DEBUG_FPU) && !defined(KERNEL_EXPORTS_X86_FPU)
#undef CONFIG_X86_DEBUG_FPU
#endif

/*
 * The following cases are for kernels which export either the
 * kernel_fpu_* or __kernel_fpu_* functions.
 */
#if defined(KERNEL_EXPORTS_X86_FPU)

#if defined(HAVE_KERNEL_FPU_API_HEADER)
#include <asm/fpu/api.h>
#if defined(HAVE_KERNEL_FPU_INTERNAL_HEADER)
#include <asm/fpu/internal.h>
#endif
#else
#include <asm/i387.h>
#endif

#define kfpu_allowed()  1
#define kfpu_init()     0
#define kfpu_fini()     ((void) 0)

#if defined(HAVE_UNDERSCORE_KERNEL_FPU)
#define kfpu_begin()            \
{                               \
        preempt_disable();      \
        __kernel_fpu_begin();   \
}
#define kfpu_end()              \
{                               \
        __kernel_fpu_end();     \
        preempt_enable();       \
}

#elif defined(HAVE_KERNEL_FPU)
#define kfpu_begin()    kernel_fpu_begin()
#define kfpu_end()      kernel_fpu_end()

#else
/*
 * This case is unreachable. When KERNEL_EXPORTS_X86_FPU is defined then
 * either HAVE_UNDERSCORE_KERNEL_FPU or HAVE_KERNEL_FPU must be defined.
 */
#error "Unreachable kernel configuration"
#endif

#else /* defined(KERNEL_EXPORTS_X86_FPU) */

/*
 * When the kernel_fpu_* symbols are unavailable then provide our own
 * versions which allow the FPU to be safely used.
 */
#if defined(HAVE_KERNEL_FPU_INTERNAL)

/*
 * For kernels not exporting *kfpu_{begin,end} we have to use inline assembly
 * with the XSAVE{,OPT,S} instructions, so we need the toolchain to support at
 * least XSAVE.
 */
#if !defined(HAVE_XSAVE)
#error "Toolchain needs to support the XSAVE assembler instruction"
#endif

#ifndef XFEATURE_MASK_XTILE
/*
 * For kernels where this doesn't exist yet, we still don't want to break
 * by save/restoring this broken nonsense.
 * See issue #14989 or Intel errata SPR4 for why.
 */
#define XFEATURE_MASK_XTILE     0x60000
#endif

#include <linux/mm.h>
#include <linux/slab.h>

extern uint8_t **zfs_kfpu_fpregs;

/*
 * Return the size in bytes required by the XSAVE instruction for an
 * XSAVE area containing all the user state components supported by this CPU.
 * See: Intel 64 and IA-32 Architectures Software Developer’s Manual.
 * Dec. 2021. Vol. 2A p. 3-222.
 */
static inline uint32_t
get_xsave_area_size(void)
{
        if (!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
                return (0);
        }
        /*
         * Call CPUID with leaf 13 and subleaf 0. The size is in ecx.
         * We don't need to check for cpuid_max here, since if this CPU has
         * OSXSAVE set, it has leaf 13 (0x0D) as well.
         */
        uint32_t eax, ebx, ecx, edx;

        eax = 13U;
        ecx = 0U;
        __asm__ __volatile__("cpuid"
            : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
            : "a" (eax), "c" (ecx));

        return (ecx);
}

/*
 * Return the allocation order of the maximum buffer size required to save
 * the FPU state on this architecture. The value returned is the same as the
 * kernel's get_order() function would return (i.e. 2^order = nr. of pages
 * required). Currently this will always return 0, since the save area stays
 * below 4 KiB even for a full-fledged AVX-512 implementation: an XSAVE area
 * of roughly 2.7 kB, as is typical with AVX-512, still fits in a single
 * page, giving order 0.
 */
static inline int
get_fpuregs_save_area_order(void)
{
        size_t area_size = (size_t)get_xsave_area_size();

        /*
         * If we are dealing with a CPU not supporting XSAVE,
         * get_xsave_area_size() will return 0. Thus the maximum memory
         * required is the FXSAVE area size which is 512 bytes. See: Intel 64
         * and IA-32 Architectures Software Developer’s Manual. Dec. 2021.
         * Vol. 2A p. 3-451.
         */
        if (area_size == 0) {
                area_size = 512;
        }
        return (get_order(area_size));
}

/*
 * Free the per-cpu FPU state buffers allocated by kfpu_init().
 */
static inline void
kfpu_fini(void)
{
        int cpu;
        int order = get_fpuregs_save_area_order();

        for_each_possible_cpu(cpu) {
                if (zfs_kfpu_fpregs[cpu] != NULL) {
                        free_pages((unsigned long)zfs_kfpu_fpregs[cpu], order);
                }
        }

        kfree(zfs_kfpu_fpregs);
}

static inline int
kfpu_init(void)
{
        zfs_kfpu_fpregs = kzalloc(num_possible_cpus() * sizeof (uint8_t *),
            GFP_KERNEL);

        if (zfs_kfpu_fpregs == NULL)
                return (-ENOMEM);

        /*
         * The fxsave and xsave operations require 16-/64-byte alignment of
         * the target memory. Since kmalloc() provides no alignment
         * guarantee, use alloc_pages_node() instead.
         */
        int cpu;
        int order = get_fpuregs_save_area_order();

        for_each_possible_cpu(cpu) {
                struct page *page = alloc_pages_node(cpu_to_node(cpu),
                    GFP_KERNEL | __GFP_ZERO, order);
                if (page == NULL) {
                        kfpu_fini();
                        return (-ENOMEM);
                }

                zfs_kfpu_fpregs[cpu] = page_address(page);
        }

        return (0);
}

#define kfpu_allowed()  1

/*
 * FPU save and restore instructions.
 */
#define __asm                   __asm__ __volatile__
#define kfpu_fxsave(addr)       __asm("fxsave %0" : "=m" (*(addr)))
#define kfpu_fxsaveq(addr)      __asm("fxsaveq %0" : "=m" (*(addr)))
#define kfpu_fnsave(addr)       __asm("fnsave %0; fwait" : "=m" (*(addr)))
#define kfpu_fxrstor(addr)      __asm("fxrstor %0" : : "m" (*(addr)))
#define kfpu_fxrstorq(addr)     __asm("fxrstorq %0" : : "m" (*(addr)))
#define kfpu_frstor(addr)       __asm("frstor %0" : : "m" (*(addr)))
#define kfpu_fxsr_clean(rval)   __asm("fnclex; emms; fildl %P[addr]" \
                                    : : [addr] "m" (rval));

#define kfpu_do_xsave(instruction, addr, mask)                  \
{                                                               \
        uint32_t low, hi;                                       \
                                                                \
        low = mask;                                             \
        hi = (uint64_t)(mask) >> 32;                            \
        __asm(instruction " %[dst]\n\t"                         \
            :                                                   \
            : [dst] "m" (*(addr)), "a" (low), "d" (hi)          \
            : "memory");                                        \
}
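
/*
 * Note (illustrative): the XSAVE-family instructions take a 64-bit
 * requested-feature bitmap in edx:eax, which is why the mask is split
 * into its low and high 32-bit halves above. For example,
 * kfpu_do_xsave("xsave", state, ~XFEATURE_MASK_XTILE) asks the CPU to
 * save every supported state component except the AMX tile state.
 */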

static inline void
kfpu_save_fxsr(uint8_t *addr)
{
        if (IS_ENABLED(CONFIG_X86_32))
                kfpu_fxsave(addr);
        else
                kfpu_fxsaveq(addr);
}

static inline void
kfpu_save_fsave(uint8_t *addr)
{
        kfpu_fnsave(addr);
}

static inline void
kfpu_begin(void)
{
        /*
         * Preemption and interrupts must be disabled for the critical
         * region where the FPU state is being modified.
         */
        preempt_disable();
        local_irq_disable();

        /*
         * The current FPU registers need to be preserved by kfpu_begin()
         * and restored by kfpu_end(). They are stored in a dedicated
         * per-cpu variable, not in the task struct; this allows any user
         * FPU state to be correctly preserved and restored.
         */
        uint8_t *state = zfs_kfpu_fpregs[smp_processor_id()];
#if defined(HAVE_XSAVES)
        if (static_cpu_has(X86_FEATURE_XSAVES)) {
                kfpu_do_xsave("xsaves", state, ~XFEATURE_MASK_XTILE);
                return;
        }
#endif
#if defined(HAVE_XSAVEOPT)
        if (static_cpu_has(X86_FEATURE_XSAVEOPT)) {
                kfpu_do_xsave("xsaveopt", state, ~XFEATURE_MASK_XTILE);
                return;
        }
#endif
        if (static_cpu_has(X86_FEATURE_XSAVE)) {
                kfpu_do_xsave("xsave", state, ~XFEATURE_MASK_XTILE);
        } else if (static_cpu_has(X86_FEATURE_FXSR)) {
                kfpu_save_fxsr(state);
        } else {
                kfpu_save_fsave(state);
        }
}
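
/*
 * Note (illustrative): the save paths above are tried from most to least
 * capable: xsaves (compacted format), then xsaveopt (which may skip
 * components unmodified since the last restore), plain xsave, fxsave for
 * SSE-era CPUs, and finally fnsave as the legacy x87 fallback. kfpu_end()
 * mirrors this selection when restoring.
 */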

#define kfpu_do_xrstor(instruction, addr, mask)                 \
{                                                               \
        uint32_t low, hi;                                       \
                                                                \
        low = mask;                                             \
        hi = (uint64_t)(mask) >> 32;                            \
        __asm(instruction " %[src]"                             \
            :                                                   \
            : [src] "m" (*(addr)), "a" (low), "d" (hi)          \
            : "memory");                                        \
}

static inline void
kfpu_restore_fxsr(uint8_t *addr)
{
        /*
         * On AuthenticAMD K7 and K8 processors the fxrstor instruction only
         * restores the x87 FOP, FIP, and FDP registers when an exception
         * is pending. Clean the x87 state to force the restore.
         */
        if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK)))
                kfpu_fxsr_clean(addr);

        if (IS_ENABLED(CONFIG_X86_32)) {
                kfpu_fxrstor(addr);
        } else {
                kfpu_fxrstorq(addr);
        }
}

static inline void
kfpu_restore_fsave(uint8_t *addr)
{
        kfpu_frstor(addr);
}

static inline void
kfpu_end(void)
{
        uint8_t *state = zfs_kfpu_fpregs[smp_processor_id()];
#if defined(HAVE_XSAVES)
        if (static_cpu_has(X86_FEATURE_XSAVES)) {
                kfpu_do_xrstor("xrstors", state, ~XFEATURE_MASK_XTILE);
                goto out;
        }
#endif
        if (static_cpu_has(X86_FEATURE_XSAVE)) {
                kfpu_do_xrstor("xrstor", state, ~XFEATURE_MASK_XTILE);
        } else if (static_cpu_has(X86_FEATURE_FXSR)) {
                kfpu_restore_fxsr(state);
        } else {
                kfpu_restore_fsave(state);
        }
out:
        local_irq_enable();
        preempt_enable();
}

#else

#error "Exactly one of KERNEL_EXPORTS_X86_FPU or HAVE_KERNEL_FPU_INTERNAL" \
        " must be defined"

#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */
#endif /* defined(KERNEL_EXPORTS_X86_FPU) */

/*
 * The Linux kernel provides an interface for CPU feature testing.
 */

/*
 * Detect register set support.
 */

/*
 * Check if the OS supports AVX and AVX2 by checking XCR0.
 * Only call this function if CPUID indicates that the AVX feature is
 * supported by the CPU; otherwise executing xgetbv may raise an
 * illegal-instruction fault.
 */
static inline uint64_t
zfs_xgetbv(uint32_t index)
{
        uint32_t eax, edx;
        /* xgetbv, encoded as raw bytes for toolchains without the mnemonic */
        __asm__ __volatile__(".byte 0x0f; .byte 0x01; .byte 0xd0"
            : "=a" (eax), "=d" (edx)
            : "c" (index));

        return ((((uint64_t)edx) << 32) | (uint64_t)eax);
}

static inline boolean_t
__simd_state_enabled(const uint64_t state)
{
        boolean_t has_osxsave;
        uint64_t xcr0;

#if defined(X86_FEATURE_OSXSAVE)
        has_osxsave = !!boot_cpu_has(X86_FEATURE_OSXSAVE);
#else
        has_osxsave = B_FALSE;
#endif
        if (!has_osxsave)
                return (B_FALSE);

        xcr0 = zfs_xgetbv(0);
        return ((xcr0 & state) == state);
}

#define _XSTATE_SSE_AVX         (0x2 | 0x4)
#define _XSTATE_AVX512          (0xE0 | _XSTATE_SSE_AVX)

#define __ymm_enabled()         __simd_state_enabled(_XSTATE_SSE_AVX)
#define __zmm_enabled()         __simd_state_enabled(_XSTATE_AVX512)
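
/*
 * For reference (illustrative): in XCR0, bit 1 covers the SSE (XMM) state
 * and bit 2 the AVX (YMM) state, hence _XSTATE_SSE_AVX == 0x6. Bits 5-7
 * cover the AVX-512 opmask, ZMM_Hi256, and Hi16_ZMM state, hence the
 * additional 0xE0 in _XSTATE_AVX512.
 */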

/*
 * Check if SSE instruction set is available
 */
static inline boolean_t
zfs_sse_available(void)
{
        return (!!boot_cpu_has(X86_FEATURE_XMM));
}

/*
 * Check if SSE2 instruction set is available
 */
static inline boolean_t
zfs_sse2_available(void)
{
        return (!!boot_cpu_has(X86_FEATURE_XMM2));
}

/*
 * Check if SSE3 instruction set is available
 */
static inline boolean_t
zfs_sse3_available(void)
{
        return (!!boot_cpu_has(X86_FEATURE_XMM3));
}

/*
 * Check if SSSE3 instruction set is available
 */
static inline boolean_t
zfs_ssse3_available(void)
{
        return (!!boot_cpu_has(X86_FEATURE_SSSE3));
}

/*
 * Check if SSE4.1 instruction set is available
 */
static inline boolean_t
zfs_sse4_1_available(void)
{
        return (!!boot_cpu_has(X86_FEATURE_XMM4_1));
}

/*
 * Check if SSE4.2 instruction set is available
 */
static inline boolean_t
zfs_sse4_2_available(void)
{
        return (!!boot_cpu_has(X86_FEATURE_XMM4_2));
}

/*
 * Check if AVX instruction set is available
 */
static inline boolean_t
zfs_avx_available(void)
{
        return (boot_cpu_has(X86_FEATURE_AVX) && __ymm_enabled());
}

/*
 * Check if AVX2 instruction set is available
 */
static inline boolean_t
zfs_avx2_available(void)
{
        return (boot_cpu_has(X86_FEATURE_AVX2) && __ymm_enabled());
}

/*
 * Check if BMI1 instruction set is available
 */
static inline boolean_t
zfs_bmi1_available(void)
{
#if defined(X86_FEATURE_BMI1)
        return (!!boot_cpu_has(X86_FEATURE_BMI1));
#else
        return (B_FALSE);
#endif
}

/*
 * Check if BMI2 instruction set is available
 */
static inline boolean_t
zfs_bmi2_available(void)
{
#if defined(X86_FEATURE_BMI2)
        return (!!boot_cpu_has(X86_FEATURE_BMI2));
#else
        return (B_FALSE);
#endif
}

/*
 * Check if AES instruction set is available
 */
static inline boolean_t
zfs_aes_available(void)
{
#if defined(X86_FEATURE_AES)
        return (!!boot_cpu_has(X86_FEATURE_AES));
#else
        return (B_FALSE);
#endif
}

/*
 * Check if PCLMULQDQ instruction set is available
 */
static inline boolean_t
zfs_pclmulqdq_available(void)
{
#if defined(X86_FEATURE_PCLMULQDQ)
        return (!!boot_cpu_has(X86_FEATURE_PCLMULQDQ));
#else
        return (B_FALSE);
#endif
}

/*
 * Check if MOVBE instruction is available
 */
static inline boolean_t
zfs_movbe_available(void)
{
#if defined(X86_FEATURE_MOVBE)
        return (!!boot_cpu_has(X86_FEATURE_MOVBE));
#else
        return (B_FALSE);
#endif
}

/*
 * Check if SHA_NI instruction set is available
 */
static inline boolean_t
zfs_shani_available(void)
{
#if defined(X86_FEATURE_SHA_NI)
        return (!!boot_cpu_has(X86_FEATURE_SHA_NI));
#else
        return (B_FALSE);
#endif
}

/*
 * AVX-512 family of instruction sets:
 *
 * AVX512F      Foundation
 * AVX512CD     Conflict Detection Instructions
 * AVX512ER     Exponential and Reciprocal Instructions
 * AVX512PF     Prefetch Instructions
 *
 * AVX512BW     Byte and Word Instructions
 * AVX512DQ     Double-word and Quadword Instructions
 * AVX512VL     Vector Length Extensions
 *
 * AVX512IFMA   Integer Fused Multiply Add (Not supported by kernel 4.4)
 * AVX512VBMI   Vector Byte Manipulation Instructions
 */
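
/*
 * Usage note (illustrative): per the AVX-512VL note at the top of this
 * file, a hypothetical code path using EVEX-encoded instructions on
 * 128-bit or 256-bit registers should combine the checks, e.g.:
 *
 *     if (zfs_avx512f_available() && zfs_avx512vl_available())
 *             ...use the 128/256-bit AVX-512 code path...
 */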

/*
 * Check if AVX512F instruction set is available
 */
static inline boolean_t
zfs_avx512f_available(void)
{
        boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512F)
        has_avx512 = !!boot_cpu_has(X86_FEATURE_AVX512F);
#endif
        return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512CD instruction set is available
 */
static inline boolean_t
zfs_avx512cd_available(void)
{
        boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512CD)
        has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
            boot_cpu_has(X86_FEATURE_AVX512CD);
#endif
        return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512ER instruction set is available
 */
static inline boolean_t
zfs_avx512er_available(void)
{
        boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512ER)
        has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
            boot_cpu_has(X86_FEATURE_AVX512ER);
#endif
        return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512PF instruction set is available
 */
static inline boolean_t
zfs_avx512pf_available(void)
{
        boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512PF)
        has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
            boot_cpu_has(X86_FEATURE_AVX512PF);
#endif
        return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512BW instruction set is available
 */
static inline boolean_t
zfs_avx512bw_available(void)
{
        boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512BW)
        has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
            boot_cpu_has(X86_FEATURE_AVX512BW);
#endif
        return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512DQ instruction set is available
 */
static inline boolean_t
zfs_avx512dq_available(void)
{
        boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512DQ)
        has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
            boot_cpu_has(X86_FEATURE_AVX512DQ);
#endif
        return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512VL instruction set is available
 */
static inline boolean_t
zfs_avx512vl_available(void)
{
        boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512VL)
        has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
            boot_cpu_has(X86_FEATURE_AVX512VL);
#endif
        return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512IFMA instruction set is available
 */
static inline boolean_t
zfs_avx512ifma_available(void)
{
        boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512IFMA)
        has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
            boot_cpu_has(X86_FEATURE_AVX512IFMA);
#endif
        return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512VBMI instruction set is available
 */
static inline boolean_t
zfs_avx512vbmi_available(void)
{
        boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512VBMI)
        has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
            boot_cpu_has(X86_FEATURE_AVX512VBMI);
#endif
        return (has_avx512 && __zmm_enabled());
}

#endif /* defined(__x86) */

#endif /* _LINUX_SIMD_X86_H */