xref: /linux/lib/raid/xor/x86/xor_arch.h (revision 440d6635b20037bc9ad46b20817d7b61cef0fc1b)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 #include <asm/cpufeature.h>
3 
4 extern struct xor_block_template xor_block_pII_mmx;
5 extern struct xor_block_template xor_block_p5_mmx;
6 extern struct xor_block_template xor_block_sse;
7 extern struct xor_block_template xor_block_sse_pf64;
8 extern struct xor_block_template xor_block_avx;
9 
10 /*
11  * When SSE is available, use it as it can write around L2.  We may also be able
12  * to load into the L1 only depending on how the cpu deals with a load to a line
13  * that is being prefetched.
14  *
15  * When AVX is available, force using it as it is better by all measures.
16  *
17  * 32-bit without MMX can fall back to the generic routines.
18  */
19 static __always_inline void __init arch_xor_init(void)
20 {
	/*
	 * Probe order is preference order: only the first matching branch
	 * runs, so a better implementation is never shadowed by a worse one.
	 */
21 	if (boot_cpu_has(X86_FEATURE_AVX) &&
	    /*
	     * OSXSAVE indicates the OS enabled XSAVE, i.e. YMM state may
	     * actually be used — AVX alone in CPUID is not sufficient.
	     */
22 	    boot_cpu_has(X86_FEATURE_OSXSAVE)) {
		/*
		 * xor_force() rather than xor_register(): presumably selects
		 * this template unconditionally without benchmarking against
		 * the others — see xor_force()'s definition to confirm.
		 */
23 		xor_force(&xor_block_avx);
24 	} else if (IS_ENABLED(CONFIG_X86_64) || boot_cpu_has(X86_FEATURE_XMM)) {
		/*
		 * x86-64 guarantees SSE2, so no runtime feature test is
		 * needed there; 32-bit must check for SSE (XMM) explicitly.
		 */
25 		xor_register(&xor_block_sse);
26 		xor_register(&xor_block_sse_pf64);
27 	} else if (boot_cpu_has(X86_FEATURE_MMX)) {
		/* 32-bit with MMX but no SSE. */
28 		xor_register(&xor_block_pII_mmx);
29 		xor_register(&xor_block_p5_mmx);
30 	} else {
		/*
		 * No SIMD at all (32-bit pre-MMX): fall back to the generic
		 * integer-register routines declared elsewhere.
		 */
31 		xor_register(&xor_block_8regs);
32 		xor_register(&xor_block_8regs_p);
33 		xor_register(&xor_block_32regs);
34 		xor_register(&xor_block_32regs_p);
35 	}
36 }
37
37