xref: /linux/lib/raid/xor/loongarch/xor_arch.h (revision 440d6635b20037bc9ad46b20817d7b61cef0fc1b)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
4  */
5 #include <asm/cpu-features.h>
6 
7 /*
8  * For grins, also test the generic routines.
9  *
10  * More importantly: it cannot be ruled out at this point of time, that some
11  * future (maybe reduced) models could run the vector algorithms slower than
12  * the scalar ones, maybe for errata or micro-op reasons. It may be
13  * appropriate to revisit this after one or two more uarch generations.
14  */
15 
16 extern struct xor_block_template xor_block_lsx;
17 extern struct xor_block_template xor_block_lasx;
18 
/*
 * Register every xor block template this kernel build can offer.
 *
 * The four generic scalar templates are always registered (see the
 * rationale in the comment above).  The LSX/LASX vector templates are
 * compiled in only under their respective CONFIG_CPU_HAS_* options
 * and, even then, registered only when the running CPU actually
 * reports the feature -- a kernel built with vector support may still
 * run on a core that lacks it.
 */
static __always_inline void __init arch_xor_init(void)
{
	/* Generic scalar routines: always available, serve as the baseline. */
	xor_register(&xor_block_8regs);
	xor_register(&xor_block_8regs_p);
	xor_register(&xor_block_32regs);
	xor_register(&xor_block_32regs_p);
#ifdef CONFIG_CPU_HAS_LSX
	/* LSX vector routine, gated on runtime CPU feature detection. */
	if (cpu_has_lsx)
		xor_register(&xor_block_lsx);
#endif
#ifdef CONFIG_CPU_HAS_LASX
	/* LASX vector routine, likewise runtime-gated. */
	if (cpu_has_lasx)
		xor_register(&xor_block_lasx);
#endif
}
34