/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2016 Gvozden Nešković. All rights reserved.
 */

#if defined(__x86_64) && defined(HAVE_AVX512F)

#include <sys/byteorder.h>
#include <sys/frame.h>
#include <sys/spa_checksum.h>
#include <sys/string.h>
#include <sys/simd.h>
#include <zfs_fletcher.h>

#ifdef __linux__
#define	__asm __asm__ __volatile__
#endif

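/*
 * Zero the four 512-bit accumulators.  Each vector holds eight 64-bit
 * lanes of one Fletcher-4 running sum (A, B, C and D respectively), so
 * the SIMD loops below effectively compute eight interleaved streams.
 */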
ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_avx512f_init(fletcher_4_ctx_t *ctx)
{
	memset(ctx->avx512, 0, 4 * sizeof (zfs_fletcher_avx512_t));
}

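/*
 * Fold the eight per-lane accumulators back into the scalar Fletcher-4
 * result.  Because the vector loop consumes eight 32-bit words per
 * iteration, each lane only sees every eighth word; the constant tables
 * below supply the weights needed to recombine the lanes into the sums a
 * sequential implementation would have produced.
 */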
ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_avx512f_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
	static const uint64_t
	CcA[] = {   0,   0,   1,   3,   6,  10,  15,  21 },
	CcB[] = {  28,  36,  44,  52,  60,  68,  76,  84 },
	DcA[] = {   0,   0,   0,   1,   4,  10,  20,  35 },
	DcB[] = {  56,  84, 120, 164, 216, 276, 344, 420 },
	DcC[] = { 448, 512, 576, 640, 704, 768, 832, 896 };

	uint64_t A, B, C, D;
	uint64_t i;

	A = ctx->avx512[0].v[0];
	B = 8 * ctx->avx512[1].v[0];
	C = 64 * ctx->avx512[2].v[0] - CcB[0] * ctx->avx512[1].v[0];
	D = 512 * ctx->avx512[3].v[0] - DcC[0] * ctx->avx512[2].v[0] +
	    DcB[0] * ctx->avx512[1].v[0];

	for (i = 1; i < 8; i++) {
		A += ctx->avx512[0].v[i];
		B += 8 * ctx->avx512[1].v[i] - i * ctx->avx512[0].v[i];
		C += 64 * ctx->avx512[2].v[i] - CcB[i] * ctx->avx512[1].v[i] +
		    CcA[i] * ctx->avx512[0].v[i];
		D += 512 * ctx->avx512[3].v[i] - DcC[i] * ctx->avx512[2].v[i] +
		    DcB[i] * ctx->avx512[1].v[i] - DcA[i] * ctx->avx512[0].v[i];
	}

	ZIO_SET_CHECKSUM(zcp, A, B, C, D);
}

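/*
 * Spill and reload the live accumulators: zmm0-zmm3 hold the A, B, C and
 * D lane vectors while the FPU context is active, and are written back to
 * ctx->avx512[] before kfpu_end() releases it.
 */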
#define	FLETCHER_4_AVX512_RESTORE_CTX(ctx)				\
{									\
	__asm("vmovdqu64 %0, %%zmm0" :: "m" ((ctx)->avx512[0]));	\
	__asm("vmovdqu64 %0, %%zmm1" :: "m" ((ctx)->avx512[1]));	\
	__asm("vmovdqu64 %0, %%zmm2" :: "m" ((ctx)->avx512[2]));	\
	__asm("vmovdqu64 %0, %%zmm3" :: "m" ((ctx)->avx512[3]));	\
}

#define	FLETCHER_4_AVX512_SAVE_CTX(ctx)					\
{									\
	__asm("vmovdqu64 %%zmm0, %0" : "=m" ((ctx)->avx512[0]));	\
	__asm("vmovdqu64 %%zmm1, %0" : "=m" ((ctx)->avx512[1]));	\
	__asm("vmovdqu64 %%zmm2, %0" : "=m" ((ctx)->avx512[2]));	\
	__asm("vmovdqu64 %%zmm3, %0" : "=m" ((ctx)->avx512[3]));	\
}

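/*
 * Native-endian update: vpmovzxdq zero-extends eight consecutive 32-bit
 * words into the 64-bit lanes of zmm4, then the cascaded vpaddq chain
 * feeds each accumulator into the next (A += data, B += A, C += B,
 * D += C) for all eight lanes at once.
 */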
static void
fletcher_4_avx512f_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
	const uint32_t *ip = buf;
	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);

	kfpu_begin();

	FLETCHER_4_AVX512_RESTORE_CTX(ctx);

	for (; ip < ipend; ip += 8) {
		__asm("vpmovzxdq %0, %%zmm4"::"m" (*ip));
		__asm("vpaddq %zmm4, %zmm0, %zmm0");
		__asm("vpaddq %zmm0, %zmm1, %zmm1");
		__asm("vpaddq %zmm1, %zmm2, %zmm2");
		__asm("vpaddq %zmm2, %zmm3, %zmm3");
	}

	FLETCHER_4_AVX512_SAVE_CTX(ctx);

	kfpu_end();
}
STACK_FRAME_NON_STANDARD(fletcher_4_avx512f_native);

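/*
 * Byteswapped update for AVX512F-only hardware: without AVX512BW there is
 * no 512-bit vpshufb, so each zero-extended word is byte-reversed with a
 * sequence of shifts, masks and ors before it is accumulated.
 */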
static void
fletcher_4_avx512f_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
    uint64_t size)
{
	static const uint64_t byteswap_mask = 0xFFULL;
	const uint32_t *ip = buf;
	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);

	kfpu_begin();

	FLETCHER_4_AVX512_RESTORE_CTX(ctx);

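	/*
	 * Byte-select masks, replicated across every 64-bit lane:
	 * zmm8 = 0xFF, zmm9 = 0xFF00, zmm10 = 0xFF0000, zmm11 = 0xFF000000.
	 */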
	__asm("vpbroadcastq %0, %%zmm8" :: "r" (byteswap_mask));
	__asm("vpsllq $8, %zmm8, %zmm9");
	__asm("vpsllq $16, %zmm8, %zmm10");
	__asm("vpsllq $24, %zmm8, %zmm11");

	for (; ip < ipend; ip += 8) {
		__asm("vpmovzxdq %0, %%zmm5"::"m" (*ip));

		__asm("vpsrlq $24, %zmm5, %zmm6");
		__asm("vpandd %zmm8, %zmm6, %zmm6");
		__asm("vpsrlq $8, %zmm5, %zmm7");
		__asm("vpandd %zmm9, %zmm7, %zmm7");
		__asm("vpord %zmm6, %zmm7, %zmm4");
		__asm("vpsllq $8, %zmm5, %zmm6");
		__asm("vpandd %zmm10, %zmm6, %zmm6");
		__asm("vpord %zmm6, %zmm4, %zmm4");
		__asm("vpsllq $24, %zmm5, %zmm5");
		__asm("vpandd %zmm11, %zmm5, %zmm5");
		__asm("vpord %zmm5, %zmm4, %zmm4");

		__asm("vpaddq %zmm4, %zmm0, %zmm0");
		__asm("vpaddq %zmm0, %zmm1, %zmm1");
		__asm("vpaddq %zmm1, %zmm2, %zmm2");
		__asm("vpaddq %zmm2, %zmm3, %zmm3");
	}

	FLETCHER_4_AVX512_SAVE_CTX(ctx);

	kfpu_end();
}
STACK_FRAME_NON_STANDARD(fletcher_4_avx512f_byteswap);

static boolean_t
fletcher_4_avx512f_valid(void)
{
	return (kfpu_allowed() && zfs_avx512f_available());
}

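/*
 * Native and byteswapped checksums share the same init and fini hooks;
 * only the compute routines differ.
 */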
const fletcher_4_ops_t fletcher_4_avx512f_ops = {
	.init_native = fletcher_4_avx512f_init,
	.fini_native = fletcher_4_avx512f_fini,
	.compute_native = fletcher_4_avx512f_native,
	.init_byteswap = fletcher_4_avx512f_init,
	.fini_byteswap = fletcher_4_avx512f_fini,
	.compute_byteswap = fletcher_4_avx512f_byteswap,
	.valid = fletcher_4_avx512f_valid,
	.name = "avx512f"
};

#if defined(HAVE_AVX512BW)
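/*
 * With AVX512BW a single vpshufb per vector replaces the shift/mask
 * sequence above: the shuffle mask reverses the byte order of each
 * zero-extended 32-bit word and zeroes the upper half of every 64-bit
 * lane (0xFF selector bytes write zero).
 */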
static void
fletcher_4_avx512bw_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
    uint64_t size)
{
	static const zfs_fletcher_avx512_t mask = {
		.v = { 0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
		0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
		0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
		0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B }
	};
	const uint32_t *ip = buf;
	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);

	kfpu_begin();

	FLETCHER_4_AVX512_RESTORE_CTX(ctx);

	__asm("vmovdqu64 %0, %%zmm5" :: "m" (mask));

	for (; ip < ipend; ip += 8) {
		__asm("vpmovzxdq %0, %%zmm4"::"m" (*ip));

		__asm("vpshufb %zmm5, %zmm4, %zmm4");

		__asm("vpaddq %zmm4, %zmm0, %zmm0");
		__asm("vpaddq %zmm0, %zmm1, %zmm1");
		__asm("vpaddq %zmm1, %zmm2, %zmm2");
		__asm("vpaddq %zmm2, %zmm3, %zmm3");
	}

	FLETCHER_4_AVX512_SAVE_CTX(ctx);

	kfpu_end();
}
STACK_FRAME_NON_STANDARD(fletcher_4_avx512bw_byteswap);

static boolean_t
fletcher_4_avx512bw_valid(void)
{
	return (fletcher_4_avx512f_valid() && zfs_avx512bw_available());
}

const fletcher_4_ops_t fletcher_4_avx512bw_ops = {
	.init_native = fletcher_4_avx512f_init,
	.fini_native = fletcher_4_avx512f_fini,
	.compute_native = fletcher_4_avx512f_native,
	.init_byteswap = fletcher_4_avx512f_init,
	.fini_byteswap = fletcher_4_avx512f_fini,
	.compute_byteswap = fletcher_4_avx512bw_byteswap,
	.valid = fletcher_4_avx512bw_valid,
	.name = "avx512bw"
};
#endif /* defined(HAVE_AVX512BW) */

#endif /* defined(__x86_64) && defined(HAVE_AVX512F) */