xref: /freebsd/sys/contrib/openzfs/module/zcommon/zfs_fletcher_avx512.c (revision 61145dc2b94f12f6a47344fb9aac702321880e43)
1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3  * CDDL HEADER START
4  *
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or https://opensource.org/licenses/CDDL-1.0.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright (C) 2016 Gvozden Nešković. All rights reserved.
24  */
25 
26 #if defined(__x86_64) && defined(HAVE_AVX512F)
27 
28 #include <sys/byteorder.h>
29 #include <sys/frame.h>
30 #include <sys/spa_checksum.h>
31 #include <sys/string.h>
32 #include <sys/simd.h>
33 #include <zfs_fletcher.h>
34 
35 #ifdef __linux__
36 #define	__asm __asm__ __volatile__
37 #endif
38 
39 static void
fletcher_4_avx512f_init(fletcher_4_ctx_t * ctx)40 fletcher_4_avx512f_init(fletcher_4_ctx_t *ctx)
41 {
42 	memset(ctx->avx512, 0, 4 * sizeof (zfs_fletcher_avx512_t));
43 }
44 
45 static void
fletcher_4_avx512f_fini(fletcher_4_ctx_t * ctx,zio_cksum_t * zcp)46 fletcher_4_avx512f_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
47 {
48 	static const uint64_t
49 	CcA[] = {   0,   0,   1,   3,   6,  10,  15,  21 },
50 	CcB[] = {  28,  36,  44,  52,  60,  68,  76,  84 },
51 	DcA[] = {   0,   0,   0,   1,   4,  10,  20,  35 },
52 	DcB[] = {  56,  84, 120, 164, 216, 276, 344, 420 },
53 	DcC[] = { 448, 512, 576, 640, 704, 768, 832, 896 };
54 
55 	uint64_t A, B, C, D;
56 	uint64_t i;
57 
58 	A = ctx->avx512[0].v[0];
59 	B = 8 * ctx->avx512[1].v[0];
60 	C = 64 * ctx->avx512[2].v[0] - CcB[0] * ctx->avx512[1].v[0];
61 	D = 512 * ctx->avx512[3].v[0] - DcC[0] * ctx->avx512[2].v[0] +
62 	    DcB[0] * ctx->avx512[1].v[0];
63 
64 	for (i = 1; i < 8; i++) {
65 		A += ctx->avx512[0].v[i];
66 		B += 8 * ctx->avx512[1].v[i] - i * ctx->avx512[0].v[i];
67 		C += 64 * ctx->avx512[2].v[i] - CcB[i] * ctx->avx512[1].v[i] +
68 		    CcA[i] * ctx->avx512[0].v[i];
69 		D += 512 * ctx->avx512[3].v[i] - DcC[i] * ctx->avx512[2].v[i] +
70 		    DcB[i] * ctx->avx512[1].v[i] - DcA[i] * ctx->avx512[0].v[i];
71 	}
72 
73 	ZIO_SET_CHECKSUM(zcp, A, B, C, D);
74 }
75 
/*
 * Load the four 512-bit accumulator vectors (A, B, C, D — eight 64-bit
 * lanes each) from the context into %zmm0-%zmm3 before a compute pass,
 * and store them back afterwards.  The compute loops below keep all
 * running sums in these registers for the duration of the pass.
 */
#define	FLETCHER_4_AVX512_RESTORE_CTX(ctx)				\
{									\
	__asm("vmovdqu64 %0, %%zmm0" :: "m" ((ctx)->avx512[0]));	\
	__asm("vmovdqu64 %0, %%zmm1" :: "m" ((ctx)->avx512[1]));	\
	__asm("vmovdqu64 %0, %%zmm2" :: "m" ((ctx)->avx512[2]));	\
	__asm("vmovdqu64 %0, %%zmm3" :: "m" ((ctx)->avx512[3]));	\
}

#define	FLETCHER_4_AVX512_SAVE_CTX(ctx)					\
{									\
	__asm("vmovdqu64 %%zmm0, %0" : "=m" ((ctx)->avx512[0]));	\
	__asm("vmovdqu64 %%zmm1, %0" : "=m" ((ctx)->avx512[1]));	\
	__asm("vmovdqu64 %%zmm2, %0" : "=m" ((ctx)->avx512[2]));	\
	__asm("vmovdqu64 %%zmm3, %0" : "=m" ((ctx)->avx512[3]));	\
}
91 
/*
 * Fletcher-4 compute pass, native byte order, eight interleaved lanes.
 * Each iteration zero-extends eight consecutive 32-bit input words to
 * 64-bit lanes and folds them into the running sums held in
 * %zmm0-%zmm3 (restored/saved via the macros above).
 *
 * NOTE(review): the do/while consumes 32 bytes per iteration with no
 * remainder handling, so size is assumed non-zero and a multiple of
 * 32 bytes — confirm against the fletcher-4 framework caller, which
 * must also have enabled kernel FPU/SIMD use around this call.
 */
static void
fletcher_4_avx512f_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
	const uint32_t *ip = buf;
	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);

	FLETCHER_4_AVX512_RESTORE_CTX(ctx);

	do {
		/* Widen 8 x 32-bit input words into 8 x 64-bit lanes. */
		__asm("vpmovzxdq %0, %%zmm4"::"m" (*ip));
		/* Per lane: A += w; B += A; C += B; D += C. */
		__asm("vpaddq %zmm4, %zmm0, %zmm0");
		__asm("vpaddq %zmm0, %zmm1, %zmm1");
		__asm("vpaddq %zmm1, %zmm2, %zmm2");
		__asm("vpaddq %zmm2, %zmm3, %zmm3");
	} while ((ip += 8) < ipend);

	FLETCHER_4_AVX512_SAVE_CTX(ctx);
}
STACK_FRAME_NON_STANDARD(fletcher_4_avx512f_native);
111 
/*
 * Fletcher-4 compute pass for byteswapped input, using only AVX512F
 * (no AVX512BW, hence no vpshufb): each 32-bit word is byte-reversed
 * with shift/and/or sequences before being folded into the running
 * sums in %zmm0-%zmm3.
 *
 * NOTE(review): like the native pass, assumes size is non-zero and a
 * multiple of the 32-byte stride — confirm against the caller.
 */
static void
fletcher_4_avx512f_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
    uint64_t size)
{
	static const uint64_t byteswap_mask = 0xFFULL;
	const uint32_t *ip = buf;
	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);

	FLETCHER_4_AVX512_RESTORE_CTX(ctx);

	/*
	 * Build per-byte masks once, outside the loop:
	 * zmm8 = 0xFF, zmm9 = 0xFF00, zmm10 = 0xFF0000, zmm11 = 0xFF000000.
	 */
	__asm("vpbroadcastq %0, %%zmm8" :: "r" (byteswap_mask));
	__asm("vpsllq $8, %zmm8, %zmm9");
	__asm("vpsllq $16, %zmm8, %zmm10");
	__asm("vpsllq $24, %zmm8, %zmm11");

	do {
		/* Widen 8 x 32-bit input words into 8 x 64-bit lanes. */
		__asm("vpmovzxdq %0, %%zmm5"::"m" (*ip));

		/*
		 * Byte-reverse the low 32 bits of each lane: move byte 3
		 * to byte 0, byte 2 to byte 1, etc., accumulating the
		 * swapped word in %zmm4.
		 */
		__asm("vpsrlq $24, %zmm5, %zmm6");
		__asm("vpandd %zmm8, %zmm6, %zmm6");
		__asm("vpsrlq $8, %zmm5, %zmm7");
		__asm("vpandd %zmm9, %zmm7, %zmm7");
		__asm("vpord %zmm6, %zmm7, %zmm4");
		__asm("vpsllq $8, %zmm5, %zmm6");
		__asm("vpandd %zmm10, %zmm6, %zmm6");
		__asm("vpord %zmm6, %zmm4, %zmm4");
		__asm("vpsllq $24, %zmm5, %zmm5");
		__asm("vpandd %zmm11, %zmm5, %zmm5");
		__asm("vpord %zmm5, %zmm4, %zmm4");

		/* Per lane: A += w; B += A; C += B; D += C. */
		__asm("vpaddq %zmm4, %zmm0, %zmm0");
		__asm("vpaddq %zmm0, %zmm1, %zmm1");
		__asm("vpaddq %zmm1, %zmm2, %zmm2");
		__asm("vpaddq %zmm2, %zmm3, %zmm3");
	} while ((ip += 8) < ipend);

	FLETCHER_4_AVX512_SAVE_CTX(ctx)
}
STACK_FRAME_NON_STANDARD(fletcher_4_avx512f_byteswap);
151 
152 static boolean_t
fletcher_4_avx512f_valid(void)153 fletcher_4_avx512f_valid(void)
154 {
155 	return (kfpu_allowed() && zfs_avx512f_available());
156 }
157 
/*
 * AVX512F ops vector, registered with the fletcher-4 framework.
 * Native and byteswap directions share the init/fini routines and
 * differ only in the compute pass.
 */
const fletcher_4_ops_t fletcher_4_avx512f_ops = {
	.init_native = fletcher_4_avx512f_init,
	.fini_native = fletcher_4_avx512f_fini,
	.compute_native = fletcher_4_avx512f_native,
	.init_byteswap = fletcher_4_avx512f_init,
	.fini_byteswap = fletcher_4_avx512f_fini,
	.compute_byteswap = fletcher_4_avx512f_byteswap,
	.valid = fletcher_4_avx512f_valid,
	.uses_fpu = B_TRUE,
	.name = "avx512f"
};
169 
170 #if defined(HAVE_AVX512BW)
/*
 * Fletcher-4 byteswap compute pass using AVX512BW: a single vpshufb
 * replaces the shift/and/or byte-reversal of the AVX512F variant.
 * The shuffle mask reverses bytes 0-3 (and 8-11) of each 128-bit
 * group while the 0xFF.. entries zero the already-zero high halves of
 * the widened lanes.
 *
 * NOTE(review): same stride assumption as the other passes — size
 * non-zero and a multiple of 32 bytes.
 */
static void
fletcher_4_avx512bw_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
    uint64_t size)
{
	static const zfs_fletcher_avx512_t mask = {
		.v = { 0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
		0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
		0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
		0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B }
	};
	const uint32_t *ip = buf;
	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);

	FLETCHER_4_AVX512_RESTORE_CTX(ctx);

	/* Load the byte-shuffle mask once, outside the loop. */
	__asm("vmovdqu64 %0, %%zmm5" :: "m" (mask));

	do {
		/* Widen 8 x 32-bit input words into 8 x 64-bit lanes. */
		__asm("vpmovzxdq %0, %%zmm4"::"m" (*ip));

		/* Byte-reverse each 32-bit word in place. */
		__asm("vpshufb %zmm5, %zmm4, %zmm4");

		/* Per lane: A += w; B += A; C += B; D += C. */
		__asm("vpaddq %zmm4, %zmm0, %zmm0");
		__asm("vpaddq %zmm0, %zmm1, %zmm1");
		__asm("vpaddq %zmm1, %zmm2, %zmm2");
		__asm("vpaddq %zmm2, %zmm3, %zmm3");
	} while ((ip += 8) < ipend);

	FLETCHER_4_AVX512_SAVE_CTX(ctx)
}
STACK_FRAME_NON_STANDARD(fletcher_4_avx512bw_byteswap);
202 
203 static boolean_t
fletcher_4_avx512bw_valid(void)204 fletcher_4_avx512bw_valid(void)
205 {
206 	return (fletcher_4_avx512f_valid() && zfs_avx512bw_available());
207 }
208 
/*
 * AVX512BW ops vector: identical to the AVX512F vector except for the
 * faster vpshufb-based byteswap compute pass and its validity check.
 */
const fletcher_4_ops_t fletcher_4_avx512bw_ops = {
	.init_native = fletcher_4_avx512f_init,
	.fini_native = fletcher_4_avx512f_fini,
	.compute_native = fletcher_4_avx512f_native,
	.init_byteswap = fletcher_4_avx512f_init,
	.fini_byteswap = fletcher_4_avx512f_fini,
	.compute_byteswap = fletcher_4_avx512bw_byteswap,
	.valid = fletcher_4_avx512bw_valid,
	.uses_fpu = B_TRUE,
	.name = "avx512bw"
};
220 #endif
221 
222 #endif /* defined(__x86_64) && defined(HAVE_AVX512F) */
223