// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2016 Romain Dolbeau. All rights reserved.
 * Copyright (C) 2016 Gvozden Nešković. All rights reserved.
 */

#include <sys/isa_defs.h>

#if defined(__x86_64) && defined(HAVE_AVX512BW)

#include <sys/param.h>
#include <sys/types.h>
#include <sys/simd.h>


#ifdef __linux__
#define	__asm __asm__ __volatile__
#endif

#define	_REG_CNT(_0, _1, _2, _3, _4, _5, _6, _7, N, ...) N
#define	REG_CNT(r...) _REG_CNT(r, 8, 7, 6, 5, 4, 3, 2, 1)

#define	VR0_(REG, ...) "zmm"#REG
#define	VR1_(_1, REG, ...) "zmm"#REG
#define	VR2_(_1, _2, REG, ...) "zmm"#REG
#define	VR3_(_1, _2, _3, REG, ...) "zmm"#REG
#define	VR4_(_1, _2, _3, _4, REG, ...) "zmm"#REG
#define	VR5_(_1, _2, _3, _4, _5, REG, ...) "zmm"#REG
#define	VR6_(_1, _2, _3, _4, _5, _6, REG, ...) "zmm"#REG
#define	VR7_(_1, _2, _3, _4, _5, _6, _7, REG, ...) "zmm"#REG

#define	VR0(r...) VR0_(r)
#define	VR1(r...) VR1_(r)
#define	VR2(r...) VR2_(r, 1)
#define	VR3(r...) VR3_(r, 1, 2)
#define	VR4(r...) VR4_(r, 1, 2)
#define	VR5(r...) VR5_(r, 1, 2, 3)
#define	VR6(r...) VR6_(r, 1, 2, 3, 4)
#define	VR7(r...) VR7_(r, 1, 2, 3, 4, 5)

#define	R_01(REG1, REG2, ...) REG1, REG2
#define	_R_23(_0, _1, REG2, REG3, ...) REG2, REG3
#define	R_23(REG...) _R_23(REG, 1, 2, 3)

#define	ZFS_ASM_BUG() ASSERT(0)

extern const uint8_t gf_clmul_mod_lt[4*256][16];

#define	ELEM_SIZE 64

typedef struct v {
	uint8_t b[ELEM_SIZE] __attribute__((aligned(ELEM_SIZE)));
} v_t;

#define	XOR_ACC(src, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		__asm( \
		    "vpxorq 0x00(%[SRC]), %%" VR0(r)", %%" VR0(r) "\n" \
		    "vpxorq 0x40(%[SRC]), %%" VR1(r)", %%" VR1(r) "\n" \
		    "vpxorq 0x80(%[SRC]), %%" VR2(r)", %%" VR2(r) "\n" \
		    "vpxorq 0xc0(%[SRC]), %%" VR3(r)", %%" VR3(r) "\n" \
		    : : [SRC] "r" (src)); \
		break; \
	case 2: \
		__asm( \
		    "vpxorq 0x00(%[SRC]), %%" VR0(r)", %%" VR0(r) "\n" \
		    "vpxorq 0x40(%[SRC]), %%" VR1(r)", %%" VR1(r) "\n" \
		    : : [SRC] "r" (src)); \
		break; \
	default: \
		ZFS_ASM_BUG(); \
	} \
}

#define	XOR(r...) \
{ \
	switch (REG_CNT(r)) { \
	case 8: \
		__asm( \
		    "vpxorq %" VR0(r) ", %" VR4(r)", %" VR4(r) "\n" \
		    "vpxorq %" VR1(r) ", %" VR5(r)", %" VR5(r) "\n" \
		    "vpxorq %" VR2(r) ", %" VR6(r)", %" VR6(r) "\n" \
		    "vpxorq %" VR3(r) ", %" VR7(r)", %" VR7(r)); \
		break; \
	case 4: \
		__asm( \
		    "vpxorq %" VR0(r) ", %" VR2(r)", %" VR2(r) "\n" \
		    "vpxorq %" VR1(r) ", %" VR3(r)", %" VR3(r)); \
		break; \
	default: \
		ZFS_ASM_BUG(); \
	} \
}

#define	ZERO(r...) XOR(r, r)

#define	COPY(r...) \
{ \
	switch (REG_CNT(r)) { \
	case 8: \
		__asm( \
		    "vmovdqa64 %" VR0(r) ", %" VR4(r) "\n" \
		    "vmovdqa64 %" VR1(r) ", %" VR5(r) "\n" \
		    "vmovdqa64 %" VR2(r) ", %" VR6(r) "\n" \
		    "vmovdqa64 %" VR3(r) ", %" VR7(r)); \
		break; \
	case 4: \
		__asm( \
		    "vmovdqa64 %" VR0(r) ", %" VR2(r) "\n" \
		    "vmovdqa64 %" VR1(r) ", %" VR3(r)); \
		break; \
	default: \
		ZFS_ASM_BUG(); \
	} \
}

#define	LOAD(src, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		__asm( \
		    "vmovdqa64 0x00(%[SRC]), %%" VR0(r) "\n" \
		    "vmovdqa64 0x40(%[SRC]), %%" VR1(r) "\n" \
		    "vmovdqa64 0x80(%[SRC]), %%" VR2(r) "\n" \
		    "vmovdqa64 0xc0(%[SRC]), %%" VR3(r) "\n" \
		    : : [SRC] "r" (src)); \
		break; \
	case 2: \
		__asm( \
		    "vmovdqa64 0x00(%[SRC]), %%" VR0(r) "\n" \
		    "vmovdqa64 0x40(%[SRC]), %%" VR1(r) "\n" \
		    : : [SRC] "r" (src)); \
		break; \
	default: \
		ZFS_ASM_BUG(); \
	} \
}

#define	STORE(dst, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		__asm( \
		    "vmovdqa64 %%" VR0(r) ", 0x00(%[DST])\n" \
		    "vmovdqa64 %%" VR1(r) ", 0x40(%[DST])\n" \
		    "vmovdqa64 %%" VR2(r) ", 0x80(%[DST])\n" \
		    "vmovdqa64 %%" VR3(r) ", 0xc0(%[DST])\n" \
		    : : [DST] "r" (dst)); \
		break; \
	case 2: \
		__asm( \
		    "vmovdqa64 %%" VR0(r) ", 0x00(%[DST])\n" \
		    "vmovdqa64 %%" VR1(r) ", 0x40(%[DST])\n" \
		    : : [DST] "r" (dst)); \
		break; \
	default: \
		ZFS_ASM_BUG(); \
	} \
}
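
/*
 * MUL2_SETUP() and _MUL2() below implement multiplication by 2 in GF(2^8)
 * with the RAIDZ generator polynomial 0x11d: shift every byte left by one
 * and, where the old high bit was set, XOR in the reduction constant 0x1d.
 * A scalar model of the per-byte operation (an illustration, not part of
 * the build):
 *
 *	static inline uint8_t
 *	gf_mul2_scalar(uint8_t a)
 *	{
 *		return ((uint8_t)(a << 1) ^ ((a & 0x80) ? 0x1d : 0));
 *	}
 *
 * The SIMD version does the same 64 bytes at a time: vpcmpb with predicate
 * 1 (signed less-than against the zero in %zmm23, i.e. high bit set) builds
 * write masks in %k1/%k2, vpaddb doubles the bytes, vpxord prepares copies
 * corrected with the broadcast 0x1d constant in %zmm22, and the masked
 * vmovdqu8 merges the corrected bytes back in.
 */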

#define	MUL2_SETUP() \
{ \
	__asm("vmovq %0, %%xmm22" :: "r"(0x1d1d1d1d1d1d1d1d)); \
	__asm("vpbroadcastq %xmm22, %zmm22"); \
	__asm("vpxord %zmm23, %zmm23, %zmm23"); \
}

#define	_MUL2(r...) \
{ \
	switch (REG_CNT(r)) { \
	case 2: \
		__asm( \
		    "vpcmpb $1, %zmm23, %" VR0(r)", %k1\n" \
		    "vpcmpb $1, %zmm23, %" VR1(r)", %k2\n" \
		    "vpaddb %" VR0(r)", %" VR0(r)", %" VR0(r) "\n" \
		    "vpaddb %" VR1(r)", %" VR1(r)", %" VR1(r) "\n" \
		    "vpxord %zmm22, %" VR0(r)", %zmm12\n" \
		    "vpxord %zmm22, %" VR1(r)", %zmm13\n" \
		    "vmovdqu8 %zmm12, %" VR0(r) "{%k1}\n" \
		    "vmovdqu8 %zmm13, %" VR1(r) "{%k2}"); \
		break; \
	default: \
		ZFS_ASM_BUG(); \
	} \
}

#define	MUL2(r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		_MUL2(R_01(r)); \
		_MUL2(R_23(r)); \
		break; \
	case 2: \
		_MUL2(r); \
		break; \
	default: \
		ZFS_ASM_BUG(); \
	} \
}

#define	MUL4(r...) \
{ \
	MUL2(r); \
	MUL2(r); \
}
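
/*
 * Multiplication by an arbitrary constant c (MUL()/_MULx2() below) uses a
 * split-nibble table scheme: rows 4*c .. 4*c+3 of gf_clmul_mod_lt hold four
 * 16-entry lookup tables for constant c, indexed by the high and low nibble
 * of each source byte, so that, conceptually,
 *
 *	const uint8_t (*lt)[16] = &gf_clmul_mod_lt[4 * c];
 *	res = lt[0][a >> 4] ^ lt[1][a >> 4] ^ lt[2][a & 0xf] ^ lt[3][a & 0xf];
 *
 * (a sketch derived from the asm below, illustration only). The lookups are
 * done with vpshufb, which shuffles within 128-bit lanes, so each 16-byte
 * table is first replicated to all four lanes with vbroadcasti32x4.
 */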

#define	_0f		"zmm15"
#define	_as		"zmm14"
#define	_bs		"zmm13"
#define	_ltmod		"zmm12"
#define	_ltmul		"zmm11"
#define	_ta		"zmm10"
#define	_tb		"zmm15"

static const uint8_t __attribute__((aligned(64))) _mul_mask = 0x0F;

#define	_MULx2(c, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 2: \
		__asm( \
		    "vpbroadcastb (%[mask]), %%" _0f "\n" \
		    /* upper bits */ \
		    "vbroadcasti32x4 0x00(%[lt]), %%" _ltmod "\n" \
		    "vbroadcasti32x4 0x10(%[lt]), %%" _ltmul "\n" \
		    \
		    "vpsraw $0x4, %%" VR0(r) ", %%" _as "\n" \
		    "vpsraw $0x4, %%" VR1(r) ", %%" _bs "\n" \
		    "vpandq %%" _0f ", %%" VR0(r) ", %%" VR0(r) "\n" \
		    "vpandq %%" _0f ", %%" VR1(r) ", %%" VR1(r) "\n" \
		    "vpandq %%" _0f ", %%" _as ", %%" _as "\n" \
		    "vpandq %%" _0f ", %%" _bs ", %%" _bs "\n" \
		    \
		    "vpshufb %%" _as ", %%" _ltmod ", %%" _ta "\n" \
		    "vpshufb %%" _bs ", %%" _ltmod ", %%" _tb "\n" \
		    "vpshufb %%" _as ", %%" _ltmul ", %%" _as "\n" \
		    "vpshufb %%" _bs ", %%" _ltmul ", %%" _bs "\n" \
		    /* lower bits */ \
		    "vbroadcasti32x4 0x20(%[lt]), %%" _ltmod "\n" \
		    "vbroadcasti32x4 0x30(%[lt]), %%" _ltmul "\n" \
		    \
		    "vpxorq %%" _ta ", %%" _as ", %%" _as "\n" \
		    "vpxorq %%" _tb ", %%" _bs ", %%" _bs "\n" \
		    \
		    "vpshufb %%" VR0(r) ", %%" _ltmod ", %%" _ta "\n" \
		    "vpshufb %%" VR1(r) ", %%" _ltmod ", %%" _tb "\n" \
		    "vpshufb %%" VR0(r) ", %%" _ltmul ", %%" VR0(r) "\n" \
		    "vpshufb %%" VR1(r) ", %%" _ltmul ", %%" VR1(r) "\n" \
		    \
		    "vpxorq %%" _ta ", %%" VR0(r) ", %%" VR0(r) "\n" \
		    "vpxorq %%" _as ", %%" VR0(r) ", %%" VR0(r) "\n" \
		    "vpxorq %%" _tb ", %%" VR1(r) ", %%" VR1(r) "\n" \
		    "vpxorq %%" _bs ", %%" VR1(r) ", %%" VR1(r) "\n" \
		    : : [mask] "r" (&_mul_mask), \
		    [lt] "r" (gf_clmul_mod_lt[4*(c)])); \
		break; \
	default: \
		ZFS_ASM_BUG(); \
	} \
}

#define	MUL(c, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		_MULx2(c, R_01(r)); \
		_MULx2(c, R_23(r)); \
		break; \
	case 2: \
		_MULx2(c, R_01(r)); \
		break; \
	default: \
		ZFS_ASM_BUG(); \
	} \
}

#define	raidz_math_begin()	kfpu_begin()
#define	raidz_math_end()	kfpu_end()

/*
 * ZERO, COPY, and MUL operations are already 2x unrolled, which means that
 * the stride of these operations for avx512 must not exceed 4. Otherwise, a
 * single step would exceed the 512B block size.
 */

#define	SYN_STRIDE	4

#define	ZERO_STRIDE	4
#define	ZERO_DEFINE()	{}
#define	ZERO_D		0, 1, 2, 3

#define	COPY_STRIDE	4
#define	COPY_DEFINE()	{}
#define	COPY_D		0, 1, 2, 3

#define	ADD_STRIDE	4
#define	ADD_DEFINE()	{}
#define	ADD_D		0, 1, 2, 3

#define	MUL_STRIDE	4
#define	MUL_DEFINE()	{}
#define	MUL_D		0, 1, 2, 3

#define	GEN_P_STRIDE	4
#define	GEN_P_DEFINE()	{}
#define	GEN_P_P		0, 1, 2, 3

#define	GEN_PQ_STRIDE	4
#define	GEN_PQ_DEFINE()	{}
#define	GEN_PQ_D	0, 1, 2, 3
#define	GEN_PQ_C	4, 5, 6, 7

#define	GEN_PQR_STRIDE	4
#define	GEN_PQR_DEFINE()	{}
#define	GEN_PQR_D	0, 1, 2, 3
#define	GEN_PQR_C	4, 5, 6, 7

#define	SYN_Q_DEFINE()	{}
#define	SYN_Q_D		0, 1, 2, 3
#define	SYN_Q_X		4, 5, 6, 7

#define	SYN_R_DEFINE()	{}
#define	SYN_R_D		0, 1, 2, 3
#define	SYN_R_X		4, 5, 6, 7

#define	SYN_PQ_DEFINE()	{}
#define	SYN_PQ_D	0, 1, 2, 3
#define	SYN_PQ_X	4, 5, 6, 7

#define	REC_PQ_STRIDE	2
#define	REC_PQ_DEFINE()	{}
#define	REC_PQ_X	0, 1
#define	REC_PQ_Y	2, 3
#define	REC_PQ_T	4, 5

#define	SYN_PR_DEFINE()	{}
#define	SYN_PR_D	0, 1, 2, 3
#define	SYN_PR_X	4, 5, 6, 7

#define	REC_PR_STRIDE	2
#define	REC_PR_DEFINE()	{}
#define	REC_PR_X	0, 1
#define	REC_PR_Y	2, 3
#define	REC_PR_T	4, 5

#define	SYN_QR_DEFINE()	{}
#define	SYN_QR_D	0, 1, 2, 3
#define	SYN_QR_X	4, 5, 6, 7

#define	REC_QR_STRIDE	2
#define	REC_QR_DEFINE()	{}
#define	REC_QR_X	0, 1
#define	REC_QR_Y	2, 3
#define	REC_QR_T	4, 5

#define	SYN_PQR_DEFINE()	{}
#define	SYN_PQR_D	0, 1, 2, 3
#define	SYN_PQR_X	4, 5, 6, 7

#define	REC_PQR_STRIDE	2
#define	REC_PQR_DEFINE()	{}
#define	REC_PQR_X	0, 1
#define	REC_PQR_Y	2, 3
#define	REC_PQR_Z	4, 5
#define	REC_PQR_XS	6, 7
#define	REC_PQR_YS	8, 9
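
/*
 * Register allocation sketch: the REC_* kernels above use stride 2 so that
 * their register groups (up to five for REC_PQR: X, Y, Z, XS, YS) fit in
 * %zmm0-%zmm9, leaving %zmm10-%zmm15 free for the _MULx2() temporaries and
 * %zmm22/%zmm23 for the constants set up by MUL2_SETUP().
 */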

#include <sys/vdev_raidz_impl.h>
#include "vdev_raidz_math_impl.h"

DEFINE_GEN_METHODS(avx512bw);
DEFINE_REC_METHODS(avx512bw);

static boolean_t
raidz_will_avx512bw_work(void)
{
	return (kfpu_allowed() && zfs_avx_available() &&
	    zfs_avx512f_available() && zfs_avx512bw_available());
}

const raidz_impl_ops_t vdev_raidz_avx512bw_impl = {
	.init = NULL,
	.fini = NULL,
	.gen = RAIDZ_GEN_METHODS(avx512bw),
	.rec = RAIDZ_REC_METHODS(avx512bw),
	.is_supported = &raidz_will_avx512bw_work,
	.name = "avx512bw"
};

#endif /* defined(__x86_64) && defined(HAVE_AVX512BW) */
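
/*
 * Usage note: this ops table is consumed by the generic RAIDZ math
 * dispatcher (vdev_raidz_math.c), which checks is_supported() and
 * benchmarks the available implementations to pick one at runtime.
 */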