1 // SPDX-License-Identifier: CDDL-1.0 2 /* 3 * CDDL HEADER START 4 * 5 * The contents of this file are subject to the terms of the 6 * Common Development and Distribution License (the "License"). 7 * You may not use this file except in compliance with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or https://opensource.org/licenses/CDDL-1.0. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright (C) 2016 Romain Dolbeau. All rights reserved. 24 */ 25 26 #include <sys/types.h> 27 #include <sys/simd.h> 28 29 #ifdef __linux__ 30 #define __asm __asm__ __volatile__ 31 #endif 32 33 #define _REG_CNT(_0, _1, _2, _3, _4, _5, _6, _7, N, ...) N 34 #define REG_CNT(r...) _REG_CNT(r, 8, 7, 6, 5, 4, 3, 2, 1) 35 36 #define VR0_(REG, ...) "%[w"#REG"]" 37 #define VR1_(_1, REG, ...) "%[w"#REG"]" 38 #define VR2_(_1, _2, REG, ...) "%[w"#REG"]" 39 #define VR3_(_1, _2, _3, REG, ...) "%[w"#REG"]" 40 #define VR4_(_1, _2, _3, _4, REG, ...) "%[w"#REG"]" 41 #define VR5_(_1, _2, _3, _4, _5, REG, ...) "%[w"#REG"]" 42 #define VR6_(_1, _2, _3, _4, _5, _6, REG, ...) "%[w"#REG"]" 43 #define VR7_(_1, _2, _3, _4, _5, _6, _7, REG, ...) "%[w"#REG"]" 44 45 /* 46 * Here we need registers not used otherwise. 47 * They will be used in unused ASM for the case 48 * with more registers than required... but GCC 49 * will still need to make sure the constraints 50 * are correct, and duplicate constraints are illegal 51 * ... and we use the "register" number as a name 52 */ 53 54 #define VR0(r...) VR0_(r) 55 #define VR1(r...) VR1_(r) 56 #define VR2(r...) VR2_(r, 36) 57 #define VR3(r...) VR3_(r, 36, 35) 58 #define VR4(r...) VR4_(r, 36, 35, 34, 33) 59 #define VR5(r...) VR5_(r, 36, 35, 34, 33, 32) 60 #define VR6(r...) VR6_(r, 36, 35, 34, 33, 32, 31) 61 #define VR7(r...) VR7_(r, 36, 35, 34, 33, 32, 31, 30) 62 63 #define VR(X) "%[w"#X"]" 64 65 #define RVR0_(REG, ...) [w##REG] "w" (w##REG) 66 #define RVR1_(_1, REG, ...) [w##REG] "w" (w##REG) 67 #define RVR2_(_1, _2, REG, ...) [w##REG] "w" (w##REG) 68 #define RVR3_(_1, _2, _3, REG, ...) [w##REG] "w" (w##REG) 69 #define RVR4_(_1, _2, _3, _4, REG, ...) [w##REG] "w" (w##REG) 70 #define RVR5_(_1, _2, _3, _4, _5, REG, ...) [w##REG] "w" (w##REG) 71 #define RVR6_(_1, _2, _3, _4, _5, _6, REG, ...) [w##REG] "w" (w##REG) 72 #define RVR7_(_1, _2, _3, _4, _5, _6, _7, REG, ...) [w##REG] "w" (w##REG) 73 74 #define RVR0(r...) RVR0_(r) 75 #define RVR1(r...) RVR1_(r) 76 #define RVR2(r...) RVR2_(r, 36) 77 #define RVR3(r...) RVR3_(r, 36, 35) 78 #define RVR4(r...) RVR4_(r, 36, 35, 34, 33) 79 #define RVR5(r...) RVR5_(r, 36, 35, 34, 33, 32) 80 #define RVR6(r...) RVR6_(r, 36, 35, 34, 33, 32, 31) 81 #define RVR7(r...) RVR7_(r, 36, 35, 34, 33, 32, 31, 30) 82 83 #define RVR(X) [w##X] "w" (w##X) 84 85 #define WVR0_(REG, ...) [w##REG] "=w" (w##REG) 86 #define WVR1_(_1, REG, ...) [w##REG] "=w" (w##REG) 87 #define WVR2_(_1, _2, REG, ...) [w##REG] "=w" (w##REG) 88 #define WVR3_(_1, _2, _3, REG, ...) [w##REG] "=w" (w##REG) 89 #define WVR4_(_1, _2, _3, _4, REG, ...) [w##REG] "=w" (w##REG) 90 #define WVR5_(_1, _2, _3, _4, _5, REG, ...) [w##REG] "=w" (w##REG) 91 #define WVR6_(_1, _2, _3, _4, _5, _6, REG, ...) [w##REG] "=w" (w##REG) 92 #define WVR7_(_1, _2, _3, _4, _5, _6, _7, REG, ...) [w##REG] "=w" (w##REG) 93 94 #define WVR0(r...) WVR0_(r) 95 #define WVR1(r...) WVR1_(r) 96 #define WVR2(r...) WVR2_(r, 36) 97 #define WVR3(r...) WVR3_(r, 36, 35) 98 #define WVR4(r...) WVR4_(r, 36, 35, 34, 33) 99 #define WVR5(r...) WVR5_(r, 36, 35, 34, 33, 32) 100 #define WVR6(r...) WVR6_(r, 36, 35, 34, 33, 32, 31) 101 #define WVR7(r...) WVR7_(r, 36, 35, 34, 33, 32, 31, 30) 102 103 #define WVR(X) [w##X] "=w" (w##X) 104 105 #define UVR0_(REG, ...) [w##REG] "+&w" (w##REG) 106 #define UVR1_(_1, REG, ...) [w##REG] "+&w" (w##REG) 107 #define UVR2_(_1, _2, REG, ...) [w##REG] "+&w" (w##REG) 108 #define UVR3_(_1, _2, _3, REG, ...) [w##REG] "+&w" (w##REG) 109 #define UVR4_(_1, _2, _3, _4, REG, ...) [w##REG] "+&w" (w##REG) 110 #define UVR5_(_1, _2, _3, _4, _5, REG, ...) [w##REG] "+&w" (w##REG) 111 #define UVR6_(_1, _2, _3, _4, _5, _6, REG, ...) [w##REG] "+&w" (w##REG) 112 #define UVR7_(_1, _2, _3, _4, _5, _6, _7, REG, ...) [w##REG] "+&w" (w##REG) 113 114 #define UVR0(r...) UVR0_(r) 115 #define UVR1(r...) UVR1_(r) 116 #define UVR2(r...) UVR2_(r, 36) 117 #define UVR3(r...) UVR3_(r, 36, 35) 118 #define UVR4(r...) UVR4_(r, 36, 35, 34, 33) 119 #define UVR5(r...) UVR5_(r, 36, 35, 34, 33, 32) 120 #define UVR6(r...) UVR6_(r, 36, 35, 34, 33, 32, 31) 121 #define UVR7(r...) UVR7_(r, 36, 35, 34, 33, 32, 31, 30) 122 123 #define UVR(X) [w##X] "+&w" (w##X) 124 125 #define R_01(REG1, REG2, ...) REG1, REG2 126 #define _R_23(_0, _1, REG2, REG3, ...) REG2, REG3 127 #define R_23(REG...) _R_23(REG, 1, 2, 3) 128 129 #define ZFS_ASM_BUG() ASSERT(0) 130 131 #define OFFSET(ptr, val) (((unsigned char *)(ptr))+val) 132 133 extern const uint8_t gf_clmul_mod_lt[4*256][16]; 134 135 #define ELEM_SIZE 16 136 137 typedef struct v { 138 uint8_t b[ELEM_SIZE] __attribute__((aligned(ELEM_SIZE))); 139 } v_t; 140 141 #define XOR_ACC(src, r...) \ 142 { \ 143 switch (REG_CNT(r)) { \ 144 case 8: \ 145 __asm( \ 146 "ld1 { v21.4s },%[SRC0]\n" \ 147 "ld1 { v20.4s },%[SRC1]\n" \ 148 "ld1 { v19.4s },%[SRC2]\n" \ 149 "ld1 { v18.4s },%[SRC3]\n" \ 150 "eor " VR0(r) ".16b," VR0(r) ".16b,v21.16b\n" \ 151 "eor " VR1(r) ".16b," VR1(r) ".16b,v20.16b\n" \ 152 "eor " VR2(r) ".16b," VR2(r) ".16b,v19.16b\n" \ 153 "eor " VR3(r) ".16b," VR3(r) ".16b,v18.16b\n" \ 154 "ld1 { v21.4s },%[SRC4]\n" \ 155 "ld1 { v20.4s },%[SRC5]\n" \ 156 "ld1 { v19.4s },%[SRC6]\n" \ 157 "ld1 { v18.4s },%[SRC7]\n" \ 158 "eor " VR4(r) ".16b," VR4(r) ".16b,v21.16b\n" \ 159 "eor " VR5(r) ".16b," VR5(r) ".16b,v20.16b\n" \ 160 "eor " VR6(r) ".16b," VR6(r) ".16b,v19.16b\n" \ 161 "eor " VR7(r) ".16b," VR7(r) ".16b,v18.16b\n" \ 162 : UVR0(r), UVR1(r), UVR2(r), UVR3(r), \ 163 UVR4(r), UVR5(r), UVR6(r), UVR7(r) \ 164 : [SRC0] "Q" (*(OFFSET(src, 0))), \ 165 [SRC1] "Q" (*(OFFSET(src, 16))), \ 166 [SRC2] "Q" (*(OFFSET(src, 32))), \ 167 [SRC3] "Q" (*(OFFSET(src, 48))), \ 168 [SRC4] "Q" (*(OFFSET(src, 64))), \ 169 [SRC5] "Q" (*(OFFSET(src, 80))), \ 170 [SRC6] "Q" (*(OFFSET(src, 96))), \ 171 [SRC7] "Q" (*(OFFSET(src, 112))) \ 172 : "v18", "v19", "v20", "v21"); \ 173 break; \ 174 case 4: \ 175 __asm( \ 176 "ld1 { v21.4s },%[SRC0]\n" \ 177 "ld1 { v20.4s },%[SRC1]\n" \ 178 "ld1 { v19.4s },%[SRC2]\n" \ 179 "ld1 { v18.4s },%[SRC3]\n" \ 180 "eor " VR0(r) ".16b," VR0(r) ".16b,v21.16b\n" \ 181 "eor " VR1(r) ".16b," VR1(r) ".16b,v20.16b\n" \ 182 "eor " VR2(r) ".16b," VR2(r) ".16b,v19.16b\n" \ 183 "eor " VR3(r) ".16b," VR3(r) ".16b,v18.16b\n" \ 184 : UVR0(r), UVR1(r), UVR2(r), UVR3(r) \ 185 : [SRC0] "Q" (*(OFFSET(src, 0))), \ 186 [SRC1] "Q" (*(OFFSET(src, 16))), \ 187 [SRC2] "Q" (*(OFFSET(src, 32))), \ 188 [SRC3] "Q" (*(OFFSET(src, 48))) \ 189 : "v18", "v19", "v20", "v21"); \ 190 break; \ 191 case 2: \ 192 __asm( \ 193 "ld1 { v21.4s },%[SRC0]\n" \ 194 "ld1 { v20.4s },%[SRC1]\n" \ 195 "eor " VR0(r) ".16b," VR0(r) ".16b,v21.16b\n" \ 196 "eor " VR1(r) ".16b," VR1(r) ".16b,v20.16b\n" \ 197 : UVR0(r), UVR1(r) \ 198 : [SRC0] "Q" (*(OFFSET(src, 0))), \ 199 [SRC1] "Q" (*(OFFSET(src, 16))) \ 200 : "v20", "v21"); \ 201 break; \ 202 default: \ 203 ZFS_ASM_BUG(); \ 204 } \ 205 } 206 207 #define XOR(r...) \ 208 { \ 209 switch (REG_CNT(r)) { \ 210 case 8: \ 211 __asm( \ 212 "eor " VR4(r) ".16b," VR4(r) ".16b," VR0(r) ".16b\n" \ 213 "eor " VR5(r) ".16b," VR5(r) ".16b," VR1(r) ".16b\n" \ 214 "eor " VR6(r) ".16b," VR6(r) ".16b," VR2(r) ".16b\n" \ 215 "eor " VR7(r) ".16b," VR7(r) ".16b," VR3(r) ".16b\n" \ 216 : UVR4(r), UVR5(r), UVR6(r), UVR7(r) \ 217 : RVR0(r), RVR1(r), RVR2(r), RVR3(r)); \ 218 break; \ 219 case 4: \ 220 __asm( \ 221 "eor " VR2(r) ".16b," VR2(r) ".16b," VR0(r) ".16b\n" \ 222 "eor " VR3(r) ".16b," VR3(r) ".16b," VR1(r) ".16b\n" \ 223 : UVR2(r), UVR3(r) \ 224 : RVR0(r), RVR1(r)); \ 225 break; \ 226 default: \ 227 ZFS_ASM_BUG(); \ 228 } \ 229 } 230 231 #define ZERO(r...) \ 232 { \ 233 switch (REG_CNT(r)) { \ 234 case 8: \ 235 __asm( \ 236 "eor " VR0(r) ".16b," VR0(r) ".16b," VR0(r) ".16b\n" \ 237 "eor " VR1(r) ".16b," VR1(r) ".16b," VR1(r) ".16b\n" \ 238 "eor " VR2(r) ".16b," VR2(r) ".16b," VR2(r) ".16b\n" \ 239 "eor " VR3(r) ".16b," VR3(r) ".16b," VR3(r) ".16b\n" \ 240 "eor " VR4(r) ".16b," VR4(r) ".16b," VR4(r) ".16b\n" \ 241 "eor " VR5(r) ".16b," VR5(r) ".16b," VR5(r) ".16b\n" \ 242 "eor " VR6(r) ".16b," VR6(r) ".16b," VR6(r) ".16b\n" \ 243 "eor " VR7(r) ".16b," VR7(r) ".16b," VR7(r) ".16b\n" \ 244 : WVR0(r), WVR1(r), WVR2(r), WVR3(r), \ 245 WVR4(r), WVR5(r), WVR6(r), WVR7(r)); \ 246 break; \ 247 case 4: \ 248 __asm( \ 249 "eor " VR0(r) ".16b," VR0(r) ".16b," VR0(r) ".16b\n" \ 250 "eor " VR1(r) ".16b," VR1(r) ".16b," VR1(r) ".16b\n" \ 251 "eor " VR2(r) ".16b," VR2(r) ".16b," VR2(r) ".16b\n" \ 252 "eor " VR3(r) ".16b," VR3(r) ".16b," VR3(r) ".16b\n" \ 253 : WVR0(r), WVR1(r), WVR2(r), WVR3(r)); \ 254 break; \ 255 case 2: \ 256 __asm( \ 257 "eor " VR0(r) ".16b," VR0(r) ".16b," VR0(r) ".16b\n" \ 258 "eor " VR1(r) ".16b," VR1(r) ".16b," VR1(r) ".16b\n" \ 259 : WVR0(r), WVR1(r)); \ 260 break; \ 261 default: \ 262 ZFS_ASM_BUG(); \ 263 } \ 264 } 265 266 #define COPY(r...) \ 267 { \ 268 switch (REG_CNT(r)) { \ 269 case 8: \ 270 __asm( \ 271 "mov " VR4(r) ".16b," VR0(r) ".16b\n" \ 272 "mov " VR5(r) ".16b," VR1(r) ".16b\n" \ 273 "mov " VR6(r) ".16b," VR2(r) ".16b\n" \ 274 "mov " VR7(r) ".16b," VR3(r) ".16b\n" \ 275 : WVR4(r), WVR5(r), WVR6(r), WVR7(r) \ 276 : RVR0(r), RVR1(r), RVR2(r), RVR3(r)); \ 277 break; \ 278 case 4: \ 279 __asm( \ 280 "mov " VR2(r) ".16b," VR0(r) ".16b\n" \ 281 "mov " VR3(r) ".16b," VR1(r) ".16b\n" \ 282 : WVR2(r), WVR3(r) \ 283 : RVR0(r), RVR1(r)); \ 284 break; \ 285 default: \ 286 ZFS_ASM_BUG(); \ 287 } \ 288 } 289 290 #define LOAD(src, r...) \ 291 { \ 292 switch (REG_CNT(r)) { \ 293 case 8: \ 294 __asm( \ 295 "ld1 { " VR0(r) ".4s },%[SRC0]\n" \ 296 "ld1 { " VR1(r) ".4s },%[SRC1]\n" \ 297 "ld1 { " VR2(r) ".4s },%[SRC2]\n" \ 298 "ld1 { " VR3(r) ".4s },%[SRC3]\n" \ 299 "ld1 { " VR4(r) ".4s },%[SRC4]\n" \ 300 "ld1 { " VR5(r) ".4s },%[SRC5]\n" \ 301 "ld1 { " VR6(r) ".4s },%[SRC6]\n" \ 302 "ld1 { " VR7(r) ".4s },%[SRC7]\n" \ 303 : WVR0(r), WVR1(r), WVR2(r), WVR3(r), \ 304 WVR4(r), WVR5(r), WVR6(r), WVR7(r) \ 305 : [SRC0] "Q" (*(OFFSET(src, 0))), \ 306 [SRC1] "Q" (*(OFFSET(src, 16))), \ 307 [SRC2] "Q" (*(OFFSET(src, 32))), \ 308 [SRC3] "Q" (*(OFFSET(src, 48))), \ 309 [SRC4] "Q" (*(OFFSET(src, 64))), \ 310 [SRC5] "Q" (*(OFFSET(src, 80))), \ 311 [SRC6] "Q" (*(OFFSET(src, 96))), \ 312 [SRC7] "Q" (*(OFFSET(src, 112)))); \ 313 break; \ 314 case 4: \ 315 __asm( \ 316 "ld1 { " VR0(r) ".4s },%[SRC0]\n" \ 317 "ld1 { " VR1(r) ".4s },%[SRC1]\n" \ 318 "ld1 { " VR2(r) ".4s },%[SRC2]\n" \ 319 "ld1 { " VR3(r) ".4s },%[SRC3]\n" \ 320 : WVR0(r), WVR1(r), WVR2(r), WVR3(r) \ 321 : [SRC0] "Q" (*(OFFSET(src, 0))), \ 322 [SRC1] "Q" (*(OFFSET(src, 16))), \ 323 [SRC2] "Q" (*(OFFSET(src, 32))), \ 324 [SRC3] "Q" (*(OFFSET(src, 48)))); \ 325 break; \ 326 case 2: \ 327 __asm( \ 328 "ld1 { " VR0(r) ".4s },%[SRC0]\n" \ 329 "ld1 { " VR1(r) ".4s },%[SRC1]\n" \ 330 : WVR0(r), WVR1(r) \ 331 : [SRC0] "Q" (*(OFFSET(src, 0))), \ 332 [SRC1] "Q" (*(OFFSET(src, 16)))); \ 333 break; \ 334 default: \ 335 ZFS_ASM_BUG(); \ 336 } \ 337 } 338 339 #define STORE(dst, r...) \ 340 { \ 341 switch (REG_CNT(r)) { \ 342 case 8: \ 343 __asm( \ 344 "st1 { " VR0(r) ".4s },%[DST0]\n" \ 345 "st1 { " VR1(r) ".4s },%[DST1]\n" \ 346 "st1 { " VR2(r) ".4s },%[DST2]\n" \ 347 "st1 { " VR3(r) ".4s },%[DST3]\n" \ 348 "st1 { " VR4(r) ".4s },%[DST4]\n" \ 349 "st1 { " VR5(r) ".4s },%[DST5]\n" \ 350 "st1 { " VR6(r) ".4s },%[DST6]\n" \ 351 "st1 { " VR7(r) ".4s },%[DST7]\n" \ 352 : [DST0] "=Q" (*(OFFSET(dst, 0))), \ 353 [DST1] "=Q" (*(OFFSET(dst, 16))), \ 354 [DST2] "=Q" (*(OFFSET(dst, 32))), \ 355 [DST3] "=Q" (*(OFFSET(dst, 48))), \ 356 [DST4] "=Q" (*(OFFSET(dst, 64))), \ 357 [DST5] "=Q" (*(OFFSET(dst, 80))), \ 358 [DST6] "=Q" (*(OFFSET(dst, 96))), \ 359 [DST7] "=Q" (*(OFFSET(dst, 112))) \ 360 : RVR0(r), RVR1(r), RVR2(r), RVR3(r), \ 361 RVR4(r), RVR5(r), RVR6(r), RVR7(r)); \ 362 break; \ 363 case 4: \ 364 __asm( \ 365 "st1 { " VR0(r) ".4s },%[DST0]\n" \ 366 "st1 { " VR1(r) ".4s },%[DST1]\n" \ 367 "st1 { " VR2(r) ".4s },%[DST2]\n" \ 368 "st1 { " VR3(r) ".4s },%[DST3]\n" \ 369 : [DST0] "=Q" (*(OFFSET(dst, 0))), \ 370 [DST1] "=Q" (*(OFFSET(dst, 16))), \ 371 [DST2] "=Q" (*(OFFSET(dst, 32))), \ 372 [DST3] "=Q" (*(OFFSET(dst, 48))) \ 373 : RVR0(r), RVR1(r), RVR2(r), RVR3(r)); \ 374 break; \ 375 case 2: \ 376 __asm( \ 377 "st1 { " VR0(r) ".4s },%[DST0]\n" \ 378 "st1 { " VR1(r) ".4s },%[DST1]\n" \ 379 : [DST0] "=Q" (*(OFFSET(dst, 0))), \ 380 [DST1] "=Q" (*(OFFSET(dst, 16))) \ 381 : RVR0(r), RVR1(r)); \ 382 break; \ 383 default: \ 384 ZFS_ASM_BUG(); \ 385 } \ 386 } 387 388 /* 389 * Unfortunately cannot use the macro, because GCC 390 * will try to use the macro name and not value 391 * later on... 392 * Kept as a reference to what a numbered variable is 393 */ 394 #define _00 "v17" 395 #define _1d "v16" 396 #define _temp0 "v19" 397 #define _temp1 "v18" 398 399 #define MUL2_SETUP() \ 400 { \ 401 __asm( \ 402 "eor " VR(17) ".16b," VR(17) ".16b," VR(17) ".16b\n" \ 403 "movi " VR(16) ".16b,#0x1d\n" \ 404 : WVR(16), WVR(17)); \ 405 } 406 407 #define MUL2(r...) \ 408 { \ 409 switch (REG_CNT(r)) { \ 410 case 4: \ 411 __asm( \ 412 "cmgt v19.16b," VR(17) ".16b," VR0(r) ".16b\n" \ 413 "cmgt v18.16b," VR(17) ".16b," VR1(r) ".16b\n" \ 414 "cmgt v21.16b," VR(17) ".16b," VR2(r) ".16b\n" \ 415 "cmgt v20.16b," VR(17) ".16b," VR3(r) ".16b\n" \ 416 "and v19.16b,v19.16b," VR(16) ".16b\n" \ 417 "and v18.16b,v18.16b," VR(16) ".16b\n" \ 418 "and v21.16b,v21.16b," VR(16) ".16b\n" \ 419 "and v20.16b,v20.16b," VR(16) ".16b\n" \ 420 "shl " VR0(r) ".16b," VR0(r) ".16b,#1\n" \ 421 "shl " VR1(r) ".16b," VR1(r) ".16b,#1\n" \ 422 "shl " VR2(r) ".16b," VR2(r) ".16b,#1\n" \ 423 "shl " VR3(r) ".16b," VR3(r) ".16b,#1\n" \ 424 "eor " VR0(r) ".16b,v19.16b," VR0(r) ".16b\n" \ 425 "eor " VR1(r) ".16b,v18.16b," VR1(r) ".16b\n" \ 426 "eor " VR2(r) ".16b,v21.16b," VR2(r) ".16b\n" \ 427 "eor " VR3(r) ".16b,v20.16b," VR3(r) ".16b\n" \ 428 : UVR0(r), UVR1(r), UVR2(r), UVR3(r) \ 429 : RVR(17), RVR(16) \ 430 : "v18", "v19", "v20", "v21"); \ 431 break; \ 432 case 2: \ 433 __asm( \ 434 "cmgt v19.16b," VR(17) ".16b," VR0(r) ".16b\n" \ 435 "cmgt v18.16b," VR(17) ".16b," VR1(r) ".16b\n" \ 436 "and v19.16b,v19.16b," VR(16) ".16b\n" \ 437 "and v18.16b,v18.16b," VR(16) ".16b\n" \ 438 "shl " VR0(r) ".16b," VR0(r) ".16b,#1\n" \ 439 "shl " VR1(r) ".16b," VR1(r) ".16b,#1\n" \ 440 "eor " VR0(r) ".16b,v19.16b," VR0(r) ".16b\n" \ 441 "eor " VR1(r) ".16b,v18.16b," VR1(r) ".16b\n" \ 442 : UVR0(r), UVR1(r) \ 443 : RVR(17), RVR(16) \ 444 : "v18", "v19"); \ 445 break; \ 446 default: \ 447 ZFS_ASM_BUG(); \ 448 } \ 449 } 450 451 #define MUL4(r...) \ 452 { \ 453 MUL2(r); \ 454 MUL2(r); \ 455 } 456 457 /* 458 * Unfortunately cannot use the macro, because GCC 459 * will try to use the macro name and not value 460 * later on... 461 * Kept as a reference to what a register is 462 * (here we're using actual registers for the 463 * clobbered ones) 464 */ 465 #define _0f "v15" 466 #define _a_save "v14" 467 #define _b_save "v13" 468 #define _lt_mod_a "v12" 469 #define _lt_clmul_a "v11" 470 #define _lt_mod_b "v10" 471 #define _lt_clmul_b "v15" 472 473 #define _MULx2(c, r...) \ 474 { \ 475 switch (REG_CNT(r)) { \ 476 case 2: \ 477 __asm( \ 478 /* lts for upper part */ \ 479 "movi v15.16b,#0x0f\n" \ 480 "ld1 { v10.4s },%[lt0]\n" \ 481 "ld1 { v11.4s },%[lt1]\n" \ 482 /* upper part */ \ 483 "and v14.16b," VR0(r) ".16b,v15.16b\n" \ 484 "and v13.16b," VR1(r) ".16b,v15.16b\n" \ 485 "ushr " VR0(r) ".16b," VR0(r) ".16b,#4\n" \ 486 "ushr " VR1(r) ".16b," VR1(r) ".16b,#4\n" \ 487 \ 488 "tbl v12.16b,{v10.16b}," VR0(r) ".16b\n" \ 489 "tbl v10.16b,{v10.16b}," VR1(r) ".16b\n" \ 490 "tbl v15.16b,{v11.16b}," VR0(r) ".16b\n" \ 491 "tbl v11.16b,{v11.16b}," VR1(r) ".16b\n" \ 492 \ 493 "eor " VR0(r) ".16b,v15.16b,v12.16b\n" \ 494 "eor " VR1(r) ".16b,v11.16b,v10.16b\n" \ 495 /* lts for lower part */ \ 496 "ld1 { v10.4s },%[lt2]\n" \ 497 "ld1 { v15.4s },%[lt3]\n" \ 498 /* lower part */ \ 499 "tbl v12.16b,{v10.16b},v14.16b\n" \ 500 "tbl v10.16b,{v10.16b},v13.16b\n" \ 501 "tbl v11.16b,{v15.16b},v14.16b\n" \ 502 "tbl v15.16b,{v15.16b},v13.16b\n" \ 503 \ 504 "eor " VR0(r) ".16b," VR0(r) ".16b,v12.16b\n" \ 505 "eor " VR1(r) ".16b," VR1(r) ".16b,v10.16b\n" \ 506 "eor " VR0(r) ".16b," VR0(r) ".16b,v11.16b\n" \ 507 "eor " VR1(r) ".16b," VR1(r) ".16b,v15.16b\n" \ 508 : UVR0(r), UVR1(r) \ 509 : [lt0] "Q" ((gf_clmul_mod_lt[4*(c)+0][0])), \ 510 [lt1] "Q" ((gf_clmul_mod_lt[4*(c)+1][0])), \ 511 [lt2] "Q" ((gf_clmul_mod_lt[4*(c)+2][0])), \ 512 [lt3] "Q" ((gf_clmul_mod_lt[4*(c)+3][0])) \ 513 : "v10", "v11", "v12", "v13", "v14", "v15"); \ 514 break; \ 515 default: \ 516 ZFS_ASM_BUG(); \ 517 } \ 518 } 519 520 #define MUL(c, r...) \ 521 { \ 522 switch (REG_CNT(r)) { \ 523 case 4: \ 524 _MULx2(c, R_23(r)); \ 525 _MULx2(c, R_01(r)); \ 526 break; \ 527 case 2: \ 528 _MULx2(c, R_01(r)); \ 529 break; \ 530 default: \ 531 ZFS_ASM_BUG(); \ 532 } \ 533 } 534 535 #define raidz_math_begin() kfpu_begin() 536 #define raidz_math_end() kfpu_end() 537 538 /* Overkill... */ 539 #if defined(_KERNEL) 540 #define GEN_X_DEFINE_0_3() \ 541 register unsigned char w0 asm("v0") __attribute__((vector_size(16))); \ 542 register unsigned char w1 asm("v1") __attribute__((vector_size(16))); \ 543 register unsigned char w2 asm("v2") __attribute__((vector_size(16))); \ 544 register unsigned char w3 asm("v3") __attribute__((vector_size(16))); 545 #define GEN_X_DEFINE_4_5() \ 546 register unsigned char w4 asm("v4") __attribute__((vector_size(16))); \ 547 register unsigned char w5 asm("v5") __attribute__((vector_size(16))); 548 #define GEN_X_DEFINE_6_7() \ 549 register unsigned char w6 asm("v6") __attribute__((vector_size(16))); \ 550 register unsigned char w7 asm("v7") __attribute__((vector_size(16))); 551 #define GEN_X_DEFINE_8_9() \ 552 register unsigned char w8 asm("v8") __attribute__((vector_size(16))); \ 553 register unsigned char w9 asm("v9") __attribute__((vector_size(16))); 554 #define GEN_X_DEFINE_10_11() \ 555 register unsigned char w10 asm("v10") __attribute__((vector_size(16))); \ 556 register unsigned char w11 asm("v11") __attribute__((vector_size(16))); 557 #define GEN_X_DEFINE_12_15() \ 558 register unsigned char w12 asm("v12") __attribute__((vector_size(16))); \ 559 register unsigned char w13 asm("v13") __attribute__((vector_size(16))); \ 560 register unsigned char w14 asm("v14") __attribute__((vector_size(16))); \ 561 register unsigned char w15 asm("v15") __attribute__((vector_size(16))); 562 #define GEN_X_DEFINE_16() \ 563 register unsigned char w16 asm("v16") __attribute__((vector_size(16))); 564 #define GEN_X_DEFINE_17() \ 565 register unsigned char w17 asm("v17") __attribute__((vector_size(16))); 566 #define GEN_X_DEFINE_18_21() \ 567 register unsigned char w18 asm("v18") __attribute__((vector_size(16))); \ 568 register unsigned char w19 asm("v19") __attribute__((vector_size(16))); \ 569 register unsigned char w20 asm("v20") __attribute__((vector_size(16))); \ 570 register unsigned char w21 asm("v21") __attribute__((vector_size(16))); 571 #define GEN_X_DEFINE_22_23() \ 572 register unsigned char w22 asm("v22") __attribute__((vector_size(16))); \ 573 register unsigned char w23 asm("v23") __attribute__((vector_size(16))); 574 #define GEN_X_DEFINE_24_27() \ 575 register unsigned char w24 asm("v24") __attribute__((vector_size(16))); \ 576 register unsigned char w25 asm("v25") __attribute__((vector_size(16))); \ 577 register unsigned char w26 asm("v26") __attribute__((vector_size(16))); \ 578 register unsigned char w27 asm("v27") __attribute__((vector_size(16))); 579 #define GEN_X_DEFINE_28_30() \ 580 register unsigned char w28 asm("v28") __attribute__((vector_size(16))); \ 581 register unsigned char w29 asm("v29") __attribute__((vector_size(16))); \ 582 register unsigned char w30 asm("v30") __attribute__((vector_size(16))); 583 #define GEN_X_DEFINE_31() \ 584 register unsigned char w31 asm("v31") __attribute__((vector_size(16))); 585 #define GEN_X_DEFINE_32() \ 586 register unsigned char w32 asm("v31") __attribute__((vector_size(16))); 587 #define GEN_X_DEFINE_33_36() \ 588 register unsigned char w33 asm("v31") __attribute__((vector_size(16))); \ 589 register unsigned char w34 asm("v31") __attribute__((vector_size(16))); \ 590 register unsigned char w35 asm("v31") __attribute__((vector_size(16))); \ 591 register unsigned char w36 asm("v31") __attribute__((vector_size(16))); 592 #define GEN_X_DEFINE_37_38() \ 593 register unsigned char w37 asm("v31") __attribute__((vector_size(16))); \ 594 register unsigned char w38 asm("v31") __attribute__((vector_size(16))); 595 #define GEN_X_DEFINE_ALL() \ 596 GEN_X_DEFINE_0_3() \ 597 GEN_X_DEFINE_4_5() \ 598 GEN_X_DEFINE_6_7() \ 599 GEN_X_DEFINE_8_9() \ 600 GEN_X_DEFINE_10_11() \ 601 GEN_X_DEFINE_12_15() \ 602 GEN_X_DEFINE_16() \ 603 GEN_X_DEFINE_17() \ 604 GEN_X_DEFINE_18_21() \ 605 GEN_X_DEFINE_22_23() \ 606 GEN_X_DEFINE_24_27() \ 607 GEN_X_DEFINE_28_30() \ 608 GEN_X_DEFINE_31() \ 609 GEN_X_DEFINE_32() \ 610 GEN_X_DEFINE_33_36() \ 611 GEN_X_DEFINE_37_38() 612 #else 613 #define GEN_X_DEFINE_0_3() \ 614 unsigned char w0 __attribute__((vector_size(16))); \ 615 unsigned char w1 __attribute__((vector_size(16))); \ 616 unsigned char w2 __attribute__((vector_size(16))); \ 617 unsigned char w3 __attribute__((vector_size(16))); 618 #define GEN_X_DEFINE_4_5() \ 619 unsigned char w4 __attribute__((vector_size(16))); \ 620 unsigned char w5 __attribute__((vector_size(16))); 621 #define GEN_X_DEFINE_6_7() \ 622 unsigned char w6 __attribute__((vector_size(16))); \ 623 unsigned char w7 __attribute__((vector_size(16))); 624 #define GEN_X_DEFINE_8_9() \ 625 unsigned char w8 __attribute__((vector_size(16))); \ 626 unsigned char w9 __attribute__((vector_size(16))); 627 #define GEN_X_DEFINE_10_11() \ 628 unsigned char w10 __attribute__((vector_size(16))); \ 629 unsigned char w11 __attribute__((vector_size(16))); 630 #define GEN_X_DEFINE_12_15() \ 631 unsigned char w12 __attribute__((vector_size(16))); \ 632 unsigned char w13 __attribute__((vector_size(16))); \ 633 unsigned char w14 __attribute__((vector_size(16))); \ 634 unsigned char w15 __attribute__((vector_size(16))); 635 #define GEN_X_DEFINE_16() \ 636 unsigned char w16 __attribute__((vector_size(16))); 637 #define GEN_X_DEFINE_17() \ 638 unsigned char w17 __attribute__((vector_size(16))); 639 #define GEN_X_DEFINE_18_21() \ 640 unsigned char w18 __attribute__((vector_size(16))); \ 641 unsigned char w19 __attribute__((vector_size(16))); \ 642 unsigned char w20 __attribute__((vector_size(16))); \ 643 unsigned char w21 __attribute__((vector_size(16))); 644 #define GEN_X_DEFINE_22_23() \ 645 unsigned char w22 __attribute__((vector_size(16))); \ 646 unsigned char w23 __attribute__((vector_size(16))); 647 #define GEN_X_DEFINE_24_27() \ 648 unsigned char w24 __attribute__((vector_size(16))); \ 649 unsigned char w25 __attribute__((vector_size(16))); \ 650 unsigned char w26 __attribute__((vector_size(16))); \ 651 unsigned char w27 __attribute__((vector_size(16))); 652 #define GEN_X_DEFINE_28_30() \ 653 unsigned char w28 __attribute__((vector_size(16))); \ 654 unsigned char w29 __attribute__((vector_size(16))); \ 655 unsigned char w30 __attribute__((vector_size(16))); 656 #define GEN_X_DEFINE_31() \ 657 unsigned char w31 __attribute__((vector_size(16))); 658 #define GEN_X_DEFINE_32() \ 659 unsigned char w32 __attribute__((vector_size(16))); 660 #define GEN_X_DEFINE_33_36() \ 661 unsigned char w33 __attribute__((vector_size(16))); \ 662 unsigned char w34 __attribute__((vector_size(16))); \ 663 unsigned char w35 __attribute__((vector_size(16))); \ 664 unsigned char w36 __attribute__((vector_size(16))); 665 #define GEN_X_DEFINE_37_38() \ 666 unsigned char w37 __attribute__((vector_size(16))); \ 667 unsigned char w38 __attribute__((vector_size(16))); 668 #define GEN_X_DEFINE_ALL() \ 669 GEN_X_DEFINE_0_3() \ 670 GEN_X_DEFINE_4_5() \ 671 GEN_X_DEFINE_6_7() \ 672 GEN_X_DEFINE_8_9() \ 673 GEN_X_DEFINE_10_11() \ 674 GEN_X_DEFINE_12_15() \ 675 GEN_X_DEFINE_16() \ 676 GEN_X_DEFINE_17() \ 677 GEN_X_DEFINE_18_21() \ 678 GEN_X_DEFINE_22_23() \ 679 GEN_X_DEFINE_24_27() \ 680 GEN_X_DEFINE_28_30() \ 681 GEN_X_DEFINE_31() \ 682 GEN_X_DEFINE_32() \ 683 GEN_X_DEFINE_33_36() \ 684 GEN_X_DEFINE_37_38() 685 #endif 686