/*===----------------- gfniintrin.h - GFNI intrinsics ----------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <gfniintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __GFNIINTRIN_H
#define __GFNIINTRIN_H

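/* GFNI (Galois Field New Instructions) operate on packed bytes: GF2P8MULB
 * multiplies elements of GF(2^8) modulo the polynomial x^8 + x^4 + x^3 + x + 1,
 * while GF2P8AFFINEQB and GF2P8AFFINEINVQB apply an 8x8 bit-matrix affine
 * transform, the latter to the GF(2^8) multiplicative inverse of each byte.
 * The intrinsics below wrap these instructions at 128-, 256-, and 512-bit
 * vector widths. */
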
/* Default attributes for simple form (no masking). */
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("gfni,no-evex512"), __min_vector_width__(128)))

/* Default attributes for YMM unmasked form. */
#define __DEFAULT_FN_ATTRS_Y                                                   \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("avx,gfni,no-evex512"),                            \
                 __min_vector_width__(256)))

/* Default attributes for ZMM unmasked forms. */
#define __DEFAULT_FN_ATTRS_Z                                                   \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("avx512f,evex512,gfni"),                           \
                 __min_vector_width__(512)))
/* Default attributes for ZMM masked forms. */
#define __DEFAULT_FN_ATTRS_Z_MASK                                              \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("avx512bw,evex512,gfni"),                          \
                 __min_vector_width__(512)))

/* Default attributes for VLX masked forms. */
#define __DEFAULT_FN_ATTRS_VL128                                               \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("avx512bw,avx512vl,gfni,no-evex512"),              \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS_VL256                                               \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("avx512bw,avx512vl,gfni,no-evex512"),              \
                 __min_vector_width__(256)))

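/* For each byte of A, replace it with its GF(2^8) multiplicative inverse
 * (with respect to the polynomial x^8 + x^4 + x^3 + x + 1; 0 maps to 0),
 * then multiply it, as a GF(2) bit vector, by the 8x8 bit matrix held in
 * the corresponding qword of B and XOR the result with the constant byte I. */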
#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \
                                                   (__v16qi)(__m128i)(B), \
                                                   (char)(I)))

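/* For each byte of A, multiply it, as a GF(2) bit vector, by the 8x8 bit
 * matrix held in the corresponding qword of B and XOR the result with the
 * constant byte I. */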
#define _mm_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \
                                                (__v16qi)(__m128i)(B), \
                                                (char)(I)))

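/* Multiply the packed bytes of A and B as elements of GF(2^8), reduced by
 * the polynomial x^8 + x^4 + x^3 + x + 1. */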
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A,
              (__v16qi) __B);
}

#ifdef __AVXINTRIN_H
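/* 256-bit (VEX-encoded) forms of the same three operations; these also
 * require AVX. */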
#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \
                                                   (__v32qi)(__m256i)(B), \
                                                   (char)(I)))

#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \
                                                (__v32qi)(__m256i)(B), \
                                                (char)(I)))

static __inline__ __m256i __DEFAULT_FN_ATTRS_Y
_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi((__v32qi) __A,
              (__v32qi) __B);
}
#endif /* __AVXINTRIN_H */

#ifdef __AVX512BWINTRIN_H
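/* 512-bit forms, along with merge-masked (_mask_, which takes a source
 * vector S and keeps S's byte wherever the corresponding bit of mask U is
 * clear) and zero-masked (_maskz_) variants; the 64-bit byte masks require
 * AVX512BW. */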
#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \
                                                   (__v64qi)(__m512i)(B), \
                                                   (char)(I)))

#define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
         (__v64qi)_mm512_gf2p8affineinv_epi64_epi8((A), (B), (I)), \
         (__v64qi)(__m512i)(S)))

#define _mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \
         U, A, B, I)

#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \
                                                (__v64qi)(__m512i)(B), \
                                                (char)(I)))

#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
         (__v64qi)_mm512_gf2p8affine_epi64_epi8((A), (B), (I)), \
         (__v64qi)(__m512i)(S)))

#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \
         U, A, B, I)

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi((__v64qi) __A,
              (__v64qi) __B);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK
_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_selectb_512(__U,
              (__v64qi) _mm512_gf2p8mul_epi8(__A, __B),
              (__v64qi) __S);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK
_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)
{
  return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(),
              __U, __A, __B);
}
#endif /* __AVX512BWINTRIN_H */

#ifdef __AVX512VLBWINTRIN_H
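/* Merge- and zero-masked 128- and 256-bit variants; these require AVX512BW
 * plus AVX512VL. */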
#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
         (__v16qi)_mm_gf2p8affineinv_epi64_epi8((A), (B), (I)), \
         (__v16qi)(__m128i)(S)))

#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \
         U, A, B, I)

#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
         (__v32qi)_mm256_gf2p8affineinv_epi64_epi8((A), (B), (I)), \
         (__v32qi)(__m256i)(S)))

#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
         U, A, B, I)

#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
         (__v16qi)_mm_gf2p8affine_epi64_epi8((A), (B), (I)), \
         (__v16qi)(__m128i)(S)))

#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), U, A, B, I)

#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
         (__v32qi)_mm256_gf2p8affine_epi64_epi8((A), (B), (I)), \
         (__v32qi)(__m256i)(S)))

#define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \
         U, A, B, I)

static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_selectb_128(__U,
              (__v16qi) _mm_gf2p8mul_epi8(__A, __B),
              (__v16qi) __S);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_maskz_gf2p8mul_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
  return _mm_mask_gf2p8mul_epi8((__m128i)_mm_setzero_si128(),
              __U, __A, __B);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_mask_gf2p8mul_epi8(__m256i __S, __mmask32 __U, __m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_selectb_256(__U,
              (__v32qi) _mm256_gf2p8mul_epi8(__A, __B),
              (__v32qi) __S);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_maskz_gf2p8mul_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
  return _mm256_mask_gf2p8mul_epi8((__m256i)_mm256_setzero_si256(),
              __U, __A, __B);
}
#endif /* __AVX512VLBWINTRIN_H */

#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_Y
#undef __DEFAULT_FN_ATTRS_Z
#undef __DEFAULT_FN_ATTRS_Z_MASK
#undef __DEFAULT_FN_ATTRS_VL128
#undef __DEFAULT_FN_ATTRS_VL256

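/* Usage sketch (illustrative only; "x" is an arbitrary input, and the
 * matrix constant is the identity matrix in the instruction's row-major
 * bit encoding, an assumption worth checking against the Intel SDM):
 *
 *   // With the identity matrix and a zero constant, the affine-inverse
 *   // form reduces to a per-byte multiplicative inverse in GF(2^8).
 *   __m128i id  = _mm_set1_epi64x(0x0102040810204080LL);
 *   __m128i inv = _mm_gf2p8affineinv_epi64_epi8(x, id, 0);
 */
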
#endif /* __GFNIINTRIN_H */