Lines Matching +full:c +full:- +full:22
2 # Implement fast SHA-256 with SSSE3 instructions. (x86_64)
4 # Copyright (C) 2013 Intel Corporation.
9 # Tim Chen <tim.c.chen@linux.intel.com>
21 # - Redistributions of source code must retain the above
25 # - Redistributions in binary form must reproduce the above
41 # This code is described in an Intel White-Paper:
42 # "Fast SHA-256 Implementations on Intel Architecture Processors"
57 # Add reg to mem using reg-mem add and store
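The comment above describes the addm helper: add a register into a memory operand and write the sum back to memory. It is what the addm (4*2)(CTX),c line near the end of this listing uses to fold a working variable back into the digest state. A minimal C equivalent with assumed names (the macro body itself is not part of this listing):

    #include <stdint.h>

    /* addm: mem += reg, i.e. a register-to-memory add whose result is
     * stored back to memory.  Used after each 64-byte block to fold the
     * working variables (a..h) back into the hash state in CTX. */
    static inline void addm(uint32_t reg, uint32_t *mem)
    {
        *mem += reg;
    }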
86 SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA
87 SHUF_DC00 = %xmm11 # shuffle xDxC -> DC00
95 c = %ecx
141 d = c
142 c = b
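The two lines above (d = c, c = b) come from the per-round renaming of the working variables: the assembly never moves the eight values between registers, it just re-binds the symbolic names a..h to different registers after every round, so "rotating" the state costs zero instructions. A scalar analogue, using a small helper struct of my own naming, keeps a..h in a ring and rotates a base index instead of the data:

    #include <stdint.h>

    /* Working variables a..h kept in a ring; "rotating" them is just a
     * base-index update, mirroring the zero-cost register renaming done
     * by the assembler-time assignments (d = c, c = b, ...). */
    struct sha256_vars {
        uint32_t v[8];   /* storage for a..h                    */
        unsigned base;   /* index of "a" for the current round  */
    };

    static inline uint32_t *var(struct sha256_vars *s, unsigned which)
    {
        /* which: 0 = a, 1 = b, ..., 7 = h */
        return &s->v[(s->base + which) & 7];
    }

    static inline void rotate_args(struct sha256_vars *s)
    {
        /* After a round the old a becomes the new b, old b the new c,
         * and so on; the freed slot becomes the new a. */
        s->base = (s->base + 7) & 7;
    }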
149 ## compute W[-16] + W[-7] 4 at a time
152 ror $(25-11), y0 # y0 = e >> (25-11)
154 palignr $4, X2, XTMP0 # XTMP0 = W[-7]
155 ror $(22-13), y1 # y1 = a >> (22-13)
156 xor e, y0 # y0 = e ^ (e >> (25-11))
158 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
160 xor a, y1 # y1 = a ^ (a >> (22-13))
162 paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
163 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
165 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
167 palignr $4, X0, XTMP1 # XTMP1 = W[-15]
168 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
171 movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
172 ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
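The ror/xor sequence above computes the big-sigma functions with three rotates and two XORs each by rotating a running partial result instead of rotating the input three separate times; the comments track it, ending in y1 = S0 = (a ror 2) ^ (a ror 13) ^ (a ror 22) (written with ">>" in the source). A small C check of that identity, illustrative only:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x << (32 - n)) | (x >> n);
    }

    /* Textbook Sigma0: three independent rotates, two XORs. */
    static uint32_t S0_ref(uint32_t a)
    {
        return ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
    }

    /* Staged form matching the ror $(22-13) / ror $(13-2) / ror $2
     * sequence: each rotate acts on the running XOR, not a fresh copy. */
    static uint32_t S0_staged(uint32_t a)
    {
        uint32_t y = ror32(a, 22 - 13);   /* a ror 9                    */
        y = ror32(y ^ a, 13 - 2);         /* (a ror 20) ^ (a ror 11)    */
        return ror32(y ^ a, 2);           /* xor a again, final ror 2   */
    }

    int main(void)
    {
        uint32_t vals[] = { 0, 1, 0x80000000u, 0xdeadbeefu, 0x0f0f0f0fu };
        for (unsigned i = 0; i < 5; i++)
            assert(S0_ref(vals[i]) == S0_staged(vals[i]));
        puts("staged Sigma0 matches the reference");
        return 0;
    }

The S1 sequence on e works the same way with the constants 25-11, 11-6 and 6.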
175 movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
179 pslld $(32-7), XTMP1 # XTMP1 = W[-15] << (32-7)
180 or c, y0 # y0 = a|c
182 and c, y2 # y2 = a&c
184 and b, y0 # y0 = (a|c)&b
186 por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7
187 or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
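Maj(a, b, c) is computed here as ((a|c) & b) | (a & c) rather than the FIPS 180-4 form (a&b) ^ (a&c) ^ (b&c); the two are bitwise identical (per bit, if a and c agree the result is that common bit, otherwise b decides), and the or/and version maps directly onto two-operand x86 instructions with the intermediates the comments show in y0 and y2. A C sketch of both forms for comparison, not taken from the kernel source:

    #include <stdint.h>

    /* FIPS 180-4 majority function. */
    static inline uint32_t maj_ref(uint32_t a, uint32_t b, uint32_t c)
    {
        return (a & b) ^ (a & c) ^ (b & c);
    }

    /* Form used by the assembly: y0 = (a|c)&b, y2 = a&c, MAJ = y0|y2.
     * Per bit: a == c  ->  a&c selects the common bit (b irrelevant);
     *          a != c  ->  a|c is set and a&c clear, so b decides.    */
    static inline uint32_t maj_opt(uint32_t a, uint32_t b, uint32_t c)
    {
        return ((a | c) & b) | (a & c);
    }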
191 movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
194 movdqa XTMP3, XTMP4 # XTMP4 = W[-15]
195 ror $(25-11), y0 # y0 = e >> (25-11)
196 xor e, y0 # y0 = e ^ (e >> (25-11))
198 ror $(22-13), y1 # y1 = a >> (22-13)
199 pslld $(32-18), XTMP3 # XTMP3 = W[-15] << (32-18)
200 xor a, y1 # y1 = a ^ (a >> (22-13))
201 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
204 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
205 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
209 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
211 psrld $3, XTMP4 # XTMP4 = W[-15] >> 3
214 ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
215 pxor XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 ^ W[-15] ror 18
220 or c, y0 # y0 = a|c
222 and c, y2 # y2 = a&c
224 pshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
225 and b, y0 # y0 = (a|c)&b
227 paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
228 or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
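The s0 pass that ends here needs (W[-15] ror 7) ^ (W[-15] ror 18) ^ (W[-15] >> 3), but SSE/SSSE3 has no packed 32-bit rotate, so each rotate is assembled from a left shift by (32-n), a right shift by n and a POR/PXOR (the pslld $(32-7) and pslld $(32-18) lines above). A scalar sketch of what each lane ends up holding (helper names are mine):

    #include <stdint.h>

    /* Rotate right built the same way the vector code builds it:
     * left shift by (32-n) OR right shift by n (valid for 0 < n < 32). */
    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x << (32 - n)) | (x >> n);
    }

    /* Small sigma0 from FIPS 180-4, applied to W[t-15]. */
    static inline uint32_t sigma0(uint32_t x)
    {
        return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
    }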
232 movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {BBAA}
235 ror $(25-11), y0 # y0 = e >> (25-11)
236 movdqa XTMP2, XTMP4 # XTMP4 = W[-2] {BBAA}
237 xor e, y0 # y0 = e ^ (e >> (25-11))
238 ror $(22-13), y1 # y1 = a >> (22-13)
240 xor a, y1 # y1 = a ^ (a >> (22-13))
241 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
242 psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA}
244 psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA}
245 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
247 psrld $10, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
248 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
249 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
254 ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
261 or c, y0 # y0 = a|c
263 and c, y2 # y2 = a&c
265 and b, y0 # y0 = (a|c)&b
268 pshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
269 or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
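The low s1 pass above needs (W[-2] ror 17) ^ (W[-2] ror 19) ^ (W[-2] >> 10), but only for two of the four lanes. The pshufd duplicates each 32-bit word within its 64-bit lane, so a 64-bit logical right shift (psrlq $17 / psrlq $19) lands the 32-bit rotate in the low dword of each lane; the high dword is garbage, which is the "x" in the xBxA comments and is discarded by the later shuffle. A scalar demonstration of that trick, not the kernel's code:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Duplicate x into both halves of a 64-bit lane; a 64-bit right
     * shift by n then yields (x ror n) in the low 32 bits. */
    static uint32_t ror_via_64bit_shift(uint32_t x, unsigned n)
    {
        uint64_t lane = ((uint64_t)x << 32) | x;   /* lane = {x, x}       */
        return (uint32_t)(lane >> n);              /* low dword = x ror n */
    }

    int main(void)
    {
        uint32_t x = 0xdeadbeefu;
        assert(ror_via_64bit_shift(x, 17) == ((x >> 17) | (x << 15)));
        assert(ror_via_64bit_shift(x, 19) == ((x >> 19) | (x << 13)));
        puts("64-bit-shift rotate matches");
        return 0;
    }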
273 movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {DDCC}
275 ror $(25-11), y0 # y0 = e >> (25-11)
277 movdqa XTMP2, X0 # X0 = W[-2] {DDCC}
278 ror $(22-13), y1 # y1 = a >> (22-13)
279 xor e, y0 # y0 = e ^ (e >> (25-11))
281 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
282 psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC}
283 xor a, y1 # y1 = a ^ (a >> (22-13))
285 psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC}
286 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
288 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
289 psrld $10, X0 # X0 = W[-2] >> 10 {DDCC}
290 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
302 or c, y0 # y0 = a|c
304 and c, y2 # y2 = a&c
306 and b, y0 # y0 = (a|c)&b
308 or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
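Taken together, the four interleaved quarters above compute four new message-schedule words per pass: W[t] = W[t-16] + s0(W[t-15]) + W[t-7] + s1(W[t-2]). A plain scalar reference for the whole expansion (the vector code produces the same values four at a time):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x << (32 - n)) | (x >> n);
    }

    static inline uint32_t s0(uint32_t x)
    {
        return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
    }

    static inline uint32_t s1(uint32_t x)
    {
        return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
    }

    /* Expand the 16 message words of one block to the full 64-entry
     * schedule, as in FIPS 180-4 section 6.2.2. */
    static void sha256_schedule(uint32_t W[64])
    {
        for (int t = 16; t < 64; t++)
            W[t] = W[t - 16] + s0(W[t - 15]) + W[t - 7] + s1(W[t - 2]);
    }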
318 ror $(25-11), y0 # y0 = e >> (25-11)
320 xor e, y0 # y0 = e ^ (e >> (25-11))
321 ror $(22-13), y1 # y1 = a >> (22-13)
323 xor a, y1 # y1 = a ^ (a >> (22-13))
324 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
326 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
327 ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
329 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
333 ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
339 or c, y0 # y0 = a|c
341 and c, y2 # y2 = a&c
342 and b, y0 # y0 = (a|c)&b
344 or y2, y0 # y0 = MAJ = ((a|c)&b)|(a&c)
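The fragments above (the rounds with no message scheduling) build S1 = (e ror 6) ^ (e ror 11) ^ (e ror 25) in y0, S0 = (a ror 2) ^ (a ror 13) ^ (a ror 22) in y1, and MAJ in y0 again; the CH(e, f, g) computation is not shown because those instructions do not match this query. For reference, one full SHA-256 round in scalar C (my own naming, with v[0..7] = a..h):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x << (32 - n)) | (x >> n);
    }

    /* One round: k is the round constant K[t], w the schedule word W[t]. */
    static void sha256_round(uint32_t v[8], uint32_t k, uint32_t w)
    {
        uint32_t a = v[0], b = v[1], c = v[2], d = v[3];
        uint32_t e = v[4], f = v[5], g = v[6], h = v[7];

        uint32_t S1  = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
        uint32_t ch  = (e & f) ^ (~e & g);
        uint32_t S0  = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
        uint32_t maj = ((a | c) & b) | (a & c);   /* same form as above */
        uint32_t t1  = h + S1 + ch + k + w;
        uint32_t t2  = S0 + maj;

        /* Rotate the working variables (compare d = c, c = b above). */
        v[7] = g; v[6] = f; v[5] = e; v[4] = d + t1;
        v[3] = c; v[2] = b; v[1] = a; v[0] = t1 + t2;
    }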
373 mov 4*2(CTX), c
447 addm (4*2)(CTX),c
497 # shuffle xBxA -> 00BA
503 # shuffle xDxC -> DC00
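These last two comments describe the fix-up after the two s1 passes: the low pass leaves its two results in the A and B dword positions (xBxA) and the high pass in the C and D positions (xDxC); pshufb with SHUF_00BA and SHUF_DC00 moves each pair into place with the other two lanes zeroed, so both halves can be added into the W[-16] + W[-7] + s0 vector without disturbing each other. Conceptually, per group of four new words (array layout assumed):

    #include <stdint.h>

    /* lowpair holds the new W[t], W[t+1] (the "A"/"B" lanes of the low
     * s1 pass); highpair holds W[t+2], W[t+3] (the "C"/"D" lanes of the
     * high pass).  The two pshufb masks perform this placement inside
     * the XMM registers. */
    static void merge_schedule_halves(uint32_t out[4],
                                      const uint32_t lowpair[2],
                                      const uint32_t highpair[2])
    {
        out[0] = lowpair[0];    /* A -> W[t]   */
        out[1] = lowpair[1];    /* B -> W[t+1] */
        out[2] = highpair[0];   /* C -> W[t+2] */
        out[3] = highpair[1];   /* D -> W[t+3] */
    }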