Lines Matching +full:2 +full:w
13 # General Public License (GPL) Version 2, available from the file
98 INP = %rsi # 2nd arg
155 ## compute W[-16] + W[-7] 4 at a time
160 vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
167 vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]
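These matches appear to come from the message-schedule half of the kernel's AVX SHA-256 assembly (arch/x86/crypto/sha256-avx-asm.S): W[-7] and W[-16] name schedule words relative to the one being built. For reference, a minimal scalar sketch of the FIPS 180-4 recurrence that the vector code evaluates four words at a time (names here are illustrative, not the kernel's):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, int r)
    {
            return (x >> r) | (x << (32 - r));
    }

    /* small sigmas used by the message schedule */
    static uint32_t s0(uint32_t x) { return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); }
    static uint32_t s1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); }

    /* W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16], for t = 16..63 */
    static void schedule(uint32_t W[64])
    {
            for (int t = 16; t < 64; t++)
                    W[t] = s1(W[t - 2]) + W[t - 7] + s0(W[t - 15]) + W[t - 16];
    }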
170 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
172 vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
173 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
176 MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
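In these scalar-round comments ">>" denotes a rotate, not a logical shift. Sigma0 is factored so that each MY_ROR count is the difference of adjacent rotation constants, letting a single accumulator be rotated in place. A sketch of the identity, reusing ror32 from the sketch above:

    /* Sigma0(a) = ror(a,2) ^ ror(a,13) ^ ror(a,22)
     *           = ror(a ^ ror(a ^ ror(a, 22-13), 13-2), 2) */
    static uint32_t Sigma0(uint32_t a)
    {
            uint32_t y1 = ror32(a, 22 - 13);  /* a ror 9 */
            y1 ^= a;                          /* a ^ (a ror 9) */
            y1 = ror32(y1, 13 - 2);           /* (a ror 11) ^ (a ror 20) */
            y1 ^= a;                          /* a ^ (a ror 11) ^ (a ror 20) */
            return ror32(y1, 2);              /* (a ror 2) ^ (a ror 13) ^ (a ror 22) */
    }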
178 add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
180 add y2, h # h = h + S1 + CH + k + w
184 add h, d # d = d + h + S1 + CH + k + w
188 add y1, h # h = h + S1 + CH + k + w + S0
189 vpor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] MY_ROR 7
191 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
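The running "h = h + ..." comments spell out one complete round. A scalar model of that round, continuing the sketch above, with kw standing for the K[t]+W[t] word fetched from _XFER on the stack (sha256_round is an illustrative name):

    /* One SHA-256 round; note d is bumped by the new h *before*
     * S0+MAJ are folded in, exactly as the add sequence above does. */
    static void sha256_round(uint32_t s[8], uint32_t kw)
    {
            uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
            uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

            uint32_t S1  = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
            uint32_t ch  = (e & f) ^ (~e & g);
            uint32_t S0  = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
            uint32_t maj = (a & b) ^ (a & c) ^ (b & c);

            h += S1 + ch + kw;      /* h = h + S1 + CH + k + w */
            d += h;                 /* d = d + h + S1 + CH + k + w */
            h += S0 + maj;          /* h = h + S1 + CH + k + w + S0 + MAJ */

            /* the asm renames registers per round instead of moving data */
            s[0] = h; s[1] = a; s[2] = b; s[3] = c;
            s[4] = d; s[5] = e; s[6] = f; s[7] = g;
    }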
203 vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
204 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
209 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
213 add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
214 MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
215 vpxor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR 18
217 add y2, h # h = h + S1 + CH + k + w
221 add h, d # d = d + h + S1 + CH + k + w
224 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
226 add y1, h # h = h + S1 + CH + k + w + S0
227 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
229 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
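The vpslld/vpsrld/vpor triples in this stretch exist because SSE/AVX (before AVX-512) has no 32-bit vector rotate: each rotate is built from two shifts and an OR, and s0 = ror7 ^ ror18 ^ (>>3) is then added to W[-16]+W[-7]. An intrinsics sketch of the same four-lane s0 (illustrative, not the kernel's code):

    #include <immintrin.h>

    /* s0 on four schedule words at once */
    static __m128i vec_s0(__m128i w)
    {
            __m128i r7  = _mm_or_si128(_mm_srli_epi32(w, 7),
                                       _mm_slli_epi32(w, 32 - 7));
            __m128i r18 = _mm_or_si128(_mm_srli_epi32(w, 18),
                                       _mm_slli_epi32(w, 32 - 18));
            __m128i sh3 = _mm_srli_epi32(w, 3);     /* vpsrld $3 */
            return _mm_xor_si128(_mm_xor_si128(r7, r18), sh3);
    }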
239 vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
241 vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xBxA}
244 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xBxA}
245 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
246 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
251 MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
252 add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
255 add y2, h # h = h + S1 + CH + k + w
259 add h, d # d = d + h + S1 + CH + k + w
261 vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
263 add y1, h # h = h + S1 + CH + k + w + S0
265 vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
267 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
276 vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC}
279 vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xDxC}
282 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
283 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xDxC}
284 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
288 MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
290 add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
293 add y2, h # h = h + S1 + CH + k + w
297 add h, d # d = d + h + S1 + CH + k + w
299 vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]}
301 add y1, h # h = h + S1 + CH + k + w + S0
303 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
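s1 takes a different route: its rotates by 17 and 19 are emulated with 64-bit shifts (vpsrlq). vpshufd duplicates each W[-2] word into both halves of a 64-bit lane ({BBAA}, then {DDCC}), so a 64-bit logical right shift produces a 32-bit rotate in the low half of each lane; the "x" halves hold garbage and are shuffled away afterward. Only two words fit per pass, and the second pair depends on the freshly computed W[0] and W[1], which is why the schedule finishes in two halves. A two-lane sketch, continuing the intrinsics above:

    /* s1 two words at a time; input lanes hold {B,B,A,A} */
    static __m128i vec_s1_pair(__m128i w2)
    {
            __m128i r17 = _mm_srli_epi64(w2, 17);   /* {x, B ror 17, x, A ror 17} */
            __m128i r19 = _mm_srli_epi64(w2, 19);   /* {x, B ror 19, x, A ror 19} */
            __m128i s10 = _mm_srli_epi32(w2, 10);   /* >> 10 in every dword */
            /* result is valid in dwords 0 and 2 only; caller masks the rest */
            return _mm_xor_si128(_mm_xor_si128(r17, r19), s10);
    }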
320 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2))
322 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
326 MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
328 add offset(%rsp), y2 # y2 = k + w + S1 + CH
330 add y2, h # h = h + S1 + CH + k + w
333 add h, d # d = d + h + S1 + CH + k + w
336 add y1, h # h = h + S1 + CH + k + w + S0
338 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
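The final 16 rounds (DO_ROUND, rounds 48..63) drop the schedule work; offset(%rsp) selects one of four K[t]+W[t] sums that vpaddd parked at _XFER. A scalar model of that hand-off, reusing sha256_round from the sketch above:

    /* four rounds fed from a parked K+W quartet (models _XFER(%rsp)) */
    static void four_rounds(uint32_t s[8], const uint32_t k[4], const uint32_t w[4])
    {
            uint32_t xfer[4];
            for (int i = 0; i < 4; i++)
                    xfer[i] = k[i] + w[i];          /* vpaddd (TBL), X0, XFER */
            for (int i = 0; i < 4; i++)
                    sha256_round(s, xfer[i]);       /* DO_ROUND 0..3 */
    }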
366 mov 4*2(CTX), c
382 COPY_XMM_AND_BSWAP X2, 2*16(INP), BYTE_FLIP_MASK
399 vpaddd 2*16(TBL), X0, XFER
411 mov $2, SRND
417 DO_ROUND 2
422 add $2*16, TBL
425 DO_ROUND 2
436 addm (4*2)(CTX), c
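The remaining matches are the function scaffolding: the eight chaining words are loaded from CTX, the 64-byte block is byte-swapped into the X registers (COPY_XMM_AND_BSWAP), the constant table at TBL is pre-added to the schedule words, SRND counts the loop passes, and addm folds the working variables back into the state. A compact scalar outline of the whole transform, tying the earlier sketches together (sha256_transform and K256 are illustrative names; K256 stands for the standard round-constant table):

    extern const uint32_t K256[64];     /* standard SHA-256 round constants */

    static void sha256_transform(uint32_t state[8], const unsigned char block[64])
    {
            uint32_t s[8], w[64];
            for (int i = 0; i < 8; i++)
                    s[i] = state[i];                /* mov 4*i(CTX), reg */
            for (int i = 0; i < 16; i++)            /* COPY_XMM_AND_BSWAP */
                    w[i] = ((uint32_t)block[4*i] << 24) | (block[4*i + 1] << 16) |
                           (block[4*i + 2] << 8) | block[4*i + 3];
            schedule(w);                    /* interleaved with the rounds in the asm */
            for (int t = 0; t < 64; t++)
                    sha256_round(s, K256[t] + w[t]);/* vpaddd n*16(TBL) + rounds */
            for (int i = 0; i < 8; i++)
                    state[i] += s[i];               /* addm (4*i)(CTX), reg */
    }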