Lines Matching +full:2 +full:w

13 # General Public License (GPL) Version 2, available from the file
48 # This code schedules 2 blocks at a time, with 4 lanes per block
93 INP = %rsi # 2nd arg
115 _XFER_SIZE = 2*64*4 # 2 blocks, 64 rounds, 4 bytes/round
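The two definitions above, together with the scheduling comment near the top, show the shape of the implementation: these matches appear to come from the Linux kernel's two-blocks-at-a-time SHA-256 AVX2 routine (arch/x86/crypto/sha256-avx2-asm.S), and the transfer-area sizing works out to _XFER_SIZE = 2 blocks * 64 rounds * 4 bytes/round = 512 bytes of precomputed K[t]+W[t] words kept on the stack.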
160 addl \disp(%rsp, SRND), h # h = k + w + h # --
162 vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
168 vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]# y1 = (e >> 6)# S1
174 add h, d # d = k + w + h + d # --
177 vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
179 rorx $2, a, T1 # T1 = (a >> 2) # S0
183 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
190 add y1, h # h = k + w + h + S0 # --
192 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
193 vpor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7
196 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
208 addl offset(%rsp, SRND), h # h = k + w + h # --
212 vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
223 add h, d # d = k + w + h + d # --
230 rorx $2, a, T1 # T1 = (a >> 2) # S0
233 vpxor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7 ^ W[-15] ror 18
234 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
240 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
242 add y1, h # h = k + w + h + S0 # --
244 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
245 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
246 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
249 vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
254 ################################### RND N + 2 ############################
258 offset = \disp + 2*4
259 addl offset(%rsp, SRND), h # h = k + w + h # --
261 vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA}
269 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA}
274 add h, d # d = k + w + h + d # --
284 rorx $2, a, T1 # T1 = (a >> 2) # S0
285 vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
287 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
291 vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
294 add y1, h # h = k + w + h + S0 # --
295 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
296 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
309 addl offset(%rsp, SRND), h # h = k + w + h # --
313 vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC}
320 vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC}
323 add h, d # d = k + w + h + d # --
326 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC}
336 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
338 rorx $2, a, T1 # T1 = (a >> 2) # S0
341 vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]}
342 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
347 add y1, h # h = k + w + h + S0 # --
348 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
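The block of matches above is the FOUR_ROUNDS_AND_SCHED macro, which interleaves four scalar rounds with the AVX2 message-schedule expansion: the vector comments trace W[-16] + s0(W[-15]) + W[-7] + s1(W[-2]) being assembled four lanes at a time into X0 = {W[3], W[2], W[1], W[0]}. As a plain-C reference model of that schedule (a sketch of the standard FIPS 180-4 recurrence, not code from this file):

#include <stdint.h>

/* Rotate right by n bits (n must be 1..31). */
static inline uint32_t ror32(uint32_t x, unsigned int n)
{
        return (x >> n) | (x << (32 - n));
}

/* Scalar message-schedule expansion; the AVX2 code computes the same
 * recurrence for four values of t per FOUR_ROUNDS_AND_SCHED invocation. */
static void sha256_schedule(uint32_t w[64])
{
        for (int t = 16; t < 64; t++) {
                uint32_t s0 = ror32(w[t - 15], 7) ^ ror32(w[t - 15], 18) ^ (w[t - 15] >> 3);
                uint32_t s1 = ror32(w[t - 2], 17) ^ ror32(w[t - 2], 19) ^ (w[t - 2] >> 10);

                w[t] = w[t - 16] + s0 + w[t - 7] + s1;
        }
}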
374 rorx $2, a, T1 # T1 = (a >> 2) # S0
375 addl \disp(%rsp, SRND), h # h = k + w + h # --
378 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
385 add h, d # d = k + w + h + d # --
387 add y1, h # h = k + w + h + S0 # --
388 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
394 add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
412 rorx $2, a, T1 # T1 = (a >> 2) # S0
414 addl offset(%rsp, SRND), h # h = k + w + h # --
417 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
424 add h, d # d = k + w + h + d # --
426 add y1, h # h = k + w + h + S0 # --
428 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
432 ################################### RND N + 2 ##############################
434 add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
452 rorx $2, a, T1 # T1 = (a >> 2) # S0
453 offset = 4*2 + \disp
454 addl offset(%rsp, SRND), h # h = k + w + h # --
457 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
464 add h, d # d = k + w + h + d # --
466 add y1, h # h = k + w + h + S0 # --
468 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
474 add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
492 rorx $2, a, T1 # T1 = (a >> 2) # S0
494 addl offset(%rsp, SRND), h # h = k + w + h # --
497 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0
504 add h, d # d = k + w + h + d # --
506 add y1, h # h = k + w + h + S0 # --
508 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
511 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# --
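The matches from the DO_4ROUNDS macro show the same per-round bookkeeping without the schedule: h first picks up k + w, then S1 and CH, d absorbs the resulting t1, and S0 (plus MAJ, which the code folds into old_h on the following round) completes the new a. A scalar sketch of one round as the comments describe it, reusing ror32() from the schedule sketch above (again a reference model, not the kernel's code):

/* One SHA-256 round; s[0..7] hold a..h.  The assembly rotates register
 * names instead of moving values, but the arithmetic is the same. */
static void sha256_round(uint32_t s[8], uint32_t k, uint32_t w)
{
        uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
        uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

        uint32_t S1  = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
        uint32_t ch  = (e & f) ^ (~e & g);
        uint32_t t1  = h + k + w + S1 + ch;   /* "h = k + w + h + S1 + CH" */
        uint32_t S0  = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
        uint32_t maj = (a & b) ^ (a & c) ^ (b & c);

        s[7] = g; s[6] = f; s[5] = e;
        s[4] = d + t1;                        /* "d = d + t1" */
        s[3] = c; s[2] = b; s[1] = a;
        s[0] = t1 + S0 + maj;                 /* "h = t1 + S0 (+ MAJ)" */
}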
547 mov 4*2(CTX), c
564 VMOVDQ 2*32(INP),XTMP2
598 leaq K256+2*32(%rip), INP
600 vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
601 FOUR_ROUNDS_AND_SCHED (_XFER + 2*32)
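In the full source, the leaq points INP at a 32-byte slice of the K256 round-constant table, XFER holds that slice added to the freshly scheduled words, and the vmovdqa parks the sum in the stack transfer area, which is why each round in the macros needs only one addl ...(%rsp, SRND), h (the "h = k + w + h" step). A minimal scalar sketch of that precompute, continuing the C sketches above (my own model; xfer stands in for the _XFER stack slots):

/* Fold the round constants into four scheduled words at once; the
 * round code then only needs one memory add per round. */
static void fill_xfer(uint32_t xfer[4], const uint32_t K256[64],
                      const uint32_t w[64], int t)
{
        for (int i = 0; i < 4; i++)
                xfer[i] = K256[t + i] + w[t + i];
}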
623 add $2*32, SRND
636 addm (4*2)(CTX),c
652 add $2*32, SRND
662 addm (4*2)(CTX),c
676 VMOVDQ 2*16(INP),XWORD2
691 mov (4*2)(CTX),c
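The remaining matches are the block loop and epilogue: SRND steps through the transfer area (add $2*32, SRND), a final single block is reloaded 16 bytes at a time into XWORD0..3, and addm (4*2)(CTX),c folds the working variable c back into digest word 2 before mov (4*2)(CTX),c reloads it for the next block. A short sketch of that feed-forward step, continuing the C sketches above (the struct name here is hypothetical; the kernel keeps the words in its own state structure passed in CTX):

/* Hypothetical digest container; (4*2)(CTX) is state[2], i.e. c. */
struct sha256_digest {
        uint32_t state[8];      /* chaining values matching a..h */
};

/* Feed-forward after each block: the addm macro does state[i] += reg
 * and stores the sum back, one digest word at a time. */
static void sha256_feed_forward(struct sha256_digest *ctx, const uint32_t work[8])
{
        for (int i = 0; i < 8; i++)
                ctx->state[i] += work[i];
}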