Lines Matching +full:11 +full:w
149 ## compute W[-16] + W[-7] 4 at a time
152 ror $(25-11), y0 # y0 = e >> (25-11)
154 palignr $4, X2, XTMP0 # XTMP0 = W[-7]
156 xor e, y0 # y0 = e ^ (e >> (25-11))
158 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
162 paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
163 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
167 palignr $4, X0, XTMP1 # XTMP1 = W[-15]
169 ror $6, y0 # y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
171 movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
174 add _XFER(%rsp), y2 # y2 = k + w + S1 + CH
175 movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
177 add y2, h # h = h + S1 + CH + k + w
181 add h, d # d = d + h + S1 + CH + k + w
185 add y1, h # h = h + S1 + CH + k + w + S0
186 por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7
188 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
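
For reference, a minimal scalar C model of the round these interleaved integer instructions are tracking; ror32() and sha256_round() are illustrative helpers written for this note, not code taken from the kernel file.

#include <stdint.h>

static inline uint32_t ror32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

/* s[] is the a..h working state, k the round constant, w the schedule
 * word the assembly loads from _XFER(%rsp) */
static void sha256_round(uint32_t s[8], uint32_t k, uint32_t w)
{
	uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
	uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
	uint32_t S1  = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
	uint32_t CH  = (e & f) ^ (~e & g);
	uint32_t S0  = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
	uint32_t MAJ = (a & b) ^ (a & c) ^ (b & c);
	uint32_t t1  = h + S1 + CH + k + w;	/* "h = h + S1 + CH + k + w" */

	s[7] = g;  s[6] = f;  s[5] = e;
	s[4] = d + t1;			/* "d = d + h + S1 + CH + k + w" */
	s[3] = c;  s[2] = b;  s[1] = a;
	s[0] = t1 + S0 + MAJ;		/* "h = ... + S0 + MAJ" becomes the new a */
}

The assembly avoids shifting the working state by renaming a..h between rounds; the arithmetic is the same.
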
191 movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
194 movdqa XTMP3, XTMP4 # XTMP4 = W[-15]
195 ror $(25-11), y0 # y0 = e >> (25-11)
196 xor e, y0 # y0 = e ^ (e >> (25-11))
201 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
205 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
207 ror $6, y0 # y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
211 psrld $3, XTMP4 # XTMP4 = W[-15] >> 3
213 add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
215 pxor XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 ^ W[-15] ror 18
217 add y2, h # h = h + S1 + CH + k + w
221 add h, d # d = d + h + S1 + CH + k + w
224 pshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
226 add y1, h # h = h + S1 + CH + k + w + S0
227 paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
229 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
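
The XTMP1..XTMP4 steps in the two round groups above build sigma0 of W[-15] (ror 7 ^ ror 18 ^ logical shift right 3) in all four lanes and fold it into W[-16] + W[-7]. A scalar sketch, reusing the ror32() helper from the previous snippet:

static inline uint32_t sigma0(uint32_t x)
{
	return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
}

/* after "paddd XTMP1, XTMP0", lane i of XTMP0 holds
 *	W[i-16] + W[i-7] + sigma0(W[i-15])
 * i.e. everything of the schedule recurrence except sigma1(W[i-2]) */
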
232 movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {BBAA}
235 ror $(25-11), y0 # y0 = e >> (25-11)
236 movdqa XTMP2, XTMP4 # XTMP4 = W[-2] {BBAA}
237 xor e, y0 # y0 = e ^ (e >> (25-11))
241 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
242 psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA}
244 psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA}
245 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
247 psrld $10, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
251 ror $6, y0 # y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
255 add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
258 add y2, h # h = h + S1 + CH + k + w
262 add h, d # d = d + h + S1 + CH + k + w
264 paddd XTMP4, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
266 add y1, h # h = h + S1 + CH + k + w + S0
268 pshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC}
270 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
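
SSE has no packed 32-bit rotate, so the pshufd/psrlq steps above compute ror 17 and ror 19 of W[-2] two lanes at a time: pshufd duplicates each 32-bit word into both halves of a 64-bit lane, and a 64-bit logical shift right then leaves a 32-bit rotate in the low half of that lane. A rough intrinsics sketch of the idea; the function name and the assumption about the input layout are mine, not the kernel's:

#include <emmintrin.h>	/* SSE2 */

/* in = {B, B, A, A} (high dword to low), as produced by pshufd $0b11111010;
 * the result holds sigma1(A) and sigma1(B) in dwords 0 and 2, garbage in
 * dwords 1 and 3 -- the "{xBxA}" layout from the comments above */
static __m128i sigma1_two_lanes(__m128i in)
{
	__m128i r17 = _mm_srli_epi64(in, 17);	/* low dword of each qword = ror32(x, 17) */
	__m128i r19 = _mm_srli_epi64(in, 19);	/* low dword of each qword = ror32(x, 19) */
	__m128i sh  = _mm_srli_epi32(in, 10);	/* x >> 10 in every dword */

	return _mm_xor_si128(_mm_xor_si128(r17, r19), sh);
}

In the full source, the lines omitted from this match list xor the pieces together and shuffle the two valid dwords into position before adding them into the schedule.
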
273 movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {DDCC}
275 ror $(25-11), y0 # y0 = e >> (25-11)
277 movdqa XTMP2, X0 # X0 = W[-2] {DDCC}
279 xor e, y0 # y0 = e ^ (e >> (25-11))
281 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
282 psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC}
285 psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC}
286 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
289 psrld $10, X0 # X0 = W[-2] >> 10 {DDCC}
291 ror $6, y0 # y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
296 add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH
299 add y2, h # h = h + S1 + CH + k + w
303 add h, d # d = d + h + S1 + CH + k + w
305 paddd XTMP0, X0 # X0 = {W[3], W[2], W[1], W[0]}
307 add y1, h # h = h + S1 + CH + k + w + S0
309 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
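
At this point X0 holds the four freshly computed schedule words {W[3], W[2], W[1], W[0]} for this macro invocation. The recurrence being vectorized is the standard SHA-256 message schedule; a scalar sketch using the helpers from the snippets above (sha256_extend4() is an illustrative name):

static inline uint32_t sigma1(uint32_t x)
{
	return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
}

/* scalar equivalent of one FOUR_ROUNDS_AND_SCHED: extend the schedule by
 * four words starting at index i */
static void sha256_extend4(uint32_t w[], int i)
{
	int j;

	for (j = i; j < i + 4; j++)
		w[j] = sigma1(w[j - 2]) + w[j - 7] +
		       sigma0(w[j - 15]) + w[j - 16];
}

Note that the last two of the four words depend on the first two, which is why the assembly evaluates sigma1 in two passes of two lanes each ({xBxA}, then {xDxC}).
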
318 ror $(25-11), y0 # y0 = e >> (25-11)
320 xor e, y0 # y0 = e ^ (e >> (25-11))
324 ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6))
326 xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
330 ror $6, y0 # y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
335 add offset(%rsp), y2 # y2 = k + w + S1 + CH
337 add y2, h # h = h + S1 + CH + k + w
340 add h, d # d = d + h + S1 + CH + k + w
343 add y1, h # h = h + S1 + CH + k + w + S0
345 add y0, h # h = h + S1 + CH + k + w + S0 + MAJ
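
DO_ROUND gets away with only three ror instructions for S1 because rotation distributes over xor and the rotate amounts telescope: 14 + 5 + 6 = 25 and 5 + 6 = 11. A small C model of exactly that sequence (S1_by_three_rors() is an illustrative name, using the ror32() helper from above):

/* mirrors the ror $(25-11) / xor / ror $(11-6) / xor / ror $6 chain */
static inline uint32_t S1_by_three_rors(uint32_t e)
{
	uint32_t y = ror32(e, 25 - 11);	/* ror32(e, 14)                                   */

	y ^= e;				/* e ^ ror32(e, 14)                               */
	y  = ror32(y, 11 - 6);		/* ror32(e, 5) ^ ror32(e, 19)                     */
	y ^= e;				/* e ^ ror32(e, 5) ^ ror32(e, 19)                 */
	return ror32(y, 6);		/* ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25) = S1 */
}
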