/* Do not modify. This file is auto-generated from ghash-armv4.pl. */
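/*
 * Summary: GHASH for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>.
 * gcm_gmult_4bit and gcm_ghash_4bit are the scalar, 4-bit table-driven
 * routines; when __ARM_MAX_ARCH__>=7 the NEON routines gcm_init_neon,
 * gcm_gmult_neon and gcm_ghash_neon multiply in GF(2^128) using vmull.p8
 * with Karatsuba splitting (see the inline "Karatsuba" comments below).
 */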
#include "arm_arch.h"

.text
#if defined(__thumb2__) || defined(__clang__)
.syntax	unified
#define ldrplb  ldrbpl
#define ldrneb  ldrbne
#endif
#if defined(__thumb2__)
.thumb
#else
.code	32
#endif

.type	rem_4bit,%object
.align	5
rem_4bit:
.short	0x0000,0x1C20,0x3840,0x2460
.short	0x7080,0x6CA0,0x48C0,0x54E0
.short	0xE100,0xFD20,0xD940,0xC560
.short	0x9180,0x8DA0,0xA9C0,0xB5E0
.size	rem_4bit,.-rem_4bit

.type	rem_4bit_get,%function
rem_4bit_get:
#if defined(__thumb2__)
	adr	r2,rem_4bit
#else
	sub	r2,pc,#8+32	@ &rem_4bit
#endif
	b	.Lrem_4bit_got
	nop
	nop
.size	rem_4bit_get,.-rem_4bit_get

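@ gcm_ghash_4bit: 4-bit table-driven GHASH over the input buffer.
@ Register roles, as inferred from the code below: r0 = Xi (16-byte hash
@ value), r1 = Htable (16 entries of 16 bytes each), r2 = input pointer,
@ r3 = input length in bytes (processed in 16-byte blocks).  rem_4bit is
@ copied to the stack so reduction constants can be fetched with ldrh.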
.globl	gcm_ghash_4bit
.type	gcm_ghash_4bit,%function
.align	4
gcm_ghash_4bit:
#if defined(__thumb2__)
	adr	r12,rem_4bit
#else
	sub	r12,pc,#8+48		@ &rem_4bit
#endif
	add	r3,r2,r3		@ r3 to point at the end
	stmdb	sp!,{r3,r4,r5,r6,r7,r8,r9,r10,r11,lr}		@ save r3/end too

	ldmia	r12,{r4,r5,r6,r7,r8,r9,r10,r11}		@ copy rem_4bit ...
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11}		@ ... to stack

	ldrb	r12,[r2,#15]
	ldrb	r14,[r0,#15]
.Louter:
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4,r5,r6,r7}	@ load Htbl[nlo]
	add	r11,r1,r14
	ldrb	r12,[r2,#14]

	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[sp,r14]		@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	ldrb	r14,[r0,#14]
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16

.Linner:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[sp,r12]		@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r12,[r2,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r8,[r0,r3]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r9,[sp,r14]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
#ifdef	__thumb2__
	it	pl
#endif
	eorpl	r12,r12,r8
	eor	r7,r11,r7,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r9,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Linner

	ldr	r3,[sp,#32]		@ re-load r3/end
	add	r2,r2,#16
	mov	r14,r4
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif
	cmp	r2,r3
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#ifdef __thumb2__
	it	ne
#endif
	ldrneb	r12,[r2,#15]
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

	bne	.Louter

	add	sp,sp,#36
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_ghash_4bit,.-gcm_ghash_4bit

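@ gcm_gmult_4bit: single GF(2^128) multiplication of Xi by H, using the
@ same 4-bit table walk as gcm_ghash_4bit but without folding in input.
@ Register roles, as inferred from the code below: r0 = Xi, r1 = Htable;
@ rem_4bit_get returns the address of rem_4bit in r2.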
.globl	gcm_gmult_4bit
.type	gcm_gmult_4bit,%function
gcm_gmult_4bit:
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	ldrb	r12,[r0,#15]
	b	rem_4bit_get
.Lrem_4bit_got:
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4,r5,r6,r7}	@ load Htbl[nlo]
	ldrb	r12,[r0,#14]

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	and	r14,r12,#0xf0
	eor	r7,r7,r8,lsl#16
	and	r12,r12,#0x0f

.Loop:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[r2,r12]	@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r12,[r0,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Loop
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

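@ gcm_init_neon: precompute the "twisted H" value used by the NEON path.
@ Register roles, as inferred from the code below: r0 = destination for
@ the twisted H, r1 = H (two 64-bit halves).  H is shifted left by one bit
@ and conditionally XORed with the 0xc2....01 constant noted below.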
.globl	gcm_init_neon
.type	gcm_init_neon,%function
.align	4
gcm_init_neon:
	vld1.64	d7,[r1]!		@ load H
	vmov.i8	q8,#0xe1
	vld1.64	d6,[r1]
	vshl.i64	d17,#57
	vshr.u64	d16,#63		@ t0=0xc2....01
	vdup.8	q9,d7[7]
	vshr.u64	d26,d6,#63
	vshr.s8	q9,#7			@ broadcast carry bit
	vshl.i64	q3,q3,#1
	vand	q8,q8,q9
	vorr	d7,d26		@ H<<<=1
	veor	q3,q3,q8		@ twisted H
	vstmia	r0,{q3}

	bx	lr					@ bx lr
.size	gcm_init_neon,.-gcm_init_neon

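@ gcm_gmult_neon: one multiplication of Xi by the twisted H.
@ Register roles, as inferred from the code below: r0 = Xi, r1 = twisted H.
@ It sets r3 = 16 and branches into the shared .Lgmult_neon core that
@ gcm_ghash_neon also uses.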
.globl	gcm_gmult_neon
.type	gcm_gmult_neon,%function
.align	4
gcm_gmult_neon:
	vld1.64	d7,[r0]!		@ load Xi
	vld1.64	d6,[r0]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia	r1,{d26,d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor	d28,d26,d27		@ Karatsuba pre-processing
	mov	r3,#16
	b	.Lgmult_neon
.size	gcm_gmult_neon,.-gcm_gmult_neon

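@ gcm_ghash_neon: GHASH over the input buffer using vmull.p8 multiplies.
@ Register roles, as inferred from the code below: r0 = Xi, r1 = twisted H,
@ r2 = input pointer, r3 = length (16 bytes consumed per .Loop_neon pass).
@ Each block is XORed into Xi, multiplied via the Karatsuba-split
@ polynomial multiply, then reduced (the "1st phase"/"2nd phase" steps).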
.globl	gcm_ghash_neon
.type	gcm_ghash_neon,%function
.align	4
gcm_ghash_neon:
	vld1.64	d1,[r0]!		@ load Xi
	vld1.64	d0,[r0]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia	r1,{d26,d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor	d28,d26,d27		@ Karatsuba pre-processing

.Loop_neon:
	vld1.64	d7,[r2]!		@ load inp
	vld1.64	d6,[r2]!
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	veor	q3,q0			@ inp^=Xi
.Lgmult_neon:
	vext.8	d16, d26, d26, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8	d0, d6, d6, #1	@ B1
	vmull.p8	q0, d26, d0		@ E = A*B1
	vext.8	d18, d26, d26, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8	d22, d6, d6, #2	@ B2
	vmull.p8	q11, d26, d22		@ G = A*B2
	vext.8	d20, d26, d26, #3	@ A3
	veor	q8, q8, q0		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8	d0, d6, d6, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q0, d26, d0		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d6, d6, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d26, d22		@ K = A*B4
	veor	q10, q10, q0		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q0, d26, d6		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q0, q0, q8
	veor	q0, q0, q10
	veor	d6,d6,d7	@ Karatsuba pre-processing
	vext.8	d16, d28, d28, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8	d2, d6, d6, #1	@ B1
	vmull.p8	q1, d28, d2		@ E = A*B1
	vext.8	d18, d28, d28, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8	d22, d6, d6, #2	@ B2
	vmull.p8	q11, d28, d22		@ G = A*B2
	vext.8	d20, d28, d28, #3	@ A3
	veor	q8, q8, q1		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8	d2, d6, d6, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q1, d28, d2		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d6, d6, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d28, d22		@ K = A*B4
	veor	q10, q10, q1		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q1, d28, d6		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q1, q1, q8
	veor	q1, q1, q10
	vext.8	d16, d27, d27, #1	@ A1
	vmull.p8	q8, d16, d7		@ F = A1*B
	vext.8	d4, d7, d7, #1	@ B1
	vmull.p8	q2, d27, d4		@ E = A*B1
	vext.8	d18, d27, d27, #2	@ A2
	vmull.p8	q9, d18, d7		@ H = A2*B
	vext.8	d22, d7, d7, #2	@ B2
	vmull.p8	q11, d27, d22		@ G = A*B2
	vext.8	d20, d27, d27, #3	@ A3
	veor	q8, q8, q2		@ L = E + F
	vmull.p8	q10, d20, d7		@ J = A3*B
	vext.8	d4, d7, d7, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q2, d27, d4		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d7, d7, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d27, d22		@ K = A*B4
	veor	q10, q10, q2		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q2, d27, d7		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q2, q2, q8
	veor	q2, q2, q10
	veor	q1,q1,q0		@ Karatsuba post-processing
	veor	q1,q1,q2
	veor	d1,d1,d2
	veor	d4,d4,d3	@ Xh|Xl - 256-bit result

	@ equivalent of reduction_avx from ghash-x86_64.pl
	vshl.i64	q9,q0,#57		@ 1st phase
	vshl.i64	q10,q0,#62
	veor	q10,q10,q9		@
	vshl.i64	q9,q0,#63
	veor	q10, q10, q9		@
	veor	d1,d1,d20	@
	veor	d4,d4,d21

	vshr.u64	q10,q0,#1		@ 2nd phase
	veor	q2,q2,q0
	veor	q0,q0,q10		@
	vshr.u64	q10,q10,#6
	vshr.u64	q0,q0,#1		@
	veor	q0,q0,q2		@
	veor	q0,q0,q10		@

	subs	r3,#16
	bne	.Loop_neon

#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	sub	r0,#16
	vst1.64	d1,[r0]!		@ write out Xi
	vst1.64	d0,[r0]

	bx	lr					@ bx lr
.size	gcm_ghash_neon,.-gcm_ghash_neon
#endif
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2
