/* GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see http://www.gnu.org/licenses
 *
 * Please visit http://www.xyratex.com/contact if you need additional
 * information or have any questions.
 *
 * GPL HEADER END
 */

/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Use the hardware-provided PCLMULQDQ instruction to accelerate the CRC32
 * calculation.
 * CRC32 polynomial: 0x04C11DB7 (BE) / 0xEDB88320 (LE)
 * PCLMULQDQ is a carry-less multiplication instruction introduced with the
 * Intel CLMUL extension; the reference can be found at:
 * http://www.intel.com/products/processor/manuals/
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 * Volume 2B: Instruction Set Reference, N-Z
 *
 * Authors:   Gregory Prestas <Gregory_Prestas@us.xyratex.com>
 *	      Alexander Boyko <Alexander_Boyko@xyratex.com>
 */

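/*
 * For reference only (an editor's sketch, not part of the original
 * Xyratex code): a minimal bitwise C model of the little-endian CRC32
 * this routine computes, using the reflected polynomial 0xEDB88320
 * noted above.  As with the kernel's crc32_le(), any initial/final
 * inversion is the caller's responsibility.
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static uint32_t crc32_le_ref(uint32_t crc, const uint8_t *p,
 *				     size_t len)
 *	{
 *		while (len--) {
 *			crc ^= *p++;
 *			for (int i = 0; i < 8; i++)
 *				crc = (crc >> 1) ^
 *				      ((crc & 1) ? 0xEDB88320u : 0);
 *		}
 *		return crc;
 *	}
 */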
#include <linux/linkage.h>
#include <asm/inst.h>


.align 16
/*
 * [(x4*128+32 mod P(x) << 32)]'  << 1   = 0x154442bd4
 * #define CONSTANT_R1  0x154442bd4LL
 *
 * [(x4*128-32 mod P(x) << 32)]' << 1   = 0x1c6e41596
 * #define CONSTANT_R2  0x1c6e41596LL
 */
.Lconstant_R2R1:
	.octa 0x00000001c6e415960000000154442bd4
/*
 * [(x128+32 mod P(x) << 32)]'   << 1   = 0x1751997d0
 * #define CONSTANT_R3  0x1751997d0LL
 *
 * [(x128-32 mod P(x) << 32)]'   << 1   = 0x0ccaa009e
 * #define CONSTANT_R4  0x0ccaa009eLL
 */
.Lconstant_R4R3:
	.octa 0x00000000ccaa009e00000001751997d0
/*
 * [(x64 mod P(x) << 32)]'       << 1   = 0x163cd6124
 * #define CONSTANT_R5  0x163cd6124LL
 */
.Lconstant_R5:
	.octa 0x00000000000000000000000163cd6124
.Lconstant_mask32:
	.octa 0x000000000000000000000000FFFFFFFF
/*
 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
 *
 * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))` = 0x1F7011641LL
 * #define CONSTANT_RU  0x1F7011641LL
 */
.Lconstant_RUpoly:
	.octa 0x00000001F701164100000001DB710641

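/*
 * Editor's note (not part of the original source): the remainders quoted
 * in the comments above can be reproduced with a short C sketch.
 * xnmodp() computes x^n mod P(x) over GF(2) for P = 0x104C11DB7; a table
 * entry is then the bit-reflected remainder shifted left by one, which
 * is what the [...]' << 1 notation above denotes.
 *
 *	#include <stdint.h>
 *
 *	static uint32_t xnmodp(unsigned int n)	// x^n mod P(x)
 *	{
 *		uint64_t r = 1;			// start from x^0
 *
 *		while (n--) {
 *			r <<= 1;		// multiply by x
 *			if (r & (1ULL << 32))
 *				r ^= 0x104C11DB7ULL;	// reduce mod P
 *		}
 *		return (uint32_t)r;
 *	}
 *
 *	static uint32_t bitrev32(uint32_t v)
 *	{
 *		uint32_t r = 0;
 *
 *		for (int i = 0; i < 32; i++)
 *			r = (r << 1) | ((v >> i) & 1);
 *		return r;
 *	}
 *
 *	// e.g. CONSTANT_R1:
 *	//	((uint64_t)bitrev32(xnmodp(4*128+32)) << 1) == 0x154442bd4
 */
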
#define CONSTANT %xmm0

#ifdef __x86_64__
#define BUF     %rdi
#define LEN     %rsi
#define CRC     %edx
#else
#define BUF     %eax
#define LEN     %edx
#define CRC     %ecx
#endif

.text
/**
 *      Calculate CRC32
 *      BUF - buffer pointer (16-byte aligned)
 *      LEN - buffer length in bytes (a multiple of 16, must be greater than 63)
 *      CRC - initial CRC32 value
 *      return CRC32 value in %eax
 *      uint crc32_pclmul_le_16(unsigned char const *buffer,
 *                              size_t len, uint crc32)
 */

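/*
 * Typical use from C (editor's illustration; the actual caller is the
 * glue code in arch/x86/crypto/crc32-pclmul_glue.c): only the aligned,
 * 16-byte-multiple middle of a buffer goes through this routine; any
 * unaligned head or short tail is handled by a scalar fallback such as
 * crc32_le().
 *
 *	uint crc32_pclmul_le_16(unsigned char const *buffer,
 *				size_t len, uint crc32);
 *
 *	// nbytes = len & ~15UL;    assumed >= 64 and 16-byte aligned
 *	// crc = crc32_pclmul_le_16(buf, nbytes, crc);
 *	// crc = crc32_le(crc, buf + nbytes, len & 15);  // scalar tail
 */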
ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16-byte aligned */
	movdqa  (BUF), %xmm1
	movdqa  0x10(BUF), %xmm2
	movdqa  0x20(BUF), %xmm3
	movdqa  0x30(BUF), %xmm4
	movd    CRC, CONSTANT
	pxor    CONSTANT, %xmm1	/* XOR the initial CRC into the first block */
	sub     $0x40, LEN
	add     $0x40, BUF
#ifndef __x86_64__
	/*
	 * Position-independent code (-fPIC) support for 32-bit: use the
	 * call/pop idiom to get the runtime address of "delta" in %ecx.
	 */
	call    delta
delta:
	pop     %ecx
#endif
	cmp     $0x40, LEN
	jb      less_64

#ifdef __x86_64__
	movdqa .Lconstant_R2R1(%rip), CONSTANT
#else
	movdqa .Lconstant_R2R1 - delta(%ecx), CONSTANT
#endif

loop_64:	/* fold 64 bytes (a full cache line) per iteration */
	prefetchnta    0x40(BUF)
	movdqa  %xmm1, %xmm5
	movdqa  %xmm2, %xmm6
	movdqa  %xmm3, %xmm7
#ifdef __x86_64__
	movdqa  %xmm4, %xmm8
#endif
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x00, CONSTANT, %xmm2
	PCLMULQDQ 0x00, CONSTANT, %xmm3
#ifdef __x86_64__
	PCLMULQDQ 0x00, CONSTANT, %xmm4
#endif
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	PCLMULQDQ 0x11, CONSTANT, %xmm6
	PCLMULQDQ 0x11, CONSTANT, %xmm7
#ifdef __x86_64__
	PCLMULQDQ 0x11, CONSTANT, %xmm8
#endif
	pxor    %xmm5, %xmm1
	pxor    %xmm6, %xmm2
	pxor    %xmm7, %xmm3
#ifdef __x86_64__
	pxor    %xmm8, %xmm4
#else
	/* %xmm8 is not available in 32-bit mode; reuse %xmm5 for %xmm4 */
	movdqa  %xmm4, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm4
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm4
#endif

	pxor    (BUF), %xmm1
	pxor    0x10(BUF), %xmm2
	pxor    0x20(BUF), %xmm3
	pxor    0x30(BUF), %xmm4

	sub     $0x40, LEN
	add     $0x40, BUF
	cmp     $0x40, LEN
	jge     loop_64
less_64:	/* fold the four 128-bit accumulators down to one */
#ifdef __x86_64__
	movdqa  .Lconstant_R4R3(%rip), CONSTANT
#else
	movdqa  .Lconstant_R4R3 - delta(%ecx), CONSTANT
#endif
	prefetchnta     (BUF)

	/* fold %xmm1 forward and merge it into %xmm2 */
	movdqa  %xmm1, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    %xmm2, %xmm1

	/* ... then into %xmm3 */
	movdqa  %xmm1, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    %xmm3, %xmm1

	/* ... then into %xmm4 */
	movdqa  %xmm1, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    %xmm4, %xmm1

	cmp     $0x10, LEN
	jb      fold_64
loop_16:	/* fold the remaining 16-byte blocks into the accumulator */
	movdqa  %xmm1, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor    %xmm5, %xmm1
	pxor    (BUF), %xmm1
	sub     $0x10, LEN
	add     $0x10, BUF
	cmp     $0x10, LEN
	jge     loop_16

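/*
 * Editor's sketch of the two reduction steps below, with the 128-bit
 * accumulator viewed as hi:lo and clmul64() as defined earlier:
 *
 *	u128 v = clmul64(lo, 0x0ccaa009e) ^ hi;			 // R4 fold
 *	u128 w = clmul64((uint32_t)v, 0x163cd6124) ^ (v >> 32);	 // R5 fold
 *
 * leaving a 64-bit remainder w for the Barrett reduction that follows.
 */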
fold_64:
	/*
	 * Perform the last 64-bit fold; this also appends 32 zero bits
	 * to the input stream.
	 */
	PCLMULQDQ 0x01, %xmm1, CONSTANT	/* CONSTANT = R4 * xmm1.low */
	psrldq  $0x08, %xmm1
	pxor    CONSTANT, %xmm1

	/* final 32-bit fold with R5 */
	movdqa  %xmm1, %xmm2
#ifdef __x86_64__
	movdqa  .Lconstant_R5(%rip), CONSTANT
	movdqa  .Lconstant_mask32(%rip), %xmm3
#else
	movdqa  .Lconstant_R5 - delta(%ecx), CONSTANT
	movdqa  .Lconstant_mask32 - delta(%ecx), %xmm3
#endif
	psrldq  $0x04, %xmm2
	pand    %xmm3, %xmm1
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	pxor    %xmm2, %xmm1

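/*
 * The Barrett step in C terms (editor's sketch, reusing clmul64() from
 * above): with mu = CONSTANT_RU and p = CRCPOLY_TRUE_LE_FULL, the
 * 64-bit remainder r reduces to the final 32-bit CRC as
 *
 *	static uint32_t barrett32(uint64_t r)
 *	{
 *		const uint64_t mu = 0x1F7011641ULL;
 *		const uint64_t p  = 0x1DB710641ULL;
 *		uint64_t t = (uint64_t)clmul64(r & 0xFFFFFFFF, mu)
 *				& 0xFFFFFFFF;		// PCLMULQDQ 0x10
 *		return (uint64_t)(clmul64(t, p) ^ r) >> 32; // 0x00 + PEXTRD
 *	}
 */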
	/* finish with the bit-reflected Barrett reduction, 64 -> 32 bits */
#ifdef __x86_64__
	movdqa  .Lconstant_RUpoly(%rip), CONSTANT
#else
	movdqa  .Lconstant_RUpoly - delta(%ecx), CONSTANT
#endif
	movdqa  %xmm1, %xmm2
	pand    %xmm3, %xmm1
	PCLMULQDQ 0x10, CONSTANT, %xmm1
	pand    %xmm3, %xmm1
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	pxor    %xmm2, %xmm1
	PEXTRD  0x01, %xmm1, %eax

	ret
ENDPROC(crc32_pclmul_le_16)