xref: /linux/arch/arm/lib/csumpartial.S (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
1/*
2 *  linux/arch/arm/lib/csumpartial.S
3 *
4 *  Copyright (C) 1995-1998 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/linkage.h>
11#include <asm/assembler.h>
12
13		.text
14
15/*
16 * Function: __u32 csum_partial(const char *src, int len, __u32 sum)
17 * Params  : r0 = buffer, r1 = len, r2 = checksum
18 * Returns : r0 = new checksum
19 *
20 * Register roles used throughout this file:
 */

21buf	.req	r0	@ source pointer, post-incremented as data is consumed
22len	.req	r1	@ bytes remaining
23sum	.req	r2	@ running accumulator; carries folded back in via adc
24td0	.req	r3	@ temporary data register (caller-clobbered)
25td1	.req	r4	@ callee-saved: save before use
26td2	.req	r5	@ callee-saved: save before use
27td3	.req	lr	@ temporary; usable since lr is pushed on entry
28
		@ len == 0: nothing to add, return the incoming sum unchanged.
29.Lzero:		mov	r0, sum
30		add	sp, sp, #4	@ discard the buf saved by csum_partial's stmfd
31		ldr	pc, [sp], #4	@ pop saved lr straight into pc: return
32
33		/*
34		 * Handle 0 to 7 bytes, with any alignment of source and
35		 * destination pointers.  Note that when we get here, C = 0
36		 */
37.Lless8:		teq	len, #0			@ check for zero count
38		beq	.Lzero
39
40		/* we must have at least one byte. */
41		tst	buf, #1			@ odd address?
42		movne	sum, sum, ror #8
43		ldrneb	td0, [buf], #1
44		subne	len, len, #1
45		adcnes	sum, sum, td0, put_byte_1
46
47.Lless4:		tst	len, #6
48		beq	.Lless8_byte
49
50		/* we are now half-word aligned */
51
52.Lless8_wordlp:
53#if __LINUX_ARM_ARCH__ >= 4
54		ldrh	td0, [buf], #2		@ ARMv4+ can load a halfword directly
55		sub	len, len, #2
56#else
57		ldrb	td0, [buf], #1		@ no ldrh before ARMv4: build the
58		ldrb	td3, [buf], #1		@ halfword from two byte loads
59		sub	len, len, #2
60#ifndef __ARMEB__
61		orr	td0, td0, td3, lsl #8	@ little-endian byte order
62#else
63		orr	td0, td3, td0, lsl #8	@ big-endian byte order
64#endif
65#endif
66		adcs	sum, sum, td0		@ accumulate, folding carry back in
67		tst	len, #6			@ tst leaves C alone for the next adcs
68		bne	.Lless8_wordlp

69
70.Lless8_byte:	tst	len, #1			@ odd number of bytes
71		ldrneb	td0, [buf], #1		@ include last byte
72		adcnes	sum, sum, td0, put_byte_0	@ update checksum

73
74.Ldone:		adc	r0, sum, #0		@ collect up the last carry
75		ldr	td0, [sp], #4		@ pop original buf saved on entry
76		tst	td0, #1			@ check buffer alignment
77		movne	r0, r0, ror #8		@ rotate checksum by 8 bits (undo
						@ the pre-rotation for odd buf)
78		ldr	pc, [sp], #4		@ return
79
		/*
		 * Align buf up to a 32-bit boundary, consuming 1-3 bytes.
		 * Entered via blne from csum_partial with C = 0 and len >= 8;
		 * the conditional adc instructions keep the carry chain intact
		 * for the word loop that follows.  Clobbers ip on pre-ARMv4.
		 */
80.Lnot_aligned:	tst	buf, #1			@ odd address
81		ldrneb	td0, [buf], #1		@ make even
82		subne	len, len, #1
83		adcnes	sum, sum, td0, put_byte_1	@ update checksum

84
85		tst	buf, #2			@ 32-bit aligned?
86#if __LINUX_ARM_ARCH__ >= 4
87		ldrneh	td0, [buf], #2		@ make 32-bit aligned
88		subne	len, len, #2
89#else
90		ldrneb	td0, [buf], #1		@ no ldrh before ARMv4: assemble
91		ldrneb	ip, [buf], #1		@ the halfword from two byte loads
92		subne	len, len, #2
93#ifndef __ARMEB__
94		orrne	td0, td0, ip, lsl #8	@ little-endian byte order
95#else
96		orrne	td0, ip, td0, lsl #8	@ big-endian byte order
97#endif
98#endif
99		adcnes	sum, sum, td0		@ update checksum
100		ret	lr
101
102ENTRY(csum_partial)
103		stmfd	sp!, {buf, lr}		@ save buf for .Ldone's alignment test
104		cmp	len, #8			@ Ensure that we have at least
105		blo	.Lless8			@ 8 bytes to copy.

106
107		tst	buf, #1
108		movne	sum, sum, ror #8	@ odd start: pre-rotate sum so byte
						@ lanes match (undone in .Ldone)

109
110		adds	sum, sum, #0		@ C = 0
111		tst	buf, #3			@ Test destination alignment
112		blne	.Lnot_aligned		@ align destination, return here

113
1141:		bics	ip, len, #31		@ ip = bytes in whole 32-byte chunks
115		beq	3f			@ none: fall through to word loop

116
117		stmfd	sp!, {r4 - r5}		@ td1/td2 are callee-saved
1182:		ldmia	buf!, {td0, td1, td2, td3}	@ unrolled: 8 words (32 bytes)
119		adcs	sum, sum, td0			@ per pass, folding each
120		adcs	sum, sum, td1			@ carry straight back into
121		adcs	sum, sum, td2			@ the accumulator
122		adcs	sum, sum, td3
123		ldmia	buf!, {td0, td1, td2, td3}
124		adcs	sum, sum, td0
125		adcs	sum, sum, td1
126		adcs	sum, sum, td2
127		adcs	sum, sum, td3
128		sub	ip, ip, #32		@ sub, not subs: C must survive
129		teq	ip, #0			@ teq sets Z without touching C
130		bne	2b
131		ldmfd	sp!, {r4 - r5}

132
1333:		tst	len, #0x1c		@ should not change C
134		beq	.Lless4

135
1364:		ldr	td0, [buf], #4		@ sum the 1-7 remaining whole words
137		sub	len, len, #4
138		adcs	sum, sum, td0
139		tst	len, #0x1c
140		bne	4b
141		b	.Lless4			@ then trailing halfword/byte
142ENDPROC(csum_partial)
143