xref: /linux/arch/x86/lib/atomic64_386_32.S (revision 5d4a2e29fba5b2bef95b96a46b338ec4d76fa4fd)
1/*
2 * atomic64_t for 386/486
3 *
4 * Copyright © 2010  Luca Barbieri
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/linkage.h>
13#include <asm/alternative-asm.h>
14#include <asm/dwarf2.h>
15
/* if you want SMP support, implement these with real spinlocks */

/*
 * LOCK - enter the pseudo-atomic critical section (UP only).
 *
 * Saves the caller's EFLAGS on the stack and disables interrupts; with
 * interrupts masked, a uniprocessor 386/486 cannot be preempted, so the
 * 64-bit sequence that follows executes atomically.  The \reg argument
 * is unused here; it names the register an SMP implementation would use
 * to key a real spinlock.
 */
.macro LOCK reg
	pushfl				# save EFLAGS (including IF)
	CFI_ADJUST_CFA_OFFSET 4
	cli				# mask interrupts for the critical section
.endm
22
/*
 * UNLOCK - leave the critical section: restore the caller's EFLAGS,
 * re-enabling interrupts iff they were enabled on entry to LOCK.
 */
.macro UNLOCK reg
	popfl
	CFI_ADJUST_CFA_OFFSET -4
.endm
27
/*
 * BEGIN - open the definition of atomic64_<func>_386.
 *
 * \reg is the register carrying the atomic64_t pointer; it is bound to
 * the assembler symbol $v so the function bodies below can address the
 * low/high words as ($v)/4($v).  Emits the ENTRY/CFI prologue and
 * enters the interrupts-off critical section via LOCK.
 *
 * Three helper macros are (re)defined for each function:
 *   RETURN - leave the critical section (UNLOCK) and return
 *   END_   - close the function *without* emitting a RETURN, for bodies
 *            (add_unless, inc_not_zero) that contain their own RETURNs
 *   END    - the common case: RETURN followed by END_
 * END_ purges all three so the next BEGIN can define them afresh.
 */
.macro BEGIN func reg
$v = \reg

ENTRY(atomic64_\func\()_386)
	CFI_STARTPROC
	LOCK $v

.macro RETURN
	UNLOCK $v
	ret
.endm

.macro END_
	CFI_ENDPROC
ENDPROC(atomic64_\func\()_386)
.purgem RETURN
.purgem END_
.purgem END
.endm

.macro END
RETURN
END_
.endm
.endm
53
/*
 * read:  %edx:%eax = *v
 *   in:  %ecx = v (pointer to the atomic64_t)
 * Both words are loaded inside the interrupts-off window, so the pair
 * is a consistent 64-bit snapshot (UP only).
 */
BEGIN read %ecx
	movl  ($v), %eax		# low 32 bits
	movl 4($v), %edx		# high 32 bits
END
58
/*
 * set:  *v = %ecx:%ebx  (high:low)
 *   in: %esi = v, %ebx = new low word, %ecx = new high word
 */
BEGIN set %esi
	movl %ebx,  ($v)		# low 32 bits
	movl %ecx, 4($v)		# high 32 bits
END
63
/*
 * xchg:  %edx:%eax = *v;  *v = %ecx:%ebx
 *   in:  %esi = v, %ebx = new low word, %ecx = new high word
 *   out: %edx:%eax = old value
 */
BEGIN xchg %esi
	movl  ($v), %eax		# fetch old value ...
	movl 4($v), %edx
	movl %ebx,  ($v)		# ... then store the new one
	movl %ecx, 4($v)
END
70
/*
 * add:  *v += %edx:%eax
 *   in: %ecx = v, %eax = addend low word, %edx = addend high word
 * adcl must immediately follow addl so the carry out of the low word
 * propagates into the high word.
 */
BEGIN add %ecx
	addl %eax,  ($v)
	adcl %edx, 4($v)
END
75
/*
 * add_return:  %edx:%eax = (*v += %edx:%eax)
 *   in:  %ecx = v, %edx:%eax = addend
 *   out: %edx:%eax = new value (also stored back to *v)
 */
BEGIN add_return %ecx
	addl  ($v), %eax		# sum in registers first ...
	adcl 4($v), %edx
	movl %eax,  ($v)		# ... then write it back
	movl %edx, 4($v)
END
82
/*
 * sub:  *v -= %edx:%eax
 *   in: %ecx = v, %edx:%eax = subtrahend
 * sbbl consumes the borrow produced by subl on the low word.
 */
BEGIN sub %ecx
	subl %eax,  ($v)
	sbbl %edx, 4($v)
END
87
/*
 * sub_return:  %edx:%eax = (*v -= %edx:%eax)
 *   in:  %ecx = v, %edx:%eax = value to subtract
 *   out: %edx:%eax = new value
 * The operand is first negated 64-bit-wise: negl %eax sets CF when the
 * low word was non-zero, and sbbl folds that borrow into the (already
 * negated) high word.  The code then falls into the add_return sequence.
 */
BEGIN sub_return %ecx
	negl %edx			# 64-bit negate of %edx:%eax
	negl %eax
	sbbl $0, %edx
	addl  ($v), %eax		# now it is just an add_return
	adcl 4($v), %edx
	movl %eax,  ($v)
	movl %edx, 4($v)
END
97
/*
 * inc:  ++*v
 *   in: %esi = v
 * addl/adcl rather than incl: incl does not set CF, so it could not
 * carry into the high word.
 */
BEGIN inc %esi
	addl $1,  ($v)
	adcl $0, 4($v)
END
102
/*
 * inc_return:  %edx:%eax = ++*v
 *   in:  %esi = v
 *   out: %edx:%eax = new value (also stored back to *v)
 */
BEGIN inc_return %esi
	movl  ($v), %eax
	movl 4($v), %edx
	addl $1, %eax			# 64-bit increment in registers
	adcl $0, %edx
	movl %eax,  ($v)
	movl %edx, 4($v)
END
111
/*
 * dec:  --*v
 *   in: %esi = v
 * subl/sbbl rather than decl: decl does not set CF, so the borrow could
 * not propagate into the high word.
 */
BEGIN dec %esi
	subl $1,  ($v)
	sbbl $0, 4($v)
END
116
/*
 * dec_return:  %edx:%eax = --*v
 *   in:  %esi = v
 *   out: %edx:%eax = new value (also stored back to *v)
 */
BEGIN dec_return %esi
	movl  ($v), %eax
	movl 4($v), %edx
	subl $1, %eax			# 64-bit decrement in registers
	sbbl $0, %edx
	movl %eax,  ($v)
	movl %edx, 4($v)
END
125
/*
 * add_unless:  if (*v != u) { *v += a; return 1; } else return 0;
 *   in:  %ecx = v, %edx:%eax = a (high:low),
 *        %edi:%esi = u (high:low) -- roles inferred from the compare
 *        below; confirm against the atomic64_32.h caller
 *   out: %eax = 1 if the add was performed, 0 if *v equalled u
 * Trick: instead of comparing *v with u directly, both are biased by a
 * (comparing u+a against *v+a) -- equality is preserved and no extra
 * registers are needed to keep an unbiased copy of *v.
 * Closed with END_ because both exit paths emit their own RETURN.
 */
BEGIN add_unless %ecx
	addl %eax, %esi			# %edi:%esi = u + a
	adcl %edx, %edi
	addl  ($v), %eax		# %edx:%eax = *v + a
	adcl 4($v), %edx
	cmpl %eax, %esi			# low words equal? maybe u == *v
	je 3f
1:
	movl %eax,  ($v)		# differ: store the sum ...
	movl %edx, 4($v)
	movl $1, %eax			# ... and report success
2:
RETURN
3:
	cmpl %edx, %edi			# high words equal too? then u == *v
	jne 1b				# high words differ: do the add
	xorl %eax, %eax			# u == *v: report "not added"
	jmp 2b
END_
145
/*
 * inc_not_zero:  if (*v != 0) { ++*v; return 1; } else return 0;
 *   in:  %esi = v
 *   out: %eax = 1 if incremented, 0 if *v was zero
 * On the 3f path %eax already holds the (zero) low word, so when the
 * high word is also zero it can be returned unchanged as the 0 result.
 * Closed with END_ because both exit paths emit their own RETURN.
 */
BEGIN inc_not_zero %esi
	movl  ($v), %eax
	movl 4($v), %edx
	testl %eax, %eax		# low word zero?
	je 3f
1:
	addl $1, %eax			# 64-bit increment (addl/adcl:
	adcl $0, %edx			# incl would not set CF)
	movl %eax,  ($v)
	movl %edx, 4($v)
	movl $1, %eax			# incremented -> return 1
2:
RETURN
3:
	testl %edx, %edx		# high word non-zero -> *v != 0
	jne 1b
	jmp 2b				# *v == 0: %eax is already 0
END_
164
/*
 * dec_if_positive:  tmp = *v - 1; if (tmp >= 0) *v = tmp;
 *   in:  %esi = v
 *   out: %edx:%eax = *v - 1 (returned even when the store is skipped)
 * js tests the sign of the 64-bit result via the high word left by
 * sbbl; a negative result means the decrement must not be written back.
 */
BEGIN dec_if_positive %esi
	movl  ($v), %eax
	movl 4($v), %edx
	subl $1, %eax			# 64-bit decrement in registers
	sbbl $0, %edx
	js 1f				# result went negative: skip the store
	movl %eax,  ($v)
	movl %edx, 4($v)
1:
END
175