/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define __INS		"ins    "
#define __EXT		"ext    "
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define __INS		"dins    "
#define __EXT		"dext    "
#endif

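/*
 * Illustrative sketch (not part of the original header; the variable names
 * are hypothetical): the macros above split a bit number into a word index
 * and a bit offset within that word.
 *
 *	unsigned long *word = base + (nr >> SZLONG_LOG);	word holding bit nr
 *	unsigned long bit = nr & SZLONG_MASK;			offset inside it
 *
 * With 32-bit longs (SZLONG_LOG == 5, SZLONG_MASK == 31), nr == 70 selects
 * word index 2 and bit 6; with 64-bit longs it selects word index 1, bit 6.
 */
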
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
#define smp_mb__after_clear_bit()	smp_llsc_mb()

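/*
 * Illustrative sketch (hypothetical structure and flag names, not from the
 * original source): when clear_bit() releases something to other CPUs,
 * order the preceding stores with the barrier defined above.
 *
 *	buf->len = n;				fill in the result first
 *	smp_mb__before_clear_bit();
 *	clear_bit(BUF_BUSY, &buf->flags);	then publish "not busy"
 *
 * clear_bit_unlock() below wraps exactly this pattern.
 */
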
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a |= mask;
		raw_local_irq_restore(flags);
	}
}
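
/*
 * Illustrative sketch (hypothetical names, not part of this header): callers
 * treat an array of longs as a bitmap and set flags atomically from any CPU.
 *
 *	DECLARE_BITMAP(state, 64);
 *
 *	set_bit(3, state);		atomic, safe against concurrent updates
 *	__set_bit(4, state);		non-atomic variant, caller serializes
 *
 * Neither call implies a memory barrier; use the test_and_*() forms or the
 * smp_mb__*_clear_bit() helpers when ordering matters.
 */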

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_clear_bit();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
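
/*
 * Illustrative sketch (hypothetical names): because the old value comes back
 * atomically, test_and_set_bit() lets exactly one contender win a race to
 * claim something.
 *
 *	if (!test_and_set_bit(RESOURCE_CLAIMED, &dev_flags)) {
 *		this caller saw the bit clear and now owns the resource
 *	} else {
 *		somebody else already claimed it
 *	}
 *
 * The implied barrier orders the claim against the owner's later accesses.
 */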

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
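
/*
 * Illustrative sketch (hypothetical names): test_and_set_bit_lock() pairs
 * with clear_bit_unlock() to build a simple bit lock.
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &word))
 *		cpu_relax();			spin until the bit was clear
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &word);
 *
 * Acquire ordering on the successful set and release ordering on the clear
 * keep the critical section's accesses inside the lock.
 */
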
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS	"%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
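
/*
 * Illustrative sketch (hypothetical names): test_and_clear_bit() is the
 * usual way to consume a pending flag exactly once.
 *
 *	if (test_and_clear_bit(WORK_PENDING, &pending))
 *		do_work();			only one consumer sees it set
 *
 * The implied barrier orders the consumed flag against the work itself.
 */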

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL	"%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

/*
 * Return the bit position (0..BITS_PER_LONG-1) of the most significant 1 bit
 * in a word.
 * Undefined if no 1 bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips64					\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
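
/*
 * Worked example (illustration only, not from the original source): on a
 * 32-bit kernel __fls(0x00012000) is 16.  The clz path computes
 * 31 - clz(0x00012000) = 31 - 15 = 16; the software fallback narrows the
 * search in 16, 8, 4, 2 and 1 bit steps and arrives at the same value.
 */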

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
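
/*
 * Illustrative note (not from the original source): word & -word isolates
 * the lowest set bit, so feeding it to __fls() yields that bit's index.
 * For example word = 0b101000 gives word & -word = 0b1000, hence
 * __ffs(word) = 3.
 */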

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__("clz %0, %1" : "=r" (x) : "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
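
/*
 * Illustrative note (not from the original source): fls() reports 1-based
 * positions, e.g. fls(0x00000500) = 11 because the highest set bit is
 * bit 10, while __fls()/__ffs() above are 0-based.  The clz path relies on
 * clz(0) being 32, which is how fls(0) still comes out as 0.
 */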

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore it differs in spirit from ffz (man ffs): ffs
 * returns 1-based bit positions and 0 when no bit is set.
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
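
/*
 * Illustrative note (not from the original source): as in __ffs(), the
 * word & -word trick isolates the lowest set bit and fls() reports its
 * 1-based position, e.g. ffs(0x40) = 7 and ffs(0) = 0.
 */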

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */