/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define __INS		"ins	"
#define __EXT		"ext	"
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define __INS		"dins	 "
#define __EXT		"dext	 "
#endif

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);


/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
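
/*
 * Example (illustrative only; "my_flags" and MY_READY_BIT are assumed
 * caller-side names, not part of this header):
 *
 *	static volatile unsigned long my_flags;
 *	#define MY_READY_BIT	0
 *
 *	set_bit(MY_READY_BIT, &my_flags);	(atomic read-modify-write of bit 0)
 */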

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}
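
/*
 * Example (illustrative only; "my_flags" and MY_READY_BIT are assumed
 * caller-side names): clearing a bit with the extra ordering the comment
 * above asks for when the bit is used in a lock-like way.
 *
 *	smp_mb__before_atomic();
 *	clear_bit(MY_READY_BIT, &my_flags);
 */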

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}
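
/*
 * Example (illustrative only; MY_POLARITY_BIT is an assumed caller-side
 * name):
 *
 *	change_bit(MY_POLARITY_BIT, &my_flags);	(atomically toggles the bit)
 */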

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
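
/*
 * Example (illustrative only; MY_PENDING_BIT is an assumed caller-side
 * name): claim a piece of work exactly once; the implied barrier orders
 * the claim against the work that follows.
 *
 *	if (test_and_set_bit(MY_PENDING_BIT, &my_flags))
 *		return;		(somebody else already claimed it)
 */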

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit_lock	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL "%0, %1	# test_and_set_bit_lock	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
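
/*
 * Example (illustrative only; MY_LOCK_BIT is an assumed caller-side name):
 * a simple bit spinlock built from test_and_set_bit_lock() and
 * clear_bit_unlock(), assuming cpu_relax() is available to the caller.
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_flags))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &my_flags);
 */
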
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
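
/*
 * Example (illustrative only; MY_PENDING_BIT and handle_pending_work() are
 * assumed caller-side names): consume a pending flag exactly once.
 *
 *	if (test_and_clear_bit(MY_PENDING_BIT, &my_flags))
 *		handle_pending_work();
 */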

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	" __LL	"%0, %1 # test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
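
/*
 * Example (illustrative only; MY_POLARITY_BIT is an assumed caller-side
 * name): toggle a bit and learn its previous state in one atomic step.
 *
 *	int was_set = test_and_change_bit(MY_POLARITY_BIT, &my_flags);
 */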

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic, but the smp_mb() issued before the
 * store still provides release semantics. It can be used for an unlock if
 * no other CPUs can concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}
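
/*
 * Example (illustrative only): the non-atomic unlock is safe only when no
 * other CPU can write the same word concurrently, e.g. when every other
 * bit in "my_flags" is touched only by the current lock holder.
 *
 *	__clear_bit_unlock(MY_LOCK_BIT, &my_flags);
 */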

/*
 * Return the bit position (0..BITS_PER_LONG-1) of the most significant
 * 1 bit in a word.  The result is undefined if no 1 bit exists.
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips64					\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
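
/*
 * Examples (illustrative only):
 *
 *	__fls(1UL)		== 0
 *	__fls(0x80000000UL)	== 31	(and 1UL << 63 gives 63 on 64-bit)
 */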

/*
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Returns 0..BITS_PER_LONG-1.
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
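
/*
 * Examples (illustrative only; the caller must ensure the argument is
 * non-zero):
 *
 *	__ffs(1UL)	== 0
 *	__ffs(0x8UL)	== 3
 */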

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from ffz() (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
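
/*
 * Examples (illustrative only): unlike __ffs(), ffs() is 1-based and
 * accepts zero.
 *
 *	ffs(0)		== 0
 *	ffs(1)		== 1
 *	ffs(0x8000)	== 16
 */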

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */