/* xref: /linux/arch/mips/include/asm/bitops.h (revision b889fcf63cb62e7fdb7816565e28f44dbe4a76a5) */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define __INS		"ins    "
#define __EXT		"ext    "
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define __INS		"dins    "
#define __EXT		"dext    "
#endif

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
#define smp_mb__after_clear_bit()	smp_llsc_mb()


/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);


/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
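
/*
 * Illustrative sketch (not part of the original header): a typical caller
 * declares a word-aligned bitmap and passes a bit index plus the bitmap
 * base to set_bit().  The bitmap and flag names below are hypothetical;
 * BITS_TO_LONGS() comes from <linux/bitops.h>.
 */
#if 0	/* example only */
#define EXAMPLE_FLAG_READY	0

static unsigned long example_flags[BITS_TO_LONGS(8)];

static void example_mark_ready(void)
{
	/* Atomically sets bit 0 of example_flags; no barrier is implied. */
	set_bit(EXAMPLE_FLAG_READY, example_flags);
}
#endif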

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}
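
/*
 * Illustrative sketch (not part of the original header): when clear_bit()
 * is used to publish "work done" to other CPUs, the caller supplies the
 * ordering explicitly with the barrier macros defined above.  The function
 * name and bit number are hypothetical.
 */
#if 0	/* example only */
static void example_finish_work(unsigned long *flags)
{
	/* Order all prior stores before bit 0 (a hypothetical flag) is cleared. */
	smp_mb__before_clear_bit();
	clear_bit(0, flags);
	smp_mb__after_clear_bit();
}
#endif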

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_clear_bit();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}
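
/*
 * Illustrative sketch (not part of the original header): change_bit()
 * atomically flips a bit, e.g. to track an alternating state such as a
 * double-buffer index.  The function and state-word names are hypothetical.
 */
#if 0	/* example only */
static void example_swap_buffer(unsigned long *state)
{
	/* Atomically toggles bit 0; whichever value was there is inverted. */
	change_bit(0, state);
}
#endif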

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
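
/*
 * Illustrative sketch (not part of the original header): because
 * test_and_set_bit() returns the old value and is fully ordered, it can
 * ensure one-time initialisation is claimed by exactly one CPU.  The bit
 * number and the example_do_one_time_init() helper are hypothetical.
 */
#if 0	/* example only */
static int example_claim_init(unsigned long *flags)
{
	/* Only the caller that saw the bit as 0 performs the init. */
	if (test_and_set_bit(1, flags))
		return 0;	/* somebody else got here first */

	example_do_one_time_init();
	return 1;
}
#endif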

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit_lock	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_set_bit_lock	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
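
/*
 * Illustrative sketch (not part of the original header): the _lock/_unlock
 * pair implements a simple bit spinlock, acquire on the way in and release
 * on the way out.  The function and lock-word names are hypothetical;
 * cpu_relax() comes from <asm/processor.h>.
 */
#if 0	/* example only */
static void example_bit_lock(unsigned long *lockword)
{
	/* Spin until the old value of bit 0 was 0, i.e. the lock is acquired. */
	while (test_and_set_bit_lock(0, lockword))
		cpu_relax();
}

static void example_bit_unlock(unsigned long *lockword)
{
	/* Release: prior critical-section accesses are ordered before the clear. */
	clear_bit_unlock(0, lockword);
}
#endif
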
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS	"%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
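
/*
 * Illustrative sketch (not part of the original header): test_and_clear_bit()
 * is handy for consuming a "pending" flag exactly once, e.g. in a deferred
 * work handler.  The bit number and the example_process_event() helper are
 * hypothetical.
 */
#if 0	/* example only */
static void example_handle_events(unsigned long *pending)
{
	/* Only act if the bit was still set when we atomically cleared it. */
	if (test_and_clear_bit(2, pending))
		example_process_event();
}
#endif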

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL	"%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
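
/*
 * Illustrative sketch (not part of the original header): the return value
 * of test_and_change_bit() tells the caller which state the bit was in
 * before the atomic toggle.  The function name and bit number are
 * hypothetical.
 */
#if 0	/* example only */
static int example_toggle_led(unsigned long *state)
{
	/* Returns nonzero if the LED bit was on before this call. */
	return test_and_change_bit(3, state);
}
#endif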

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

/*
 * Return the bit position (0..BITS_PER_LONG-1) of the most significant 1
 * bit in a word.  The result is undefined if no 1 bit exists.
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips64					\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
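
/*
 * Illustrative values (not part of the original header): on the generic
 * fallback path the binary search narrows the word in halves, e.g. on a
 * 32-bit kernel __fls(0x00000001) == 0, __fls(0x00008000) == 15 and
 * __fls(0x80000000) == 31.
 */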

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..BITS_PER_LONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
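
/*
 * Illustrative values (not part of the original header): word & -word
 * isolates the lowest set bit (two's complement), so __ffs() reduces to
 * __fls() of a single-bit value, e.g. __ffs(0x18) == 3 because
 * 0x18 & -0x18 == 0x08.
 */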

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__("clz %0, %1" : "=r" (x) : "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
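
/*
 * Illustrative values (not part of the original header): fls() is the
 * 1-based counterpart of __fls(), e.g. fls(0) == 0, fls(0x08) == 4 and
 * fls(0x80000000) == 32; on CPUs with clz it is simply 32 - clz(x).
 */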

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from ffz() (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
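
/*
 * Illustrative values (not part of the original header): ffs() is 1-based
 * and safe to call with 0, while __ffs() is 0-based and undefined for 0,
 * e.g. ffs(0) == 0 and ffs(0x18) == 4, but __ffs(0x18) == 3.
 */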

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */