xref: /linux/arch/m68k/include/asm/bitops.h (revision be239684b18e1cdcafcf8c7face4a2f562c745ad)
#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/barrier.h>

/*
 *	Bit access functions vary across the ColdFire and 68k families.
 *	So we will break them out here, and then macro in the ones we want.
 *
 *	ColdFire - supports standard bset/bclr/bchg with register operand only
 *	68000    - supports standard bset/bclr/bchg with memory operand
 *	>= 68020 - also supports the bfset/bfclr/bfchg instructions
 *
 *	Although it is possible to use only bset/bclr/bchg with register
 *	operands on all platforms, doing so generates larger code.
 *	So we use the best form possible on a given platform.
 */

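/*
 *	Worked example of the addressing arithmetic used throughout this
 *	file (an editorial illustration, not from the original source):
 *	bit 0 is the least significant bit of a 32-bit longword, but the
 *	single-bit instructions address bytes. For nr == 5:
 *
 *		(5 ^ 31) / 8 == 26 / 8 == 3	byte 3, the low-order byte
 *						on this big-endian CPU
 *		5 & 7 == 5			bit 5 within that byte
 *
 *	so a "bset #5" on byte 3 sets bit 5 of the longword's value,
 *	i.e. set_bit(5, &map) behaves as map |= 0x20.
 */
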
static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define	set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)
#else
#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bset_mem_set_bit(nr, vaddr) : \
				bfset_mem_set_bit(nr, vaddr))
#endif
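
/*
 *	Editorial note on the dispatch above: with a compile-time
 *	constant nr the plain bset form is chosen, since the byte
 *	address and bit number fold into the instruction; a variable
 *	nr takes the bfset form, which accepts the bit offset in a
 *	data register and avoids computing a byte address at run time.
 *	A sketch, assuming a bitmap word "map":
 *
 *		set_bit(5, &map);	constant nr -> bset_mem_set_bit()
 *		set_bit(n, &map);	variable nr -> bfset_mem_set_bit()
 */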

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	set_bit(nr, addr);
}

static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define	clear_bit(nr, vaddr)	bclr_reg_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	clear_bit(nr, vaddr)	bclr_mem_clear_bit(nr, vaddr)
#else
#define clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bclr_mem_clear_bit(nr, vaddr) : \
				bfclr_mem_clear_bit(nr, vaddr))
#endif

static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	clear_bit(nr, addr);
}

static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define	change_bit(nr, vaddr)	bchg_reg_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	change_bit(nr, vaddr)	bchg_mem_change_bit(nr, vaddr)
#else
#define change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bchg_mem_change_bit(nr, vaddr) : \
				bfchg_mem_change_bit(nr, vaddr))
#endif

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	change_bit(nr, addr);
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

static inline int bset_reg_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bset_mem_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfset_mem_test_and_set_bit(int nr,
					     volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define	test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
#else
#define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bset_mem_test_and_set_bit(nr, vaddr) : \
					bfset_mem_test_and_set_bit(nr, vaddr))
#endif

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

static inline int bclr_reg_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bclr_mem_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfclr_mem_test_and_clear_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define	test_and_clear_bit(nr, vaddr)	bclr_reg_test_and_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_clear_bit(nr, vaddr)	bclr_mem_test_and_clear_bit(nr, vaddr)
#else
#define test_and_clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bclr_mem_test_and_clear_bit(nr, vaddr) : \
					bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif

static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_clear_bit(nr, addr);
}

static inline int bchg_reg_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bchg_mem_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfchg_mem_test_and_change_bit(int nr,
						volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define	test_and_change_bit(nr, vaddr)	bchg_reg_test_and_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_change_bit(nr, vaddr)	bchg_mem_test_and_change_bit(nr, vaddr)
#else
#define test_and_change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bchg_mem_test_and_change_bit(nr, vaddr) : \
					bfchg_mem_test_and_change_bit(nr, vaddr))
#endif

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_change_bit(nr, addr);
}

static inline bool xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("eorl %1, %0"
		: "+m" (*p)
		: "d" (mask)
		: "memory");
	return *p & (1 << 7);
#else
	char result;
	char *cp = (char *)p + 3;	/* m68k is big-endian */

	__asm__ __volatile__ ("eor.b %1, %2; smi %0"
		: "=d" (result)
		: "di" (mask), "o" (*cp)
		: "memory");
	return result;
#endif
}
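
/*
 *	Editorial note, not from the original source: this XORs mask
 *	into *p and reports whether bit 7, the sign bit of the
 *	longword's low-order byte, is set afterwards ("smi" sets the
 *	result when the eor.b leaves the byte negative). The generic
 *	folio-unlock path relies on this to clear PG_locked and test
 *	PG_waiters (bit 7) in one pass; a simplified sketch:
 *
 *		if (xor_unlock_is_negative_byte(1UL << PG_locked, &folio->flags))
 *			wake any waiters sleeping on the folio
 */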

/*
 *	The true 68020 and more advanced processors support the "bfffo"
 *	instruction for finding bits. ColdFire and simple 68000 parts
 *	(including CPU32) do not support this. They simply use the generic
 *	functions.
 */
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#include <asm-generic/bitops/ffz.h>
#else

static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = ~*p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit
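
/*
 *	Worked example (editorial): for a 64-bit map whose words are
 *	{ 0xffffffff, 0xfffffffe }, the first word inverts to zero and
 *	is skipped; the second inverts to 1, "bfffo" on (num & -num)
 *	reports 31 leading zeros, res ^ 31 gives bit 0, and the pointer
 *	correction adds 32, so:
 *
 *		find_first_zero_bit(map, 64) == 32
 */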

static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full bytes for a zero */
	return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit

static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = *p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_bit find_first_bit

static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No one yet, search remaining full bytes for a one */
	return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int res;

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (~word & -~word));
	return res ^ 31;
}
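
/*
 *	Worked example (editorial): ffz(0x0000ffff) inverts the word to
 *	0xffff0000 and isolates its lowest set bit, 0x00010000; "bfffo"
 *	reports 15 leading zeros, so the result is 15 ^ 31 == 16.
 */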

#endif

#ifdef __KERNEL__

#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)

/*
 *	The newer ColdFire family members support a "bitrev" instruction
 *	and we can use that to implement a fast ffs. Older ColdFire parts
 *	and normal 68000 parts don't have anything special, so we use the
 *	generic functions for those.
 */
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
	!defined(CONFIG_M68000)
static inline unsigned long __ffs(unsigned long x)
{
	__asm__ __volatile__ ("bitrev %0; ff1 %0"
		: "=d" (x)
		: "0" (x));
	return x;
}

static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs(x) + 1;
}
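
/*
 *	Worked example (editorial): for __ffs(0x8), "bitrev" maps bit 3
 *	to bit 28 (0x10000000) and "ff1" then counts 3 leading zeros,
 *	so __ffs(0x8) == 3 and ffs(0x8) == 4.
 */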

#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>

#else

/*
 *	ffs: find first bit set. This is defined the same way as
 *	the libc and compiler builtin ffs routines, and therefore
 *	differs in spirit from the ffz above (man ffs).
 */
static inline int ffs(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0:#0},%0"
		: "=d" (cnt)
		: "dm" (x & -x));
	return 32 - cnt;
}

static inline unsigned long __ffs(unsigned long x)
{
	return ffs(x) - 1;
}
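
/*
 *	Worked example (editorial): ffs(0x12) isolates the lowest set
 *	bit (0x12 & -0x12 == 0x2); "bfffo" reports 30 leading zeros,
 *	so ffs(0x12) == 32 - 30 == 2 and __ffs(0x12) == 1.
 */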

/*
 *	fls: find last bit set.
 */
static inline int fls(unsigned int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0,#0},%0"
		: "=d" (cnt)
		: "dm" (x));
	return 32 - cnt;
}

static inline unsigned long __fls(unsigned long x)
{
	return fls(x) - 1;
}
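
/*
 *	Worked example (editorial): fls(0x12) feeds the value straight
 *	to "bfffo"; the highest set bit (bit 4) sits behind 27 leading
 *	zeros, so fls(0x12) == 32 - 27 == 5 and __fls(0x12) == 4.
 */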

#endif

/* Simple test-and-set bit locks */
#define test_and_set_bit_lock	test_and_set_bit
#define clear_bit_unlock	clear_bit
#define __clear_bit_unlock	clear_bit_unlock
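
/*
 *	Illustrative use of the lock forms above (editorial sketch): a
 *	minimal bit spinlock built from these primitives, assuming a
 *	word of flag bits:
 *
 *		while (test_and_set_bit_lock(0, &word))
 *			cpu_relax();
 *		... critical section ...
 *		clear_bit_unlock(0, &word);
 */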

#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/le.h>
#endif /* __KERNEL__ */

#endif /* _M68K_BITOPS_H */