/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H

#include <linux/kernel.h>

/*
 * This is largely generic for little-endian machines, but the
 * optimal byte mask counting is probably going to be something
 * that is architecture-specific. If you have a reliably fast
 * bit count instruction, that might be better than the multiply
 * and shift, for example.
 */
struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
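/*
 * With REPEAT_BYTE() from <linux/kernel.h> these expand to a 0x01 and
 * a 0x80 in every byte of the word: 0x0101010101010101 and
 * 0x8080808080808080 on 64-bit, 0x01010101 and 0x80808080 on 32-bit.
 */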

#ifdef CONFIG_64BIT

/*
 * Jan Achrenius on G+: microoptimized version of
 * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
 * that works for the bytemasks without having to
 * mask them first.
 */
static inline long count_masked_bytes(unsigned long mask)
{
	return mask*0x0001020304050608ul >> 56;
}
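
/*
 * Worked example: for mask == 0x000000000000ffff (first zero byte at
 * byte 2), mask * 0x0001020304050608 == 0x020202020202f9f8, and
 * shifting right by 56 leaves 2 in the low byte.  The multiply folds
 * one term per 0xff byte of the mask into the top byte of the product.
 */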

#else	/* 32-bit case */

/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
static inline long count_masked_bytes(long mask)
{
	/* (0x00000000 0x000000ff 0x0000ffff 0x00ffffff) -> (1 1 2 3) */
	long a = (0x0ff0001+mask) >> 23;
	/* Fix the 1 for the 0x00000000 case */
	return a & mask;
}
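
/*
 * Worked example: for mask == 0x0000ffff, 0x0ff0001 + 0x0000ffff ==
 * 0x1000000, and 0x1000000 >> 23 == 2.  For mask == 0, a comes out as
 * 1 and the final "& mask" turns it back into 0.
 */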

#endif

/* Return nonzero if it has a zero */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
	*bits = mask;
	return mask;
}
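
/*
 * The subtraction borrows out of every zero byte, so the expression
 * ends up with the 0x80 bit set in each byte of 'a' that is zero.
 * Bytes above the first zero byte can pick up stray borrows and set
 * spurious bits, but no bit below the first zero byte is ever set,
 * which is all the helpers below rely on.
 */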

static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
{
	return bits;
}
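
/*
 * On little-endian x86 the mask from has_zero() is already usable, so
 * this is a no-op; the hook exists because other architectures (e.g.
 * big-endian ones) need real work at this step.
 */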

static inline unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}
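
/*
 * "(bits - 1) & ~bits" keeps exactly the bits below the lowest set
 * bit, and ">> 7" stretches that into whole bytes.  Example: bits ==
 * 0x8000 (zero byte at position 1) gives 0x7fff & ~0x8000 == 0x7fff,
 * and 0x7fff >> 7 == 0xff, a one-byte mask.
 */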

/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)

static inline unsigned long find_zero(unsigned long mask)
{
	return count_masked_bytes(mask);
}
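
/*
 * Putting the helpers together, a strlen()-style scan might look like
 * the sketch below (illustrative only, not part of this header; it
 * assumes 'str' is word-aligned and readable up to and including the
 * terminating NUL):
 *
 *	const struct word_at_a_time constants = { WORD_AT_A_TIME_CONSTANTS };
 *	unsigned long data, bits, mask;
 *	size_t len;
 *
 *	for (len = 0; ; len += sizeof(unsigned long)) {
 *		data = *(unsigned long *)(str + len);
 *		if (has_zero(data, &bits, &constants)) {
 *			bits = prep_zero_mask(data, bits, &constants);
 *			mask = create_zero_mask(bits);
 *			return len + find_zero(mask);
 *		}
 *	}
 */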

/*
 * Load an unaligned word from kernel space.
 *
 * In the (very unlikely) case of the word being a page-crosser
 * and the next page not being mapped, take the exception and
 * return zeroes in the non-existing part.
 */
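/*
 * For example, an 8-byte load starting 3 bytes before the end of a
 * mapped page (addr & 7 == 5) can fault; the fixup instead loads the
 * last aligned word of the page and shifts it right by 5 * 8 bits, so
 * the 3 valid bytes land at the low end of the result and the 5 bytes
 * from the unmapped page read back as zeroes.
 */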
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long offset, data;
	unsigned long ret;

	asm_volatile_goto(
		"1:	mov %[mem], %[ret]\n"

		_ASM_EXTABLE(1b, %l[do_exception])

		: [ret] "=r" (ret)
		: [mem] "m" (*(unsigned long *)addr)
		: : do_exception);

	return ret;

do_exception:
	offset = (unsigned long)addr & (sizeof(long) - 1);
	addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1));
	data = *(unsigned long *)addr;
	ret = data >> offset * 8;

	return ret;
}
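
/*
 * Here the exception table entry points the page-fault fixup straight
 * at the C label do_exception, so the fast path carries no error flag
 * at all; this form needs compiler support for output operands on
 * "asm goto" (CONFIG_CC_HAS_ASM_GOTO_OUTPUT).
 */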

#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long offset, data;
	unsigned long ret, err = 0;

	asm(	"1:	mov %[mem], %[ret]\n"
		"2:\n"

		_ASM_EXTABLE_FAULT(1b, 2b)

		: [ret] "=&r" (ret), "+a" (err)
		: [mem] "m" (*(unsigned long *)addr));

	if (unlikely(err)) {
		offset = (unsigned long)addr & (sizeof(long) - 1);
		addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1));
		data = *(unsigned long *)addr;
		ret = data >> offset * 8;
	}

	return ret;
}
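
/*
 * In this fallback the fixup resumes at label 2 with the trap number
 * stored in the register backing "+a" (err), so err is nonzero only
 * when the load actually faulted and the aligned reload runs.
 */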

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#endif /* _ASM_WORD_AT_A_TIME_H */