// SPDX-License-Identifier: GPL-2.0-only
#include <byteswap.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#ifdef be32toh
/*
 * If libc provides le{16,32,64}toh() then we'll use them. The be*toh()
 * and le*toh() helpers are normally defined together as macros in
 * <endian.h>, so testing be32toh is enough to detect them.
 */
#elif BYTE_ORDER == LITTLE_ENDIAN
# define le16toh(x)	(x)
# define le32toh(x)	(x)
# define le64toh(x)	(x)
#elif BYTE_ORDER == BIG_ENDIAN
# define le16toh(x)	bswap_16(x)
# define le32toh(x)	bswap_32(x)
# define le64toh(x)	bswap_64(x)
#endif

/* MIPS opcodes, in bits 31:26 of an instruction */
#define OP_SPECIAL	0x00
#define OP_REGIMM	0x01
#define OP_BEQ		0x04
#define OP_BNE		0x05
#define OP_BLEZ		0x06
#define OP_BGTZ		0x07
#define OP_BEQL		0x14
#define OP_BNEL		0x15
#define OP_BLEZL	0x16
#define OP_BGTZL	0x17
#define OP_LL		0x30
#define OP_LLD		0x34
#define OP_SC		0x38
#define OP_SCD		0x3c

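/*
 * Worked example (illustrative, not taken from any kernel binary): the
 * instruction "ll $t0, 0($a0)" encodes as 0xc0880000 - opcode 0x30
 * (OP_LL) in bits 31:26, base register 4 ($a0) in bits 25:21, rt
 * register 8 ($t0) in bits 20:16 and a zero 16-bit offset. The helpers
 * below only ever inspect bits 31:26, i.e. 0xc0880000 >> 26 == 0x30.
 */
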
/* Bits 20:16 of OP_REGIMM instructions */
#define REGIMM_BLTZ	0x00
#define REGIMM_BGEZ	0x01
#define REGIMM_BLTZL	0x02
#define REGIMM_BGEZL	0x03
#define REGIMM_BLTZAL	0x10
#define REGIMM_BGEZAL	0x11
#define REGIMM_BLTZALL	0x12
#define REGIMM_BGEZALL	0x13

/* Bits 5:0 of OP_SPECIAL instructions */
#define SPECIAL_SYNC	0x0f

static void usage(FILE *f)
{
	fprintf(f, "Usage: loongson3-llsc-check /path/to/vmlinux\n");
}

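/*
 * Illustrative invocation (the path is an example, not required by the
 * tool itself):
 *
 *   $ loongson3-llsc-check vmlinux
 *   loongson3-llsc-check returns success
 *
 * Violations are reported on stderr together with the virtual address of
 * the offending instruction, and the tool then exits with EXIT_FAILURE.
 */
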
/* Sign-extend a 16-bit immediate to a host int */
static int se16(uint16_t x)
{
	return (int16_t)x;
}

/* True for the 32- & 64-bit load-linked instructions, LL & LLD */
static bool is_ll(uint32_t insn)
{
	switch (insn >> 26) {
	case OP_LL:
	case OP_LLD:
		return true;

	default:
		return false;
	}
}

/* True for the 32- & 64-bit store-conditional instructions, SC & SCD */
static bool is_sc(uint32_t insn)
{
	switch (insn >> 26) {
	case OP_SC:
	case OP_SCD:
		return true;

	default:
		return false;
	}
}

/* True for any SYNC instruction, regardless of its stype field */
static bool is_sync(uint32_t insn)
{
	/*
	 * Bits 31:11 must all be zero: opcode SPECIAL (0x00) with no
	 * register operands. The stype field in bits 10:6 may hold any
	 * value.
	 */
	if (insn >> 11)
		return false;

	/* Bits 5:0 select the SYNC encoding of OP_SPECIAL */
	if ((insn & 0x3f) != SPECIAL_SYNC)
		return false;

	return true;
}

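/*
 * For illustration: a plain "sync" (stype 0) is the word 0x0000000f and
 * "sync 0x10" (stype 0x10) is 0x0000040f; both satisfy is_sync() above
 * since only bits 10:6 differ.
 */
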
/*
 * True for the conditional branches; *off receives the branch offset in
 * instruction words, relative to the branch itself.
 */
static bool is_branch(uint32_t insn, int *off)
{
	switch (insn >> 26) {
	case OP_BEQ:
	case OP_BEQL:
	case OP_BNE:
	case OP_BNEL:
	case OP_BGTZ:
	case OP_BGTZL:
	case OP_BLEZ:
	case OP_BLEZL:
		/*
		 * The architectural offset is relative to the delay slot;
		 * adding 1 makes it relative to the branch instruction.
		 */
		*off = se16(insn) + 1;
		return true;

	case OP_REGIMM:
		switch ((insn >> 16) & 0x1f) {
		case REGIMM_BGEZ:
		case REGIMM_BGEZL:
		case REGIMM_BGEZAL:
		case REGIMM_BGEZALL:
		case REGIMM_BLTZ:
		case REGIMM_BLTZL:
		case REGIMM_BLTZAL:
		case REGIMM_BLTZALL:
			*off = se16(insn) + 1;
			return true;

		default:
			return false;
		}

	default:
		return false;
	}
}

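/*
 * Worked example (illustrative): the word 0x1500fffd is a BNE comparing
 * $t0 against $zero, with a 16-bit offset field of 0xfffd (-3).
 * is_branch() reports off = -2, i.e. the target lies two instruction
 * words before the branch itself.
 */
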
static int check_ll(uint64_t pc, uint32_t *code, size_t sz)
{
	ssize_t i, max, sc_pos;
	int off;

	/*
	 * Every LL must be preceded by a sync instruction in order to ensure
	 * that instruction reordering doesn't allow a prior memory access to
	 * execute after the LL & cause erroneous results.
	 *
	 * Reading code[-1] is safe because check_code() never calls us for
	 * the first instruction of a section.
	 */
	if (!is_sync(le32toh(code[-1]))) {
		fprintf(stderr, "%" PRIx64 ": LL not preceded by sync\n", pc);
		return -EINVAL;
	}

	/* Find the matching SC instruction */
	max = sz / 4;
	for (sc_pos = 0; sc_pos < max; sc_pos++) {
		if (is_sc(le32toh(code[sc_pos])))
			break;
	}
	if (sc_pos >= max) {
		fprintf(stderr, "%" PRIx64 ": LL has no matching SC\n", pc);
		return -EINVAL;
	}

	/*
	 * Check that branches within the LL/SC loop target sync instructions,
	 * ensuring that speculative execution can't generate memory accesses
	 * due to instructions outside of the loop.
	 */
	for (i = 0; i < sc_pos; i++) {
		if (!is_branch(le32toh(code[i]), &off))
			continue;

		/*
		 * If the branch target is within the LL/SC loop then we don't
		 * need to worry about it.
		 */
		if ((off >= -i) && (off <= sc_pos))
			continue;

		/* If the branch targets a sync instruction we're all good... */
		if (is_sync(le32toh(code[i + off])))
			continue;

		/* ...but if not, we have a problem */
		fprintf(stderr, "%" PRIx64 ": Branch target not a sync\n",
			pc + (i * 4));
		return -EINVAL;
	}

	return 0;
}

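/*
 * For illustration only (hand-written, not taken from the kernel sources),
 * a loop that passes these checks looks like:
 *
 *	   sync
 *	1: ll    t0, 0(a0)
 *	   bnez  t0, 2f         # exits the loop, so it must target a sync
 *	    nop
 *	   sc    t1, 0(a0)
 *	   beqz  t1, 1b
 *	    nop
 *	   ...
 *	2: sync
 *
 * check_ll() would reject the same loop if the leading sync were missing,
 * if no SC followed the LL, or if the bnez targeted anything other than a
 * sync instruction.
 */
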
static int check_code(uint64_t pc, uint32_t *code, size_t sz)
{
	int err = 0;

	if (sz % 4) {
		fprintf(stderr, "%" PRIx64 ": Section size not a multiple of 4\n",
			pc);
		err = -EINVAL;
		sz -= (sz % 4);
	}

	/* Nothing to scan in an empty (or truncated-to-empty) section */
	if (!sz)
		return err;

	if (is_ll(le32toh(code[0]))) {
		fprintf(stderr, "%" PRIx64 ": First instruction in section is an LL\n",
			pc);
		err = -EINVAL;
	}

/* Step forward by one instruction word */
#define advance() (	\
	code++,		\
	pc += 4,	\
	sz -= 4		\
)

	/*
	 * Skip the first instruction, allowing check_ll to look backwards
	 * unconditionally.
	 */
	advance();

	/* Now scan through the code looking for LL instructions */
	for (; sz; advance()) {
		if (is_ll(le32toh(code[0])))
			err |= check_ll(pc, code, sz);
	}

	return err;
}

int main(int argc, char *argv[])
{
	int vmlinux_fd, status, err, i;
	const char *vmlinux_path;
	struct stat st;
	Elf64_Ehdr *eh;
	Elf64_Shdr *sh;
	void *vmlinux;

	status = EXIT_FAILURE;

	if (argc < 2) {
		usage(stderr);
		goto out_ret;
	}

	vmlinux_path = argv[1];
	vmlinux_fd = open(vmlinux_path, O_RDONLY);
	if (vmlinux_fd == -1) {
		perror("Unable to open vmlinux");
		goto out_ret;
	}

	err = fstat(vmlinux_fd, &st);
	if (err) {
		perror("Unable to stat vmlinux");
		goto out_close;
	}

	vmlinux = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, vmlinux_fd, 0);
	if (vmlinux == MAP_FAILED) {
		perror("Unable to mmap vmlinux");
		goto out_close;
	}

	eh = vmlinux;
	if (memcmp(eh->e_ident, ELFMAG, SELFMAG)) {
		fprintf(stderr, "vmlinux is not an ELF?\n");
		goto out_munmap;
	}

	if (eh->e_ident[EI_CLASS] != ELFCLASS64) {
		fprintf(stderr, "vmlinux is not 64b?\n");
		goto out_munmap;
	}

	if (eh->e_ident[EI_DATA] != ELFDATA2LSB) {
		fprintf(stderr, "vmlinux is not little endian?\n");
		goto out_munmap;
	}

	/* Check every executable PROGBITS section in the image */
	for (i = 0; i < le16toh(eh->e_shnum); i++) {
		sh = vmlinux + le64toh(eh->e_shoff) + (i * le16toh(eh->e_shentsize));

		/* Section header fields are little endian too */
		if (le32toh(sh->sh_type) != SHT_PROGBITS)
			continue;
		if (!(le64toh(sh->sh_flags) & SHF_EXECINSTR))
			continue;

		err = check_code(le64toh(sh->sh_addr),
				 vmlinux + le64toh(sh->sh_offset),
				 le64toh(sh->sh_size));
		if (err)
			goto out_munmap;
	}

	status = EXIT_SUCCESS;
out_munmap:
	munmap(vmlinux, st.st_size);
out_close:
	close(vmlinux_fd);
out_ret:
	fprintf(stdout, "loongson3-llsc-check returns %s\n",
		status ? "failure" : "success");
	return status;
}