xref: /linux/tools/perf/util/annotate-arch/annotate-x86.c (revision c7decec2f2d2ab0366567f9e30c0e1418cece43f)
1 // SPDX-License-Identifier: GPL-2.0
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>
#include "../annotate-data.h"
#include "../debug.h"
#include "../disasm.h"
#include "../dso.h"
#include "../map.h"
#include "../string2.h" // strstarts
#include "../symbol.h"
13 
14 /*
15  * x86 instruction mnemonic table to parse disasm lines for annotate.
16  * This table is searched twice - one for exact match and another for
17  * match without a size suffix (b, w, l, q) in case of AT&T syntax.
18  *
19  * So this table should not have entries with the suffix unless it's
20  * a completely different instruction than ones without the suffix.
21  */
22 static const struct ins x86__instructions[] = {
23 	{ .name = "adc",	.ops = &mov_ops,  },
24 	{ .name = "add",	.ops = &mov_ops,  },
25 	{ .name = "addsd",	.ops = &mov_ops,  },
26 	{ .name = "and",	.ops = &mov_ops,  },
27 	{ .name = "andpd",	.ops = &mov_ops,  },
28 	{ .name = "andps",	.ops = &mov_ops,  },
29 	{ .name = "bsr",	.ops = &mov_ops,  },
30 	{ .name = "bt",		.ops = &mov_ops,  },
31 	{ .name = "btr",	.ops = &mov_ops,  },
32 	{ .name = "bts",	.ops = &mov_ops,  },
33 	{ .name = "call",	.ops = &call_ops, },
34 	{ .name = "cmovae",	.ops = &mov_ops,  },
35 	{ .name = "cmovbe",	.ops = &mov_ops,  },
36 	{ .name = "cmove",	.ops = &mov_ops,  },
37 	{ .name = "cmp",	.ops = &mov_ops,  },
38 	{ .name = "cmpxch",	.ops = &mov_ops,  },
39 	{ .name = "cmpxchg",	.ops = &mov_ops,  },
40 	{ .name = "cs",		.ops = &mov_ops,  },
41 	{ .name = "dec",	.ops = &dec_ops,  },
42 	{ .name = "divsd",	.ops = &mov_ops,  },
43 	{ .name = "divss",	.ops = &mov_ops,  },
44 	{ .name = "gs",		.ops = &mov_ops,  },
45 	{ .name = "imul",	.ops = &mov_ops,  },
46 	{ .name = "inc",	.ops = &dec_ops,  },
47 	{ .name = "ja",		.ops = &jump_ops, },
48 	{ .name = "jae",	.ops = &jump_ops, },
49 	{ .name = "jb",		.ops = &jump_ops, },
50 	{ .name = "jbe",	.ops = &jump_ops, },
51 	{ .name = "jc",		.ops = &jump_ops, },
52 	{ .name = "jcxz",	.ops = &jump_ops, },
53 	{ .name = "je",		.ops = &jump_ops, },
54 	{ .name = "jecxz",	.ops = &jump_ops, },
55 	{ .name = "jg",		.ops = &jump_ops, },
56 	{ .name = "jge",	.ops = &jump_ops, },
57 	{ .name = "jl",		.ops = &jump_ops, },
58 	{ .name = "jle",	.ops = &jump_ops, },
59 	{ .name = "jmp",	.ops = &jump_ops, },
60 	{ .name = "jna",	.ops = &jump_ops, },
61 	{ .name = "jnae",	.ops = &jump_ops, },
62 	{ .name = "jnb",	.ops = &jump_ops, },
63 	{ .name = "jnbe",	.ops = &jump_ops, },
64 	{ .name = "jnc",	.ops = &jump_ops, },
65 	{ .name = "jne",	.ops = &jump_ops, },
66 	{ .name = "jng",	.ops = &jump_ops, },
67 	{ .name = "jnge",	.ops = &jump_ops, },
68 	{ .name = "jnl",	.ops = &jump_ops, },
69 	{ .name = "jnle",	.ops = &jump_ops, },
70 	{ .name = "jno",	.ops = &jump_ops, },
71 	{ .name = "jnp",	.ops = &jump_ops, },
72 	{ .name = "jns",	.ops = &jump_ops, },
73 	{ .name = "jnz",	.ops = &jump_ops, },
74 	{ .name = "jo",		.ops = &jump_ops, },
75 	{ .name = "jp",		.ops = &jump_ops, },
76 	{ .name = "jpe",	.ops = &jump_ops, },
77 	{ .name = "jpo",	.ops = &jump_ops, },
78 	{ .name = "jrcxz",	.ops = &jump_ops, },
79 	{ .name = "js",		.ops = &jump_ops, },
80 	{ .name = "jz",		.ops = &jump_ops, },
81 	{ .name = "lea",	.ops = &mov_ops,  },
82 	{ .name = "lock",	.ops = &lock_ops, },
83 	{ .name = "mov",	.ops = &mov_ops,  },
84 	{ .name = "movapd",	.ops = &mov_ops,  },
85 	{ .name = "movaps",	.ops = &mov_ops,  },
86 	{ .name = "movdqa",	.ops = &mov_ops,  },
87 	{ .name = "movdqu",	.ops = &mov_ops,  },
88 	{ .name = "movsb",	.ops = &mov_ops,  },
89 	{ .name = "movsd",	.ops = &mov_ops,  },
90 	{ .name = "movsl",	.ops = &mov_ops,  },
91 	{ .name = "movss",	.ops = &mov_ops,  },
92 	{ .name = "movsw",	.ops = &mov_ops,  },
93 	{ .name = "movupd",	.ops = &mov_ops,  },
94 	{ .name = "movups",	.ops = &mov_ops,  },
95 	{ .name = "movzb",	.ops = &mov_ops,  },
96 	{ .name = "movzl",	.ops = &mov_ops,  },
97 	{ .name = "movzw",	.ops = &mov_ops,  },
98 	{ .name = "mulsd",	.ops = &mov_ops,  },
99 	{ .name = "mulss",	.ops = &mov_ops,  },
100 	{ .name = "nop",	.ops = &nop_ops,  },
101 	{ .name = "or",		.ops = &mov_ops,  },
102 	{ .name = "orps",	.ops = &mov_ops,  },
103 	{ .name = "paddq",	.ops = &mov_ops,  },
104 	{ .name = "pand",	.ops = &mov_ops,  },
105 	{ .name = "pcmpeqb",	.ops = &mov_ops,  },
106 	{ .name = "por",	.ops = &mov_ops,  },
107 	{ .name = "rcl",	.ops = &mov_ops,  },
108 	{ .name = "ret",	.ops = &ret_ops,  },
109 	{ .name = "sbb",	.ops = &mov_ops,  },
110 	{ .name = "sete",	.ops = &mov_ops,  },
111 	{ .name = "sub",	.ops = &mov_ops,  },
112 	{ .name = "subsd",	.ops = &mov_ops,  },
113 	{ .name = "test",	.ops = &mov_ops,  },
114 	{ .name = "tzcnt",	.ops = &mov_ops,  },
115 	{ .name = "ucomisd",	.ops = &mov_ops,  },
116 	{ .name = "ucomiss",	.ops = &mov_ops,  },
117 	{ .name = "vaddsd",	.ops = &mov_ops,  },
118 	{ .name = "vandpd",	.ops = &mov_ops,  },
119 	{ .name = "vmovdqa",	.ops = &mov_ops,  },
120 	{ .name = "vmovq",	.ops = &mov_ops,  },
121 	{ .name = "vmovsd",	.ops = &mov_ops,  },
122 	{ .name = "vmulsd",	.ops = &mov_ops,  },
123 	{ .name = "vorpd",	.ops = &mov_ops,  },
124 	{ .name = "vsubsd",	.ops = &mov_ops,  },
125 	{ .name = "vucomisd",	.ops = &mov_ops,  },
126 	{ .name = "xadd",	.ops = &mov_ops,  },
127 	{ .name = "xbegin",	.ops = &jump_ops, },
128 	{ .name = "xchg",	.ops = &mov_ops,  },
129 	{ .name = "xor",	.ops = &mov_ops, },
130 	{ .name = "xorpd",	.ops = &mov_ops, },
131 	{ .name = "xorps",	.ops = &mov_ops, },
132 };
133 
amd__ins_is_fused(const struct arch * arch,const char * ins1,const char * ins2)134 static bool amd__ins_is_fused(const struct arch *arch, const char *ins1,
135 			      const char *ins2)
136 {
137 	if (strstr(ins2, "jmp"))
138 		return false;
139 
140 	/* Family >= 15h supports cmp/test + branch fusion */
141 	if (arch->family >= 0x15 && (strstarts(ins1, "test") ||
142 	    (strstarts(ins1, "cmp") && !strstr(ins1, "xchg")))) {
143 		return true;
144 	}
145 
146 	/* Family >= 19h supports some ALU + branch fusion */
147 	if (arch->family >= 0x19 && (strstarts(ins1, "add") ||
148 	    strstarts(ins1, "sub") || strstarts(ins1, "and") ||
149 	    strstarts(ins1, "inc") || strstarts(ins1, "dec") ||
150 	    strstarts(ins1, "or") || strstarts(ins1, "xor"))) {
151 		return true;
152 	}
153 
154 	return false;
155 }
156 
intel__ins_is_fused(const struct arch * arch,const char * ins1,const char * ins2)157 static bool intel__ins_is_fused(const struct arch *arch, const char *ins1,
158 				const char *ins2)
159 {
160 	if (arch->family != 6 || arch->model < 0x1e || strstr(ins2, "jmp"))
161 		return false;
162 
163 	if (arch->model == 0x1e) {
164 		/* Nehalem */
165 		if ((strstr(ins1, "cmp") && !strstr(ins1, "xchg")) ||
166 		     strstr(ins1, "test")) {
167 			return true;
168 		}
169 	} else {
170 		/* Newer platform */
171 		if ((strstr(ins1, "cmp") && !strstr(ins1, "xchg")) ||
172 		     strstr(ins1, "test") ||
173 		     strstr(ins1, "add") ||
174 		     strstr(ins1, "sub") ||
175 		     strstr(ins1, "and") ||
176 		     strstr(ins1, "inc") ||
177 		     strstr(ins1, "dec")) {
178 			return true;
179 		}
180 	}
181 
182 	return false;
183 }
184 
x86__cpuid_parse(struct arch * arch,const char * cpuid)185 static int x86__cpuid_parse(struct arch *arch, const char *cpuid)
186 {
187 	unsigned int family, model, stepping;
188 	int ret;
189 
190 	/*
191 	 * cpuid = "GenuineIntel,family,model,stepping"
192 	 */
193 	ret = sscanf(cpuid, "%*[^,],%u,%u,%u", &family, &model, &stepping);
194 	if (ret == 3) {
195 		arch->family = family;
196 		arch->model = model;
197 		arch->ins_is_fused = strstarts(cpuid, "AuthenticAMD") ?
198 					amd__ins_is_fused :
199 					intel__ins_is_fused;
200 		return 0;
201 	}
202 
203 	return -1;
204 }
205 
206 #ifdef HAVE_LIBDW_SUPPORT
update_insn_state_x86(struct type_state * state,struct data_loc_info * dloc,Dwarf_Die * cu_die,struct disasm_line * dl)207 static void update_insn_state_x86(struct type_state *state,
208 				  struct data_loc_info *dloc, Dwarf_Die *cu_die,
209 				  struct disasm_line *dl)
210 {
211 	struct annotated_insn_loc loc;
212 	struct annotated_op_loc *src = &loc.ops[INSN_OP_SOURCE];
213 	struct annotated_op_loc *dst = &loc.ops[INSN_OP_TARGET];
214 	struct type_state_reg *tsr;
215 	Dwarf_Die type_die;
216 	u32 insn_offset = dl->al.offset;
217 	int fbreg = dloc->fbreg;
218 	int fboff = 0;
219 
220 	if (annotate_get_insn_location(dloc->arch, dl, &loc) < 0)
221 		return;
222 
223 	if (ins__is_call(&dl->ins)) {
224 		struct symbol *func = dl->ops.target.sym;
225 
226 		if (func == NULL)
227 			return;
228 
229 		/* __fentry__ will preserve all registers */
230 		if (!strcmp(func->name, "__fentry__"))
231 			return;
232 
233 		pr_debug_dtp("call [%x] %s\n", insn_offset, func->name);
234 
235 		/* Otherwise invalidate caller-saved registers after call */
236 		for (unsigned i = 0; i < ARRAY_SIZE(state->regs); i++) {
237 			if (state->regs[i].caller_saved)
238 				state->regs[i].ok = false;
239 		}
240 
241 		/* Update register with the return type (if any) */
242 		if (die_find_func_rettype(cu_die, func->name, &type_die)) {
243 			tsr = &state->regs[state->ret_reg];
244 			tsr->type = type_die;
245 			tsr->kind = TSR_KIND_TYPE;
246 			tsr->offset = 0;
247 			tsr->ok = true;
248 
249 			pr_debug_dtp("call [%x] return -> reg%d",
250 				     insn_offset, state->ret_reg);
251 			pr_debug_type_name(&type_die, tsr->kind);
252 		}
253 		return;
254 	}
255 
256 	if (!strncmp(dl->ins.name, "add", 3)) {
257 		u64 imm_value = -1ULL;
258 		int offset;
259 		const char *var_name = NULL;
260 		struct map_symbol *ms = dloc->ms;
261 		u64 ip = ms->sym->start + dl->al.offset;
262 
263 		if (!has_reg_type(state, dst->reg1))
264 			return;
265 
266 		tsr = &state->regs[dst->reg1];
267 		tsr->copied_from = -1;
268 
269 		if (src->imm)
270 			imm_value = src->offset;
271 		else if (has_reg_type(state, src->reg1) &&
272 			 state->regs[src->reg1].kind == TSR_KIND_CONST)
273 			imm_value = state->regs[src->reg1].imm_value;
274 		else if (src->reg1 == DWARF_REG_PC) {
275 			u64 var_addr = annotate_calc_pcrel(dloc->ms, ip,
276 							   src->offset, dl);
277 
278 			if (get_global_var_info(dloc, var_addr,
279 						&var_name, &offset) &&
280 			    !strcmp(var_name, "this_cpu_off") &&
281 			    tsr->kind == TSR_KIND_CONST) {
282 				tsr->kind = TSR_KIND_PERCPU_BASE;
283 				tsr->offset = 0;
284 				tsr->ok = true;
285 				imm_value = tsr->imm_value;
286 			}
287 		}
288 		else
289 			return;
290 
291 		/* Ignore add to non-pointer or non-const types */
292 		if (tsr->kind == TSR_KIND_POINTER ||
293 		    (dwarf_tag(&tsr->type) == DW_TAG_pointer_type &&
294 		     src->reg1 != DWARF_REG_PC && tsr->kind == TSR_KIND_TYPE && !dst->mem_ref)) {
295 			tsr->offset += imm_value;
296 			pr_debug_dtp("add [%x] offset %#"PRIx64" to reg%d",
297 				     insn_offset, imm_value, dst->reg1);
298 			pr_debug_type_name(&tsr->type, tsr->kind);
299 		}
300 
301 		if (tsr->kind == TSR_KIND_CONST)
302 			tsr->imm_value += imm_value;
303 
304 		if (tsr->kind != TSR_KIND_PERCPU_BASE)
305 			return;
306 
307 		if (get_global_var_type(cu_die, dloc, ip, imm_value, &offset,
308 					&type_die) && offset == 0) {
309 			/*
310 			 * This is not a pointer type, but it should be treated
311 			 * as a pointer.
312 			 */
313 			tsr->type = type_die;
314 			tsr->kind = TSR_KIND_PERCPU_POINTER;
315 			tsr->offset = 0;
316 			tsr->ok = true;
317 
318 			pr_debug_dtp("add [%x] percpu %#"PRIx64" -> reg%d",
319 				     insn_offset, imm_value, dst->reg1);
320 			pr_debug_type_name(&tsr->type, tsr->kind);
321 		}
322 		return;
323 	}
324 
325 	if (!strncmp(dl->ins.name, "sub", 3)) {
326 		u64 imm_value = -1ULL;
327 
328 		if (!has_reg_type(state, dst->reg1))
329 			return;
330 
331 		tsr = &state->regs[dst->reg1];
332 		tsr->copied_from = -1;
333 
334 		if (src->imm)
335 			imm_value = src->offset;
336 		else if (has_reg_type(state, src->reg1) &&
337 			 state->regs[src->reg1].kind == TSR_KIND_CONST)
338 			imm_value = state->regs[src->reg1].imm_value;
339 
340 		if (tsr->kind == TSR_KIND_POINTER ||
341 		    (dwarf_tag(&tsr->type) == DW_TAG_pointer_type &&
342 		     src->reg1 != DWARF_REG_PC && tsr->kind == TSR_KIND_TYPE && !dst->mem_ref)) {
343 			tsr->offset -= imm_value;
344 			pr_debug_dtp("sub [%x] offset %#"PRIx64" to reg%d",
345 				     insn_offset, imm_value, dst->reg1);
346 			pr_debug_type_name(&tsr->type, tsr->kind);
347 		}
348 
349 		if (tsr->kind == TSR_KIND_CONST)
350 			tsr->imm_value -= imm_value;
351 
352 		return;
353 	}
354 
355 	if (!strncmp(dl->ins.name, "lea", 3)) {
356 		int sreg = src->reg1;
357 		struct type_state_reg src_tsr;
358 
359 		if (!has_reg_type(state, sreg) ||
360 		    !has_reg_type(state, dst->reg1) ||
361 		    !src->mem_ref)
362 			return;
363 
364 		src_tsr = state->regs[sreg];
365 		tsr = &state->regs[dst->reg1];
366 
367 		tsr->copied_from = -1;
368 		tsr->ok = false;
369 
370 		/* Case 1: Based on stack pointer or frame pointer */
371 		if (sreg == fbreg || sreg == state->stack_reg) {
372 			struct type_state_stack *stack;
373 			int offset = src->offset - fboff;
374 
375 			stack = find_stack_state(state, offset);
376 			if (!stack)
377 				return;
378 
379 			tsr->type = stack->type;
380 			tsr->kind = TSR_KIND_POINTER;
381 			tsr->offset = offset - stack->offset;
382 			tsr->ok = true;
383 
384 			if (sreg == fbreg) {
385 				pr_debug_dtp("lea [%x] address of -%#x(stack) -> reg%d",
386 					     insn_offset, -src->offset, dst->reg1);
387 			} else {
388 				pr_debug_dtp("lea [%x] address of %#x(reg%d) -> reg%d",
389 					     insn_offset, src->offset, sreg, dst->reg1);
390 			}
391 
392 			pr_debug_type_name(&tsr->type, tsr->kind);
393 		}
394 		/* Case 2: Based on a register holding a typed pointer */
395 		else if (src_tsr.ok && (src_tsr.kind == TSR_KIND_POINTER ||
396 			 (dwarf_tag(&src_tsr.type) == DW_TAG_pointer_type &&
397 			  src_tsr.kind == TSR_KIND_TYPE))) {
398 
399 			if (src_tsr.kind == TSR_KIND_TYPE &&
400 			    __die_get_real_type(&state->regs[sreg].type, &type_die) == NULL)
401 				return;
402 
403 			if (src_tsr.kind == TSR_KIND_POINTER)
404 				type_die = state->regs[sreg].type;
405 
406 			/* Check if the target type has a member at the new offset */
407 			if (die_get_member_type(&type_die,
408 						src->offset + src_tsr.offset, &type_die) == NULL)
409 				return;
410 
411 			tsr->type = src_tsr.type;
412 			tsr->kind = src_tsr.kind;
413 			tsr->offset = src->offset + src_tsr.offset;
414 			tsr->ok = true;
415 
416 			pr_debug_dtp("lea [%x] address of %s%#x(reg%d) -> reg%d",
417 						insn_offset, src->offset < 0 ? "-" : "",
418 						abs(src->offset), sreg, dst->reg1);
419 
420 			pr_debug_type_name(&tsr->type, tsr->kind);
421 		}
422 		return;
423 	}
424 
425 	/* Invalidate register states for other ops which may change pointers */
426 	if (has_reg_type(state, dst->reg1) && !dst->mem_ref &&
427 	    dwarf_tag(&state->regs[dst->reg1].type) == DW_TAG_pointer_type) {
428 		if (!strncmp(dl->ins.name, "imul", 4) || !strncmp(dl->ins.name, "mul", 3) ||
429 		    !strncmp(dl->ins.name, "idiv", 4) || !strncmp(dl->ins.name, "div", 3) ||
430 		    !strncmp(dl->ins.name, "shl", 3)  || !strncmp(dl->ins.name, "shr", 3) ||
431 		    !strncmp(dl->ins.name, "sar", 3)  || !strncmp(dl->ins.name, "and", 3) ||
432 		    !strncmp(dl->ins.name, "or", 2)   || !strncmp(dl->ins.name, "neg", 3) ||
433 		    !strncmp(dl->ins.name, "inc", 3)  || !strncmp(dl->ins.name, "dec", 3)) {
434 			pr_debug_dtp("%s [%x] invalidate reg%d\n",
435 						dl->ins.name, insn_offset, dst->reg1);
436 			state->regs[dst->reg1].ok = false;
437 			state->regs[dst->reg1].copied_from = -1;
438 			return;
439 		}
440 
441 		if (!strncmp(dl->ins.name, "xor", 3) && dst->reg1 == src->reg1) {
442 			/* xor reg, reg clears the register */
443 			pr_debug_dtp("xor [%x] clear reg%d\n",
444 				     insn_offset, dst->reg1);
445 
446 			state->regs[dst->reg1].kind = TSR_KIND_CONST;
447 			state->regs[dst->reg1].imm_value = 0;
448 			state->regs[dst->reg1].ok = true;
449 			state->regs[dst->reg1].copied_from = -1;
450 			return;
451 		}
452 	}
453 
454 	if (strncmp(dl->ins.name, "mov", 3))
455 		return;
456 
457 	if (dloc->fb_cfa) {
458 		u64 ip = dloc->ms->sym->start + dl->al.offset;
459 		u64 pc = map__rip_2objdump(dloc->ms->map, ip);
460 
461 		if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0)
462 			fbreg = -1;
463 	}
464 
465 	/* Case 1. register to register or segment:offset to register transfers */
466 	if (!src->mem_ref && !dst->mem_ref) {
467 		if (!has_reg_type(state, dst->reg1))
468 			return;
469 
470 		tsr = &state->regs[dst->reg1];
471 		tsr->copied_from = -1;
472 
473 		if (dso__kernel(map__dso(dloc->ms->map)) &&
474 		    src->segment == INSN_SEG_X86_GS && src->imm) {
475 			u64 ip = dloc->ms->sym->start + dl->al.offset;
476 			u64 var_addr;
477 			int offset;
478 
479 			/*
480 			 * In kernel, %gs points to a per-cpu region for the
481 			 * current CPU.  Access with a constant offset should
482 			 * be treated as a global variable access.
483 			 */
484 			var_addr = src->offset;
485 
486 			if (var_addr == 40) {
487 				tsr->kind = TSR_KIND_CANARY;
488 				tsr->offset = 0;
489 				tsr->ok = true;
490 
491 				pr_debug_dtp("mov [%x] stack canary -> reg%d\n",
492 					     insn_offset, dst->reg1);
493 				return;
494 			}
495 
496 			if (!get_global_var_type(cu_die, dloc, ip, var_addr,
497 						 &offset, &type_die) ||
498 			    !die_get_member_type(&type_die, offset, &type_die)) {
499 				tsr->ok = false;
500 				return;
501 			}
502 
503 			tsr->type = type_die;
504 			tsr->kind = TSR_KIND_TYPE;
505 			tsr->offset = 0;
506 			tsr->ok = true;
507 
508 			pr_debug_dtp("mov [%x] this-cpu addr=%#"PRIx64" -> reg%d",
509 				     insn_offset, var_addr, dst->reg1);
510 			pr_debug_type_name(&tsr->type, tsr->kind);
511 			return;
512 		}
513 
514 		if (src->imm) {
515 			tsr->kind = TSR_KIND_CONST;
516 			tsr->imm_value = src->offset;
517 			tsr->offset = 0;
518 			tsr->ok = true;
519 
520 			pr_debug_dtp("mov [%x] imm=%#x -> reg%d\n",
521 				     insn_offset, tsr->imm_value, dst->reg1);
522 			return;
523 		}
524 
525 		if (!has_reg_type(state, src->reg1) ||
526 		    !state->regs[src->reg1].ok) {
527 			tsr->ok = false;
528 			return;
529 		}
530 
531 		tsr->type = state->regs[src->reg1].type;
532 		tsr->kind = state->regs[src->reg1].kind;
533 		tsr->imm_value = state->regs[src->reg1].imm_value;
534 		tsr->offset = state->regs[src->reg1].offset;
535 		tsr->ok = true;
536 
537 		/* To copy back the variable type later (hopefully) */
538 		if (tsr->kind == TSR_KIND_TYPE || tsr->kind == TSR_KIND_POINTER)
539 			tsr->copied_from = src->reg1;
540 
541 		pr_debug_dtp("mov [%x] reg%d -> reg%d",
542 			     insn_offset, src->reg1, dst->reg1);
543 		pr_debug_type_name(&tsr->type, tsr->kind);
544 	}
545 	/* Case 2. memory to register transers */
546 	if (src->mem_ref && !dst->mem_ref) {
547 		int sreg = src->reg1;
548 
549 		if (!has_reg_type(state, dst->reg1))
550 			return;
551 
552 		tsr = &state->regs[dst->reg1];
553 		tsr->copied_from = -1;
554 
555 retry:
556 		/* Check stack variables with offset */
557 		if (sreg == fbreg || sreg == state->stack_reg) {
558 			struct type_state_stack *stack;
559 			int offset = src->offset - fboff;
560 
561 			stack = find_stack_state(state, offset);
562 			if (stack == NULL) {
563 				tsr->ok = false;
564 				return;
565 			} else if (!stack->compound) {
566 				tsr->type = stack->type;
567 				tsr->kind = stack->kind;
568 				tsr->offset = stack->ptr_offset;
569 				tsr->ok = true;
570 			} else if (die_get_member_type(&stack->type,
571 						       offset - stack->offset,
572 						       &type_die)) {
573 				tsr->type = type_die;
574 				tsr->kind = TSR_KIND_TYPE;
575 				tsr->offset = 0;
576 				tsr->ok = true;
577 			} else {
578 				tsr->ok = false;
579 				return;
580 			}
581 
582 			if (sreg == fbreg) {
583 				pr_debug_dtp("mov [%x] -%#x(stack) -> reg%d",
584 					     insn_offset, -offset, dst->reg1);
585 			} else {
586 				pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d",
587 					     insn_offset, offset, sreg, dst->reg1);
588 			}
589 			pr_debug_type_name(&tsr->type, tsr->kind);
590 		}
591 		/* And then dereference the pointer if it has one */
592 		else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
593 			 state->regs[sreg].kind == TSR_KIND_TYPE &&
594 			 die_deref_ptr_type(&state->regs[sreg].type,
595 					    src->offset + state->regs[sreg].offset, &type_die)) {
596 			tsr->type = type_die;
597 			tsr->kind = TSR_KIND_TYPE;
598 			tsr->offset = 0;
599 			tsr->ok = true;
600 
601 			pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d",
602 				     insn_offset, src->offset, sreg, dst->reg1);
603 			pr_debug_type_name(&tsr->type, tsr->kind);
604 		}
605 		/* Handle dereference of TSR_KIND_POINTER registers */
606 		else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
607 			 state->regs[sreg].kind == TSR_KIND_POINTER &&
608 			 die_get_member_type(&state->regs[sreg].type,
609 					     src->offset + state->regs[sreg].offset, &type_die)) {
610 			tsr->type = state->regs[sreg].type;
611 			tsr->kind = TSR_KIND_TYPE;
612 			tsr->offset = src->offset + state->regs[sreg].offset;
613 			tsr->ok = true;
614 
615 			pr_debug_dtp("mov [%x] addr %#x(reg%d) -> reg%d",
616 				     insn_offset, src->offset, sreg, dst->reg1);
617 			pr_debug_type_name(&tsr->type, tsr->kind);
618 		}
619 		/* Or check if it's a global variable */
620 		else if (sreg == DWARF_REG_PC) {
621 			struct map_symbol *ms = dloc->ms;
622 			u64 ip = ms->sym->start + dl->al.offset;
623 			u64 addr;
624 			int offset;
625 
626 			addr = annotate_calc_pcrel(ms, ip, src->offset, dl);
627 
628 			if (!get_global_var_type(cu_die, dloc, ip, addr, &offset,
629 						 &type_die) ||
630 			    !die_get_member_type(&type_die, offset, &type_die)) {
631 				tsr->ok = false;
632 				return;
633 			}
634 
635 			tsr->type = type_die;
636 			tsr->kind = TSR_KIND_TYPE;
637 			tsr->offset = 0;
638 			tsr->ok = true;
639 
640 			pr_debug_dtp("mov [%x] global addr=%"PRIx64" -> reg%d",
641 				     insn_offset, addr, dst->reg1);
642 			pr_debug_type_name(&type_die, tsr->kind);
643 		}
644 		/* And check percpu access with base register */
645 		else if (has_reg_type(state, sreg) &&
646 			 state->regs[sreg].kind == TSR_KIND_PERCPU_BASE) {
647 			u64 ip = dloc->ms->sym->start + dl->al.offset;
648 			u64 var_addr = src->offset;
649 			int offset;
650 
651 			if (src->multi_regs) {
652 				int reg2 = (sreg == src->reg1) ? src->reg2 : src->reg1;
653 
654 				if (has_reg_type(state, reg2) && state->regs[reg2].ok &&
655 				    state->regs[reg2].kind == TSR_KIND_CONST)
656 					var_addr += state->regs[reg2].imm_value;
657 			}
658 
659 			/*
660 			 * In kernel, %gs points to a per-cpu region for the
661 			 * current CPU.  Access with a constant offset should
662 			 * be treated as a global variable access.
663 			 */
664 			if (get_global_var_type(cu_die, dloc, ip, var_addr,
665 						&offset, &type_die) &&
666 			    die_get_member_type(&type_die, offset, &type_die)) {
667 				tsr->type = type_die;
668 				tsr->kind = TSR_KIND_TYPE;
669 				tsr->offset = 0;
670 				tsr->ok = true;
671 
672 				if (src->multi_regs) {
673 					pr_debug_dtp("mov [%x] percpu %#x(reg%d,reg%d) -> reg%d",
674 						     insn_offset, src->offset, src->reg1,
675 						     src->reg2, dst->reg1);
676 				} else {
677 					pr_debug_dtp("mov [%x] percpu %#x(reg%d) -> reg%d",
678 						     insn_offset, src->offset, sreg, dst->reg1);
679 				}
680 				pr_debug_type_name(&tsr->type, tsr->kind);
681 			} else {
682 				tsr->ok = false;
683 			}
684 		}
685 		/* And then dereference the calculated pointer if it has one */
686 		else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
687 			 state->regs[sreg].kind == TSR_KIND_PERCPU_POINTER &&
688 			 die_get_member_type(&state->regs[sreg].type,
689 					     src->offset, &type_die)) {
690 			tsr->type = type_die;
691 			tsr->kind = TSR_KIND_TYPE;
692 			tsr->offset = 0;
693 			tsr->ok = true;
694 
695 			pr_debug_dtp("mov [%x] pointer %#x(reg%d) -> reg%d",
696 				     insn_offset, src->offset, sreg, dst->reg1);
697 			pr_debug_type_name(&tsr->type, tsr->kind);
698 		}
699 		/* Or try another register if any */
700 		else if (src->multi_regs && sreg == src->reg1 &&
701 			 src->reg1 != src->reg2) {
702 			sreg = src->reg2;
703 			goto retry;
704 		}
705 		else {
706 			int offset;
707 			const char *var_name = NULL;
708 
709 			/* it might be per-cpu variable (in kernel) access */
710 			if (src->offset < 0) {
711 				if (get_global_var_info(dloc, (s64)src->offset,
712 							&var_name, &offset) &&
713 				    !strcmp(var_name, "__per_cpu_offset")) {
714 					tsr->kind = TSR_KIND_PERCPU_BASE;
715 					tsr->offset = 0;
716 					tsr->ok = true;
717 
718 					pr_debug_dtp("mov [%x] percpu base reg%d\n",
719 						     insn_offset, dst->reg1);
720 					return;
721 				}
722 			}
723 
724 			tsr->ok = false;
725 		}
726 	}
727 	/* Case 3. register to memory transfers */
728 	if (!src->mem_ref && dst->mem_ref) {
729 		if (!has_reg_type(state, src->reg1) ||
730 		    !state->regs[src->reg1].ok)
731 			return;
732 
733 		/* Check stack variables with offset */
734 		if (dst->reg1 == fbreg || dst->reg1 == state->stack_reg) {
735 			struct type_state_stack *stack;
736 			int offset = dst->offset - fboff;
737 
738 			tsr = &state->regs[src->reg1];
739 
740 			stack = find_stack_state(state, offset);
741 			if (stack) {
742 				/*
743 				 * The source register is likely to hold a type
744 				 * of member if it's a compound type.  Do not
745 				 * update the stack variable type since we can
746 				 * get the member type later by using the
747 				 * die_get_member_type().
748 				 */
749 				if (!stack->compound)
750 					set_stack_state(stack, offset, tsr->kind,
751 							&tsr->type, tsr->offset);
752 			} else {
753 				findnew_stack_state(state, offset, tsr->kind,
754 						    &tsr->type, tsr->offset);
755 			}
756 
757 			if (dst->reg1 == fbreg) {
758 				pr_debug_dtp("mov [%x] reg%d -> -%#x(stack)",
759 					     insn_offset, src->reg1, -offset);
760 			} else {
761 				pr_debug_dtp("mov [%x] reg%d -> %#x(reg%d)",
762 					     insn_offset, src->reg1, offset, dst->reg1);
763 			}
764 			if (tsr->offset != 0) {
765 				pr_debug_dtp(" reg%d offset %#x ->",
766 					src->reg1, tsr->offset);
767 			}
768 
769 			pr_debug_type_name(&tsr->type, tsr->kind);
770 		}
771 		/*
772 		 * Ignore other transfers since it'd set a value in a struct
773 		 * and won't change the type.
774 		 */
775 	}
776 	/* Case 4. memory to memory transfers (not handled for now) */
777 }
778 #endif
779 
arch__new_x86(const struct e_machine_and_e_flags * id,const char * cpuid)780 const struct arch *arch__new_x86(const struct e_machine_and_e_flags *id, const char *cpuid)
781 {
782 	struct arch *arch = zalloc(sizeof(*arch));
783 
784 	if (!arch)
785 		return NULL;
786 
787 	arch->name = "x86";
788 	arch->id = *id;
789 	if (cpuid) {
790 		if (x86__cpuid_parse(arch, cpuid)) {
791 			errno = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING;
792 			return NULL;
793 		}
794 	}
795 	arch->instructions = x86__instructions;
796 	arch->nr_instructions = ARRAY_SIZE(x86__instructions);
797 #ifndef NDEBUG
798 	{
799 		static bool sorted_check;
800 
801 		if (!sorted_check) {
802 			for (size_t i = 0; i < arch->nr_instructions - 1; i++) {
803 				assert(strcmp(arch->instructions[i].name,
804 					      arch->instructions[i + 1].name) <= 0);
805 			}
806 			sorted_check = true;
807 		}
808 	}
809 #endif
810 	arch->sorted_instructions = true;
811 	arch->objdump.comment_char = '#';
812 	arch->objdump.register_char = '%';
813 	arch->objdump.memory_ref_char = '(';
814 	arch->objdump.imm_char = '$';
815 	arch->insn_suffix = "bwlq";
816 #ifdef HAVE_LIBDW_SUPPORT
817 	arch->update_insn_state = update_insn_state_x86;
818 #endif
819 	return arch;
820 }
821