xref: /linux/tools/perf/util/annotate-arch/annotate-x86.c (revision 046fd8206d820b71e7870f7b894b46f8a15ae974)
// SPDX-License-Identifier: GPL-2.0
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>
#include "../annotate-data.h"
#include "../debug.h"
#include "../disasm.h"
#include "../dso.h"
#include "../map.h"
#include "../string2.h" // strstarts
#include "../symbol.h"
13 
/*
 * x86 instruction mnemonic table to parse disasm lines for annotate.
 * This table is searched twice - one for exact match and another for
 * match without a size suffix (b, w, l, q) in case of AT&T syntax.
 *
 * So this table should not have entries with the suffix unless it's
 * a complete different instruction than ones without the suffix.
 *
 * Entries must stay sorted by name: arch__new_x86() marks the table as
 * sorted_instructions for lookup and asserts the ordering in debug
 * builds (!NDEBUG).
 */
static const struct ins x86__instructions[] = {
	{ .name = "adc",	.ops = &mov_ops,  },
	{ .name = "add",	.ops = &mov_ops,  },
	{ .name = "addsd",	.ops = &mov_ops,  },
	{ .name = "and",	.ops = &mov_ops,  },
	{ .name = "andpd",	.ops = &mov_ops,  },
	{ .name = "andps",	.ops = &mov_ops,  },
	{ .name = "bsr",	.ops = &mov_ops,  },
	{ .name = "bt",		.ops = &mov_ops,  },
	{ .name = "btr",	.ops = &mov_ops,  },
	{ .name = "bts",	.ops = &mov_ops,  },
	{ .name = "call",	.ops = &call_ops, },
	{ .name = "cmovae",	.ops = &mov_ops,  },
	{ .name = "cmovbe",	.ops = &mov_ops,  },
	{ .name = "cmove",	.ops = &mov_ops,  },
	{ .name = "cmp",	.ops = &mov_ops,  },
	{ .name = "cmpxch",	.ops = &mov_ops,  },
	{ .name = "cmpxchg",	.ops = &mov_ops,  },
	{ .name = "cs",		.ops = &mov_ops,  },
	{ .name = "dec",	.ops = &dec_ops,  },
	{ .name = "divsd",	.ops = &mov_ops,  },
	{ .name = "divss",	.ops = &mov_ops,  },
	{ .name = "gs",		.ops = &mov_ops,  },
	{ .name = "imul",	.ops = &mov_ops,  },
	{ .name = "inc",	.ops = &dec_ops,  },
	{ .name = "ja",		.ops = &jump_ops, },
	{ .name = "jae",	.ops = &jump_ops, },
	{ .name = "jb",		.ops = &jump_ops, },
	{ .name = "jbe",	.ops = &jump_ops, },
	{ .name = "jc",		.ops = &jump_ops, },
	{ .name = "jcxz",	.ops = &jump_ops, },
	{ .name = "je",		.ops = &jump_ops, },
	{ .name = "jecxz",	.ops = &jump_ops, },
	{ .name = "jg",		.ops = &jump_ops, },
	{ .name = "jge",	.ops = &jump_ops, },
	{ .name = "jl",		.ops = &jump_ops, },
	{ .name = "jle",	.ops = &jump_ops, },
	{ .name = "jmp",	.ops = &jump_ops, },
	{ .name = "jna",	.ops = &jump_ops, },
	{ .name = "jnae",	.ops = &jump_ops, },
	{ .name = "jnb",	.ops = &jump_ops, },
	{ .name = "jnbe",	.ops = &jump_ops, },
	{ .name = "jnc",	.ops = &jump_ops, },
	{ .name = "jne",	.ops = &jump_ops, },
	{ .name = "jng",	.ops = &jump_ops, },
	{ .name = "jnge",	.ops = &jump_ops, },
	{ .name = "jnl",	.ops = &jump_ops, },
	{ .name = "jnle",	.ops = &jump_ops, },
	{ .name = "jno",	.ops = &jump_ops, },
	{ .name = "jnp",	.ops = &jump_ops, },
	{ .name = "jns",	.ops = &jump_ops, },
	{ .name = "jnz",	.ops = &jump_ops, },
	{ .name = "jo",		.ops = &jump_ops, },
	{ .name = "jp",		.ops = &jump_ops, },
	{ .name = "jpe",	.ops = &jump_ops, },
	{ .name = "jpo",	.ops = &jump_ops, },
	{ .name = "jrcxz",	.ops = &jump_ops, },
	{ .name = "js",		.ops = &jump_ops, },
	{ .name = "jz",		.ops = &jump_ops, },
	{ .name = "lea",	.ops = &mov_ops,  },
	{ .name = "lock",	.ops = &lock_ops, },
	{ .name = "mov",	.ops = &mov_ops,  },
	{ .name = "movapd",	.ops = &mov_ops,  },
	{ .name = "movaps",	.ops = &mov_ops,  },
	{ .name = "movdqa",	.ops = &mov_ops,  },
	{ .name = "movdqu",	.ops = &mov_ops,  },
	{ .name = "movsb",	.ops = &mov_ops,  },
	{ .name = "movsd",	.ops = &mov_ops,  },
	{ .name = "movsl",	.ops = &mov_ops,  },
	{ .name = "movss",	.ops = &mov_ops,  },
	{ .name = "movsw",	.ops = &mov_ops,  },
	{ .name = "movupd",	.ops = &mov_ops,  },
	{ .name = "movups",	.ops = &mov_ops,  },
	{ .name = "movzb",	.ops = &mov_ops,  },
	{ .name = "movzl",	.ops = &mov_ops,  },
	{ .name = "movzw",	.ops = &mov_ops,  },
	{ .name = "mulsd",	.ops = &mov_ops,  },
	{ .name = "mulss",	.ops = &mov_ops,  },
	{ .name = "nop",	.ops = &nop_ops,  },
	{ .name = "or",		.ops = &mov_ops,  },
	{ .name = "orps",	.ops = &mov_ops,  },
	{ .name = "paddq",	.ops = &mov_ops,  },
	{ .name = "pand",	.ops = &mov_ops,  },
	{ .name = "pcmpeqb",	.ops = &mov_ops,  },
	{ .name = "por",	.ops = &mov_ops,  },
	{ .name = "rcl",	.ops = &mov_ops,  },
	{ .name = "ret",	.ops = &ret_ops,  },
	{ .name = "sbb",	.ops = &mov_ops,  },
	{ .name = "sete",	.ops = &mov_ops,  },
	{ .name = "sub",	.ops = &mov_ops,  },
	{ .name = "subsd",	.ops = &mov_ops,  },
	{ .name = "test",	.ops = &mov_ops,  },
	{ .name = "tzcnt",	.ops = &mov_ops,  },
	{ .name = "ucomisd",	.ops = &mov_ops,  },
	{ .name = "ucomiss",	.ops = &mov_ops,  },
	{ .name = "vaddsd",	.ops = &mov_ops,  },
	{ .name = "vandpd",	.ops = &mov_ops,  },
	{ .name = "vmovdqa",	.ops = &mov_ops,  },
	{ .name = "vmovq",	.ops = &mov_ops,  },
	{ .name = "vmovsd",	.ops = &mov_ops,  },
	{ .name = "vmulsd",	.ops = &mov_ops,  },
	{ .name = "vorpd",	.ops = &mov_ops,  },
	{ .name = "vsubsd",	.ops = &mov_ops,  },
	{ .name = "vucomisd",	.ops = &mov_ops,  },
	{ .name = "xadd",	.ops = &mov_ops,  },
	{ .name = "xbegin",	.ops = &jump_ops, },
	{ .name = "xchg",	.ops = &mov_ops,  },
	{ .name = "xor",	.ops = &mov_ops, },
	{ .name = "xorpd",	.ops = &mov_ops, },
	{ .name = "xorps",	.ops = &mov_ops, },
};
133 
134 static bool amd__ins_is_fused(const struct arch *arch, const char *ins1,
135 			      const char *ins2)
136 {
137 	if (strstr(ins2, "jmp"))
138 		return false;
139 
140 	/* Family >= 15h supports cmp/test + branch fusion */
141 	if (arch->family >= 0x15 && (strstarts(ins1, "test") ||
142 	    (strstarts(ins1, "cmp") && !strstr(ins1, "xchg")))) {
143 		return true;
144 	}
145 
146 	/* Family >= 19h supports some ALU + branch fusion */
147 	if (arch->family >= 0x19 && (strstarts(ins1, "add") ||
148 	    strstarts(ins1, "sub") || strstarts(ins1, "and") ||
149 	    strstarts(ins1, "inc") || strstarts(ins1, "dec") ||
150 	    strstarts(ins1, "or") || strstarts(ins1, "xor"))) {
151 		return true;
152 	}
153 
154 	return false;
155 }
156 
157 static bool intel__ins_is_fused(const struct arch *arch, const char *ins1,
158 				const char *ins2)
159 {
160 	if (arch->family != 6 || arch->model < 0x1e || strstr(ins2, "jmp"))
161 		return false;
162 
163 	if (arch->model == 0x1e) {
164 		/* Nehalem */
165 		if ((strstr(ins1, "cmp") && !strstr(ins1, "xchg")) ||
166 		     strstr(ins1, "test")) {
167 			return true;
168 		}
169 	} else {
170 		/* Newer platform */
171 		if ((strstr(ins1, "cmp") && !strstr(ins1, "xchg")) ||
172 		     strstr(ins1, "test") ||
173 		     strstr(ins1, "add") ||
174 		     strstr(ins1, "sub") ||
175 		     strstr(ins1, "and") ||
176 		     strstr(ins1, "inc") ||
177 		     strstr(ins1, "dec")) {
178 			return true;
179 		}
180 	}
181 
182 	return false;
183 }
184 
185 static int x86__cpuid_parse(struct arch *arch, const char *cpuid)
186 {
187 	unsigned int family, model, stepping;
188 	int ret;
189 
190 	/*
191 	 * cpuid = "GenuineIntel,family,model,stepping"
192 	 */
193 	ret = sscanf(cpuid, "%*[^,],%u,%u,%u", &family, &model, &stepping);
194 	if (ret == 3) {
195 		arch->family = family;
196 		arch->model = model;
197 		arch->ins_is_fused = strstarts(cpuid, "AuthenticAMD") ?
198 					amd__ins_is_fused :
199 					intel__ins_is_fused;
200 		return 0;
201 	}
202 
203 	return -1;
204 }
205 
206 #ifdef HAVE_LIBDW_SUPPORT
207 static void invalidate_reg_state(struct type_state_reg *reg)
208 {
209 	reg->kind = TSR_KIND_INVALID;
210 	reg->ok = false;
211 	reg->lifetime_active = false;
212 	reg->lifetime_end = 0;
213 	reg->copied_from = -1;
214 }
215 
/*
 * Update the per-register/per-stack-slot type state table after one
 * disassembled instruction, for data-type profiling.  It tracks which
 * DWARF type each location holds so a later memory access can be
 * attributed to a struct member.  Handles call, add, sub, lea, a set of
 * pointer-clobbering ALU ops, and the four mov transfer cases below;
 * everything else leaves the state untouched.
 */
static void update_insn_state_x86(struct type_state *state,
				  struct data_loc_info *dloc, Dwarf_Die *cu_die,
				  struct disasm_line *dl)
{
	struct annotated_insn_loc loc;
	struct annotated_op_loc *src = &loc.ops[INSN_OP_SOURCE];
	struct annotated_op_loc *dst = &loc.ops[INSN_OP_TARGET];
	struct type_state_reg *tsr;
	Dwarf_Die type_die;
	u32 insn_offset = dl->al.offset;
	int fbreg = dloc->fbreg;	/* frame-base register */
	int fboff = 0;			/* frame-base offset, set from CFA below */

	if (annotate_get_insn_location(dloc->arch, dl, &loc) < 0)
		return;

	if (ins__is_call(&dl->ins)) {
		struct symbol *func = dl->ops.target.sym;
		const char *call_name;
		u64 call_addr;

		/* Try to resolve the call target name */
		if (func)
			call_name = func->name;
		else
			call_name = dl->ops.target.name;

		/* __fentry__ will preserve all registers */
		if (call_name && !strcmp(call_name, "__fentry__"))
			return;

		if (call_name)
			pr_debug_dtp("call [%x] %s\n", insn_offset, call_name);
		else
			pr_debug_dtp("call [%x] <unknown>\n", insn_offset);

		/* Invalidate caller-saved registers after call */
		call_addr = map__rip_2objdump(dloc->ms->map,
					      dloc->ms->sym->start + dl->al.offset);
		for (unsigned i = 0; i < ARRAY_SIZE(state->regs); i++) {
			struct type_state_reg *reg = &state->regs[i];

			if (!reg->caller_saved)
				continue;
			/* Keep register valid within DWARF location lifetime */
			if (reg->lifetime_active && call_addr < reg->lifetime_end)
				continue;
			invalidate_reg_state(reg);
		}

		/* Update register with the return type (if any) */
		if (call_name && die_find_func_rettype(cu_die, call_name, &type_die)) {
			tsr = &state->regs[state->ret_reg];
			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("call [%x] return -> reg%d",
				     insn_offset, state->ret_reg);
			pr_debug_type_name(&type_die, tsr->kind);
		}
		return;
	}

	if (!strncmp(dl->ins.name, "add", 3)) {
		u64 imm_value = -1ULL;
		int offset;
		const char *var_name = NULL;
		struct map_symbol *ms = dloc->ms;
		u64 ip = ms->sym->start + dl->al.offset;

		if (!has_reg_type(state, dst->reg1))
			return;

		/* Destination is being modified: drop copy/lifetime tracking. */
		tsr = &state->regs[dst->reg1];
		tsr->copied_from = -1;
		tsr->lifetime_active = false;
		tsr->lifetime_end = 0;

		if (src->imm)
			imm_value = src->offset;
		else if (has_reg_type(state, src->reg1) &&
			 state->regs[src->reg1].kind == TSR_KIND_CONST)
			imm_value = state->regs[src->reg1].imm_value;
		else if (src->reg1 == DWARF_REG_PC) {
			u64 var_addr = annotate_calc_pcrel(dloc->ms, ip,
							   src->offset, dl);

			/* Adding this_cpu_off to a constant makes a percpu base. */
			if (get_global_var_info(dloc, var_addr,
						&var_name, &offset) &&
			    !strcmp(var_name, "this_cpu_off") &&
			    tsr->kind == TSR_KIND_CONST) {
				tsr->kind = TSR_KIND_PERCPU_BASE;
				tsr->offset = 0;
				tsr->ok = true;
				imm_value = tsr->imm_value;
			}
		}
		else
			return;

		/* Ignore add to non-pointer or non-const types */
		if (tsr->kind == TSR_KIND_POINTER ||
		    (dwarf_tag(&tsr->type) == DW_TAG_pointer_type &&
		     src->reg1 != DWARF_REG_PC && tsr->kind == TSR_KIND_TYPE && !dst->mem_ref)) {
			tsr->offset += imm_value;
			pr_debug_dtp("add [%x] offset %#"PRIx64" to reg%d",
				     insn_offset, imm_value, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}

		if (tsr->kind == TSR_KIND_CONST)
			tsr->imm_value += imm_value;

		if (tsr->kind != TSR_KIND_PERCPU_BASE)
			return;

		if (get_global_var_type(cu_die, dloc, ip, imm_value, &offset,
					&type_die) && offset == 0) {
			/*
			 * This is not a pointer type, but it should be treated
			 * as a pointer.
			 */
			tsr->type = type_die;
			tsr->kind = TSR_KIND_PERCPU_POINTER;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("add [%x] percpu %#"PRIx64" -> reg%d",
				     insn_offset, imm_value, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		return;
	}

	if (!strncmp(dl->ins.name, "sub", 3)) {
		u64 imm_value = -1ULL;

		if (!has_reg_type(state, dst->reg1))
			return;

		/* Destination is being modified: drop copy/lifetime tracking. */
		tsr = &state->regs[dst->reg1];
		tsr->copied_from = -1;
		tsr->lifetime_active = false;
		tsr->lifetime_end = 0;

		if (src->imm)
			imm_value = src->offset;
		else if (has_reg_type(state, src->reg1) &&
			 state->regs[src->reg1].kind == TSR_KIND_CONST)
			imm_value = state->regs[src->reg1].imm_value;

		/* Like add above, only adjust pointer-ish or const registers. */
		if (tsr->kind == TSR_KIND_POINTER ||
		    (dwarf_tag(&tsr->type) == DW_TAG_pointer_type &&
		     src->reg1 != DWARF_REG_PC && tsr->kind == TSR_KIND_TYPE && !dst->mem_ref)) {
			tsr->offset -= imm_value;
			pr_debug_dtp("sub [%x] offset %#"PRIx64" to reg%d",
				     insn_offset, imm_value, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}

		if (tsr->kind == TSR_KIND_CONST)
			tsr->imm_value -= imm_value;

		return;
	}

	if (!strncmp(dl->ins.name, "lea", 3)) {
		int sreg = src->reg1;
		struct type_state_reg src_tsr;

		if (!has_reg_type(state, sreg) ||
		    !has_reg_type(state, dst->reg1) ||
		    !src->mem_ref)
			return;

		/* Copy src state first: src and dst may be the same register. */
		src_tsr = state->regs[sreg];
		tsr = &state->regs[dst->reg1];

		invalidate_reg_state(tsr);

		/* Case 1: Based on stack pointer or frame pointer */
		if (sreg == fbreg || sreg == state->stack_reg) {
			struct type_state_stack *stack;
			int offset = src->offset - fboff;

			stack = find_stack_state(state, offset);
			if (!stack)
				return;

			tsr->type = stack->type;
			tsr->kind = TSR_KIND_POINTER;
			tsr->offset = offset - stack->offset;
			tsr->ok = true;

			if (sreg == fbreg) {
				pr_debug_dtp("lea [%x] address of -%#x(stack) -> reg%d",
					     insn_offset, -src->offset, dst->reg1);
			} else {
				pr_debug_dtp("lea [%x] address of %#x(reg%d) -> reg%d",
					     insn_offset, src->offset, sreg, dst->reg1);
			}

			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/* Case 2: Based on a register holding a typed pointer */
		else if (src_tsr.ok && (src_tsr.kind == TSR_KIND_POINTER ||
			 (dwarf_tag(&src_tsr.type) == DW_TAG_pointer_type &&
			  src_tsr.kind == TSR_KIND_TYPE))) {

			if (src_tsr.kind == TSR_KIND_TYPE &&
			    __die_get_real_type(&state->regs[sreg].type, &type_die) == NULL)
				return;

			if (src_tsr.kind == TSR_KIND_POINTER)
				type_die = state->regs[sreg].type;

			/* Check if the target type has a member at the new offset */
			if (die_get_member_type(&type_die,
						src->offset + src_tsr.offset, &type_die) == NULL)
				return;

			tsr->type = src_tsr.type;
			tsr->kind = src_tsr.kind;
			tsr->offset = src->offset + src_tsr.offset;
			tsr->ok = true;

			pr_debug_dtp("lea [%x] address of %s%#x(reg%d) -> reg%d",
						insn_offset, src->offset < 0 ? "-" : "",
						abs(src->offset), sreg, dst->reg1);

			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		return;
	}

	/* Invalidate register states for other ops which may change pointers */
	if (has_reg_type(state, dst->reg1) && !dst->mem_ref &&
	    dwarf_tag(&state->regs[dst->reg1].type) == DW_TAG_pointer_type) {
		if (!strncmp(dl->ins.name, "imul", 4) || !strncmp(dl->ins.name, "mul", 3) ||
		    !strncmp(dl->ins.name, "idiv", 4) || !strncmp(dl->ins.name, "div", 3) ||
		    !strncmp(dl->ins.name, "shl", 3)  || !strncmp(dl->ins.name, "shr", 3) ||
		    !strncmp(dl->ins.name, "sar", 3)  || !strncmp(dl->ins.name, "and", 3) ||
		    !strncmp(dl->ins.name, "or", 2)   || !strncmp(dl->ins.name, "neg", 3) ||
		    !strncmp(dl->ins.name, "inc", 3)  || !strncmp(dl->ins.name, "dec", 3)) {
			pr_debug_dtp("%s [%x] invalidate reg%d\n",
						dl->ins.name, insn_offset, dst->reg1);
			invalidate_reg_state(&state->regs[dst->reg1]);
			return;
		}

		if (!strncmp(dl->ins.name, "xor", 3) && dst->reg1 == src->reg1) {
			/* xor reg, reg clears the register */
			pr_debug_dtp("xor [%x] clear reg%d\n",
				     insn_offset, dst->reg1);

			state->regs[dst->reg1].kind = TSR_KIND_CONST;
			state->regs[dst->reg1].imm_value = 0;
			state->regs[dst->reg1].ok = true;
			state->regs[dst->reg1].lifetime_active = false;
			state->regs[dst->reg1].lifetime_end = 0;
			state->regs[dst->reg1].copied_from = -1;
			return;
		}
	}

	/* Everything below handles mov variants only. */
	if (strncmp(dl->ins.name, "mov", 3))
		return;

	if (dloc->fb_cfa) {
		/* Frame base comes from the CFA; look up reg/offset at this PC. */
		u64 ip = dloc->ms->sym->start + dl->al.offset;
		u64 pc = map__rip_2objdump(dloc->ms->map, ip);

		if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0)
			fbreg = -1;
	}

	/* Case 1. register to register or segment:offset to register transfers */
	if (!src->mem_ref && !dst->mem_ref) {
		if (!has_reg_type(state, dst->reg1))
			return;

		tsr = &state->regs[dst->reg1];
		tsr->copied_from = -1;

		if (dso__kernel(map__dso(dloc->ms->map)) &&
		    src->segment == INSN_SEG_X86_GS && src->imm) {
			u64 ip = dloc->ms->sym->start + dl->al.offset;
			u64 var_addr;
			int offset;

			/*
			 * In kernel, %gs points to a per-cpu region for the
			 * current CPU.  Access with a constant offset should
			 * be treated as a global variable access.
			 */
			var_addr = src->offset;

			/* NOTE(review): 40 is presumably the stack-canary slot
			 * offset in %gs — confirm against the kernel layout. */
			if (var_addr == 40) {
				tsr->kind = TSR_KIND_CANARY;
				tsr->offset = 0;
				tsr->ok = true;

				pr_debug_dtp("mov [%x] stack canary -> reg%d\n",
					     insn_offset, dst->reg1);
				return;
			}

			if (!get_global_var_type(cu_die, dloc, ip, var_addr,
						 &offset, &type_die) ||
			    !die_get_member_type(&type_die, offset, &type_die)) {
				invalidate_reg_state(tsr);
				return;
			}

			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] this-cpu addr=%#"PRIx64" -> reg%d",
				     insn_offset, var_addr, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
			return;
		}

		if (src->imm) {
			tsr->kind = TSR_KIND_CONST;
			tsr->imm_value = src->offset;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] imm=%#x -> reg%d\n",
				     insn_offset, tsr->imm_value, dst->reg1);
			return;
		}

		if (!has_reg_type(state, src->reg1) ||
		    !state->regs[src->reg1].ok) {
			invalidate_reg_state(tsr);
			return;
		}

		/* Plain reg-to-reg copy: propagate the full tracked state. */
		tsr->type = state->regs[src->reg1].type;
		tsr->kind = state->regs[src->reg1].kind;
		tsr->imm_value = state->regs[src->reg1].imm_value;
		tsr->offset = state->regs[src->reg1].offset;
		tsr->lifetime_active = state->regs[src->reg1].lifetime_active;
		tsr->lifetime_end = state->regs[src->reg1].lifetime_end;
		tsr->ok = true;

		/* To copy back the variable type later (hopefully) */
		if (tsr->kind == TSR_KIND_TYPE || tsr->kind == TSR_KIND_POINTER)
			tsr->copied_from = src->reg1;

		pr_debug_dtp("mov [%x] reg%d -> reg%d",
			     insn_offset, src->reg1, dst->reg1);
		pr_debug_type_name(&tsr->type, tsr->kind);
	}
	/* Case 2. memory to register transfers */
	if (src->mem_ref && !dst->mem_ref) {
		int sreg = src->reg1;

		if (!has_reg_type(state, dst->reg1))
			return;

		tsr = &state->regs[dst->reg1];
		tsr->copied_from = -1;

retry:
		/* Check stack variables with offset */
		if (sreg == fbreg || sreg == state->stack_reg) {
			struct type_state_stack *stack;
			int offset = src->offset - fboff;

			stack = find_stack_state(state, offset);
			if (stack == NULL) {
				invalidate_reg_state(tsr);
				return;
			} else if (!stack->compound) {
				tsr->type = stack->type;
				tsr->kind = stack->kind;
				tsr->offset = stack->ptr_offset;
				tsr->ok = true;
			} else if (die_get_member_type(&stack->type,
						       offset - stack->offset,
						       &type_die)) {
				tsr->type = type_die;
				tsr->kind = TSR_KIND_TYPE;
				tsr->offset = 0;
				tsr->ok = true;
			} else {
				invalidate_reg_state(tsr);
				return;
			}

			if (sreg == fbreg) {
				pr_debug_dtp("mov [%x] -%#x(stack) -> reg%d",
					     insn_offset, -offset, dst->reg1);
			} else {
				pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d",
					     insn_offset, offset, sreg, dst->reg1);
			}
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/* And then dereference the pointer if it has one */
		else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
			 state->regs[sreg].kind == TSR_KIND_TYPE &&
			 die_deref_ptr_type(&state->regs[sreg].type,
					    src->offset + state->regs[sreg].offset, &type_die)) {
			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d",
				     insn_offset, src->offset, sreg, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/* Handle dereference of TSR_KIND_POINTER registers */
		else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
			 state->regs[sreg].kind == TSR_KIND_POINTER &&
			 die_get_member_type(&state->regs[sreg].type,
					     src->offset + state->regs[sreg].offset, &type_die)) {
			tsr->type = state->regs[sreg].type;
			tsr->kind = TSR_KIND_TYPE;
			tsr->offset = src->offset + state->regs[sreg].offset;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] addr %#x(reg%d) -> reg%d",
				     insn_offset, src->offset, sreg, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/* Or check if it's a global variable */
		else if (sreg == DWARF_REG_PC) {
			struct map_symbol *ms = dloc->ms;
			u64 ip = ms->sym->start + dl->al.offset;
			u64 addr;
			int offset;

			addr = annotate_calc_pcrel(ms, ip, src->offset, dl);

			if (!get_global_var_type(cu_die, dloc, ip, addr, &offset,
						 &type_die) ||
			    !die_get_member_type(&type_die, offset, &type_die)) {
				invalidate_reg_state(tsr);
				return;
			}

			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] global addr=%"PRIx64" -> reg%d",
				     insn_offset, addr, dst->reg1);
			pr_debug_type_name(&type_die, tsr->kind);
		}
		/* And check percpu access with base register */
		else if (has_reg_type(state, sreg) &&
			 state->regs[sreg].kind == TSR_KIND_PERCPU_BASE) {
			u64 ip = dloc->ms->sym->start + dl->al.offset;
			u64 var_addr = src->offset;
			int offset;

			if (src->multi_regs) {
				int reg2 = (sreg == src->reg1) ? src->reg2 : src->reg1;

				if (has_reg_type(state, reg2) && state->regs[reg2].ok &&
				    state->regs[reg2].kind == TSR_KIND_CONST)
					var_addr += state->regs[reg2].imm_value;
			}

			/*
			 * In kernel, %gs points to a per-cpu region for the
			 * current CPU.  Access with a constant offset should
			 * be treated as a global variable access.
			 */
			if (get_global_var_type(cu_die, dloc, ip, var_addr,
						&offset, &type_die) &&
			    die_get_member_type(&type_die, offset, &type_die)) {
				tsr->type = type_die;
				tsr->kind = TSR_KIND_TYPE;
				tsr->offset = 0;
				tsr->ok = true;

				if (src->multi_regs) {
					pr_debug_dtp("mov [%x] percpu %#x(reg%d,reg%d) -> reg%d",
						     insn_offset, src->offset, src->reg1,
						     src->reg2, dst->reg1);
				} else {
					pr_debug_dtp("mov [%x] percpu %#x(reg%d) -> reg%d",
						     insn_offset, src->offset, sreg, dst->reg1);
				}
				pr_debug_type_name(&tsr->type, tsr->kind);
			} else {
				invalidate_reg_state(tsr);
			}
		}
		/* And then dereference the calculated pointer if it has one */
		else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
			 state->regs[sreg].kind == TSR_KIND_PERCPU_POINTER &&
			 die_get_member_type(&state->regs[sreg].type,
					     src->offset, &type_die)) {
			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] pointer %#x(reg%d) -> reg%d",
				     insn_offset, src->offset, sreg, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/* Or try another register if any */
		else if (src->multi_regs && sreg == src->reg1 &&
			 src->reg1 != src->reg2) {
			sreg = src->reg2;
			goto retry;
		}
		else {
			int offset;
			const char *var_name = NULL;

			/* it might be per-cpu variable (in kernel) access */
			if (src->offset < 0) {
				if (get_global_var_info(dloc, (s64)src->offset,
							&var_name, &offset) &&
				    !strcmp(var_name, "__per_cpu_offset")) {
					tsr->kind = TSR_KIND_PERCPU_BASE;
					tsr->offset = 0;
					tsr->ok = true;

					pr_debug_dtp("mov [%x] percpu base reg%d\n",
						     insn_offset, dst->reg1);
					return;
				}
			}

			invalidate_reg_state(tsr);
		}
	}
	/* Case 3. register to memory transfers */
	if (!src->mem_ref && dst->mem_ref) {
		if (!has_reg_type(state, src->reg1) ||
		    !state->regs[src->reg1].ok)
			return;

		/* Check stack variables with offset */
		if (dst->reg1 == fbreg || dst->reg1 == state->stack_reg) {
			struct type_state_stack *stack;
			int offset = dst->offset - fboff;

			tsr = &state->regs[src->reg1];

			stack = find_stack_state(state, offset);
			if (stack) {
				/*
				 * The source register is likely to hold a type
				 * of member if it's a compound type.  Do not
				 * update the stack variable type since we can
				 * get the member type later by using the
				 * die_get_member_type().
				 */
				if (!stack->compound)
					set_stack_state(stack, offset, tsr->kind,
							&tsr->type, tsr->offset);
			} else {
				findnew_stack_state(state, offset, tsr->kind,
						    &tsr->type, tsr->offset);
			}

			if (dst->reg1 == fbreg) {
				pr_debug_dtp("mov [%x] reg%d -> -%#x(stack)",
					     insn_offset, src->reg1, -offset);
			} else {
				pr_debug_dtp("mov [%x] reg%d -> %#x(reg%d)",
					     insn_offset, src->reg1, offset, dst->reg1);
			}
			if (tsr->offset != 0) {
				pr_debug_dtp(" reg%d offset %#x ->",
					src->reg1, tsr->offset);
			}

			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/*
		 * Ignore other transfers since it'd set a value in a struct
		 * and won't change the type.
		 */
	}
	/* Case 4. memory to memory transfers (not handled for now) */
}
809 #endif
810 
811 const struct arch *arch__new_x86(const struct e_machine_and_e_flags *id, const char *cpuid)
812 {
813 	struct arch *arch = zalloc(sizeof(*arch));
814 
815 	if (!arch)
816 		return NULL;
817 
818 	arch->name = "x86";
819 	arch->id = *id;
820 	if (cpuid) {
821 		if (x86__cpuid_parse(arch, cpuid)) {
822 			errno = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING;
823 			return NULL;
824 		}
825 	}
826 	arch->instructions = x86__instructions;
827 	arch->nr_instructions = ARRAY_SIZE(x86__instructions);
828 #ifndef NDEBUG
829 	{
830 		static bool sorted_check;
831 
832 		if (!sorted_check) {
833 			for (size_t i = 0; i < arch->nr_instructions - 1; i++) {
834 				assert(strcmp(arch->instructions[i].name,
835 					      arch->instructions[i + 1].name) <= 0);
836 			}
837 			sorted_check = true;
838 		}
839 	}
840 #endif
841 	arch->sorted_instructions = true;
842 	arch->objdump.comment_char = '#';
843 	arch->objdump.register_char = '%';
844 	arch->objdump.memory_ref_char = '(';
845 	arch->objdump.imm_char = '$';
846 	arch->insn_suffix = "bwlq";
847 #ifdef HAVE_LIBDW_SUPPORT
848 	arch->update_insn_state = update_insn_state_x86;
849 #endif
850 	return arch;
851 }
852