/*
 * arch/x86/kernel/step.c
 *
 * x86 single-step support code, common to 32-bit and 64-bit.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>

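/*
 * Convert a task's saved ip, an offset within its code segment, into
 * the linear address that instruction fetch actually uses, handling
 * vm86 mode and LDT-based code segments.
 */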
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->ip;
	seg = regs->cs & 0xffff;
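	/*
	 * In vm86 mode the linear address is (segment << 4) plus the
	 * 16-bit offset; e.g. cs = 0x1234, ip = 0x0010 gives
	 * (0x1234 << 4) + 0x0010 = 0x12350.
	 */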
	if (v8086_mode(regs)) {
		addr = (addr & 0xffff) + (seg << 4);
		return addr;
	}

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based.  That is largely true: the
	 * TLS segments are used for data, and we simply
	 * ignore the PNPBIOS and APM BIOS ones here.
	 */
	if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		struct desc_struct *desc;
		unsigned long base;

		/* Strip the RPL and TI bits to get the LDT index. */
		seg >>= 3;

		mutex_lock(&child->mm->context.lock);
		if (unlikely(!child->mm->context.ldt ||
			     seg >= child->mm->context.ldt->size))
			addr = -1L; /* bogus selector, access would fault */
		else {
			desc = &child->mm->context.ldt->entries[seg];
			base = get_desc_base(desc);

			/* 16-bit code segment? */
			if (!desc->d)
				addr &= 0xffff;
			addr += base;
		}
		mutex_unlock(&child->mm->context.lock);
	}
#endif

	return addr;
}

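/*
 * Decode the instruction at the task's current ip, skipping any
 * prefix bytes, and return nonzero if it is a popf or iret, i.e. an
 * instruction that can set TF from its stack operand.
 */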
static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[15];
	unsigned long addr = convert_ip_to_linear(child, regs);

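	/* Read up to one maximal-length (15 byte) x86 instruction. */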
	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf and iret */
		case 0x9d: case 0xcf:
			return 1;

		/* operand and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;

#ifdef CONFIG_X86_64
		case 0x40 ... 0x4f:
			if (!user_64bit_mode(regs))
				/* 32-bit mode: inc/dec register */
				return 0;
			/* 64-bit mode: REX prefix */
			continue;
#endif

		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set. But
		 * it's more pain than it's worth to avoid
		 * it, and a debugger could emulate this
		 * all in user space if it _really_ cares.
		 */
		case 0x9c:
		default:
			return 0;
		}
	}
	return 0;
}

/*
 * Enable single-stepping.  Return nonzero if user mode is not using TF itself.
 */
static int enable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	unsigned long oflags;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state so we don't wrongly set TIF_FORCED_TF below.
	 * If enable_single_step() was used last and that is what
	 * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are
	 * already set and our bookkeeping is fine.
	 */
	if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP)))
		regs->flags |= X86_EFLAGS_TF;

	/*
	 * Always set TIF_SINGLESTEP - this guarantees that
	 * we single-step system calls etc.  This will also
	 * cause us to set TF when returning to user mode.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	oflags = regs->flags;

	/* Set TF on the kernel stack.. */
	regs->flags |= X86_EFLAGS_TF;

	/*
	 * ..but if TF is changed by the instruction we will trace,
	 * don't mark it as being "us" that set it, so that we
	 * won't clear it by hand later.
	 *
	 * Note that if we don't actually execute the popf because
	 * of a signal arriving right now or suchlike, we will lose
	 * track of the fact that it really was "us" that set it.
	 */
	if (is_setting_trap_flag(child, regs)) {
		clear_tsk_thread_flag(child, TIF_FORCED_TF);
		return 0;
	}

	/*
	 * If TF was already set, check whether it was us who set it.
	 * If not, we should never attempt a block step.
	 */
	if (oflags & X86_EFLAGS_TF)
		return test_tsk_thread_flag(child, TIF_FORCED_TF);

	set_tsk_thread_flag(child, TIF_FORCED_TF);

	return 1;
}

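/*
 * Enable or disable block stepping: keep TIF_BLOCKSTEP and
 * DEBUGCTLMSR_BTF in sync, so that the debug trap fires at branch
 * boundaries instead of after every instruction.
 */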
void set_task_blockstep(struct task_struct *task, bool on)
{
	unsigned long debugctl;

	/*
	 * Ensure irq/preemption can't change debugctl in between.
	 * Note also that both TIF_BLOCKSTEP and debugctl should
	 * be changed atomically wrt preemption.
	 *
	 * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
	 * task is current or it can't be running, otherwise we can race
	 * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
	 * PTRACE_KILL is not safe.
	 */
	local_irq_disable();
	debugctl = get_debugctlmsr();
	if (on) {
		debugctl |= DEBUGCTLMSR_BTF;
		set_tsk_thread_flag(task, TIF_BLOCKSTEP);
	} else {
		debugctl &= ~DEBUGCTLMSR_BTF;
		clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
	}
	/* For a non-current task, __switch_to_xtra() syncs BTF on schedule-in. */
	if (task == current)
		update_debugctlmsr(debugctl);
	local_irq_enable();
}

/*
 * Enable single or block step.
 */
static void enable_step(struct task_struct *child, bool block)
{
	/*
	 * Make sure block stepping (BTF) is not enabled unless it should be.
	 * Note that with block stepping we don't check for further
	 * is_setting_trap_flag() instructions after the first one, so
	 * debugger block stepping should not be used in a program that
	 * uses user-mode single stepping itself.
	 */
	if (enable_single_step(child) && block)
		set_task_blockstep(child, true);
	else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
		set_task_blockstep(child, false);
}
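
/*
 * Arch hooks called by the generic ptrace layer, e.g. for the
 * PTRACE_SINGLESTEP and PTRACE_SINGLEBLOCK resume requests.
 */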
void user_enable_single_step(struct task_struct *child)
{
	enable_step(child, false);
}

void user_enable_block_step(struct task_struct *child)
{
	enable_step(child, true);
}

void user_disable_single_step(struct task_struct *child)
{
	/*
	 * Make sure block stepping (BTF) is disabled.
	 */
	if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
		set_task_blockstep(child, false);

	/* Always clear TIF_SINGLESTEP... */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/* But touch TF only if it was set by us.. */
	if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
		task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
}
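
/*
 * Usage sketch (an illustration, not part of this file): a debugger
 * reaches the hooks above through the generic ptrace layer.  A
 * minimal single-step loop in user space looks roughly like:
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 *	ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);	// -> user_enable_single_step()
 *	waitpid(pid, &status, 0);			// tracee stops after one insn
 *	ptrace(PTRACE_CONT, pid, NULL, NULL);		// -> user_disable_single_step()
 */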
235