/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(task_t *child, task_t *new_parent)
{
	if (!list_empty(&child->ptrace_list))
		BUG();
	if (child->parent == new_parent)
		return;
	list_add(&child->ptrace_list, &child->parent->ptrace_children);
	REMOVE_LINKS(child);
	child->parent = new_parent;
	SET_LINKS(child);
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
void ptrace_untrace(task_t *child)
{
	spin_lock(&child->sighand->siglock);
	if (child->state == TASK_TRACED) {
		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
			child->state = TASK_STOPPED;
		} else {
			signal_wake_up(child, 1);
		}
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(task_t *child)
{
	if (!child->ptrace)
		BUG();
	child->ptrace = 0;
	if (!list_empty(&child->ptrace_list)) {
		list_del_init(&child->ptrace_list);
		REMOVE_LINKS(child);
		child->parent = child->real_parent;
		SET_LINKS(child);
	}

	if (child->state == TASK_TRACED)
		ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks. After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current &&
	    (!(child->ptrace & PT_ATTACHED) || child->real_parent != current)
	    && child->signal != NULL) {
		ret = 0;
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED) {
			child->state = TASK_TRACED;
		} else if (child->state != TASK_TRACED && !kill) {
			ret = -ESRCH;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill) {
		wait_task_inactive(child);
	}

	/* All systems go.. */
	return ret;
}
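/*
 * Illustrative user-space sketch (not part of this file in the kernel
 * tree, and not compiled here): how a tracer drives the attach and
 * detach paths below through the ptrace(2) syscall. PTRACE_ATTACH ends
 * up in ptrace_attach(), which queues a SIGSTOP; the tracer must then
 * wait for the resulting stop before issuing further requests, which is
 * exactly the window that ptrace_check_attach() above validates. The
 * target pid comes from the command line and is a placeholder.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
	pid_t pid;
	int status;

	if (argc < 2)
		return 1;
	pid = (pid_t) atoi(argv[1]);

	/* PTRACE_ATTACH -> ptrace_attach(): re-parent + force SIGSTOP */
	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
		perror("PTRACE_ATTACH");
		return 1;
	}

	/* Wait until the target has actually entered the traced stop;
	 * only then will ptrace_check_attach() let requests through. */
	if (waitpid(pid, &status, 0) == -1) {
		perror("waitpid");
		return 1;
	}

	/* PTRACE_DETACH -> ptrace_detach(): unlink and wake the child,
	 * optionally delivering a signal (0 = none). */
	if (ptrace(PTRACE_DETACH, pid, NULL, NULL) == -1) {
		perror("PTRACE_DETACH");
		return 1;
	}
	return 0;
}
#endif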
static int may_attach(struct task_struct *task)
{
	if (!task->mm)
		return -EPERM;
	if (((current->uid != task->euid) ||
	     (current->uid != task->suid) ||
	     (current->uid != task->uid) ||
	     (current->gid != task->egid) ||
	     (current->gid != task->sgid) ||
	     (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
		return -EPERM;
	smp_rmb();
	if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace(current, task);
}

int ptrace_may_attach(struct task_struct *task)
{
	int err;
	task_lock(task);
	err = may_attach(task);
	task_unlock(task);
	return !err;
}

int ptrace_attach(struct task_struct *task)
{
	int retval;
	task_lock(task);
	retval = -EPERM;
	if (task->pid <= 1)
		goto bad;
	if (task == current)
		goto bad;
	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = may_attach(task);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED | ((task->real_parent != current)
				      ? PT_ATTACHED : 0);
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;
	task_unlock(task);

	write_lock_irq(&tasklist_lock);
	__ptrace_link(task, current);
	write_unlock_irq(&tasklist_lock);

	force_sig_specific(SIGSTOP, task);
	return 0;

bad:
	task_unlock(task);
	return retval;
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	/* .. re-parent .. */
	child->exit_code = data;

	write_lock_irq(&tasklist_lock);
	__ptrace_unlink(child);
	/* .. and wake it up. */
	if (child->exit_state != EXIT_ZOMBIE)
		wake_up_process(child);
	write_unlock_irq(&tasklist_lock);

	return 0;
}

/*
 * Access another process' address space.
 * The source/target buffer must be in kernel space.
 * Do not walk the page tables directly; use get_user_pages().
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;

		ret = get_user_pages(tsk, mm, addr, 1,
				     write, 1, &page, &vma);
		if (ret <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}
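/*
 * Minimal in-kernel usage sketch (illustrative only, not compiled):
 * reading one word of another task's address space through
 * access_process_vm(). This is the helper that the PTRACE_PEEK*
 * requests and ptrace_readdata() below are built on. The name
 * peek_word() and its calling convention are hypothetical; it relies
 * on the headers already included at the top of this file.
 */
#if 0
static int peek_word(struct task_struct *tsk, unsigned long addr,
		     unsigned long *val)
{
	int copied;

	/* write=0: copy from tsk's address space into the kernel buffer */
	copied = access_process_vm(tsk, addr, val, sizeof(*val), 0);

	/* access_process_vm() returns the byte count actually moved;
	 * treat a short transfer as an I/O error, as ptrace does */
	return (copied == sizeof(*val)) ? 0 : -EIO;
}
#endif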
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t __user * data)
{
	siginfo_t lastinfo;
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			lastinfo = *child->last_siginfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	if (!error)
		return copy_siginfo_to_user(data, &lastinfo);
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, siginfo_t __user * data)
{
	siginfo_t newinfo;
	int error = -ESRCH;

	if (copy_from_user(&newinfo, data, sizeof (siginfo_t)))
		return -EFAULT;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = newinfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}

int ptrace_request(struct task_struct *child, long request,
		   long addr, long data)
{
	int ret = -EIO;

	switch (request) {
#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
		break;
	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, (siginfo_t __user *) data);
		break;
	case PTRACE_SETSIGINFO:
		ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
		break;
	default:
		break;
	}

	return ret;
}
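/*
 * Illustrative user-space sketch (not compiled here): driving the
 * generic requests handled by ptrace_request() above. The tracer sets
 * PTRACE_O_TRACEFORK via PTRACE_SETOPTIONS, which ptrace_setoptions()
 * translates into the PT_TRACE_FORK bit; after a fork event stop it
 * fetches the new child's pid with PTRACE_GETEVENTMSG, which copies
 * child->ptrace_message out to user space. Assumes the target is
 * already attached and stopped, and that the headers in use expose the
 * PTRACE_O_* constants (on older systems they live in <linux/ptrace.h>).
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <stdio.h>

int trace_forks(pid_t pid)
{
	unsigned long msg;

	/* PTRACE_SETOPTIONS -> ptrace_setoptions() */
	if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACEFORK) == -1) {
		perror("PTRACE_SETOPTIONS");
		return -1;
	}

	/* ... resume the child and wait for a PTRACE_EVENT_FORK stop ... */

	/* PTRACE_GETEVENTMSG -> put_user(child->ptrace_message, ...) */
	if (ptrace(PTRACE_GETEVENTMSG, pid, NULL, &msg) == -1) {
		perror("PTRACE_GETEVENTMSG");
		return -1;
	}
	printf("forked child: %lu\n", msg);
	return 0;
}
#endif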