/*
 * Copyright (c) 1993 Jan-Simon Pendry
 * Copyright (c) 1993 Sean Eric Fagan
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry and Sean Eric Fagan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)procfs_mem.c	8.5 (Berkeley) 6/15/94
 *
 * $FreeBSD$
 */

/*
 * This is a lightly hacked and merged version
 * of sef's pread/pwrite functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <miscfs/procfs/procfs.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <sys/user.h>
#include <sys/ptrace.h>

static int	procfs_rwmem __P((struct proc *curp,
				  struct proc *p, struct uio *uio));

/*
 * procfs_rwmem:
 *
 * Move the bytes described by 'uio' between the caller and the address
 * space of the target process 'p'.  uio_offset is interpreted as a
 * virtual address within 'p'; uio_rw selects the direction.  'curp' is
 * the requesting process and is consulted only for the kmem access
 * check on U-area reads.
 *
 * Returns 0 on success, EFAULT if the target vmspace is going away or
 * a page cannot be faulted in/looked up.  A denied U-area access ends
 * the transfer with error 0, i.e. the caller sees a short transfer
 * rather than an explicit failure.
 */
static int
procfs_rwmem(curp, p, uio)
	struct proc *curp;
	struct proc *p;
	struct uio *uio;
{
	int error;
	int writing;
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	vm_offset_t kva;

	/*
	 * if the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything.  The page table
	 * usage in that process can be messed up.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT) || (vm->vm_refcnt < 1))
		return EFAULT;
	/* Hold the vmspace; the reference is dropped via vmspace_free()
	 * just before returning. */
	++vm->vm_refcnt;
	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	/*
	 * For writes, additionally request VM_PROT_OVERRIDE_WRITE --
	 * presumably so the fault succeeds even where the mapping is
	 * not currently write-enabled (TODO confirm against vm_fault()).
	 */
	reqprot = writing ?
	    (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) : VM_PROT_READ;

	/*
	 * One page of pageable kernel VA used as a window: each target
	 * page is entered here with pmap_kenter() for the uiomove().
	 */
	kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE);

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t) uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy (bounded by the rest of this
		 * page and by what the caller still wants).
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Addresses at or above VM_MAXUSER_ADDRESS refer to the
		 * U area (struct user) rather than pageable user memory
		 * and are copied straight out of p->p_addr below.
		 */
		if (uva >= VM_MAXUSER_ADDRESS) {
			vm_offset_t tkva;

			/*
			 * Writes to the U area are never allowed, nor are
			 * reads past its end, nor reads rejected by the
			 * ptrace check unless the caller has kmem access.
			 * Note: denial breaks out with error 0, so the
			 * caller observes a short read, not a failure.
			 */
			if (writing ||
			    uva >= VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE ||
			    (ptrace_read_u_check(p,
				uva - (vm_offset_t) VM_MAXUSER_ADDRESS,
				(size_t) len) &&
			     !procfs_kmemaccess(curp))) {
				error = 0;
				break;
			}

			/* we are reading the "U area", force it into core */
			PHOLD(p);

			/* sanity check */
			if (!(p->p_flag & P_INMEM)) {
				/* aiee! */
				PRELE(p);
				error = EFAULT;
				break;
			}

			/* populate the ptrace/procfs area */
			p->p_addr->u_kproc.kp_proc = *p;
			fill_eproc (p, &p->p_addr->u_kproc.kp_eproc);

			/* locate the in-core address */
			tkva = (uintptr_t)p->p_addr + uva - VM_MAXUSER_ADDRESS;

			/* transfer it */
			error = uiomove((caddr_t)tkva, len, uio);

			/* let the pages go */
			PRELE(p);

			continue;
		}

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, FALSE);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot,
		    &out_entry, &object, &pindex, &out_prot,
		    &wired);

		if (error) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			break;
		}

		m = vm_page_lookup(object, pindex);

		/* Allow fallback to backing objects if we are reading */

		while (m == NULL && !writing && object->backing_object) {

			pindex += OFF_TO_IDX(object->backing_object_offset);
			object = object->backing_object;

			m = vm_page_lookup(object, pindex);
		}

		if (m == NULL) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			vm_map_lookup_done(tmap, out_entry);

			break;
		}

		/*
		 * Wire the page into memory
		 */
		vm_page_wire(m);

		/*
		 * We're done with tmap now.
		 * But reference the object first, so that we won't loose
		 * it.
		 */
		vm_object_reference(object);
		vm_map_lookup_done(tmap, out_entry);

		/* Map the wired page into the kernel window and copy. */
		pmap_kenter(kva, VM_PAGE_TO_PHYS(m));

		/*
		 * Now do the i/o move.
		 */
		error = uiomove((caddr_t)(kva + page_offset), len, uio);

		pmap_kremove(kva);

		/*
		 * release the page and the object
		 */
		vm_page_unwire(m, 1);
		vm_object_deallocate(object);

		object = NULL;

	} while (error == 0 && uio->uio_resid > 0);

	/* Drop a leftover object reference, if any error path left one. */
	if (object)
		vm_object_deallocate(object);

	kmem_free(kernel_map, kva, PAGE_SIZE);
	vmspace_free(vm);
	return (error);
}

/*
 * Copy data in and out of the target process.
 * We do this by mapping the process's page into
 * the kernel and then doing a uiomove direct
 * from the kernel address space.
275 */ 276 int 277 procfs_domem(curp, p, pfs, uio) 278 struct proc *curp; 279 struct proc *p; 280 struct pfsnode *pfs; 281 struct uio *uio; 282 { 283 284 if (uio->uio_resid == 0) 285 return (0); 286 287 /* 288 * XXX 289 * We need to check for KMEM_GROUP because ps is sgid kmem; 290 * not allowing it here causes ps to not work properly. Arguably, 291 * this is a bug with what ps does. We only need to do this 292 * for Pmem nodes, and only if it's reading. This is still not 293 * good, as it may still be possible to grab illicit data if 294 * a process somehow gets to be KMEM_GROUP. Note that this also 295 * means that KMEM_GROUP can't change without editing procfs.h! 296 * All in all, quite yucky. 297 */ 298 299 if (!CHECKIO(curp, p) && 300 !(uio->uio_rw == UIO_READ && 301 procfs_kmemaccess(curp))) 302 return EPERM; 303 304 return (procfs_rwmem(curp, p, uio)); 305 } 306 307 /* 308 * Given process (p), find the vnode from which 309 * its text segment is being executed. 310 * 311 * It would be nice to grab this information from 312 * the VM system, however, there is no sure-fire 313 * way of doing that. Instead, fork(), exec() and 314 * wait() all maintain the p_textvp field in the 315 * process proc structure which contains a held 316 * reference to the exec'ed vnode. 317 */ 318 struct vnode * 319 procfs_findtextvp(p) 320 struct proc *p; 321 { 322 323 return (p->p_textvp); 324 } 325 326 int procfs_kmemaccess(curp) 327 struct proc *curp; 328 { 329 int i; 330 struct ucred *cred; 331 332 cred = curp->p_cred->pc_ucred; 333 if (suser(curp)) 334 return 1; 335 336 /* XXX: Why isn't this done with file-perms ??? */ 337 for (i = 0; i < cred->cr_ngroups; i++) 338 if (cred->cr_groups[i] == KMEM_GROUP) 339 return 1; 340 341 return 0; 342 } 343