xref: /freebsd/sys/fs/procfs/procfs_mem.c (revision 1d66272a85cde1c8a69c58f4b5dd649babd6eca6)
/*
 * Copyright (c) 1993 Jan-Simon Pendry
 * Copyright (c) 1993 Sean Eric Fagan
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry and Sean Eric Fagan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)procfs_mem.c	8.5 (Berkeley) 6/15/94
 *
 * $FreeBSD$
 */

/*
 * This is a lightly hacked and merged version
 * of sef's pread/pwrite functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <miscfs/procfs/procfs.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/user.h>
#include <sys/ptrace.h>

static int	procfs_rwmem __P((struct proc *curp,
				  struct proc *p, struct uio *uio));

static int
procfs_rwmem(curp, p, uio)
	struct proc *curp;
	struct proc *p;
	struct uio *uio;
{
	int error;
	int writing;
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	vm_offset_t kva;

	/*
	 * if the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything.  The page table
	 * usage in that process can be messed up.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT) || (vm->vm_refcnt < 1))
		return EFAULT;
	++vm->vm_refcnt;
	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

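	/*
	 * For writes, VM_PROT_OVERRIDE_WRITE is requested as well so that
	 * the fault below can succeed on mappings that are not currently
	 * writable (such as program text), which is what a debugger needs
	 * in order to plant breakpoints.
	 */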
	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) : VM_PROT_READ;

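	/*
	 * Reserve one page worth of kernel virtual address space; each
	 * target page is temporarily entered at this address with
	 * pmap_kenter() so that uiomove() can copy through the kernel map.
	 */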
	kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE);

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t) uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;
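		/*
		 * For example, with 4K pages a user address of 0x1234
		 * yields pageno 0x1000 and page_offset 0x234, so at most
		 * PAGE_SIZE - 0x234 bytes can come from this page.
		 */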

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_prot and wired are not
		 * used, and out_entry is only needed for the matching
		 * vm_map_lookup_done() call.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot,
			      &out_entry, &object, &pindex, &out_prot,
			      &wired);

		if (error) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			break;
		}

		m = vm_page_lookup(object, pindex);

		/* Allow fallback to backing objects if we are reading */

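		/*
		 * The fault above only guarantees that the data is resident
		 * somewhere in the shadow chain, so if the top-level object
		 * does not have the page, walk down the backing objects,
		 * adjusting pindex as we go, until it is found.
		 */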
		while (m == NULL && !writing && object->backing_object) {
			pindex += OFF_TO_IDX(object->backing_object_offset);
			object = object->backing_object;

			m = vm_page_lookup(object, pindex);
		}

		if (m == NULL) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			vm_map_lookup_done(tmap, out_entry);

			break;
		}

		/*
		 * Wire the page into memory
		 */
		vm_page_wire(m);

		/*
		 * We're done with tmap now.
		 * But reference the object first, so that we won't lose
		 * it.
		 */
		vm_object_reference(object);
		vm_map_lookup_done(tmap, out_entry);

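		/*
		 * Map the (now wired) page at the kernel VA window so that
		 * uiomove() can reach it; the matching pmap_kremove() below
		 * tears the mapping down once the copy is done.
		 */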
		pmap_kenter(kva, VM_PAGE_TO_PHYS(m));

		/*
		 * Now do the i/o move.
		 */
		error = uiomove((caddr_t)(kva + page_offset), len, uio);

		pmap_kremove(kva);

		/*
		 * release the page and the object
		 */
		vm_page_unwire(m, 1);
		vm_object_deallocate(object);

		object = NULL;

	} while (error == 0 && uio->uio_resid > 0);

	if (object)
		vm_object_deallocate(object);

	kmem_free(kernel_map, kva, PAGE_SIZE);
	vmspace_free(vm);
	return (error);
}

/*
 * Copy data in and out of the target process.
 * We do this by mapping the process's page into
 * the kernel and then doing a uiomove directly
 * from the kernel address space.
 */
int
procfs_domem(curp, p, pfs, uio)
	struct proc *curp;
	struct proc *p;
	struct pfsnode *pfs;
	struct uio *uio;
{

	if (uio->uio_resid == 0)
		return (0);

	/*
	 * XXX
	 * We need to check for KMEM_GROUP because ps is sgid kmem;
	 * not allowing it here causes ps to not work properly.  Arguably,
	 * this is a bug with what ps does.  We only need to do this
	 * for Pmem nodes, and only if it's reading.  This is still not
	 * good, as it may still be possible to grab illicit data if
	 * a process somehow gets to be KMEM_GROUP.  Note that this also
	 * means that KMEM_GROUP can't change without editing procfs.h!
	 * All in all, quite yucky.
	 */

	if (p_can(curp, p, P_CAN_DEBUG, NULL) &&
	    !(uio->uio_rw == UIO_READ &&
	      procfs_kmemaccess(curp)))
		return EPERM;

	return (procfs_rwmem(curp, p, uio));
}
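
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * caller is expected to drive procfs_domem() for a read of the target's
 * memory.  The names buf, len and addr are hypothetical, only the uio
 * fields that procfs_rwmem()/uiomove() consume are shown, and the field
 * names assume this era's struct uio layout (uio_procp et al.):
 *
 *	struct iovec iov;
 *	struct uio uio;
 *
 *	iov.iov_base = buf;		(hypothetical kernel buffer)
 *	iov.iov_len = len;
 *	uio.uio_iov = &iov;
 *	uio.uio_iovcnt = 1;
 *	uio.uio_offset = (off_t)addr;	(address in the target process)
 *	uio.uio_resid = len;
 *	uio.uio_segflg = UIO_SYSSPACE;
 *	uio.uio_rw = UIO_READ;
 *	uio.uio_procp = curp;
 *	error = procfs_domem(curp, p, pfs, &uio);
 */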

/*
 * Given process (p), find the vnode from which
 * its text segment is being executed.
 *
 * It would be nice to grab this information from
 * the VM system, however, there is no sure-fire
 * way of doing that.  Instead, fork(), exec() and
 * wait() all maintain the p_textvp field in the
 * process proc structure which contains a held
 * reference to the exec'ed vnode.
 *
 * XXX - Currently, this is not used, as the
 * /proc/pid/file object exposes an information leak
 * that shouldn't happen.  Using a mount option would
 * make it configurable on a per-system (or, at least,
 * per-mount) basis; however, that's not really best.
 * The best way to do it, I think, would be as an
 * ioctl; this would restrict it to the uid running the
 * program, or root, which seems a reasonable compromise.
 * However, the number of applications for this is
 * minimal, if it can't be seen in the filesystem space,
 * and doing it as an ioctl makes it somewhat less
 * useful due to the, well, inelegance.
 *
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{

	return (p->p_textvp);
}

int
procfs_kmemaccess(curp)
	struct proc *curp;
{
	int i;
	struct ucred *cred;

	cred = curp->p_ucred;
	/* suser() returns 0 if the process has superuser privileges. */
	if (!suser(curp))
		return 1;

	/* XXX: Why isn't this done with file-perms ??? */
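	/*
	 * cr_groups[] holds the effective gid followed by the supplementary
	 * groups, so an sgid-kmem binary such as ps matches here.  As the
	 * comment in procfs_domem() notes, KMEM_GROUP itself is hard-wired
	 * in procfs.h.
	 */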
	for (i = 0; i < cred->cr_ngroups; i++)
		if (cred->cr_groups[i] == KMEM_GROUP)
			return 1;

	return 0;
}
317