xref: /freebsd/sys/fs/procfs/procfs_mem.c (revision 0de89efe5c443f213c7ea28773ef2dc6cf3af2ed)
1 /*
2  * Copyright (c) 1993 Jan-Simon Pendry
3  * Copyright (c) 1993 Sean Eric Fagan
4  * Copyright (c) 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * Jan-Simon Pendry and Sean Eric Fagan.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)procfs_mem.c	8.5 (Berkeley) 6/15/94
39  *
40  *	$Id: procfs_mem.c,v 1.26 1997/08/02 14:32:14 bde Exp $
41  */
42 
43 /*
44  * This is a lightly hacked and merged version
45  * of sef's pread/pwrite functions
46  */
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/proc.h>
51 #include <sys/vnode.h>
52 #include <miscfs/procfs/procfs.h>
53 #include <vm/vm.h>
54 #include <vm/vm_param.h>
55 #include <vm/vm_prot.h>
56 #include <sys/lock.h>
57 #include <vm/pmap.h>
58 #include <vm/vm_map.h>
59 #include <vm/vm_kern.h>
60 #include <vm/vm_object.h>
61 #include <vm/vm_page.h>
62 #include <vm/vm_extern.h>
63 #include <sys/user.h>
64 
65 static int	procfs_rwmem __P((struct proc *p, struct uio *uio));
66 
/*
 * Transfer data between the kernel and the address space of process
 * 'p', in the direction given by uio->uio_rw.  The work is done one
 * page at a time: each target page is faulted in on the process's
 * behalf, looked up, wired, temporarily mapped into the kernel at
 * 'kva', copied with uiomove(), and then released again.  Reads of
 * the range just above VM_MAXUSER_ADDRESS are satisfied from the
 * process's U area instead of its map.
 *
 * Returns 0 on success, EFAULT (or a uiomove() error) on failure.
 */
static int
procfs_rwmem(p, uio)
	struct proc *p;
	struct uio *uio;
{
	int error;
	int writing;			/* non-zero for a UIO_WRITE transfer */
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t object = NULL;	/* VM object owning the current page */
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;		/* protection needed for the access */
	vm_offset_t kva;		/* scratch kernel VA for page mapping */

	/*
	 * if the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything.  The page table
	 * usage in that process can be messed up.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT) || (vm->vm_refcnt < 1))
		return EFAULT;
	/* Hold a reference so the vmspace can't be torn down under us. */
	++vm->vm_refcnt;
	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	/*
	 * VM_PROT_OVERRIDE_WRITE lets the write path modify pages the
	 * target has mapped read-only (e.g. its text).
	 */
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) : VM_PROT_READ;

	kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE);

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;		/* user VA for this iteration */
		int page_offset;		/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired, single_use;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t) uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy: the remainder of this page or
		 * the remainder of the request, whichever is smaller.
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		if (uva >= VM_MAXUSER_ADDRESS) {
			vm_offset_t tkva;

			/*
			 * Above the user map only the UPAGES pages of the
			 * U area may be read; any other access ends the
			 * transfer without error (short read/write).
			 */
			if (writing || (uva >= (VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))) {
				error = 0;
				break;
			}

			/* we are reading the "U area", force it into core */
			PHOLD(p);

			/* sanity check */
			if (!(p->p_flag & P_INMEM)) {
				/* aiee! */
				PRELE(p);
				error = EFAULT;
				break;
			}

			/* populate the ptrace/procfs area */
			p->p_addr->u_kproc.kp_proc = *p;
			fill_eproc (p, &p->p_addr->u_kproc.kp_eproc);

			/* locate the in-core address */
			tkva = (u_int)p->p_addr + uva - VM_MAXUSER_ADDRESS;

			/* transfer it */
			error = uiomove((caddr_t)tkva, len, uio);

			/* let the pages go */
			PRELE(p);

			continue;
		}

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, FALSE);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot,
			      &out_entry, &object, &pindex, &out_prot,
			      &wired, &single_use);

		if (error) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			break;
		}

		m = vm_page_lookup(object, pindex);

		/* Allow fallback to backing objects if we are reading */

		while (m == NULL && !writing && object->backing_object) {

		  pindex += OFF_TO_IDX(object->backing_object_offset);
		  object = object->backing_object;

		  m = vm_page_lookup(object, pindex);
		}

		if (m == NULL) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			vm_map_lookup_done(tmap, out_entry);

			break;
		}

		/*
		 * Wire the page into memory
		 */
		vm_page_wire(m);

		/*
		 * We're done with tmap now.
		 * But reference the object first, so that we won't lose
		 * it.
		 */
		vm_object_reference(object);
		vm_map_lookup_done(tmap, out_entry);

		/* Map the target page at our scratch kernel address. */
		pmap_kenter(kva, VM_PAGE_TO_PHYS(m));

		/*
		 * Now do the i/o move.
		 */
		error = uiomove((caddr_t)(kva + page_offset), len, uio);

		pmap_kremove(kva);

		/*
		 * release the page and the object
		 */
		vm_page_unwire(m);
		vm_object_deallocate(object);

		object = NULL;

	} while (error == 0 && uio->uio_resid > 0);

	/* Drop any object reference left over from an early loop exit. */
	if (object)
		vm_object_deallocate(object);

	kmem_free(kernel_map, kva, PAGE_SIZE);
	vmspace_free(vm);
	return (error);
}
262 
263 /*
264  * Copy data in and out of the target process.
265  * We do this by mapping the process's page into
266  * the kernel and then doing a uiomove direct
267  * from the kernel address space.
268  */
269 int
270 procfs_domem(curp, p, pfs, uio)
271 	struct proc *curp;
272 	struct proc *p;
273 	struct pfsnode *pfs;
274 	struct uio *uio;
275 {
276 
277 	if (uio->uio_resid == 0)
278 		return (0);
279 
280  	/*
281  	 * XXX
282  	 * We need to check for KMEM_GROUP because ps is sgid kmem;
283  	 * not allowing it here causes ps to not work properly.  Arguably,
284  	 * this is a bug with what ps does.  We only need to do this
285  	 * for Pmem nodes, and only if it's reading.  This is still not
286  	 * good, as it may still be possible to grab illicit data if
287  	 * a process somehow gets to be KMEM_GROUP.  Note that this also
288  	 * means that KMEM_GROUP can't change without editing procfs.h!
289  	 * All in all, quite yucky.
290  	 */
291 
292  	if (!CHECKIO(curp, p) &&
293 	    !(curp->p_cred->pc_ucred->cr_gid == KMEM_GROUP &&
294 	      uio->uio_rw == UIO_READ))
295  		return EPERM;
296 
297 	return (procfs_rwmem(p, uio));
298 }
299 
300 /*
301  * Given process (p), find the vnode from which
302  * its text segment is being executed.
303  *
304  * It would be nice to grab this information from
305  * the VM system, however, there is no sure-fire
306  * way of doing that.  Instead, fork(), exec() and
307  * wait() all maintain the p_textvp field in the
308  * process proc structure which contains a held
309  * reference to the exec'ed vnode.
310  */
311 struct vnode *
312 procfs_findtextvp(p)
313 	struct proc *p;
314 {
315 
316 	return (p->p_textvp);
317 }
318