xref: /freebsd/sys/amd64/amd64/mem.c (revision ef5d438ed4bc17ad7ece3e40fe4d1f9baf3aadf7)
1 /*-
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department, and code derived from software contributed to
9  * Berkeley by William Jolitz.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the University of
22  *	California, Berkeley and its contributors.
23  * 4. Neither the name of the University nor the names of its contributors
24  *    may be used to endorse or promote products derived from this software
25  *    without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  *	from: Utah $Hdr: mem.c 1.13 89/10/08$
40  *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
41  *	$Id: mem.c,v 1.28 1995/12/27 11:18:29 markm Exp $
42  */
43 
44 /*
45  * Memory special file
46  */
47 
48 #include <sys/param.h>
49 #include <sys/conf.h>
50 #include <sys/buf.h>
51 #ifdef DEVFS
52 #include <sys/devfsext.h>
53 #endif /* DEVFS */
54 #include <sys/kernel.h>
55 #include <sys/systm.h>
56 #include <sys/uio.h>
57 #include <sys/malloc.h>
58 #include <sys/proc.h>
59 
60 #include <machine/cpu.h>
61 #include <machine/random.h>
62 #include <machine/psl.h>
63 
64 #include <vm/vm.h>
65 #include <vm/vm_param.h>
66 #include <vm/lock.h>
67 #include <vm/vm_prot.h>
68 #include <vm/pmap.h>
69 #include <vm/vm_extern.h>
70 
71 
72 
/* character-device entry points implemented below */
static	d_open_t	mmopen;
static	d_close_t	mmclose;
static	d_read_t	mmrw;
static	d_ioctl_t	mmioctl;
static	d_mmap_t	memmmap;

#define CDEV_MAJOR 2	/* traditional character major of /dev/mem et al. */
/*
 * One cdevsw serves every minor (mem, kmem, null, random, urandom,
 * zero, io); mmrw handles both the read and the write slot, with
 * uio_rw telling the directions apart.
 */
static struct cdevsw mem_cdevsw =
	{ mmopen,	mmclose,	mmrw,		mmrw,		/*2*/
	  mmioctl,	nullstop,	nullreset,	nodevtotty,/* memory */
	  seltrue,	memmmap,	NULL,	"mem",	NULL, -1 };
84 
#ifdef DEVFS
/* tokens handed back by devfs_add_devsw(), one per device node created */
static void *mem_devfs_token;
static void *kmem_devfs_token;
static void *null_devfs_token;
static void *random_devfs_token;
static void *urandom_devfs_token;
static void *zero_devfs_token;
static void *io_devfs_token;

static void memdevfs_init __P((void));

/*
 * Create the devfs nodes for every minor this driver implements.
 * Called once from mem_drvinit() after the cdevsw has been installed.
 * mem/kmem are 0640 root:kmem(2); null/random/urandom/zero are world
 * read/write; io is 0600 root-only since opening it grants I/O
 * privilege (see mmopen()).
 */
static void
memdevfs_init()
{
/*            path	name	cdevsw	   minor	type   uid gid perm*/
    mem_devfs_token = devfs_add_devsw(
		"/",	"mem",	&mem_cdevsw,    0,	DV_CHR, 0,  2, 0640);
    kmem_devfs_token = devfs_add_devsw(
		"/",	"kmem",	&mem_cdevsw,    1,	DV_CHR, 0,  2, 0640);
    null_devfs_token = devfs_add_devsw(
		"/",	"null",	&mem_cdevsw,    2,	DV_CHR, 0,  0, 0666);
    random_devfs_token = devfs_add_devsw(
		"/",	"random", &mem_cdevsw,  3,	DV_CHR, 0,  0, 0666);
    urandom_devfs_token = devfs_add_devsw(
		"/",	"urandom", &mem_cdevsw, 4,	DV_CHR, 0,  0, 0666);
    zero_devfs_token = devfs_add_devsw(
		"/",	"zero",	&mem_cdevsw,    12,	DV_CHR, 0,  0, 0666);
    io_devfs_token = devfs_add_devsw(
		"/",	"io",	&mem_cdevsw,    14,	DV_CHR, 0,  0, 0600);
}
#endif /* DEVFS */
116 
/* scratch kernel VA used by mmrw() to map one physical page at a time */
extern        char *ptvmmap;            /* poor name! */
118 
119 static int
120 mmclose(dev, flags, fmt, p)
121 	dev_t dev;
122 	int flags;
123 	int fmt;
124 	struct proc *p;
125 {
126 	struct trapframe *fp;
127 
128 	switch (minor(dev)) {
129 	case 14:
130 		fp = (struct trapframe *)curproc->p_md.md_regs;
131 		fp->tf_eflags &= ~PSL_IOPL;
132 		break;
133 	default:
134 		break;
135 	}
136 	return(0);
137 }
138 
139 static int
140 mmopen(dev, flags, fmt, p)
141 	dev_t dev;
142 	int flags;
143 	int fmt;
144 	struct proc *p;
145 {
146 	struct trapframe *fp;
147 
148 	switch (minor(dev)) {
149 	case 14:
150 		fp = (struct trapframe *)curproc->p_md.md_regs;
151 		fp->tf_eflags |= PSL_IOPL;
152 		break;
153 	default:
154 		break;
155 	}
156 	return(0);
157 }
158 
/*
 * Unified read/write routine for all the memory devices; installed as
 * both d_read and d_write in mem_cdevsw, with uio->uio_rw telling the
 * directions apart.
 *
 * Minors handled:
 *	0   /dev/mem	 physical memory, one page at a time via ptvmmap
 *	1   /dev/kmem	 kernel virtual memory
 *	2   /dev/null	 EOF on read, rathole on write
 *	3   /dev/random	 entropy pool (EOF when the pool runs dry)
 *	4   /dev/urandom unlimited pseudo-random bytes
 *	12  /dev/zero	 zeroes on read, rathole on write
 *	14+ I/O ports	 compiled out (#ifdef notyet)
 *
 * Returns 0 on success or an errno (EFAULT, ENXIO, or whatever
 * uiomove() reports).
 */
static int
mmrw(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	register int o;
	register u_int c, v;
	u_int poolsize;
	register struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;	/* lazily-allocated scratch buffer for minors 3/4/12 */

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* skip exhausted iovec entries */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

/* minor device 0 is physical memory */
		case 0:
			/*
			 * Temporarily map the target physical page at the
			 * ptvmmap scratch address, copy at most one page
			 * (bounded by the page remainder at both the file
			 * offset and the user buffer, and by iov_len),
			 * then tear the mapping back down.
			 * NOTE(review): uio_offset is truncated through a
			 * u_int here, limiting addressable physical memory
			 * -- confirm against platforms with >4GB.
			 */
			v = uio->uio_offset;
			pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
				uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
				TRUE);
			o = (int)uio->uio_offset & PGOFSET;
			c = (u_int)(NBPG - ((int)iov->iov_base & PGOFSET));
			c = min(c, (u_int)(NBPG - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
				    (vm_offset_t)&ptvmmap[NBPG]);
			continue;

/* minor device 1 is kernel memory */
		case 1: {
			vm_offset_t addr, eaddr;
			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently resident so
			 * that we don't create any zero-fill pages.
			 */
			addr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			for (; addr < eaddr; addr += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, addr) == 0)
					return EFAULT;

			/* reject ranges the kernel itself may not access */
			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return(EFAULT);
			error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
			continue;
		}

/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_READ)
				return (0);
			/* fall through to the common "consume c bytes" code */
			c = iov->iov_len;
			break;

/* minor device 3 (/dev/random) is source of filth on read, rathole on write */
		case 3:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
				    malloc(CLBYTES, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, CLBYTES);
			poolsize = read_random(buf, c);
			if (poolsize == 0) {
				/* entropy pool exhausted: report EOF */
				if (buf)
					free(buf, M_TEMP);
				return (0);
			}
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

/* minor device 4 (/dev/urandom) is source of muck on read, rathole on write */
		case 4:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
				    malloc(CLBYTES, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, CLBYTES);
			/* never blocks and never returns 0, unlike minor 3 */
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL) {
				/* zero the scratch page once; it is reused below */
				buf = (caddr_t)
				    malloc(CLBYTES, M_TEMP, M_WAITOK);
				bzero(buf, CLBYTES);
			}
			c = min(iov->iov_len, CLBYTES);
			error = uiomove(buf, (int)c, uio);
			continue;

#ifdef notyet
/* 386 I/O address space (/dev/ioport[bwl]) is a read/write access to seperate
   i/o device address bus, different than memory bus. Semantics here are
   very different than ordinary read/write, as if iov_len is a multiple
   an implied string move from a single port will be done. Note that lseek
   must be used to set the port number reliably. */
		case 14:
			if (iov->iov_len == 1) {
				u_char tmp;
				tmp = inb(uio->uio_offset);
				error = uiomove (&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
					iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insb(uio->uio_offset, iov->iov_base,
					iov->iov_len);
			}
			break;
		case 15:
			if (iov->iov_len == sizeof (short)) {
				u_short tmp;
				tmp = inw(uio->uio_offset);
				error = uiomove (&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
					iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insw(uio->uio_offset, iov->iov_base,
					iov->iov_len/ sizeof (short));
			}
			break;
		case 16:
			if (iov->iov_len == sizeof (long)) {
				u_long tmp;
				tmp = inl(uio->uio_offset);
				error = uiomove (&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
					iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insl(uio->uio_offset, iov->iov_base,
					iov->iov_len/ sizeof (long));
			}
			break;
#endif

		default:
			return (ENXIO);
		}
		if (error)
			break;
		/*
		 * Cases that "break" (rather than "continue") consumed c
		 * bytes without calling uiomove(), so advance the uio
		 * bookkeeping by hand here.
		 */
		iov->iov_base += c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		free(buf, M_TEMP);
	return (error);
}
338 
339 
340 
341 
342 /*******************************************************\
343 * allow user processes to MMAP some memory sections	*
344 * instead of going through read/write			*
345 \*******************************************************/
346 static int
347 memmmap(dev_t dev, int offset, int nprot)
348 {
349 	switch (minor(dev))
350 	{
351 
352 /* minor device 0 is physical memory */
353 	case 0:
354         	return i386_btop(offset);
355 
356 /* minor device 1 is kernel memory */
357 	case 1:
358         	return i386_btop(vtophys(offset));
359 
360 	default:
361 		return -1;
362 	}
363 }
364 
365 /*
366  * Allow userland to select which interrupts will be used in the muck
367  * gathering business.
368  */
369 static int
370 mmioctl(dev, cmd, cmdarg, flags, p)
371 	dev_t dev;
372 	int cmd;
373 	caddr_t cmdarg;
374 	int flags;
375 	struct proc *p;
376 {
377 	static u_int16_t interrupt_allowed = 0;
378 	u_int16_t interrupt_mask;
379 	int error;
380 
381 	if (minor(dev) != 3 && minor(dev) != 4)
382 		return (ENODEV);
383 
384 	if (*(u_int16_t *)cmdarg >= 16)
385 		return (EINVAL);
386 
387 	/* Only root can do this */
388 	error = suser(p->p_ucred, &p->p_acflag);
389 	if (error) {
390 		return (error);
391 	}
392 	interrupt_mask = 1 << *(u_int16_t *)cmdarg;
393 
394 	switch (cmd) {
395 
396 		case MEM_SETIRQ:
397 			if (!(interrupt_allowed & interrupt_mask)) {
398 				disable_intr();
399 				interrupt_allowed |= interrupt_mask;
400 				sec_intr_handler[*(u_int16_t *)cmdarg] =
401 					intr_handler[*(u_int16_t *)cmdarg];
402 				intr_handler[*(u_int16_t *)cmdarg] =
403 					add_interrupt_randomness;
404 				sec_intr_unit[*(u_int16_t *)cmdarg] =
405 					intr_unit[*(u_int16_t *)cmdarg];
406 				intr_unit[*(u_int16_t *)cmdarg] =
407 					*(u_int16_t *)cmdarg;
408 				enable_intr();
409 			}
410 			else return (EPERM);
411 			break;
412 
413 		case MEM_CLEARIRQ:
414 			if (interrupt_allowed & interrupt_mask) {
415 				disable_intr();
416 				interrupt_allowed &= ~(interrupt_mask);
417 				intr_handler[*(u_int16_t *)cmdarg] =
418 					sec_intr_handler[*(u_int16_t *)cmdarg];
419 				intr_unit[*(u_int16_t *)cmdarg] =
420 					sec_intr_unit[*(u_int16_t *)cmdarg];
421 				enable_intr();
422 			}
423 			else return (EPERM);
424 			break;
425 
426 		case MEM_RETURNIRQ:
427 			*(u_int16_t *)cmdarg = interrupt_allowed;
428 			break;
429 
430 		default:
431 			return (ENOTTY);
432 	}
433 	return (0);
434 }
435 
436 /*
437  * Routine that identifies /dev/mem and /dev/kmem.
438  *
439  * A minimal stub routine can always return 0.
440  */
441 int
442 iskmemdev(dev)
443 	dev_t dev;
444 {
445 
446 	return ((major(dev) == mem_cdevsw.d_maj)
447 	      && (minor(dev) == 0 || minor(dev) == 1));
448 }
449 
450 int
451 iszerodev(dev)
452 	dev_t dev;
453 {
454 	return ((major(dev) == mem_cdevsw.d_maj)
455 	  && minor(dev) == 12);
456 }
457 
458 
459 
/*
 * Set once mem_drvinit() has registered the cdevsw entry; guards
 * against double installation.  (Was declared with implicit int,
 * which is invalid C since C99 -- made explicit.)
 */
static int mem_devsw_installed = 0;
461 
462 static void
463 mem_drvinit(void *unused)
464 {
465 	dev_t dev;
466 
467 	if( ! mem_devsw_installed ) {
468 		dev = makedev(CDEV_MAJOR, 0);
469 		cdevsw_add(&dev,&mem_cdevsw, NULL);
470 		mem_devsw_installed = 1;
471 #ifdef DEVFS
472 		memdevfs_init();
473 #endif
474 	}
475 }
476 
/* run mem_drvinit() at boot during the driver-initialization phase */
SYSINIT(memdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,mem_drvinit,NULL)
478 
479