/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 *	$Id: mem.c,v 1.64 1999/08/23 20:58:38 phk Exp $
 */

/*
 * Memory special file
 */
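
/*
 * Minor device layout (see mem_drvinit() at the bottom of this file):
 *
 *	 0  /dev/mem	 physical memory
 *	 1  /dev/kmem	 kernel virtual memory
 *	 2  /dev/null	 EOF on read, rathole on write
 *	 3  /dev/random	 entropy pool (may return short reads)
 *	 4  /dev/urandom unlimited pseudo-random bytes
 *	12  /dev/zero	 zero-filled pages
 *	14  /dev/io	 I/O privilege (IOPL) control
 */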

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
#include <sys/signalvar.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/random.h>
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <i386/isa/intr_machdep.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

static	d_open_t	mmopen;
static	d_close_t	mmclose;
static	d_read_t	mmrw;
static	d_ioctl_t	mmioctl;
static	d_mmap_t	memmmap;
static	d_poll_t	mmpoll;

#define CDEV_MAJOR 2
static struct cdevsw mem_cdevsw = {
	/* open */	mmopen,
	/* close */	mmclose,
	/* read */	mmrw,
	/* write */	mmrw,
	/* ioctl */	mmioctl,
	/* stop */	nostop,
	/* reset */	noreset,
	/* devtotty */	nodevtotty,
	/* poll */	mmpoll,
	/* mmap */	memmmap,
	/* strategy */	nostrategy,
	/* name */	"mem",
	/* parms */	noparms,
	/* maj */	CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* maxio */	0,
	/* bmaj */	-1
};

static struct random_softc random_softc[16];
static caddr_t	zbuf;

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
static int mem_ioctl __P((dev_t, u_long, caddr_t, int, struct proc *));
static int random_ioctl __P((dev_t, u_long, caddr_t, int, struct proc *));

struct mem_range_softc mem_range_softc;

static int
mmclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	switch (minor(dev)) {
	case 14:
		curproc->p_md.md_regs->tf_eflags &= ~PSL_IOPL;
		break;
	default:
		break;
	}
	return (0);
}

static int
mmopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	int error;

	switch (minor(dev)) {
	case 14:
		error = suser(p);
		if (error != 0)
			return (error);
		if (securelevel > 0)
			return (EPERM);
		curproc->p_md.md_regs->tf_eflags |= PSL_IOPL;
		break;
	default:
		break;
	}
	return (0);
}
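
/*
 * Opening /dev/io (minor 14) grants the opening process direct I/O port
 * access by setting PSL_IOPL in its saved eflags; mmclose() revokes it.
 * A hypothetical userland sketch (illustrative only, not part of this
 * driver):
 *
 *	#include <fcntl.h>
 *	#include <machine/cpufunc.h>
 *
 *	int fd = open("/dev/io", O_RDWR);	(root, securelevel <= 0)
 *	if (fd != -1)
 *		outb(0x80, 0);			(any port is now accessible)
 */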

static int
mmrw(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	register int o;
	register u_int c, v;
	u_int poolsize;
	register struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

/* minor device 0 is physical memory */
		case 0:
			v = uio->uio_offset;
			pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
				uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
				TRUE);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
				    (vm_offset_t)&ptvmmap[PAGE_SIZE]);
			continue;

/* minor device 1 is kernel memory */
		case 1: {
			vm_offset_t addr, eaddr;
			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently resident so
			 * that we don't create any zero-fill pages.
			 */
			addr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);

			if (addr < (vm_offset_t)VADDR(PTDPTDI, 0))
				return EFAULT;
			if (eaddr >= (vm_offset_t)VADDR(APTDPTDI, 0))
				return EFAULT;
			for (; addr < eaddr; addr += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, addr) == 0)
					return EFAULT;

			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
			continue;
		}

/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;

/* minor device 3 (/dev/random) is source of filth on read, rathole on write */
		case 3:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			poolsize = read_random(buf, c);
			if (poolsize == 0) {
				if (buf)
					free(buf, M_TEMP);
				return (0);
			}
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

/* minor device 4 (/dev/urandom) is source of muck on read, rathole on write */
		case 4:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (CURSIG(curproc) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&random_softc[0],
				    PZERO | PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
				bzero(zbuf, PAGE_SIZE);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;

#ifdef notyet
/* 386 I/O address space (/dev/ioport[bwl]) gives read/write access to the
   separate I/O device address bus, which is distinct from the memory bus.
   The semantics differ from ordinary read/write: when iov_len is larger
   than one transfer unit, an implied string move from a single port is
   done.  Note that lseek must be used to set the port number reliably. */
		case 14:
			if (iov->iov_len == 1) {
				u_char tmp;
				tmp = inb(uio->uio_offset);
				error = uiomove(&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
					iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insb(uio->uio_offset, iov->iov_base,
					iov->iov_len);
			}
			break;
		case 15:
			if (iov->iov_len == sizeof (short)) {
				u_short tmp;
				tmp = inw(uio->uio_offset);
				error = uiomove(&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
					iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insw(uio->uio_offset, iov->iov_base,
					iov->iov_len / sizeof (short));
			}
			break;
		case 16:
			if (iov->iov_len == sizeof (long)) {
				u_long tmp;
				tmp = inl(uio->uio_offset);
				error = uiomove(&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
					iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insl(uio->uio_offset, iov->iov_base,
					iov->iov_len / sizeof (long));
			}
			break;
#endif

		default:
			return (ENXIO);
		}
		if (error)
			break;
		iov->iov_base += c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		free(buf, M_TEMP);
	return (error);
}
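
/*
 * Illustrative userland view of the read path above, here for the
 * /dev/urandom minor (a sketch only; error handling is elided):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	unsigned char buf[64];
 *	int fd = open("/dev/urandom", O_RDONLY);
 *	if (fd != -1) {
 *		(void)read(fd, buf, sizeof(buf));	(serviced by mmrw())
 *		close(fd);
 *	}
 */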

/*
 * Allow user processes to mmap some memory sections
 * instead of going through read/write.
 */
static int
memmmap(dev_t dev, vm_offset_t offset, int nprot)
{
	switch (minor(dev)) {

/* minor device 0 is physical memory */
	case 0:
		return i386_btop(offset);

/* minor device 1 is kernel memory */
	case 1:
		return i386_btop(vtophys(offset));

	default:
		return -1;
	}
}
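
/*
 * Illustrative only: memmmap() is what lets a process map physical
 * pages straight into its address space.  A hypothetical sketch that
 * maps the legacy VGA window (the addresses are an assumption, not
 * part of this driver):
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/mem", O_RDWR);
 *	char *fb = mmap(NULL, 0x20000, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0xa0000);
 */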

static int
mmioctl(dev, cmd, data, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flags;
	struct proc *p;
{

	switch (minor(dev)) {
	case 0:
		return mem_ioctl(dev, cmd, data, flags, p);
	case 3:
	case 4:
		return random_ioctl(dev, cmd, data, flags, p);
	}
	return (ENODEV);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
static int
mem_ioctl(dev, cmd, data, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flags;
	struct proc *p;
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENODEV);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
				malloc(nd * sizeof(struct mem_range_desc),
				       M_MEMDESC, M_WAITOK);
			mem_range_attr_get(md, &nd);
			error = copyout(md, mo->mo_desc,
					nd * sizeof(struct mem_range_desc));
			free(md, M_MEMDESC);
		} else {
			nd = mem_range_softc.mr_ndesc;
		}
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc),
						    M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		free(md, M_MEMDESC);
		break;

	default:
		error = EOPNOTSUPP;
	}
	return (error);
}
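
/*
 * Illustrative userland sketch of the MEMRANGE_GET protocol above
 * (cf. memcontrol(8)): pass mo_arg[0] == 0 to learn the descriptor
 * count, then call again with a buffer to fetch the table:
 *
 *	struct mem_range_op mo;
 *	struct mem_range_desc md[16];
 *	int fd = open("/dev/mem", O_RDONLY);
 *
 *	mo.mo_arg[0] = 0;
 *	ioctl(fd, MEMRANGE_GET, &mo);		(mo.mo_arg[0] = count)
 *	mo.mo_arg[0] = 16;
 *	mo.mo_desc = md;
 *	ioctl(fd, MEMRANGE_GET, &mo);		(copies out descriptors)
 */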

/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
void
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	if (*arg == 0) {
		*arg = mem_range_softc.mr_ndesc;
	} else {
		bcopy(mem_range_softc.mr_desc, mrd,
		      (*arg) * sizeof(struct mem_range_desc));
	}
}

int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}
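
/*
 * Kernel-side sketch (illustrative; the base, length and owner string
 * are assumptions): a driver could mark a frame buffer write-combining
 * through the interface above:
 *
 *	struct mem_range_desc mrd;
 *	int arg;
 *
 *	mrd.mr_base = 0xf0000000;
 *	mrd.mr_len = 0x00400000;
 *	mrd.mr_flags = MDF_WRITECOMBINE;
 *	strcpy(mrd.mr_owner, "fb");
 *	arg = MEMRANGE_SET_UPDATE;
 *	(void)mem_range_attr_set(&mrd, &arg);
 */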

#ifdef SMP
void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}
#endif

static int
random_ioctl(dev, cmd, data, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flags;
	struct proc *p;
{
	static intrmask_t interrupt_allowed;
	intrmask_t interrupt_mask;
	int error, intr;
	struct random_softc *sc;

	/*
	 * We're the random or urandom device.  The only ioctls are for
	 * selecting and inspecting which interrupts are used in the muck
	 * gathering business.
	 */
	if (cmd != MEM_SETIRQ && cmd != MEM_CLEARIRQ && cmd != MEM_RETURNIRQ)
		return (ENOTTY);

	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */
	error = suser(p);
	if (error != 0)
		return (error);

	/*
	 * XXX the data is 16-bit due to a historical botch, so we use
	 * magic 16's instead of ICU_LEN and can't support 24 interrupts
	 * under SMP.
	 */
	intr = *(int16_t *)data;
	if (cmd != MEM_RETURNIRQ && (intr < 0 || intr >= 16))
		return (EINVAL);

	interrupt_mask = 1 << intr;
	sc = &random_softc[intr];
	switch (cmd) {
	case MEM_SETIRQ:
		if (interrupt_allowed & interrupt_mask)
			break;
		interrupt_allowed |= interrupt_mask;
		sc->sc_intr = intr;
		disable_intr();
		sc->sc_handler = intr_handler[intr];
		intr_handler[intr] = add_interrupt_randomness;
		sc->sc_arg = intr_unit[intr];
		intr_unit[intr] = sc;
		enable_intr();
		break;
	case MEM_CLEARIRQ:
		if (!(interrupt_allowed & interrupt_mask))
			break;
		interrupt_allowed &= ~interrupt_mask;
		disable_intr();
		intr_handler[intr] = sc->sc_handler;
		intr_unit[intr] = sc->sc_arg;
		enable_intr();
		break;
	case MEM_RETURNIRQ:
		*(u_int16_t *)data = interrupt_allowed;
		break;
	default:
		return (ENOTTY);
	}
	return (0);
}
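
/*
 * Illustrative sketch of the protocol above (cf. rndcontrol(8)); the
 * argument is a 16-bit IRQ number, per the historical botch noted in
 * the XXX comment:
 *
 *	int16_t irq = 5;			(hypothetical IRQ)
 *	int fd = open("/dev/random", O_RDWR);
 *	ioctl(fd, MEM_SETIRQ, &irq);		(harvest entropy from IRQ 5)
 */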

static int
mmpoll(dev, events, p)
	dev_t dev;
	int events;
	struct proc *p;
{
	switch (minor(dev)) {
	case 3:		/* /dev/random */
		return random_poll(dev, events, p);
	case 4:		/* /dev/urandom */
	default:
		return seltrue(dev, events, p);
	}
}

/*
 * Routine that identifies /dev/mem and /dev/kmem.
 *
 * A minimal stub routine can always return 0.
 */
int
iskmemdev(dev)
	dev_t dev;
{

	return ((major(dev) == mem_cdevsw.d_maj)
	    && (minor(dev) == 0 || minor(dev) == 1));
}

int
iszerodev(dev)
	dev_t dev;
{
	return ((major(dev) == mem_cdevsw.d_maj)
	    && minor(dev) == 12);
}

static void
mem_drvinit(void *unused)
{

	/* Initialise memory range handling */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->init(&mem_range_softc);

	make_dev(&mem_cdevsw, 0, UID_ROOT, GID_KMEM, 0640, "mem");
	make_dev(&mem_cdevsw, 1, UID_ROOT, GID_KMEM, 0640, "kmem");
	make_dev(&mem_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "null");
	make_dev(&mem_cdevsw, 3, UID_ROOT, GID_WHEEL, 0644, "random");
	make_dev(&mem_cdevsw, 4, UID_ROOT, GID_WHEEL, 0644, "urandom");
	make_dev(&mem_cdevsw, 12, UID_ROOT, GID_WHEEL, 0666, "zero");
	make_dev(&mem_cdevsw, 14, UID_ROOT, GID_WHEEL, 0600, "io");
}

SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit, NULL)