/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 *	$Id: mem.c,v 1.46 1997/07/20 08:37:20 bde Exp $
 */

/*
 * Memory special file
 */

#include "opt_perfmon.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/buf.h>
#ifdef DEVFS
#include <sys/devfsext.h>
#endif /* DEVFS */
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <machine/cpu.h>
#include <machine/random.h>
#include <machine/psl.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#include <i386/isa/intr_machdep.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

static d_open_t		mmopen;
static d_close_t	mmclose;
static d_read_t		mmrw;
static d_ioctl_t	mmioctl;
static d_mmap_t		memmmap;
static d_poll_t		mmpoll;

#define CDEV_MAJOR 2
static struct cdevsw mem_cdevsw =
	{ mmopen,	mmclose,	mmrw,		mmrw,		/*2*/
	  mmioctl,	nullstop,	nullreset,	nodevtotty,	/* memory */
	  mmpoll,	memmmap,	NULL,		"mem",	NULL,	-1 };

static caddr_t zbuf;

#ifdef DEVFS
static void *mem_devfs_token;
static void *kmem_devfs_token;
static void *null_devfs_token;
static void *random_devfs_token;
static void *urandom_devfs_token;
static void *zero_devfs_token;
static void *io_devfs_token;
#ifdef PERFMON
static void *perfmon_devfs_token;
#endif

static void memdevfs_init __P((void));

static void
memdevfs_init()
{
	mem_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 0, DV_CHR,
				 UID_ROOT, GID_KMEM, 0640, "mem");
	kmem_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 1, DV_CHR,
				 UID_ROOT, GID_KMEM, 0640, "kmem");
	null_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 2, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0666, "null");
	random_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 3, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0644, "random");
	urandom_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 4, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0644, "urandom");
	zero_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 12, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0666, "zero");
	io_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 14, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0600, "io");
#ifdef PERFMON
	perfmon_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 32, DV_CHR,
				 UID_ROOT, GID_KMEM, 0640, "perfmon");
#endif /* PERFMON */
}
#endif /* DEVFS */

extern char *ptvmmap;		/* poor name! */
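
/*
 * Minor device map, as registered in memdevfs_init() above:
 *
 *	0	/dev/mem	physical memory
 *	1	/dev/kmem	kernel virtual memory
 *	2	/dev/null	EOF on read, rathole on write
 *	3	/dev/random	entropy pool (may run dry)
 *	4	/dev/urandom	unlimited pseudo-random bytes
 *	12	/dev/zero	nulls on read, rathole on write
 *	14	/dev/io		I/O privilege (IOPL) control
 *	32	/dev/perfmon	performance counters (PERFMON only)
 */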

static int
mmclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	switch (minor(dev)) {
#ifdef PERFMON
	case 32:
		return perfmon_close(dev, flags, fmt, p);
#endif
	case 14:
		curproc->p_md.md_regs->tf_eflags &= ~PSL_IOPL;
		break;
	default:
		break;
	}
	return (0);
}

static int
mmopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	int error;

	switch (minor(dev)) {
	case 32:
#ifdef PERFMON
		return perfmon_open(dev, flags, fmt, p);
#else
		return ENODEV;
#endif
	case 14:
		error = suser(p->p_ucred, &p->p_acflag);
		if (error != 0)
			return (error);
		if (securelevel > 0)
			return (EPERM);
		curproc->p_md.md_regs->tf_eflags |= PSL_IOPL;
		break;
	default:
		break;
	}
	return (0);
}
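
/*
 * Opening /dev/io (minor 14) sets PSL_IOPL in the caller's saved eflags, so
 * a root process at securelevel <= 0 may then touch I/O ports directly; the
 * close routine above clears IOPL again.  A minimal userland sketch, assuming
 * the inb() inline from <machine/cpufunc.h> (not part of this file):
 *
 *	int fd = open("/dev/io", O_RDWR);	// must be root, securelevel <= 0
 *	if (fd >= 0) {
 *		u_char v = inb(0x80);		// port access is now permitted
 *		close(fd);			// IOPL is revoked on close
 *	}
 */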

static int
mmrw(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	register int o;
	register u_int c, v;
	u_int poolsize;
	register struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		/* minor device 0 is physical memory */
		case 0:
			v = uio->uio_offset;
			pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
			    uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
			    TRUE);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
			    (vm_offset_t)&ptvmmap[PAGE_SIZE]);
			continue;

		/* minor device 1 is kernel memory */
		case 1: {
			vm_offset_t addr, eaddr;
			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);

			if (addr < (vm_offset_t)VADDR(PTDPTDI, 0))
				return EFAULT;
			if (eaddr >= (vm_offset_t)VADDR(APTDPTDI, 0))
				return EFAULT;
			for (; addr < eaddr; addr += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, addr) == 0)
					return EFAULT;

			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
			continue;
		}

		/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;

		/* minor device 3 (/dev/random) is source of filth on read, rathole on write */
		case 3:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			poolsize = read_random(buf, c);
			if (poolsize == 0) {
				if (buf)
					free(buf, M_TEMP);
				return (0);
			}
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

		/* minor device 4 (/dev/urandom) is source of muck on read, rathole on write */
		case 4:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

		/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
				bzero(zbuf, PAGE_SIZE);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;

#ifdef notyet
		/*
		 * The 386 I/O address space (/dev/ioport[bwl]) gives
		 * read/write access to the separate I/O device address bus,
		 * distinct from the memory bus.  Semantics here are very
		 * different from ordinary read/write: if iov_len is a
		 * multiple of the access width, an implied string move from
		 * a single port will be done.  Note that lseek must be used
		 * to set the port number reliably.
		 */
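		/*
		 * A hedged sketch of how this would be driven from userland
		 * if it were enabled (hypothetical, since the code below is
		 * under "notyet"; the node name comes from the comment
		 * above and 0x64 is just an example port):
		 *
		 *	int fd = open("/dev/ioport", O_RDONLY);
		 *	u_char st;
		 *	lseek(fd, 0x64, SEEK_SET);	// select the port
		 *	read(fd, &st, 1);		// one inb() from it
		 */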
		case 14:
			if (iov->iov_len == 1) {
				u_char tmp;
				tmp = inb(uio->uio_offset);
				error = uiomove(&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
				    iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insb(uio->uio_offset, iov->iov_base,
				    iov->iov_len);
			}
			break;
		case 15:
			if (iov->iov_len == sizeof (short)) {
				u_short tmp;
				tmp = inw(uio->uio_offset);
				error = uiomove(&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
				    iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insw(uio->uio_offset, iov->iov_base,
				    iov->iov_len / sizeof (short));
			}
			break;
		case 16:
			if (iov->iov_len == sizeof (long)) {
				u_long tmp;
				tmp = inl(uio->uio_offset);
				error = uiomove(&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
				    iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insl(uio->uio_offset, iov->iov_base,
				    iov->iov_len / sizeof (long));
			}
			break;
#endif

		default:
			return (ENXIO);
		}
		if (error)
			break;
		iov->iov_base += c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		free(buf, M_TEMP);
	return (error);
}

/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/
static int
memmmap(dev_t dev, int offset, int nprot)
{
	switch (minor(dev))
	{

	/* minor device 0 is physical memory */
	case 0:
		return i386_btop(offset);

	/* minor device 1 is kernel memory */
	case 1:
		return i386_btop(vtophys(offset));

	default:
		return -1;
	}
}
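
/*
 * A minimal userland sketch of memmmap() in use: mapping the first page of
 * physical memory through /dev/mem.  Assumes root access and 4096-byte
 * pages; error handling omitted, and none of this is part of the driver:
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	char *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *	// The file offset is a physical address; memmmap() above hands the
 *	// pager the corresponding physical page number via i386_btop().
 */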

/*
 * Allow userland to select which interrupts will be used in the muck
 * gathering business.
 */
static int
mmioctl(dev, cmd, cmdarg, flags, p)
	dev_t dev;
	int cmd;
	caddr_t cmdarg;
	int flags;
	struct proc *p;
{
	static u_int16_t interrupt_allowed = 0;
	u_int16_t interrupt_mask;
	int error;

	switch (minor(dev)) {
	case 3:
	case 4:
		break;

#ifdef PERFMON
	case 32:
		return perfmon_ioctl(dev, cmd, cmdarg, flags, p);
#endif
	default:
		return ENODEV;
	}

	if (*(u_int16_t *)cmdarg >= 16)
		return (EINVAL);

	/* Only root can do this */
	error = suser(p->p_ucred, &p->p_acflag);
	if (error) {
		return (error);
	}
	interrupt_mask = 1 << *(u_int16_t *)cmdarg;

	switch (cmd) {

	case MEM_SETIRQ:
		if (!(interrupt_allowed & interrupt_mask)) {
			disable_intr();
			interrupt_allowed |= interrupt_mask;
			sec_intr_handler[*(u_int16_t *)cmdarg] =
				intr_handler[*(u_int16_t *)cmdarg];
			intr_handler[*(u_int16_t *)cmdarg] =
				add_interrupt_randomness;
			sec_intr_unit[*(u_int16_t *)cmdarg] =
				intr_unit[*(u_int16_t *)cmdarg];
			intr_unit[*(u_int16_t *)cmdarg] =
				*(u_int16_t *)cmdarg;
			enable_intr();
		} else
			return (EPERM);
		break;

	case MEM_CLEARIRQ:
		if (interrupt_allowed & interrupt_mask) {
			disable_intr();
			interrupt_allowed &= ~(interrupt_mask);
			intr_handler[*(u_int16_t *)cmdarg] =
				sec_intr_handler[*(u_int16_t *)cmdarg];
			intr_unit[*(u_int16_t *)cmdarg] =
				sec_intr_unit[*(u_int16_t *)cmdarg];
			enable_intr();
		} else
			return (EPERM);
		break;

	case MEM_RETURNIRQ:
		*(u_int16_t *)cmdarg = interrupt_allowed;
		break;

	default:
		return (ENOTTY);
	}
	return (0);
}

static int
mmpoll(dev, events, p)
	dev_t dev;
	int events;
	struct proc *p;
{
	switch (minor(dev)) {
	case 3:		/* /dev/random */
		return random_poll(dev, events, p);
	case 4:		/* /dev/urandom */
	default:
		return seltrue(dev, events, p);
	}
}

/*
 * Routine that identifies /dev/mem and /dev/kmem.
 *
 * A minimal stub routine can always return 0.
 */
int
iskmemdev(dev)
	dev_t dev;
{

	return ((major(dev) == mem_cdevsw.d_maj)
	    && (minor(dev) == 0 || minor(dev) == 1));
}

int
iszerodev(dev)
	dev_t dev;
{
	return ((major(dev) == mem_cdevsw.d_maj)
	    && minor(dev) == 12);
}

static int mem_devsw_installed = 0;

static void
mem_drvinit(void *unused)
{
	dev_t dev;

	if (!mem_devsw_installed) {
		dev = makedev(CDEV_MAJOR, 0);
		cdevsw_add(&dev, &mem_cdevsw, NULL);
		mem_devsw_installed = 1;
#ifdef DEVFS
		memdevfs_init();
#endif
	}
}

SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit, NULL)
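
/*
 * A hedged userland sketch of the entropy-gathering ioctls handled by
 * mmioctl() above.  Root only; the argument is a pointer to an IRQ number
 * below 16, and the MEM_SETIRQ/MEM_CLEARIRQ/MEM_RETURNIRQ commands are
 * presumed to come from <machine/random.h>, which this file includes:
 *
 *	int fd = open("/dev/random", O_RDWR);
 *	u_int16_t irq = 3;
 *	if (ioctl(fd, MEM_SETIRQ, &irq) == 0) {
 *		// IRQ 3 now feeds add_interrupt_randomness()
 *		ioctl(fd, MEM_CLEARIRQ, &irq);	// restore the old handler
 *	}
 */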