/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 *	$Id: mem.c,v 1.44 1997/05/07 20:32:41 peter Exp $
 */

/*
 * Memory special file
 */

#include "opt_perfmon.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/buf.h>
#ifdef DEVFS
#include <sys/devfsext.h>
#endif /* DEVFS */
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <machine/cpu.h>
#include <machine/random.h>
#include <machine/psl.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#include <i386/isa/intr_machdep.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

static d_open_t		mmopen;
static d_close_t	mmclose;
static d_read_t		mmrw;
static d_ioctl_t	mmioctl;
static d_mmap_t		memmmap;
static d_select_t	mmselect;

#define CDEV_MAJOR 2
static struct cdevsw mem_cdevsw =
	{ mmopen,	mmclose,	mmrw,		mmrw,	/*2*/
	  mmioctl,	nullstop,	nullreset,	nodevtotty, /* memory */
	  mmselect,	memmmap,	NULL,		"mem",	NULL,	-1 };

static caddr_t	zbuf;

#ifdef DEVFS
static void *mem_devfs_token;
static void *kmem_devfs_token;
static void *null_devfs_token;
static void *random_devfs_token;
static void *urandom_devfs_token;
static void *zero_devfs_token;
static void *io_devfs_token;
#ifdef PERFMON
static void *perfmon_devfs_token;
#endif

static void memdevfs_init __P((void));

static void
memdevfs_init()
{
	mem_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 0, DV_CHR,
				 UID_ROOT, GID_KMEM, 0640, "mem");
	kmem_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 1, DV_CHR,
				 UID_ROOT, GID_KMEM, 0640, "kmem");
	null_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 2, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0666, "null");
	random_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 3, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0644, "random");
	urandom_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 4, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0644, "urandom");
	zero_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 12, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0666, "zero");
	io_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 14, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0600, "io");
#ifdef PERFMON
	perfmon_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 32, DV_CHR,
				 UID_ROOT, GID_KMEM, 0640, "perfmon");
#endif /* PERFMON */
}
#endif /* DEVFS */

extern char *ptvmmap;		/* poor name! */
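
/*
 * For reference, the minor-number layout established above and used by
 * the switch statements below:
 *
 *	 0  /dev/mem	 physical memory
 *	 1  /dev/kmem	 kernel virtual memory
 *	 2  /dev/null	 EOF/rathole
 *	 3  /dev/random	 entropy pool
 *	 4  /dev/urandom unlimited pseudo-random bytes
 *	12  /dev/zero	 zero source / rathole
 *	14  /dev/io	 I/O privilege
 *	32  /dev/perfmon performance counters (PERFMON only)
 *
 * On a system without DEVFS the nodes would normally come from MAKEDEV(8);
 * by hand that amounts to, e.g., "mknod /dev/mem c 2 0", since CDEV_MAJOR
 * is 2 and /dev/mem is minor 0.
 */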

static int
mmclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	switch (minor(dev)) {
#ifdef PERFMON
	case 32:
		return perfmon_close(dev, flags, fmt, p);
#endif
	case 14:
		/* Revoke the I/O privilege granted by mmopen(). */
		curproc->p_md.md_regs->tf_eflags &= ~PSL_IOPL;
		break;
	default:
		break;
	}
	return (0);
}

static int
mmopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	int error;

	switch (minor(dev)) {
	case 32:
#ifdef PERFMON
		return perfmon_open(dev, flags, fmt, p);
#else
		return ENODEV;
#endif
	case 14:
		error = suser(p->p_ucred, &p->p_acflag);
		if (error != 0)
			return (error);
		if (securelevel > 0)
			return (EPERM);
		/* Grant the process I/O privilege for direct port access. */
		curproc->p_md.md_regs->tf_eflags |= PSL_IOPL;
		break;
	default:
		break;
	}
	return (0);
}
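
/*
 * Opening /dev/io (minor 14) is all a root process needs to do to reach
 * I/O ports directly: the open sets PSL_IOPL in the saved eflags above,
 * and the close clears it again.  A minimal userland sketch, assuming
 * securelevel <= 0 and the inline inb()/outb() from <machine/cpufunc.h>
 * (an assumption; a program may just as well carry its own asm):
 *
 *	#include <fcntl.h>
 *	#include <machine/cpufunc.h>
 *
 *	int fd = open("/dev/io", O_RDWR);	-- must be root
 *	if (fd >= 0)
 *		outb(0x80, 0);			-- direct port write, no syscall
 *
 * The privilege lasts until the descriptor is closed.
 */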

static int
mmrw(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	register int o;
	register u_int c, v;
	u_int poolsize;
	register struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

/* minor device 0 is physical memory */
		case 0:
			/*
			 * Map the target physical page at the ptvmmap
			 * window, copy through the window, then unmap.
			 */
			v = uio->uio_offset;
			pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
				uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
				TRUE);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
				(vm_offset_t)&ptvmmap[PAGE_SIZE]);
			continue;

/* minor device 1 is kernel memory */
		case 1: {
			vm_offset_t addr, eaddr;
			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);

			if (addr < (vm_offset_t)VADDR(PTDPTDI, 0))
				return EFAULT;
			if (eaddr >= (vm_offset_t)VADDR(APTDPTDI, 0))
				return EFAULT;
			for (; addr < eaddr; addr += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, addr) == 0)
					return EFAULT;

			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
			continue;
		}

/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;

/* minor device 3 (/dev/random) is source of filth on read, rathole on write */
		case 3:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			poolsize = read_random(buf, c);
			if (poolsize == 0) {
				if (buf)
					free(buf, M_TEMP);
				return (0);
			}
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

/* minor device 4 (/dev/urandom) is source of muck on read, rathole on write */
		case 4:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
				bzero(zbuf, PAGE_SIZE);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;

#ifdef notyet
/*
 * The 386 I/O address space (/dev/ioport[bwl]) is read/write access to the
 * separate I/O device address bus, distinct from the memory bus.  Semantics
 * here are very different from ordinary read/write: if iov_len is a
 * multiple of the port width, an implied string move from a single port
 * will be done.  Note that lseek must be used to set the port number
 * reliably.
 */
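/*
 * A hedged sketch of the intended userland usage, assuming this "notyet"
 * code were compiled in and a /dev/ioportb node existed for minor 14
 * (memdevfs_init() above does not create one, so the path is hypothetical):
 *
 *	int fd = open("/dev/ioportb", O_RDONLY);
 *	u_char val;
 *	lseek(fd, 0x61, SEEK_SET);	-- select port 0x61
 *	read(fd, &val, 1);		-- one inb() from that port
 *
 * Reads longer than one byte turn into an insb() string move from the
 * same port into the user's buffer.
 */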
		case 14:
			if (iov->iov_len == 1) {
				u_char tmp;
				tmp = inb(uio->uio_offset);
				error = uiomove(&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
				    iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insb(uio->uio_offset, iov->iov_base,
				     iov->iov_len);
			}
			break;
		case 15:
			if (iov->iov_len == sizeof (short)) {
				u_short tmp;
				tmp = inw(uio->uio_offset);
				error = uiomove(&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
				    iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insw(uio->uio_offset, iov->iov_base,
				     iov->iov_len / sizeof (short));
			}
			break;
		case 16:
			if (iov->iov_len == sizeof (long)) {
				u_long tmp;
				tmp = inl(uio->uio_offset);
				error = uiomove(&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
				    iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insl(uio->uio_offset, iov->iov_base,
				     iov->iov_len / sizeof (long));
			}
			break;
#endif

		default:
			return (ENXIO);
		}
		if (error)
			break;
		iov->iov_base += c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		free(buf, M_TEMP);
	return (error);
}

/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/
static int
memmmap(dev_t dev, int offset, int nprot)
{
	switch (minor(dev))
	{

/* minor device 0 is physical memory */
	case 0:
		return i386_btop(offset);

/* minor device 1 is kernel memory */
	case 1:
		return i386_btop(vtophys(offset));

	default:
		return -1;
	}
}
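
/*
 * A hedged userland sketch of mapping physical memory through memmmap()
 * above; 0xa0000 (the VGA window) is just an example offset:
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/mem", O_RDWR);
 *	char *fb = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0xa0000);
 *
 * memmmap() converts the file offset to a physical page number with
 * i386_btop(), so the pages mapped are the ones at that physical address.
 */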

/*
 * Allow userland to select which interrupts will be used in the muck
 * gathering business.
 */
static int
mmioctl(dev, cmd, cmdarg, flags, p)
	dev_t dev;
	int cmd;
	caddr_t cmdarg;
	int flags;
	struct proc *p;
{
	static u_int16_t interrupt_allowed = 0;
	u_int16_t interrupt_mask;
	int error;

	switch (minor(dev)) {
	case 3:
	case 4:
		break;

#ifdef PERFMON
	case 32:
		return perfmon_ioctl(dev, cmd, cmdarg, flags, p);
#endif
	default:
		return ENODEV;
	}

	if (*(u_int16_t *)cmdarg >= 16)
		return (EINVAL);

	/* Only root can do this */
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
	interrupt_mask = 1 << *(u_int16_t *)cmdarg;

	switch (cmd) {

	case MEM_SETIRQ:
		/* Interpose the entropy gatherer, saving the old handler. */
		if (!(interrupt_allowed & interrupt_mask)) {
			disable_intr();
			interrupt_allowed |= interrupt_mask;
			sec_intr_handler[*(u_int16_t *)cmdarg] =
				intr_handler[*(u_int16_t *)cmdarg];
			intr_handler[*(u_int16_t *)cmdarg] =
				add_interrupt_randomness;
			sec_intr_unit[*(u_int16_t *)cmdarg] =
				intr_unit[*(u_int16_t *)cmdarg];
			intr_unit[*(u_int16_t *)cmdarg] =
				*(u_int16_t *)cmdarg;
			enable_intr();
		} else
			return (EPERM);
		break;

	case MEM_CLEARIRQ:
		/* Restore the handler saved by MEM_SETIRQ. */
		if (interrupt_allowed & interrupt_mask) {
			disable_intr();
			interrupt_allowed &= ~(interrupt_mask);
			intr_handler[*(u_int16_t *)cmdarg] =
				sec_intr_handler[*(u_int16_t *)cmdarg];
			intr_unit[*(u_int16_t *)cmdarg] =
				sec_intr_unit[*(u_int16_t *)cmdarg];
			enable_intr();
		} else
			return (EPERM);
		break;

	case MEM_RETURNIRQ:
		*(u_int16_t *)cmdarg = interrupt_allowed;
		break;

	default:
		return (ENOTTY);
	}
	return (0);
}

int
mmselect(dev, rw, p)
	dev_t dev;
	int rw;
	struct proc *p;
{
	switch (minor(dev)) {
	case 3:		/* /dev/random */
		return random_select(dev, rw, p);
	case 4:		/* /dev/urandom */
	default:
		return seltrue(dev, rw, p);
	}
}

/*
 * Routine that identifies /dev/mem and /dev/kmem.
 *
 * A minimal stub routine can always return 0.
 */
int
iskmemdev(dev)
	dev_t dev;
{

	return ((major(dev) == mem_cdevsw.d_maj)
	    && (minor(dev) == 0 || minor(dev) == 1));
}

int
iszerodev(dev)
	dev_t dev;
{
	return ((major(dev) == mem_cdevsw.d_maj)
	    && minor(dev) == 12);
}

static int mem_devsw_installed = 0;

static void
mem_drvinit(void *unused)
{
	dev_t dev;

	if (!mem_devsw_installed) {
		dev = makedev(CDEV_MAJOR, 0);
		cdevsw_add(&dev, &mem_cdevsw, NULL);
		mem_devsw_installed = 1;
#ifdef DEVFS
		memdevfs_init();
#endif
	}
}

SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit, NULL)
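
/*
 * A hedged userland sketch of the mmioctl() interface above: arming
 * entropy gathering on one IRQ through /dev/random.  MEM_SETIRQ and
 * friends come from <machine/random.h> (included above); the IRQ number
 * 5 is just an example, and rndcontrol(8) is the usual consumer:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <machine/random.h>
 *
 *	int fd = open("/dev/random", O_RDWR);
 *	u_int16_t irq = 5;
 *	ioctl(fd, MEM_SETIRQ, &irq);	-- route IRQ 5 into the entropy pool
 *	ioctl(fd, MEM_RETURNIRQ, &irq);	-- irq now holds the armed-IRQ bitmask
 */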