/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 *	$Id$
 */

/*
 * Memory special file
 */

#include "opt_perfmon.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/buf.h>
#ifdef DEVFS
#include <sys/devfsext.h>
#endif /* DEVFS */
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <machine/cpu.h>
#include <machine/random.h>
#include <machine/psl.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

static d_open_t		mmopen;
static d_close_t	mmclose;
static d_read_t		mmrw;
static d_ioctl_t	mmioctl;
static d_mmap_t		memmmap;
static d_select_t	mmselect;

#define CDEV_MAJOR 2
static struct cdevsw mem_cdevsw =
	{ mmopen,	mmclose,	mmrw,		mmrw,		/*2*/
	  mmioctl,	nullstop,	nullreset,	nodevtotty,	/* memory */
	  mmselect,	memmmap,	NULL,		"mem",	NULL,	-1 };

static caddr_t zbuf;
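/*
 * Minor device layout, as wired up in memdevfs_init() and the switch
 * statements below:
 *
 *	 0  /dev/mem	 physical memory
 *	 1  /dev/kmem	 kernel virtual memory
 *	 2  /dev/null	 EOF on read, rathole on write
 *	 3  /dev/random	 entropy pool (reads may be short)
 *	 4  /dev/urandom unlimited pseudo-random bytes
 *	12  /dev/zero	 nulls on read, rathole on write
 *	14  /dev/io	 I/O privilege (IOPL) control
 *	32  /dev/perfmon performance counters (PERFMON kernels only)
 */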
#ifdef DEVFS
static void *mem_devfs_token;
static void *kmem_devfs_token;
static void *null_devfs_token;
static void *random_devfs_token;
static void *urandom_devfs_token;
static void *zero_devfs_token;
static void *io_devfs_token;
#ifdef PERFMON
static void *perfmon_devfs_token;
#endif

static void memdevfs_init __P((void));

static void
memdevfs_init()
{
	mem_devfs_token =
	    devfs_add_devswf(&mem_cdevsw, 0, DV_CHR,
			     UID_ROOT, GID_KMEM, 0640, "mem");
	kmem_devfs_token =
	    devfs_add_devswf(&mem_cdevsw, 1, DV_CHR,
			     UID_ROOT, GID_KMEM, 0640, "kmem");
	null_devfs_token =
	    devfs_add_devswf(&mem_cdevsw, 2, DV_CHR,
			     UID_ROOT, GID_WHEEL, 0666, "null");
	random_devfs_token =
	    devfs_add_devswf(&mem_cdevsw, 3, DV_CHR,
			     UID_ROOT, GID_WHEEL, 0644, "random");
	urandom_devfs_token =
	    devfs_add_devswf(&mem_cdevsw, 4, DV_CHR,
			     UID_ROOT, GID_WHEEL, 0644, "urandom");
	zero_devfs_token =
	    devfs_add_devswf(&mem_cdevsw, 12, DV_CHR,
			     UID_ROOT, GID_WHEEL, 0666, "zero");
	io_devfs_token =
	    devfs_add_devswf(&mem_cdevsw, 14, DV_CHR,
			     UID_ROOT, GID_WHEEL, 0600, "io");
#ifdef PERFMON
	perfmon_devfs_token =
	    devfs_add_devswf(&mem_cdevsw, 32, DV_CHR,
			     UID_ROOT, GID_KMEM, 0640, "perfmon");
#endif /* PERFMON */
}
#endif /* DEVFS */

extern char *ptvmmap;		/* poor name! */
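/*
 * ptvmmap is a single-page kernel virtual window: for each /dev/mem
 * transfer, mmrw() below maps the target physical page into it with
 * pmap_enter(), copies through it with uiomove(), and then unmaps it
 * again with pmap_remove().
 */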
static int
mmclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	struct trapframe *fp;

	switch (minor(dev)) {
#ifdef PERFMON
	case 32:
		return perfmon_close(dev, flags, fmt, p);
#endif
	case 14:
		fp = (struct trapframe *)curproc->p_md.md_regs;
		fp->tf_eflags &= ~PSL_IOPL;
		break;
	default:
		break;
	}
	return (0);
}

static int
mmopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	struct trapframe *fp;

	switch (minor(dev)) {
	case 32:
#ifdef PERFMON
		return perfmon_open(dev, flags, fmt, p);
#else
		return ENODEV;
#endif
	case 14:
		fp = (struct trapframe *)curproc->p_md.md_regs;
		fp->tf_eflags |= PSL_IOPL;
		break;
	default:
		break;
	}
	return (0);
}
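/*
 * A minimal userland sketch (hypothetical, not part of this file) of the
 * IOPL semantics implemented by mmopen()/mmclose() above: holding /dev/io
 * open grants the process I/O privilege, and closing it takes the
 * privilege away again.
 *
 *	#include <fcntl.h>
 *	#include <machine/cpufunc.h>	// userland inb(), assuming i386
 *
 *	int fd = open("/dev/io", O_RDWR);	// node is 0600, root only
 *	if (fd != -1) {
 *		u_char v = inb(0x71);	// direct port access now permitted
 *		close(fd);		// PSL_IOPL is cleared in mmclose()
 *	}
 */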
static int
mmrw(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	register int o;
	register u_int c, v;
	u_int poolsize;
	register struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		/* minor device 0 is physical memory */
		case 0:
			v = uio->uio_offset;
			pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
			    uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
			    TRUE);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
			    (vm_offset_t)&ptvmmap[PAGE_SIZE]);
			continue;

		/* minor device 1 is kernel memory */
		case 1: {
			vm_offset_t addr, eaddr;
			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			for (; addr < eaddr; addr += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, addr) == 0)
					return EFAULT;

			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
			continue;
		}

		/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;

		/* minor device 3 (/dev/random) is source of filth on read, rathole on write */
		case 3:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			poolsize = read_random(buf, c);
			if (poolsize == 0) {
				if (buf)
					free(buf, M_TEMP);
				return (0);
			}
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

		/* minor device 4 (/dev/urandom) is source of muck on read, rathole on write */
		case 4:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

		/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
				bzero(zbuf, PAGE_SIZE);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;

#ifdef notyet
		/*
		 * 386 I/O address space (/dev/ioport[bwl]) is read/write
		 * access to a separate i/o device address bus, different
		 * from the memory bus.  Semantics here are very different
		 * from ordinary read/write: if iov_len is a multiple of the
		 * operand size, an implied string move from a single port
		 * will be done.  Note that lseek must be used to set the
		 * port number reliably.
		 */
		case 14:
			if (iov->iov_len == 1) {
				u_char tmp;
				tmp = inb(uio->uio_offset);
				error = uiomove(&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
				    iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insb(uio->uio_offset, iov->iov_base,
				    iov->iov_len);
			}
			break;
		case 15:
			if (iov->iov_len == sizeof (short)) {
				u_short tmp;
				tmp = inw(uio->uio_offset);
				error = uiomove(&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
				    iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insw(uio->uio_offset, iov->iov_base,
				    iov->iov_len / sizeof (short));
			}
			break;
		case 16:
			if (iov->iov_len == sizeof (long)) {
				u_long tmp;
				tmp = inl(uio->uio_offset);
				error = uiomove(&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
				    iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insl(uio->uio_offset, iov->iov_base,
				    iov->iov_len / sizeof (long));
			}
			break;
#endif

		default:
			return (ENXIO);
		}
		if (error)
			break;
		iov->iov_base += c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		free(buf, M_TEMP);
	return (error);
}
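/*
 * A minimal userland sketch (hypothetical) of the difference between
 * minor 3 and minor 4 above: /dev/random hands back at most what
 * read_random() can supply and returns EOF when the pool is empty,
 * while /dev/urandom always satisfies the full request.
 *
 *	char buf[64];
 *	int fd = open("/dev/random", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf));	// may be short, or 0 (EOF)
 */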
/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/
static int
memmmap(dev_t dev, int offset, int nprot)
{
	switch (minor(dev))
	{

	/* minor device 0 is physical memory */
	case 0:
		return i386_btop(offset);

	/* minor device 1 is kernel memory */
	case 1:
		return i386_btop(vtophys(offset));

	default:
		return -1;
	}
}
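/*
 * A minimal userland sketch (hypothetical) of mapping physical memory
 * through memmmap(): the mmap() file offset is interpreted as a physical
 * address, which the routine above converts to a page index with
 * i386_btop().  The VGA frame buffer address is just an example.
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	char *p = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0xa0000);
 */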
/*
 * Allow userland to select which interrupts will be used in the muck
 * gathering business.
 */
static int
mmioctl(dev, cmd, cmdarg, flags, p)
	dev_t dev;
	int cmd;
	caddr_t cmdarg;
	int flags;
	struct proc *p;
{
	static u_int16_t interrupt_allowed = 0;
	u_int16_t interrupt_mask;
	int error;

	switch (minor(dev)) {
	case 3:
	case 4:
		break;

#ifdef PERFMON
	case 32:
		return perfmon_ioctl(dev, cmd, cmdarg, flags, p);
#endif
	default:
		return ENODEV;
	}

	if (*(u_int16_t *)cmdarg >= 16)
		return (EINVAL);

	/* Only root can do this */
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
	interrupt_mask = 1 << *(u_int16_t *)cmdarg;

	switch (cmd) {

	case MEM_SETIRQ:
		if (!(interrupt_allowed & interrupt_mask)) {
			disable_intr();
			interrupt_allowed |= interrupt_mask;
			sec_intr_handler[*(u_int16_t *)cmdarg] =
				intr_handler[*(u_int16_t *)cmdarg];
			intr_handler[*(u_int16_t *)cmdarg] =
				add_interrupt_randomness;
			sec_intr_unit[*(u_int16_t *)cmdarg] =
				intr_unit[*(u_int16_t *)cmdarg];
			intr_unit[*(u_int16_t *)cmdarg] =
				*(u_int16_t *)cmdarg;
			enable_intr();
		} else
			return (EPERM);
		break;

	case MEM_CLEARIRQ:
		if (interrupt_allowed & interrupt_mask) {
			disable_intr();
			interrupt_allowed &= ~(interrupt_mask);
			intr_handler[*(u_int16_t *)cmdarg] =
				sec_intr_handler[*(u_int16_t *)cmdarg];
			intr_unit[*(u_int16_t *)cmdarg] =
				sec_intr_unit[*(u_int16_t *)cmdarg];
			enable_intr();
		} else
			return (EPERM);
		break;

	case MEM_RETURNIRQ:
		*(u_int16_t *)cmdarg = interrupt_allowed;
		break;

	default:
		return (ENOTTY);
	}
	return (0);
}

static int
mmselect(dev, rw, p)
	dev_t dev;
	int rw;
	struct proc *p;
{
	switch (minor(dev)) {
	case 3:		/* /dev/random */
		return random_select(dev, rw, p);
	case 4:		/* /dev/urandom */
	default:
		return seltrue(dev, rw, p);
	}
}

/*
 * Routine that identifies /dev/mem and /dev/kmem.
 *
 * A minimal stub routine can always return 0.
 */
int
iskmemdev(dev)
	dev_t dev;
{

	return ((major(dev) == mem_cdevsw.d_maj)
	      && (minor(dev) == 0 || minor(dev) == 1));
}

int
iszerodev(dev)
	dev_t dev;
{
	return ((major(dev) == mem_cdevsw.d_maj)
	      && minor(dev) == 12);
}

static int mem_devsw_installed = 0;

static void
mem_drvinit(void *unused)
{
	dev_t dev;

	if (!mem_devsw_installed) {
		dev = makedev(CDEV_MAJOR, 0);
		cdevsw_add(&dev, &mem_cdevsw, NULL);
		mem_devsw_installed = 1;
#ifdef DEVFS
		memdevfs_init();
#endif
	}
}

SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit, NULL)
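/*
 * A minimal userland sketch (hypothetical) of the entropy-gathering
 * ioctls handled by mmioctl() above; the argument names the IRQ (which
 * must be below 16) and the descriptor must reference minor 3 or 4.
 *
 *	u_int16_t irq = 5;
 *	int fd = open("/dev/random", O_RDWR);
 *	ioctl(fd, MEM_SETIRQ, &irq);	// route IRQ 5 into the pool
 *	ioctl(fd, MEM_RETURNIRQ, &irq);	// irq now holds the allowed mask
 */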