/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 *	$Id: mem.c,v 1.32 1996/04/01 21:02:14 scrappy Exp $
 */

/*
 * Memory special file.
 *
 * One cdevsw serves several devices, selected by minor number:
 *	0  /dev/mem		1  /dev/kmem		2  /dev/null
 *	3  /dev/random		4  /dev/urandom		12 /dev/zero
 *	14 /dev/io		32 /dev/perfmon (PERFMON only)
 * (minor assignments grounded in the devfs_add_devswf() calls below
 * and the switch cases in the open/close/read-write routines)
 */

#include "opt_perfmon.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/buf.h>
#ifdef DEVFS
#include <sys/devfsext.h>
#endif /* DEVFS */
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <machine/cpu.h>
#include <machine/random.h>
#include <machine/psl.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/lock.h>
#include <vm/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>


/* Forward declarations of the cdevsw entry points defined below. */
static d_open_t mmopen;
static d_close_t mmclose;
static d_read_t mmrw;
static d_ioctl_t mmioctl;
static d_mmap_t memmmap;

#define CDEV_MAJOR 2
/* Character-device switch: mmrw handles both read and write. */
static struct cdevsw mem_cdevsw =
	{ mmopen,	mmclose,	mmrw,		mmrw,		/*2*/
	  mmioctl,	nullstop,	nullreset,	nodevtotty,/* memory */
	  seltrue,	memmmap,	NULL,		"mem",	NULL,	-1 };

#ifdef DEVFS
/* Opaque DEVFS handles returned by devfs_add_devswf(), one per node. */
static void *mem_devfs_token;
static void *kmem_devfs_token;
static void *null_devfs_token;
static void *random_devfs_token;
static void *urandom_devfs_token;
static void *zero_devfs_token;
static void *io_devfs_token;
#ifdef PERFMON
static void *perfmon_devfs_token;
#endif

static void memdevfs_init __P((void));

/*
 * Register one DEVFS node per supported minor.  The minor numbers and
 * file modes here define the user-visible access policy (e.g. "io" is
 * 0600 root-only, "null"/"zero" are world read/write).
 */
static void
memdevfs_init()
{
	mem_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 0, DV_CHR,
				 UID_ROOT, GID_KMEM, 0640, "mem");
	kmem_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 1, DV_CHR,
				 UID_ROOT, GID_KMEM, 0640, "kmem");
	null_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 2, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0666, "null");
	random_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 3, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0644, "random");
	urandom_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 4, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0644, "urandom");
	zero_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 12, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0666, "zero");
	io_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 14, DV_CHR,
				 UID_ROOT, GID_WHEEL, 0600, "io");
#ifdef PERFMON
	perfmon_devfs_token =
		devfs_add_devswf(&mem_cdevsw, 32, DV_CHR,
				 UID_ROOT, GID_KMEM, 0640, "perfmon");
#endif /* PERFMON */
}
#endif /* DEVFS */

/* Scratch kernel VA used to window one physical page at a time (mmrw case 0). */
extern char *ptvmmap;		/* poor name!
 */

/*
 * Close routine.  For /dev/io (minor 14) revoke the process's direct
 * I/O-port privilege by clearing the PSL_IOPL bits in its saved
 * trapframe.  With PERFMON configured, minor 32 is forwarded to the
 * performance-counter driver.  Other minors need no close-time work.
 */
static int
mmclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	struct trapframe *fp;

	switch (minor(dev)) {
#ifdef PERFMON
	case 32:
		return perfmon_close(dev, flags, fmt, p);
#endif
	case 14:
		/* Drop IOPL; uses curproc's frame, not p's -- presumably
		 * equivalent here, but TODO confirm. */
		fp = (struct trapframe *)curproc->p_md.md_regs;
		fp->tf_eflags &= ~PSL_IOPL;
		break;
	default:
		break;
	}
	return(0);
}

/*
 * Open routine.  For /dev/io (minor 14) grant the opening process
 * direct I/O-port access by setting PSL_IOPL in its saved trapframe.
 * Minor 32 is /dev/perfmon when PERFMON is configured, ENODEV
 * otherwise.  All other minors open unconditionally.
 *
 * NOTE(review): no suser()/securelevel check is made before granting
 * IOPL; access control rests entirely on the device node's file mode
 * (0600 root under DEVFS) -- confirm that is the intended policy.
 */
static int
mmopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	struct trapframe *fp;

	switch (minor(dev)) {
	case 32:
#ifdef PERFMON
		return perfmon_open(dev, flags, fmt, p);
#else
		return ENODEV;
#endif
	case 14:
		fp = (struct trapframe *)curproc->p_md.md_regs;
		fp->tf_eflags |= PSL_IOPL;
		break;
	default:
		break;
	}
	return(0);
}

/*
 * Read/write for all minors.  One iovec segment is handled per loop
 * pass; `c' is the byte count moved in that pass, and the code after
 * the switch advances iov_base/iov_len/uio_offset/uio_resid by it
 * (cases that call uiomove() `continue' instead, since uiomove has
 * already advanced the uio).  A single CLBYTES scratch buffer is
 * lazily allocated for the random/urandom/zero minors and freed on
 * exit.
 */
static int
mmrw(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	register int o;
	register u_int c, v;
	u_int poolsize;
	register struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* Skip exhausted segments; negative count is a bug. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

/* minor device 0 is physical memory */
		case 0:
			/*
			 * Window the target physical page at ptvmmap for
			 * the duration of the copy, then unmap it.
			 * NOTE(review): `v' is the raw (possibly unaligned)
			 * physical offset; presumably pmap_enter truncates
			 * it to a page boundary -- confirm.
			 */
			v = uio->uio_offset;
			pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
				uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
				TRUE);
			/* Clamp the transfer to one page on both sides. */
			o = (int)uio->uio_offset & PGOFSET;
			c = (u_int)(NBPG - ((int)iov->iov_base & PGOFSET));
			c = min(c, (u_int)(NBPG - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
				(vm_offset_t)&ptvmmap[NBPG]);
			continue;

/* minor device 1 is kernel memory */
		case 1: {
			vm_offset_t addr, eaddr;
			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently resident so
			 * that we don't create any zero-fill pages.
			 */
			addr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			for (; addr < eaddr; addr += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, addr) == 0)
					return EFAULT;

			/* Reject ranges the kernel itself may not access. */
			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return(EFAULT);
			error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
			continue;
		}

/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_READ)
				return (0);	/* immediate EOF on read */
			c = iov->iov_len;	/* swallow writes */
			break;

/* minor device 3 (/dev/random) is source of filth on read, rathole on write */
		case 3:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
					malloc(CLBYTES, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, CLBYTES);
			poolsize = read_random(buf, c);
			if (poolsize == 0) {
				/* Entropy pool empty: report EOF. */
				if (buf)
					free(buf, M_TEMP);
				return (0);
			}
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

/* minor device 4 (/dev/urandom) is source of muck on read, rathole on write */
		case 4:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
					malloc(CLBYTES, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, CLBYTES);
			/* Unlike /dev/random, never blocks or returns EOF. */
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL) {
				/* Zero the scratch buffer once; reused every pass. */
				buf = (caddr_t)
					malloc(CLBYTES, M_TEMP, M_WAITOK);
				bzero(buf, CLBYTES);
			}
			c = min(iov->iov_len, CLBYTES);
			error = uiomove(buf, (int)c, uio);
			continue;

#ifdef notyet
/* 386 I/O address space (/dev/ioport[bwl]) is a read/write access to separate
   i/o device address bus, different than memory bus. Semantics here are
   very different than ordinary read/write, as if iov_len is a multiple
   an implied string move from a single port will be done. Note that lseek
   must be used to set the port number reliably. */
		case 14:
			if (iov->iov_len == 1) {
				u_char tmp;
				tmp = inb(uio->uio_offset);
				error = uiomove (&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
					iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insb(uio->uio_offset, iov->iov_base,
					iov->iov_len);
			}
			break;
		case 15:
			if (iov->iov_len == sizeof (short)) {
				u_short tmp;
				tmp = inw(uio->uio_offset);
				error = uiomove (&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
					iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insw(uio->uio_offset, iov->iov_base,
					iov->iov_len/ sizeof (short));
			}
			break;
		case 16:
			if (iov->iov_len == sizeof (long)) {
				u_long tmp;
				tmp = inl(uio->uio_offset);
				error = uiomove (&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
					iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insl(uio->uio_offset, iov->iov_base,
					iov->iov_len/ sizeof (long));
			}
			break;
#endif

		default:
			return (ENXIO);
		}
		/* Manual uio advance for cases that `break' with count c. */
		if (error)
			break;
		iov->iov_base += c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		free(buf, M_TEMP);
	return (error);
}




/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/
/*
 * Return the physical page frame backing `offset' for mmap, or -1
 * for minors that cannot be mapped.  /dev/mem maps the offset as a
 * physical address directly; /dev/kmem translates the kernel virtual
 * address through vtophys() first.
 */
static int
memmmap(dev_t dev, int offset, int nprot)
{
	switch (minor(dev))
	{

/* minor device 0 is physical memory */
	case 0:
		return i386_btop(offset);

/* minor device 1 is kernel memory */
	case 1:
		return i386_btop(vtophys(offset));

	default:
		return -1;
	}
}

/*
 * Allow userland to select which interrupts will be used in the muck
 * gathering business.
 *
 * Only the random minors (3, 4) accept these ioctls; *cmdarg is read
 * as a u_int16_t IRQ number and must be < 16.  MEM_SETIRQ splices
 * add_interrupt_randomness in front of the existing handler (saving
 * the original in sec_intr_handler/sec_intr_unit); MEM_CLEARIRQ
 * restores it; MEM_RETURNIRQ reports the bitmask of diverted IRQs.
 * The handler-table swaps are done with interrupts disabled so an
 * IRQ cannot fire through a half-updated entry.
 */
static int
mmioctl(dev, cmd, cmdarg, flags, p)
	dev_t dev;
	int cmd;
	caddr_t cmdarg;
	int flags;
	struct proc *p;
{
	/* Bitmask of IRQs currently diverted; persists across calls. */
	static u_int16_t interrupt_allowed = 0;
	u_int16_t interrupt_mask;
	int error;

	switch(minor(dev)) {
	case 3:
	case 4:
		break;

#ifdef PERFMON
	case 32:
		return perfmon_ioctl(dev, cmd, cmdarg, flags, p);
#endif
	default:
		return ENODEV;
	}

	/* Validate the IRQ number before doing anything else. */
	if (*(u_int16_t *)cmdarg >= 16)
		return (EINVAL);

	/* Only root can do this */
	error = suser(p->p_ucred, &p->p_acflag);
	if (error) {
		return (error);
	}
	interrupt_mask = 1 << *(u_int16_t *)cmdarg;

	switch (cmd) {

	case MEM_SETIRQ:
		/* EPERM if this IRQ is already diverted. */
		if (!(interrupt_allowed & interrupt_mask)) {
			disable_intr();
			interrupt_allowed |= interrupt_mask;
			sec_intr_handler[*(u_int16_t *)cmdarg] =
				intr_handler[*(u_int16_t *)cmdarg];
			intr_handler[*(u_int16_t *)cmdarg] =
				add_interrupt_randomness;
			sec_intr_unit[*(u_int16_t *)cmdarg] =
				intr_unit[*(u_int16_t *)cmdarg];
			intr_unit[*(u_int16_t *)cmdarg] =
				*(u_int16_t *)cmdarg;
			enable_intr();
		}
		else return (EPERM);
		break;

	case MEM_CLEARIRQ:
		/* EPERM if this IRQ was never diverted. */
		if (interrupt_allowed & interrupt_mask) {
			disable_intr();
			interrupt_allowed &= ~(interrupt_mask);
			intr_handler[*(u_int16_t *)cmdarg] =
				sec_intr_handler[*(u_int16_t *)cmdarg];
			intr_unit[*(u_int16_t *)cmdarg] =
				sec_intr_unit[*(u_int16_t *)cmdarg];
			enable_intr();
		}
		else return (EPERM);
		break;

	case MEM_RETURNIRQ:
		*(u_int16_t *)cmdarg = interrupt_allowed;
		break;

	default:
		return (ENOTTY);
	}
	return (0);
}

/*
 * Routine that identifies /dev/mem and /dev/kmem.
 *
 * A minimal stub routine can always return 0.
 */
int
iskmemdev(dev)
	dev_t dev;
{

	return ((major(dev) == mem_cdevsw.d_maj)
	      && (minor(dev) == 0 || minor(dev) == 1));
}

/* True iff dev is this driver's /dev/zero (minor 12). */
int
iszerodev(dev)
	dev_t dev;
{
	return ((major(dev) == mem_cdevsw.d_maj)
	      && minor(dev) == 12);
}



/* Nonzero once cdevsw_add() has run (implicit int, K&R style). */
static mem_devsw_installed = 0;

/*
 * SYSINIT hook: install the cdevsw entry exactly once at boot and,
 * under DEVFS, create the device nodes.
 */
static void
mem_drvinit(void *unused)
{
	dev_t dev;

	if( ! mem_devsw_installed ) {
		dev = makedev(CDEV_MAJOR, 0);
		cdevsw_add(&dev,&mem_cdevsw, NULL);
		mem_devsw_installed = 1;
#ifdef DEVFS
		memdevfs_init();
#endif
	}
}

SYSINIT(memdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,mem_drvinit,NULL)
