/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 */

#include <sys/cdefs.h>
/*
 * Memory special file
 */

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

#include <machine/memdev.h>

/*
 * Used in /dev/mem drivers and elsewhere
 */
MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");

/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
        struct iovec *iov;
        void *p;
        ssize_t orig_resid;
        vm_prot_t prot;
        u_long v, vd;
        u_int c;
        int error;

        error = 0;
        orig_resid = uio->uio_resid;
        while (uio->uio_resid > 0 && error == 0) {
                iov = uio->uio_iov;
                if (iov->iov_len == 0) {
                        uio->uio_iov++;
                        uio->uio_iovcnt--;
                        if (uio->uio_iovcnt < 0)
                                panic("memrw");
                        continue;
                }
                v = uio->uio_offset;
                c = ulmin(iov->iov_len, PAGE_SIZE - (u_int)(v & PAGE_MASK));

                switch (dev2unit(dev)) {
                case CDEV_MINOR_KMEM:
                        /*
                         * Since c is clamped to be less than or equal to
                         * PAGE_SIZE, the uiomove() call does not
                         * access past the end of the direct map.
                         */
                        if (v >= DMAP_MIN_ADDRESS &&
                            v < DMAP_MIN_ADDRESS + dmaplimit) {
                                error = uiomove((void *)v, c, uio);
                                break;
                        }

                        switch (uio->uio_rw) {
                        case UIO_READ:
                                prot = VM_PROT_READ;
                                break;
                        case UIO_WRITE:
                                prot = VM_PROT_WRITE;
                                break;
                        }

                        if (!kernacc((void *)v, c, prot)) {
                                error = EFAULT;
                                break;
                        }

                        /*
                         * If the extracted address is not accessible
                         * through the direct map, then we make a
                         * private (uncached) mapping because we can't
                         * depend on the existing kernel mapping
                         * remaining valid until the completion of
                         * uiomove().
                         *
                         * XXX We cannot provide access to the
                         * physical page 0 mapped into KVA.
                         */
                        v = pmap_extract(kernel_pmap, v);
                        if (v == 0) {
                                error = EFAULT;
                                break;
                        }
                        /* FALLTHROUGH */
                case CDEV_MINOR_MEM:
                        if (v < dmaplimit) {
                                vd = PHYS_TO_DMAP(v);
                                error = uiomove((void *)vd, c, uio);
                                break;
                        }
                        if (v > cpu_getmaxphyaddr()) {
                                error = EFAULT;
                                break;
                        }
                        p = pmap_mapdev(v, PAGE_SIZE);
                        error = uiomove(p, c, uio);
                        pmap_unmapdev(p, PAGE_SIZE);
                        break;
                }
        }
        /*
         * Don't return error if any byte was written.  Read and write
         * can return error only if no i/o was performed.
         */
        if (uio->uio_resid != orig_resid)
                error = 0;
        return (error);
}

/*
 * Allow user processes to mmap(2) some memory sections
 * instead of going through read/write.
 */
/* ARGSUSED */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot __unused, vm_memattr_t *memattr __unused)
{
        if (dev2unit(dev) == CDEV_MINOR_MEM) {
                if (offset > cpu_getmaxphyaddr())
                        return (-1);
                *paddr = offset;
                return (0);
        }
        return (-1);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
int
memioctl_md(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
    struct thread *td)
{
        int nd, error = 0;
        struct mem_range_op *mo = (struct mem_range_op *)data;
        struct mem_range_desc *md;

        /* Is this for us? */
        if ((cmd != MEMRANGE_GET) &&
            (cmd != MEMRANGE_SET))
                return (ENOTTY);

        /* Any chance we can handle this? */
        if (mem_range_softc.mr_op == NULL)
                return (EOPNOTSUPP);

        /* Do we have any descriptors? */
        if (mem_range_softc.mr_ndesc == 0)
                return (ENXIO);

        switch (cmd) {
        case MEMRANGE_GET:
                nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
                if (nd > 0) {
                        md = (struct mem_range_desc *)
                            malloc(nd * sizeof(struct mem_range_desc),
                            M_MEMDESC, M_WAITOK);
                        error = mem_range_attr_get(md, &nd);
                        if (!error)
                                error = copyout(md, mo->mo_desc,
                                    nd * sizeof(struct mem_range_desc));
                        free(md, M_MEMDESC);
                } else
                        nd = mem_range_softc.mr_ndesc;
                mo->mo_arg[0] = nd;
                break;

        case MEMRANGE_SET:
                md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc),
                    M_MEMDESC, M_WAITOK);
                error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
                /* Make sure the owner string is NUL-terminated. */
                md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
                if (error == 0)
                        error = mem_range_attr_set(md, &mo->mo_arg[0]);
                free(md, M_MEMDESC);
                break;
        }
        return (error);
}
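
#if 0
/*
 * Illustrative only and never compiled into the kernel (hence the #if 0
 * guard): a minimal userland sketch of how the interfaces above are
 * typically driven.  The MEMRANGE_GET ioctl served by memioctl_md() is
 * issued twice, in the spirit of memcontrol(8): a first call with
 * mo_arg[0] == 0 only reports how many range descriptors the kernel has,
 * and a second call copies them out.  The mmap(2) at the end goes through
 * memmmap().  This assumes the struct mem_range_op and struct
 * mem_range_desc layouts from <sys/memrange.h> and a readable /dev/mem
 * (normally root only); error handling is abbreviated.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/memrange.h>
#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        struct mem_range_op mo;
        struct mem_range_desc *md;
        void *pg;
        int fd, i, nd;

        fd = open("/dev/mem", O_RDONLY);
        if (fd < 0)
                err(1, "open(/dev/mem)");

        /* First pass: ask how many descriptors are available. */
        memset(&mo, 0, sizeof(mo));
        if (ioctl(fd, MEMRANGE_GET, &mo) < 0)
                err(1, "MEMRANGE_GET (count)");
        nd = mo.mo_arg[0];

        /* Second pass: fetch the descriptors themselves. */
        md = calloc(nd, sizeof(*md));
        if (md == NULL)
                err(1, "calloc");
        mo.mo_arg[0] = nd;
        mo.mo_desc = md;
        if (ioctl(fd, MEMRANGE_GET, &mo) < 0)
                err(1, "MEMRANGE_GET (descriptors)");
        for (i = 0; i < mo.mo_arg[0]; i++)
                printf("base 0x%016jx len 0x%016jx owner %.8s\n",
                    (uintmax_t)md[i].mr_base, (uintmax_t)md[i].mr_len,
                    md[i].mr_owner);
        free(md);

        /* Map and read the first page of physical memory via memmmap(). */
        pg = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
        if (pg == MAP_FAILED)
                err(1, "mmap(/dev/mem)");
        printf("first byte of physical memory: 0x%02x\n",
            *(unsigned char *)pg);
        munmap(pg, getpagesize());

        close(fd);
        return (0);
}
#endif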