/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/rwlock.h>
#include <sys/uio.h>
#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

int
physio(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *csw;
	struct buf *pbuf;
	struct bio *bp;
	struct vm_page **pages;
	char *base, *sa;
	u_int iolen, poff;
	int error, i, npages, maxpages;
	vm_prot_t prot;

	csw = dev->si_devsw;
	npages = 0;
	sa = NULL;
	/* check if character device is being destroyed */
	if (csw == NULL)
		return (ENXIO);

	/* XXX: sanity check */
	if (dev->si_iosize_max < PAGE_SIZE) {
		printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	/*
	 * If the driver does not want I/O to be split, that means that we
	 * need to reject any requests that will not fit into one buffer.
	 */
	if (dev->si_flags & SI_NOSPLIT &&
	    (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > maxphys ||
	    uio->uio_iovcnt > 1)) {
		/*
		 * Tell the user why his I/O was rejected.
		 */
		if (uio->uio_resid > dev->si_iosize_max)
			uprintf("%s: request size=%zd > si_iosize_max=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, dev->si_iosize_max);
		if (uio->uio_resid > maxphys)
			uprintf("%s: request size=%zd > maxphys=%lu; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, maxphys);
		if (uio->uio_iovcnt > 1)
			uprintf("%s: request vectors=%d > 1; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_iovcnt);
		return (EFBIG);
	}

	/*
	 * Keep the process UPAGES from being swapped.  Processes swapped
	 * out while holding pbufs, used by swapper, may lead to deadlock.
	 */
	PHOLD(curproc);

	bp = g_alloc_bio();
	if (uio->uio_segflg != UIO_USERSPACE) {
		pbuf = NULL;
		pages = NULL;
	} else if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
		pbuf = NULL;
		maxpages = btoc(MIN(uio->uio_resid, maxphys)) + 1;
		pages = malloc(sizeof(*pages) * maxpages, M_DEVBUF, M_WAITOK);
	} else {
		pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
		MPASS((pbuf->b_flags & B_MAXPHYS) != 0);
		sa = pbuf->b_data;
		maxpages = PBUF_PAGES;
		pages = pbuf->b_pages;
	}
	prot = VM_PROT_READ;
	if (uio->uio_rw == UIO_READ)
		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
	error = 0;
	for (i = 0; i < uio->uio_iovcnt; i++) {
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(curproc);
			if (uio->uio_rw == UIO_READ) {
				racct_add_force(curproc, RACCT_READBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif /* RACCT */

		while (uio->uio_iov[i].iov_len) {
			g_reset_bio(bp);
			if (uio->uio_rw == UIO_READ) {
				bp->bio_cmd = BIO_READ;
				curthread->td_ru.ru_inblock++;
			} else {
				bp->bio_cmd = BIO_WRITE;
				curthread->td_ru.ru_oublock++;
			}
			bp->bio_offset = uio->uio_offset;
			base = uio->uio_iov[i].iov_base;
			bp->bio_length = uio->uio_iov[i].iov_len;
			if (bp->bio_length > dev->si_iosize_max)
				bp->bio_length = dev->si_iosize_max;
			if (bp->bio_length > maxphys)
				bp->bio_length = maxphys;
			bp->bio_bcount = bp->bio_length;
			bp->bio_dev = dev;

			if (pages) {
				if ((npages = vm_fault_quick_hold_pages(
				    &curproc->p_vmspace->vm_map,
				    (vm_offset_t)base, bp->bio_length,
				    prot, pages, maxpages)) < 0) {
					error = EFAULT;
					goto doerror;
				}
				poff = (vm_offset_t)base & PAGE_MASK;
				if (pbuf && sa) {
					pmap_qenter((vm_offset_t)sa,
					    pages, npages);
					bp->bio_data = sa + poff;
				} else {
					bp->bio_ma = pages;
					bp->bio_ma_n = npages;
					bp->bio_ma_offset = poff;
					bp->bio_data = unmapped_buf;
					bp->bio_flags |= BIO_UNMAPPED;
				}
			} else
				bp->bio_data = base;

			csw->d_strategy(bp);
			if (uio->uio_rw == UIO_READ)
				biowait(bp, "physrd");
			else
				biowait(bp, "physwr");

			if (pages) {
				if (pbuf)
					pmap_qremove((vm_offset_t)sa, npages);
				vm_page_unhold_pages(pages, npages);
			}

			iolen = bp->bio_length - bp->bio_resid;
			if (iolen == 0 && !(bp->bio_flags & BIO_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base =
			    (char *)uio->uio_iov[i].iov_base + iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if (bp->bio_flags & BIO_ERROR) {
				error = bp->bio_error;
				goto doerror;
			}
		}
	}
doerror:
	if (pbuf)
		uma_zfree(pbuf_zone, pbuf);
	else if (pages)
		free(pages, M_DEVBUF);
	g_destroy_bio(bp);
	PRELE(curproc);
	return (error);
}
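
/*
 * Illustrative usage sketch, kept under #if 0 so it is never compiled:
 * a simple character device driver commonly points its d_read and
 * d_write entry points at a thin wrapper around physio(), which then
 * wires the user buffer and issues BIO_READ/BIO_WRITE requests through
 * the driver's d_strategy routine.  The names below (mydev_*, "mydev")
 * are hypothetical and shown only to document the expected call
 * pattern; a real driver also supplies d_open, d_close and d_strategy.
 */
#if 0
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/buf.h>

static int
mydev_rdwr(struct cdev *dev, struct uio *uio, int ioflag)
{

	/* Delegate the transfer to physio(); it calls our d_strategy. */
	return (physio(dev, uio, ioflag));
}

static struct cdevsw mydev_cdevsw = {
	.d_version =	D_VERSION,
	.d_name =	"mydev",
	.d_read =	mydev_rdwr,
	.d_write =	mydev_rdwr,
	/* d_open, d_close and d_strategy omitted for brevity. */
};
#endif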