/*-
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/rwlock.h>
#include <sys/uio.h>
#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

/*
 * Perform "raw" I/O directly between the caller's buffer(s) and the
 * character device 'dev', bypassing the buffer cache: each uio iovec is
 * carved into chunks no larger than the device's si_iosize_max (and
 * maxphys), wrapped in a struct bio, and handed synchronously to the
 * driver's d_strategy routine.
 *
 * For user-space buffers the backing pages are wired in place with
 * vm_fault_quick_hold_pages() for the duration of each chunk; depending
 * on device capability the held pages are either passed unmapped via
 * bio_ma[] or temporarily mapped into a pbuf's KVA with pmap_qenter().
 * Kernel-space buffers (uio_segflg != UIO_USERSPACE) are used as-is.
 *
 * 'ioflag' is accepted for interface compatibility but not examined here.
 *
 * Returns 0 on success (or on a short transfer / EOF), ENXIO if the
 * device is going away, EFBIG if an SI_NOSPLIT device cannot take the
 * request whole, EFAULT if the user pages cannot be wired, or the
 * driver-reported bio_error.
 */
int
physio(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *csw;
	struct buf *pbuf;	/* pbuf providing KVA + page array, or NULL */
	struct bio *bp;
	struct vm_page **pages;	/* held user pages; NULL for kernel uio */
	char *base, *sa;	/* sa: pbuf KVA window base, or NULL */
	u_int iolen, poff;
	int error, i, npages, maxpages;
	vm_prot_t prot;

	csw = dev->si_devsw;
	npages = 0;
	sa = NULL;
	/* check if character device is being destroyed */
	if (csw == NULL)
		return (ENXIO);

	/* XXX: sanity check */
	if (dev->si_iosize_max < PAGE_SIZE) {
		printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	/*
	 * If the driver does not want I/O to be split, that means that we
	 * need to reject any requests that will not fit into one buffer.
	 */
	if (dev->si_flags & SI_NOSPLIT &&
	    (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > maxphys ||
	    uio->uio_iovcnt > 1)) {
		/*
		 * Tell the user why his I/O was rejected.
		 */
		if (uio->uio_resid > dev->si_iosize_max)
			uprintf("%s: request size=%zd > si_iosize_max=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, dev->si_iosize_max);
		if (uio->uio_resid > maxphys)
			uprintf("%s: request size=%zd > maxphys=%lu; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, maxphys);
		if (uio->uio_iovcnt > 1)
			uprintf("%s: request vectors=%d > 1; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_iovcnt);
		return (EFBIG);
	}

	/*
	 * Keep the process UPAGES from being swapped. Processes swapped
	 * out while holding pbufs, used by swapper, may lead to deadlock.
	 */
	PHOLD(curproc);

	bp = g_alloc_bio();
	/*
	 * Pick a buffering strategy:
	 *  - kernel-space uio: use the caller's buffer directly, no page
	 *    holding needed;
	 *  - SI_UNMAPPED-capable device (and unmapped bios allowed): hand
	 *    the held pages to the driver via bio_ma[], no KVA mapping;
	 *    maxpages is sized for the whole request (capped at maxphys),
	 *    +1 page because the buffer may straddle a page boundary;
	 *  - otherwise: borrow a pbuf whose KVA window and page array are
	 *    reused for every chunk.
	 */
	if (uio->uio_segflg != UIO_USERSPACE) {
		pbuf = NULL;
		pages = NULL;
	} else if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
		pbuf = NULL;
		maxpages = btoc(MIN(uio->uio_resid, maxphys)) + 1;
		pages = malloc(sizeof(*pages) * maxpages, M_DEVBUF, M_WAITOK);
	} else {
		pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
		MPASS((pbuf->b_flags & B_MAXPHYS) != 0);
		sa = pbuf->b_data;
		maxpages = PBUF_PAGES;
		pages = pbuf->b_pages;
	}
	/*
	 * A device-to-memory READ writes into the user pages, so the
	 * pages must be held with write permission (and vice versa).
	 */
	prot = VM_PROT_READ;
	if (uio->uio_rw == UIO_READ)
		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
	error = 0;
	for (i = 0; i < uio->uio_iovcnt; i++) {
#ifdef RACCT
		/* Charge resource accounting once per iovec, up front. */
		if (racct_enable) {
			PROC_LOCK(curproc);
			if (uio->uio_rw == UIO_READ) {
				racct_add_force(curproc, RACCT_READBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif /* RACCT */

		/*
		 * Issue the iovec in chunks, each clamped to the device
		 * and system transfer limits, until it is consumed or a
		 * short transfer / error stops us.
		 */
		while (uio->uio_iov[i].iov_len) {
			g_reset_bio(bp);
			if (uio->uio_rw == UIO_READ) {
				bp->bio_cmd = BIO_READ;
				curthread->td_ru.ru_inblock++;
			} else {
				bp->bio_cmd = BIO_WRITE;
				curthread->td_ru.ru_oublock++;
			}
			bp->bio_offset = uio->uio_offset;
			base = uio->uio_iov[i].iov_base;
			bp->bio_length = uio->uio_iov[i].iov_len;
			if (bp->bio_length > dev->si_iosize_max)
				bp->bio_length = dev->si_iosize_max;
			if (bp->bio_length > maxphys)
				bp->bio_length = maxphys;
			bp->bio_bcount = bp->bio_length;
			bp->bio_dev = dev;

			if (pages) {
				/* Wire the user pages backing this chunk. */
				if ((npages = vm_fault_quick_hold_pages(
				    &curproc->p_vmspace->vm_map,
				    (vm_offset_t)base, bp->bio_length,
				    prot, pages, maxpages)) < 0) {
					error = EFAULT;
					goto doerror;
				}
				poff = (vm_offset_t)base & PAGE_MASK;
				if (pbuf && sa) {
					/* Mapped path: expose the pages
					 * through the pbuf's KVA window. */
					pmap_qenter((vm_offset_t)sa,
					    pages, npages);
					bp->bio_data = sa + poff;
				} else {
					/* Unmapped path: the driver works
					 * from the page list directly. */
					bp->bio_ma = pages;
					bp->bio_ma_n = npages;
					bp->bio_ma_offset = poff;
					bp->bio_data = unmapped_buf;
					bp->bio_flags |= BIO_UNMAPPED;
				}
			} else
				bp->bio_data = base;

			/* Hand off to the driver and sleep until done. */
			csw->d_strategy(bp);
			if (uio->uio_rw == UIO_READ)
				biowait(bp, "physrd");
			else
				biowait(bp, "physwr");

			/* Undo the mapping/hold before touching results. */
			if (pages) {
				if (pbuf)
					pmap_qremove((vm_offset_t)sa, npages);
				vm_page_unhold_pages(pages, npages);
			}

			/* Advance the uio by what actually transferred. */
			iolen = bp->bio_length - bp->bio_resid;
			if (iolen == 0 && !(bp->bio_flags & BIO_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base =
			    (char *)uio->uio_iov[i].iov_base + iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if (bp->bio_flags & BIO_ERROR) {
				error = bp->bio_error;
				goto doerror;
			}
		}
	}
doerror:
	/* Common exit: release whichever page-array resource we took. */
	if (pbuf)
		uma_zfree(pbuf_zone, pbuf);
	else if (pages)
		free(pages, M_DEVBUF);
	g_destroy_bio(bp);
	PRELE(curproc);
	return (error);
}