/*-
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/rwlock.h>
#include <sys/uio.h>
#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

int
physio(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *csw;
	struct buf *pbuf;
	struct bio *bp;
	struct vm_page **pages;
	char *base, *sa;
	u_int iolen, poff;
	int error, i, npages, maxpages;
	vm_prot_t prot;

	csw = dev->si_devsw;
	npages = 0;
	sa = NULL;
	/* check if character device is being destroyed */
	if (csw == NULL)
		return (ENXIO);

	/* XXX: sanity check */
	if (dev->si_iosize_max < PAGE_SIZE) {
		printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	/*
	 * If the driver does not want I/O to be split, that means that we
	 * need to reject any requests that will not fit into one buffer.
	 */
	if (dev->si_flags & SI_NOSPLIT &&
	    (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > maxphys ||
	    uio->uio_iovcnt > 1)) {
		/*
		 * Tell the user why his I/O was rejected.
		 */
		if (uio->uio_resid > dev->si_iosize_max)
			uprintf("%s: request size=%zd > si_iosize_max=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, dev->si_iosize_max);
		if (uio->uio_resid > maxphys)
			uprintf("%s: request size=%zd > maxphys=%lu; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, maxphys);
		if (uio->uio_iovcnt > 1)
			uprintf("%s: request vectors=%d > 1; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_iovcnt);
		return (EFBIG);
	}

	/*
	 * Keep the process UPAGES from being swapped.  Processes swapped
	 * out while holding pbufs, used by swapper, may lead to deadlock.
	 */
	PHOLD(curproc);

	bp = g_alloc_bio();

	/*
	 * Select a buffering strategy: kernel-space requests use the
	 * caller's buffer directly; devices that accept unmapped I/O only
	 * need the user pages held; otherwise the pages are mapped into a
	 * pbuf's KVA window.
	 */
	if (uio->uio_segflg != UIO_USERSPACE) {
		pbuf = NULL;
		pages = NULL;
	} else if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
		pbuf = NULL;
		maxpages = btoc(MIN(uio->uio_resid, maxphys)) + 1;
		pages = malloc(sizeof(*pages) * maxpages, M_DEVBUF, M_WAITOK);
	} else {
		pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
		MPASS((pbuf->b_flags & B_MAXPHYS) != 0);
		sa = pbuf->b_data;
		maxpages = PBUF_PAGES;
		pages = pbuf->b_pages;
	}
	prot = VM_PROT_READ;
	if (uio->uio_rw == UIO_READ)
		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
	error = 0;
	for (i = 0; i < uio->uio_iovcnt; i++) {
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(curproc);
			if (uio->uio_rw == UIO_READ) {
				racct_add_force(curproc, RACCT_READBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif /* RACCT */

		/*
		 * Split each iovec into bios no larger than the device's
		 * si_iosize_max and the system-wide maxphys limit.
		 */
		while (uio->uio_iov[i].iov_len) {
			g_reset_bio(bp);
			if (uio->uio_rw == UIO_READ) {
				bp->bio_cmd = BIO_READ;
				curthread->td_ru.ru_inblock++;
			} else {
				bp->bio_cmd = BIO_WRITE;
				curthread->td_ru.ru_oublock++;
			}
			bp->bio_offset = uio->uio_offset;
			base = uio->uio_iov[i].iov_base;
			bp->bio_length = uio->uio_iov[i].iov_len;
			if (bp->bio_length > dev->si_iosize_max)
				bp->bio_length = dev->si_iosize_max;
			if (bp->bio_length > maxphys)
				bp->bio_length = maxphys;
			bp->bio_bcount = bp->bio_length;
			bp->bio_dev = dev;

			if (pages) {
				if ((npages = vm_fault_quick_hold_pages(
				    &curproc->p_vmspace->vm_map,
				    (vm_offset_t)base, bp->bio_length,
				    prot, pages, maxpages)) < 0) {
					error = EFAULT;
					goto doerror;
				}
				poff = (vm_offset_t)base & PAGE_MASK;
				if (pbuf && sa) {
					pmap_qenter((vm_offset_t)sa,
					    pages, npages);
					bp->bio_data = sa + poff;
				} else {
					bp->bio_ma = pages;
					bp->bio_ma_n = npages;
					bp->bio_ma_offset = poff;
					bp->bio_data = unmapped_buf;
					bp->bio_flags |= BIO_UNMAPPED;
				}
			} else
				bp->bio_data = base;

			csw->d_strategy(bp);
			if (uio->uio_rw == UIO_READ)
				biowait(bp, "physrd");
			else
				biowait(bp, "physwr");

			if (pages) {
				if (pbuf)
					pmap_qremove((vm_offset_t)sa, npages);
				vm_page_unhold_pages(pages, npages);
			}

			iolen = bp->bio_length - bp->bio_resid;
			if (iolen == 0 && !(bp->bio_flags & BIO_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base =
			    (char *)uio->uio_iov[i].iov_base + iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if (bp->bio_flags & BIO_ERROR) {
				error = bp->bio_error;
				goto doerror;
			}
		}
	}
doerror:
	if (pbuf)
		uma_zfree(pbuf_zone, pbuf);
	else if (pages)
		free(pages, M_DEVBUF);
	g_destroy_bio(bp);
	PRELE(curproc);
	return (error);
}
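
/*
 * Minimal usage sketch, not part of the original file: a character-device
 * driver can point its d_read/d_write entry points at physio() above and
 * let it do the user-buffer wiring and request splitting.  Everything named
 * mydev_* below is hypothetical and only illustrates the wiring; a real
 * strategy routine performs an actual transfer.
 */
static void
mydev_strategy(struct bio *bp)
{

	/*
	 * Placeholder completion: a real driver moves bp->bio_length bytes
	 * at bp->bio_offset to/from bp->bio_data (or bp->bio_ma when
	 * BIO_UNMAPPED is set), records leftover bytes in bp->bio_resid and
	 * failures in bp->bio_error/BIO_ERROR.  physio() sleeps in
	 * biowait() until biodone() is called.
	 */
	bp->bio_resid = 0;
	biodone(bp);
}

static int
mydev_rdwr(struct cdev *dev, struct uio *uio, int ioflag)
{

	/* physio() wires the user pages and issues bios via d_strategy. */
	return (physio(dev, uio, ioflag));
}

static struct cdevsw mydev_cdevsw = {
	.d_version =	D_VERSION,
	.d_name =	"mydev",
	.d_read =	mydev_rdwr,
	.d_write =	mydev_rdwr,
	.d_strategy =	mydev_strategy,
};

/*
 * A driver registering this cdevsw with make_dev(9) would also typically
 * raise si_iosize_max on the returned cdev (and may set SI_UNMAPPED in
 * si_flags if its strategy routine handles BIO_UNMAPPED bios), since
 * physio() clamps every bio to si_iosize_max and maxphys.
 */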