xref: /freebsd/sys/kern/kern_physio.c (revision cd8537910406e68d4719136a5b0cf6d23bb1b23b)
1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Absolutely no warranty of function or purpose is made by the author
17  *    John S. Dyson.
18  * 4. Modifications may be freely made to this file if the above conditions
19  *    are met.
20  */
21 
22 #include <sys/cdefs.h>
23 __FBSDID("$FreeBSD$");
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/bio.h>
28 #include <sys/buf.h>
29 #include <sys/conf.h>
30 #include <sys/malloc.h>
31 #include <sys/proc.h>
32 #include <sys/racct.h>
33 #include <sys/uio.h>
34 #include <geom/geom.h>
35 
36 #include <vm/vm.h>
37 #include <vm/vm_page.h>
38 #include <vm/vm_extern.h>
39 #include <vm/vm_map.h>
40 
/*
 * physio() -- raw ("physical") I/O between a character device and the
 * memory described by 'uio', bypassing the buffer cache.  Each iovec is
 * carved into one or more struct bio requests sized to fit both the
 * device's si_iosize_max and the system-wide maxphys limit, handed to
 * the driver's d_strategy routine, and waited on synchronously.
 *
 * Returns 0 on success or an errno: ENXIO if the device is being
 * destroyed, EFBIG if SI_NOSPLIT is set and the request cannot be
 * issued as a single bio, EFAULT if the user pages cannot be wired,
 * or the error the driver reported in bio_error.
 *
 * 'ioflag' is part of the interface but is not consulted here.
 */
int
physio(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *csw;
	struct buf *pbuf;	/* pbuf for the mapped path, else NULL */
	struct bio *bp;
	struct vm_page **pages;	/* wired user pages, else NULL */
	char *base, *sa;	/* sa: pbuf KVA the pages are mapped at */
	u_int iolen, poff;	/* poff: offset of 'base' within its page */
	int error, i, npages, maxpages;
	vm_prot_t prot;

	csw = dev->si_devsw;
	npages = 0;
	sa = NULL;
	/* check if character device is being destroyed */
	if (csw == NULL)
		return (ENXIO);

	/* XXX: sanity check */
	if(dev->si_iosize_max < PAGE_SIZE) {
		printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	/*
	 * If the driver does not want I/O to be split, that means that we
	 * need to reject any requests that will not fit into one buffer.
	 */
	if (dev->si_flags & SI_NOSPLIT &&
	    (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > maxphys ||
	    uio->uio_iovcnt > 1)) {
		/*
		 * Tell the user why his I/O was rejected.
		 */
		if (uio->uio_resid > dev->si_iosize_max)
			uprintf("%s: request size=%zd > si_iosize_max=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, dev->si_iosize_max);
		if (uio->uio_resid > maxphys)
			uprintf("%s: request size=%zd > maxphys=%lu; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, maxphys);
		if (uio->uio_iovcnt > 1)
			uprintf("%s: request vectors=%d > 1; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_iovcnt);
		return (EFBIG);
	}

	/*
	 * Keep the process UPAGES from being swapped.  Processes swapped
	 * out while holding pbufs, used by swapper, may lead to deadlock.
	 */
	PHOLD(curproc);

	bp = g_alloc_bio();
	/*
	 * Pick one of three buffer strategies:
	 *   1. kernel-space uio: the buffer is already mapped, use it as-is;
	 *   2. unmapped-capable driver: wire user pages and pass them in
	 *      bio_ma, no KVA mapping needed;
	 *   3. otherwise: wire user pages and map them into a pbuf's KVA.
	 */
	if (uio->uio_segflg != UIO_USERSPACE) {
		pbuf = NULL;
		pages = NULL;
	} else if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
		pbuf = NULL;
		/*
		 * The "+ 1" allows for one extra page when the buffer
		 * base is not page aligned (see the poff handling below).
		 */
		maxpages = btoc(MIN(uio->uio_resid, maxphys)) + 1;
		pages = malloc(sizeof(*pages) * maxpages, M_DEVBUF, M_WAITOK);
	} else {
		pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
		MPASS((pbuf->b_flags & B_MAXPHYS) != 0);
		sa = pbuf->b_data;
		maxpages = btoc(maxphys);
		pages = pbuf->b_pages;
	}
	/*
	 * A device read stores into the user pages, so they must be held
	 * with write permission; the protection is the inverse of the
	 * I/O direction.
	 */
	prot = VM_PROT_READ;
	if (uio->uio_rw == UIO_READ)
		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
	error = 0;
	for (i = 0; i < uio->uio_iovcnt; i++) {
#ifdef RACCT
		/* Charge resource accounting for this iovec up front. */
		if (racct_enable) {
			PROC_LOCK(curproc);
			if (uio->uio_rw == UIO_READ) {
				racct_add_force(curproc, RACCT_READBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif /* RACCT */

		/* Issue as many bios as it takes to drain this iovec. */
		while (uio->uio_iov[i].iov_len) {
			g_reset_bio(bp);
			if (uio->uio_rw == UIO_READ) {
				bp->bio_cmd = BIO_READ;
				curthread->td_ru.ru_inblock++;
			} else {
				bp->bio_cmd = BIO_WRITE;
				curthread->td_ru.ru_oublock++;
			}
			bp->bio_offset = uio->uio_offset;
			base = uio->uio_iov[i].iov_base;
			bp->bio_length = uio->uio_iov[i].iov_len;
			/* Clamp the chunk to device and system limits. */
			if (bp->bio_length > dev->si_iosize_max)
				bp->bio_length = dev->si_iosize_max;
			if (bp->bio_length > maxphys)
				bp->bio_length = maxphys;

			/*
			 * Make sure the pbuf can map the request.
			 * The pbuf has kvasize = maxphys, so a request
			 * larger than maxphys - PAGE_SIZE must be
			 * page aligned or it will be fragmented.
			 */
			poff = (vm_offset_t)base & PAGE_MASK;
			if (pbuf && bp->bio_length + poff > pbuf->b_kvasize) {
				if (dev->si_flags & SI_NOSPLIT) {
					uprintf("%s: request ptr %p is not "
					    "on a page boundary; cannot split "
					    "request\n", devtoname(dev),
					    base);
					error = EFBIG;
					goto doerror;
				}
				bp->bio_length = pbuf->b_kvasize;
				if (poff != 0)
					bp->bio_length -= PAGE_SIZE;
			}

			bp->bio_bcount = bp->bio_length;
			bp->bio_dev = dev;

			if (pages) {
				/* Wire the user pages backing this chunk. */
				if ((npages = vm_fault_quick_hold_pages(
				    &curproc->p_vmspace->vm_map,
				    (vm_offset_t)base, bp->bio_length,
				    prot, pages, maxpages)) < 0) {
					error = EFAULT;
					goto doerror;
				}
				if (pbuf && sa) {
					/*
					 * Mapped path: enter the pages into
					 * the pbuf's KVA window and point
					 * the bio at the mapped address.
					 */
					pmap_qenter((vm_offset_t)sa,
					    pages, npages);
					bp->bio_data = sa + poff;
				} else {
					/*
					 * Unmapped path: hand the page list
					 * to the driver directly.
					 */
					bp->bio_ma = pages;
					bp->bio_ma_n = npages;
					bp->bio_ma_offset = poff;
					bp->bio_data = unmapped_buf;
					bp->bio_flags |= BIO_UNMAPPED;
				}
			} else
				bp->bio_data = base;

			/* Dispatch to the driver and wait synchronously. */
			csw->d_strategy(bp);
			if (uio->uio_rw == UIO_READ)
				biowait(bp, "physrd");
			else
				biowait(bp, "physwr");

			/* Tear down the mapping and unwire the pages. */
			if (pages) {
				if (pbuf)
					pmap_qremove((vm_offset_t)sa, npages);
				vm_page_unhold_pages(pages, npages);
			}

			iolen = bp->bio_length - bp->bio_resid;
			if (iolen == 0 && !(bp->bio_flags & BIO_ERROR))
				goto doerror;	/* EOF */
			/* Advance the uio past the bytes transferred. */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base =
			    (char *)uio->uio_iov[i].iov_base + iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if (bp->bio_flags & BIO_ERROR) {
				error = bp->bio_error;
				goto doerror;
			}
		}
	}
doerror:
	/* Common exit: release buffers, the bio, and the process hold. */
	if (pbuf)
		uma_zfree(pbuf_zone, pbuf);
	else if (pages)
		free(pages, M_DEVBUF);
	g_destroy_bio(bp);
	PRELE(curproc);
	return (error);
}
232