xref: /freebsd/sys/contrib/openzfs/module/zfs/dmu_direct.c (revision 7ab1a32cd43cbae61ad4dd435d6a482bbf61cb52)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/zfs_racct.h>
#include <sys/dsl_dataset.h>
#include <sys/dmu_objset.h>

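/*
 * Build an ABD that spans the entire dbuf. When the caller's buffer only
 * partially covers the block, the user data is wrapped in a gang ABD and
 * padded with throwaway I/O buffers so that a full block can be read.
 */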
static abd_t *
make_abd_for_dbuf(dmu_buf_impl_t *db, abd_t *data, uint64_t offset,
    uint64_t size)
{
	size_t buf_size = db->db.db_size;
	abd_t *pre_buf = NULL, *post_buf = NULL, *mbuf = NULL;
	size_t buf_off = 0;

	ASSERT(MUTEX_HELD(&db->db_mtx));

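	/*
	 * If the request begins past the start of this block, pad the
	 * front of the block with a throwaway buffer; otherwise this
	 * block starts buf_off bytes into the caller's buffer.
	 */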
	if (offset > db->db.db_offset) {
		size_t pre_size = offset - db->db.db_offset;
		pre_buf = abd_alloc_for_io(pre_size, B_TRUE);
		buf_size -= pre_size;
		buf_off = 0;
	} else {
		buf_off = db->db.db_offset - offset;
		size -= buf_off;
	}

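	/* Likewise pad the tail if the request ends before this block does. */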
	if (size < buf_size) {
		size_t post_size = buf_size - size;
		post_buf = abd_alloc_for_io(post_size, B_TRUE);
		buf_size -= post_size;
	}

	ASSERT3U(buf_size, >, 0);
	abd_t *buf = abd_get_offset_size(data, buf_off, buf_size);

	if (pre_buf || post_buf) {
		mbuf = abd_alloc_gang();
		if (pre_buf)
			abd_gang_add(mbuf, pre_buf, B_TRUE);
		abd_gang_add(mbuf, buf, B_TRUE);
		if (post_buf)
			abd_gang_add(mbuf, post_buf, B_TRUE);
	} else {
		mbuf = buf;
	}

	return (mbuf);
}

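/*
 * Completion callback for the child read ZIOs issued by dmu_read_abd();
 * frees the (possibly gang) ABD built by make_abd_for_dbuf().
 */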
static void
dmu_read_abd_done(zio_t *zio)
{
	abd_free(zio->io_abd);
}

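/*
 * A Direct I/O write goes through the same block pointer override
 * machinery as dmu_sync(), so the ready callback simply forwards to
 * dmu_sync_ready().
 */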
static void
dmu_write_direct_ready(zio_t *zio)
{
	dmu_sync_ready(zio, NULL, zio->io_private);
}

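/*
 * Completion callback for a Direct I/O write: marks the dbuf uncached,
 * finishes the dmu_sync() override, undirties the record on error, and
 * frees the block pointer allocated in dmu_write_direct().
 */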
static void
dmu_write_direct_done(zio_t *zio)
{
	dmu_sync_arg_t *dsa = zio->io_private;
	dbuf_dirty_record_t *dr = dsa->dsa_dr;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	abd_free(zio->io_abd);

	mutex_enter(&db->db_mtx);
	ASSERT3P(db->db_buf, ==, NULL);
	ASSERT3P(dr->dt.dl.dr_data, ==, NULL);
	ASSERT3P(db->db.db_data, ==, NULL);
	db->db_state = DB_UNCACHED;
	mutex_exit(&db->db_mtx);

	dmu_sync_done(zio, NULL, zio->io_private);

	if (zio->io_error != 0) {
		if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)
			ASSERT3U(zio->io_error, ==, EIO);

		/*
		 * In the event of an I/O error this block has been freed in
		 * zio_done() through zio_dva_unallocate(). Calling
		 * dmu_sync_done() above set dr_override_state to
		 * DR_NOT_OVERRIDDEN. In this case when dbuf_undirty() calls
		 * dbuf_unoverride(), it will skip doing zio_free() to free
		 * this block as that was already taken care of.
		 *
		 * Since we are undirtying the record in open-context, we must
		 * have a hold on the db, so it should never be evicted after
		 * calling dbuf_undirty().
		 */
		mutex_enter(&db->db_mtx);
		VERIFY3B(dbuf_undirty(db, dsa->dsa_tx), ==, B_FALSE);
		mutex_exit(&db->db_mtx);
	}

	kmem_free(zio->io_bp, sizeof (blkptr_t));
	zio->io_bp = NULL;
}

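/*
 * Perform a Direct I/O write of a single dbuf-sized block, bypassing the
 * ARC. The dbuf is dirtied with no attached data (DB_NOFILL) and the
 * block is written out immediately via the dmu_sync() override path.
 */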
int
dmu_write_direct(zio_t *pio, dmu_buf_impl_t *db, abd_t *data, dmu_tx_t *tx)
{
	objset_t *os = db->db_objset;
	dsl_dataset_t *ds = dmu_objset_ds(os);
	zbookmark_phys_t zb;
	dbuf_dirty_record_t *dr_head;

	SET_BOOKMARK(&zb, ds->ds_object,
	    db->db.db_object, db->db_level, db->db_blkid);

	DB_DNODE_ENTER(db);
	zio_prop_t zp;
	dmu_write_policy(os, DB_DNODE(db), db->db_level,
	    WP_DMU_SYNC | WP_DIRECT_WR, &zp);
	DB_DNODE_EXIT(db);

	/*
	 * Dirty this dbuf with DB_NOFILL since we will not have any data
	 * associated with the dbuf.
	 */
	dmu_buf_will_clone_or_dio(&db->db, tx);

	mutex_enter(&db->db_mtx);

	uint64_t txg = dmu_tx_get_txg(tx);
	ASSERT3U(txg, >, spa_last_synced_txg(os->os_spa));
	ASSERT3U(txg, >, spa_syncing_txg(os->os_spa));

	dr_head = list_head(&db->db_dirty_records);
	ASSERT3U(dr_head->dr_txg, ==, txg);
	dr_head->dt.dl.dr_diowrite = B_TRUE;
	dr_head->dr_accounted = db->db.db_size;

	blkptr_t *bp = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
	if (db->db_blkptr != NULL) {
		/*
		 * Fill in bp with the current block pointer so that
		 * the nopwrite code can check if we're writing the same
		 * data that's already on disk.
		 */
		*bp = *db->db_blkptr;
	} else {
		memset(bp, 0, sizeof (blkptr_t));
	}

	/*
	 * Disable nopwrite if the current block pointer could change
	 * before this TXG syncs.
	 */
	if (list_next(&db->db_dirty_records, dr_head) != NULL)
		zp.zp_nopwrite = B_FALSE;

	ASSERT3S(dr_head->dt.dl.dr_override_state, ==, DR_NOT_OVERRIDDEN);
	dr_head->dt.dl.dr_override_state = DR_IN_DMU_SYNC;

	mutex_exit(&db->db_mtx);

	dmu_objset_willuse_space(os, dr_head->dr_accounted, tx);

	dmu_sync_arg_t *dsa = kmem_zalloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	dsa->dsa_dr = dr_head;
	dsa->dsa_tx = tx;

	zio_t *zio = zio_write(pio, os->os_spa, txg, bp, data,
	    db->db.db_size, db->db.db_size, &zp,
	    dmu_write_direct_ready, NULL, dmu_write_direct_done, dsa,
	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb);

	if (pio == NULL)
		return (zio_wait(zio));

	zio_nowait(zio);

	return (0);
}

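/*
 * Write data directly from an ABD, one dbuf-sized block at a time. Each
 * block in the range gets its own dmu_write_direct() ZIO under a shared
 * root ZIO that is waited on before the dbufs are released.
 */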
int
dmu_write_abd(dnode_t *dn, uint64_t offset, uint64_t size,
    abd_t *data, uint32_t flags, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	spa_t *spa = dn->dn_objset->os_spa;
	int numbufs, err;

	ASSERT(flags & DMU_DIRECTIO);

	err = dmu_buf_hold_array_by_dnode(dn, offset,
	    size, B_FALSE, FTAG, &numbufs, &dbp, flags);
	if (err)
		return (err);

	zio_t *pio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	for (int i = 0; i < numbufs && err == 0; i++) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];

		abd_t *abd = abd_get_offset_size(data,
		    db->db.db_offset - offset, dn->dn_datablksz);

		zfs_racct_write(spa, db->db.db_size, 1, flags);
		err = dmu_write_direct(pio, db, abd, tx);
		ASSERT0(err);
	}

	err = zio_wait(pio);

	/*
	 * The dbufs must be held until the Direct I/O writes have completed
	 * in the event there were any errors and dbuf_undirty() was called.
	 */
	dmu_buf_rele_array(dbp, numbufs, FTAG);

	return (err);
}

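/*
 * Read directly from disk into an ABD, bypassing the ARC. Holes are
 * zeroed and cached blocks are copied in place; all other blocks are read
 * with a child ZIO per dbuf under a shared root ZIO.
 */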
int
dmu_read_abd(dnode_t *dn, uint64_t offset, uint64_t size,
    abd_t *data, uint32_t flags)
{
	objset_t *os = dn->dn_objset;
	spa_t *spa = os->os_spa;
	dmu_buf_t **dbp;
	int numbufs, err;

	ASSERT(flags & DMU_DIRECTIO);

	err = dmu_buf_hold_array_by_dnode(dn, offset,
	    size, B_FALSE, FTAG, &numbufs, &dbp, flags);
	if (err)
		return (err);

	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	for (int i = 0; i < numbufs; i++) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
		abd_t *mbuf;
		zbookmark_phys_t zb;
		blkptr_t *bp;

		mutex_enter(&db->db_mtx);

		SET_BOOKMARK(&zb, dmu_objset_ds(os)->ds_object,
		    db->db.db_object, db->db_level, db->db_blkid);

		/*
		 * If there is another read for this dbuf, we will wait for
		 * that to complete first before checking the db_state below.
		 */
		while (db->db_state == DB_READ)
			cv_wait(&db->db_changed, &db->db_mtx);

		err = dmu_buf_get_bp_from_dbuf(db, &bp);
		if (err) {
			mutex_exit(&db->db_mtx);
			goto error;
		}

		/*
		 * There is no need to read if this is a hole or the data is
		 * cached. This will not be considered a direct read for I/O
		 * accounting, just as an ARC hit is not counted.
		 */
		if (bp == NULL || BP_IS_HOLE(bp) || db->db_state == DB_CACHED) {
			size_t aoff = offset < db->db.db_offset ?
			    db->db.db_offset - offset : 0;
			size_t boff = offset > db->db.db_offset ?
			    offset - db->db.db_offset : 0;
			size_t len = MIN(size - aoff, db->db.db_size - boff);

			if (db->db_state == DB_CACHED) {
				/*
				 * We need to untransform the ARC buf data
				 * before we copy it over.
				 */
				err = dmu_buf_untransform_direct(db, spa);
				ASSERT0(err);
				abd_copy_from_buf_off(data,
				    (char *)db->db.db_data + boff, aoff, len);
			} else {
				abd_zero_off(data, aoff, len);
			}

			mutex_exit(&db->db_mtx);
			continue;
		}

		mbuf = make_abd_for_dbuf(db, data, offset, size);
		ASSERT3P(mbuf, !=, NULL);

		/*
		 * The dbuf mutex (db_mtx) must be held when creating the ZIO
		 * for the read. The BP returned from
		 * dmu_buf_get_bp_from_dbuf() could be from a pending block
		 * clone or a yet-to-be-synced Direct I/O write that is in the
		 * dbuf's dirty record. When zio_read() is called, zio_create()
		 * will make a copy of the BP. However, if zio_read() is called
		 * without the mutex held, then the dirty record from the
		 * dbuf could be freed in dbuf_write_done(), resulting in
		 * garbage being set for the zio BP.
		 */
		zio_t *cio = zio_read(rio, spa, bp, mbuf, db->db.db_size,
		    dmu_read_abd_done, NULL, ZIO_PRIORITY_SYNC_READ,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DIO_READ, &zb);
		mutex_exit(&db->db_mtx);

		zfs_racct_read(spa, db->db.db_size, 1, flags);
		zio_nowait(cio);
	}

	dmu_buf_rele_array(dbp, numbufs, FTAG);

	return (zio_wait(rio));

error:
	dmu_buf_rele_array(dbp, numbufs, FTAG);
	(void) zio_wait(rio);
	return (err);
}

#ifdef _KERNEL
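/*
 * Issue a Direct I/O read into the caller's pinned user pages: wrap an
 * ABD around the pages backing the current uio offset and hand it to
 * dmu_read_abd().
 */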
int
dmu_read_uio_direct(dnode_t *dn, zfs_uio_t *uio, uint64_t size)
{
	offset_t offset = zfs_uio_offset(uio);
	offset_t page_index = (offset - zfs_uio_soffset(uio)) >> PAGESHIFT;
	int err;

	ASSERT(uio->uio_extflg & UIO_DIRECT);
	ASSERT3U(page_index, <, uio->uio_dio.npages);

	abd_t *data = abd_alloc_from_pages(&uio->uio_dio.pages[page_index],
	    offset & (PAGESIZE - 1), size);
	err = dmu_read_abd(dn, offset, size, data, DMU_DIRECTIO);
	abd_free(data);

	if (err == 0)
		zfs_uioskip(uio, size);

	return (err);
}

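/*
 * The write-side counterpart of dmu_read_uio_direct(): wrap an ABD around
 * the pinned user pages and write it out with dmu_write_abd().
 */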
int
dmu_write_uio_direct(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
	offset_t offset = zfs_uio_offset(uio);
	offset_t page_index = (offset - zfs_uio_soffset(uio)) >> PAGESHIFT;
	int err;

	ASSERT(uio->uio_extflg & UIO_DIRECT);
	ASSERT3U(page_index, <, uio->uio_dio.npages);

	abd_t *data = abd_alloc_from_pages(&uio->uio_dio.pages[page_index],
	    offset & (PAGESIZE - 1), size);
	err = dmu_write_abd(dn, offset, size, data, DMU_DIRECTIO, tx);
	abd_free(data);

	if (err == 0)
		zfs_uioskip(uio, size);

	return (err);
}
#endif /* _KERNEL */

EXPORT_SYMBOL(dmu_read_uio_direct);
EXPORT_SYMBOL(dmu_write_uio_direct);