/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>	/* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h>	/* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>	/* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, the caller can manipulate any object anyhow,
	 * and the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1
		 * blocks.
		 */
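		/*
		 * Illustrative example (values are hypothetical, not from
		 * this code): with 128K data blocks (dn_datablkshift == 17),
		 * a write of len = 300K at off = 100K gives
		 * start = 100K >> 17 = 0 and end = (400K - 1) >> 17 = 3.
		 * Both edges are misaligned (P2PHASE() != 0), so level-0
		 * blocks 0 and 3 are read; the interior level-1 range
		 * (start>>shft)+1 .. (end>>shft)-1 is empty here, so no
		 * level-1 blocks are read.
		 */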
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		} else {
			/*
			 * The blocksize can increase up to the recordsize,
			 * or if it is already more than the recordsize,
			 * up to the next power of 2.
			 */
			min_bs = highbit64(dn->dn_datablksz - 1);
			max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
		}
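		/*
		 * Example of the estimate above (hypothetical value): a
		 * single-block object with dn_datablksz == 6144 gives
		 * min_bs = highbit64(6143) = 13, i.e. future rewrites are
		 * charged at power-of-2 block sizes from 8K up to 2^max_bs.
		 */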

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start,
			    FALSE, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}
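	/*
	 * Worked example of the worst case above (hypothetical values):
	 * off = 0, len = 1M, min_bs = max_bs = 17 and min_ibs = max_ibs = 17
	 * (so epbs = 14).  Level 0 adds 1M to txh_space_towrite; the loop
	 * then runs for bits = 47, 33, 19 and 5, each pass collapsing
	 * start..end to 0..0 and charging one 128K indirect block, for a
	 * total of 1M + 512K.
	 */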

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}
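
/*
 * For instance (hypothetical geometry, not from this code): with a 16K
 * meta-dnode block size and 128K indirects, a 3-level meta-dnode charges
 * 16K + 2 * 128K = 272K of metadata per dnode held.
 */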

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;
	}
	l0span = nblks;	/* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs,
		    FALSE, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVELS, but calculate the maximum
		 * level from the given datablkshift and indblkshift.  On
		 * large files this can make the difference between looping
		 * over, say, 8 levels rather than 19.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}
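
	/*
	 * For example (hypothetical geometry): with dn_datablkshift == 17
	 * and dn_indblkshift == 17, maxlevel = 2 + (64 - 17) / 14 = 5, so
	 * the loop above charges memory for levels 2 through 4 only.
	 */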

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_FREE, 0, 0);

	/*
	 * Pretend that this operation will free 1GB of space.  This
	 * should be large enough to cancel out the largest write.
	 * We don't want to use something like UINT64_MAX, because that would
	 * cause overflows when doing math with these values (e.g. in
	 * dmu_tx_try_assign()).
	 */
	txh->txh_space_tofree = txh->txh_space_tounref = 1024 * 1024 * 1024;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;
	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}
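
/*
 * A minimal usage sketch (not from this file): to free an entire object's
 * data within a transaction, a caller would hold the whole range with
 * DMU_OBJECT_END before assigning the tx:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 */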

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	dsl_dataset_phys_t *ds_phys;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += MZAP_MAX_BLKSZ;
		else
			txh->txh_space_towrite += MZAP_MAX_BLKSZ;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += MZAP_MAX_BLKSZ;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	ds_phys = dsl_dataset_phys(dn->dn_objset->os_dsl_dataset);
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}
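
/*
 * Usage sketch (the caller-side names here are hypothetical): before
 * adding an entry to a ZAP object inside a tx, hold it by name so the
 * write is accounted for:
 *
 *	dmu_tx_hold_zap(tx, zapobj, B_TRUE, name);
 *	...
 *	error = zap_add(os, zapobj, name, 8, 1, &value, tx);
 */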

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new top-level indirect
				 * blocks (TLIBs).  Or they might have to
				 * change the block size, thus dirtying the
				 * new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 IOPS, something is wrong.  Rather than delaying even
 * longer, cap the delay here and let dirty data accumulate until we hit
 * zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                     *                        +
 *  10us +                                    *                         +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
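/*
 * Worked example of the formula above (tunable values are hypothetical):
 * with zfs_dirty_data_max = 4GB and zfs_delay_min_dirty_percent = 60,
 * delay_min_bytes = 2.4GB.  At the midpoint (dirty = 3.2GB),
 * min_time = scale * 0.8GB / 0.8GB = zfs_delay_scale (500us with the
 * default scale, i.e. ~2000 IOPS); at dirty = 3.8GB it is
 * 500us * 1.4 / 0.2 = 3.5ms, subject to the zfs_delay_max_ns cap.
 */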
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
		return;

	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

#ifdef _KERNEL
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
		continue;
	mutex_exit(&curthread->t_delay_lock);
#else
	hrtime_t delta = wakeup - gethrtime();
	struct timespec ts;
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
#endif
}

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failure-mode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory.
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *	has already been called on behalf of this operation (though
 *	most likely on a different tx).
 */
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
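
/*
 * A typical consumer follows the pattern below (a sketch, assuming a
 * caller that holds locks and therefore uses TXG_NOWAIT; the "modify"
 * step stands in for dmu_write() or similar calls against the held
 * ranges):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (error == ERESTART) {
 *		... drop locks ...
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		... re-take locks and retry ...
 *	} else if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	... modify the held ranges ...
 *	dmu_tx_commit(tx);
 */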

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	if (tx->tx_wait_dirty) {
		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		 * uses TXG_WAIT.
		 */
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_head(cb_list)) != NULL) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
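
/*
 * Sketch of commit-callback use (the callback name and data are
 * hypothetical).  Callbacks registered on a tx are invoked with
 * error == 0 once the txg syncs, or with ECANCELED from dmu_tx_abort():
 *
 *	static void
 *	my_commit_cb(void *data, int error)
 *	{
 *		...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, my_data);
 */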

/*
 * Interface to hold a bunch of attributes; used for creating new files.
 * attrsize is the total size of all attributes to be added during
 * object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_OLD_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_OLD_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that one or more variable-sized attributes may
 * grow, in which case the attribute layout may change and a spill
 * block may be needed; the holds below account for both.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
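
/*
 * Usage sketch (sa_update() belongs to the SA layer, not this file; the
 * attribute and buffer names are hypothetical): to update an existing
 * attribute that will not grow, a caller might do:
 *
 *	dmu_tx_hold_sa(tx, hdl, B_FALSE);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		error = sa_update(hdl, attr, &val, sizeof (val), tx);
 *		dmu_tx_commit(tx);
 *	}
 */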