1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
26 */
27
28 #include <sys/dmu.h>
29 #include <sys/dmu_impl.h>
30 #include <sys/dbuf.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/dmu_objset.h>
33 #include <sys/dsl_dataset.h>
34 #include <sys/dsl_dir.h>
35 #include <sys/dsl_pool.h>
36 #include <sys/zap_impl.h>
37 #include <sys/spa.h>
38 #include <sys/sa.h>
39 #include <sys/sa_impl.h>
40 #include <sys/zfs_context.h>
41 #include <sys/varargs.h>
42
43 typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
44 uint64_t arg1, uint64_t arg2);
45
46
47 dmu_tx_t *
48 dmu_tx_create_dd(dsl_dir_t *dd)
49 {
50 dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
51 tx->tx_dir = dd;
52 if (dd != NULL)
53 tx->tx_pool = dd->dd_pool;
54 list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
55 offsetof(dmu_tx_hold_t, txh_node));
56 list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
57 offsetof(dmu_tx_callback_t, dcb_node));
58 tx->tx_start = gethrtime();
59 return (tx);
60 }
61
62 dmu_tx_t *
63 dmu_tx_create(objset_t *os)
64 {
65 dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
66 tx->tx_objset = os;
67 return (tx);
68 }
69
70 dmu_tx_t *
71 dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
72 {
73 dmu_tx_t *tx = dmu_tx_create_dd(NULL);
74
75 txg_verify(dp->dp_spa, txg);
76 tx->tx_pool = dp;
77 tx->tx_txg = txg;
78 tx->tx_anyobj = TRUE;
79
80 return (tx);
81 }
82
83 int
84 dmu_tx_is_syncing(dmu_tx_t *tx)
85 {
86 return (tx->tx_anyobj);
87 }
88
89 int
90 dmu_tx_private_ok(dmu_tx_t *tx)
91 {
92 return (tx->tx_anyobj);
93 }
94
95 static dmu_tx_hold_t *
96 dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
97 uint64_t arg1, uint64_t arg2)
98 {
99 dmu_tx_hold_t *txh;
100
101 if (dn != NULL) {
102 (void) zfs_refcount_add(&dn->dn_holds, tx);
103 if (tx->tx_txg != 0) {
104 mutex_enter(&dn->dn_mtx);
105 /*
106 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
107 * problem, but there's no way for it to happen (for
108 * now, at least).
109 */
110 ASSERT(dn->dn_assigned_txg == 0);
111 dn->dn_assigned_txg = tx->tx_txg;
112 (void) zfs_refcount_add(&dn->dn_tx_holds, tx);
113 mutex_exit(&dn->dn_mtx);
114 }
115 }
116
117 txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
118 txh->txh_tx = tx;
119 txh->txh_dnode = dn;
120 zfs_refcount_create(&txh->txh_space_towrite);
121 zfs_refcount_create(&txh->txh_memory_tohold);
122 txh->txh_type = type;
123 txh->txh_arg1 = arg1;
124 txh->txh_arg2 = arg2;
125 list_insert_tail(&tx->tx_holds, txh);
126
127 return (txh);
128 }
129
130 static dmu_tx_hold_t *
131 dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
132 enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
133 {
134 dnode_t *dn = NULL;
135 dmu_tx_hold_t *txh;
136 int err;
137
138 if (object != DMU_NEW_OBJECT) {
139 err = dnode_hold(os, object, FTAG, &dn);
140 if (err != 0) {
141 tx->tx_err = err;
142 return (NULL);
143 }
144 }
145 txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
146 if (dn != NULL)
147 dnode_rele(dn, FTAG);
148 return (txh);
149 }
150
151 void
152 dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
153 {
154 /*
155 * If we're syncing, they can manipulate any object anyhow, and
156 * the hold on the dnode_t can cause problems.
157 */
158 if (!dmu_tx_is_syncing(tx))
159 (void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
160 }
161
162 /*
163 * This function reads specified data from disk. The specified data will
164 * be needed to perform the transaction -- i.e., it will be read after
165 * we do dmu_tx_assign(). There are two reasons that we read the data now
166 * (before dmu_tx_assign()):
167 *
168 * 1. Reading it now has potentially better performance. The transaction
169 * has not yet been assigned, so the TXG is not held open, and also the
170 * caller typically has fewer locks held when calling dmu_tx_hold_*() than
171 * after the transaction has been assigned. This reduces the lock (and txg)
172 * hold times, thus reducing lock contention.
173 *
174 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
175 * that are detected before they start making changes to the DMU state
176 * (i.e. now). Once the transaction has been assigned, and some DMU
177 * state has been changed, it can be difficult to recover from an i/o
178 * error (e.g. to undo the changes already made in memory at the DMU
179 * layer). Typically code to do so does not exist in the caller -- it
180 * assumes that the data has already been cached and thus i/o errors are
181 * not possible.
182 *
183 * It has been observed that the i/o initiated here can be a performance
184 * problem, and it appears to be optional, because we don't look at the
185 * data which is read. However, removing this read would only serve to
186 * move the work elsewhere (after the dmu_tx_assign()), where it may
187 * have a greater impact on performance (in addition to the impact on
188 * fault tolerance noted above).
189 */
190 static int
191 dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
192 {
193 int err;
194 dmu_buf_impl_t *db;
195
196 rw_enter(&dn->dn_struct_rwlock, RW_READER);
197 db = dbuf_hold_level(dn, level, blkid, FTAG);
198 rw_exit(&dn->dn_struct_rwlock);
199 if (db == NULL)
200 return (SET_ERROR(EIO));
201 err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
202 dbuf_rele(db, FTAG);
203 return (err);
204 }
205
206 /* ARGSUSED */
207 static void
208 dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
209 {
210 dnode_t *dn = txh->txh_dnode;
211 int err = 0;
212
213 if (len == 0)
214 return;
215
216 (void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);
217
218 if (zfs_refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
219 err = SET_ERROR(EFBIG);
220
221 if (dn == NULL)
222 return;
223
224 /*
225 * For i/o error checking, read the blocks that will be needed
226 * to perform the write: the first and last level-0 blocks (if
227 * they are not aligned, i.e. if they are partial-block writes),
228 * and all the level-1 blocks.
229 */
230 if (dn->dn_maxblkid == 0) {
231 if (off < dn->dn_datablksz &&
232 (off > 0 || len < dn->dn_datablksz)) {
233 err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
234 if (err != 0) {
235 txh->txh_tx->tx_err = err;
236 }
237 }
238 } else {
239 zio_t *zio = zio_root(dn->dn_objset->os_spa,
240 NULL, NULL, ZIO_FLAG_CANFAIL);
241
242 /* first level-0 block */
243 uint64_t start = off >> dn->dn_datablkshift;
244 if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
245 err = dmu_tx_check_ioerr(zio, dn, 0, start);
246 if (err != 0) {
247 txh->txh_tx->tx_err = err;
248 }
249 }
250
251 /* last level-0 block */
252 uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
253 if (end != start && end <= dn->dn_maxblkid &&
254 P2PHASE(off + len, dn->dn_datablksz)) {
255 err = dmu_tx_check_ioerr(zio, dn, 0, end);
256 if (err != 0) {
257 txh->txh_tx->tx_err = err;
258 }
259 }
260
261 /* level-1 blocks */
262 if (dn->dn_nlevels > 1) {
263 int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
264 for (uint64_t i = (start >> shft) + 1;
265 i < end >> shft; i++) {
266 err = dmu_tx_check_ioerr(zio, dn, 1, i);
267 if (err != 0) {
268 txh->txh_tx->tx_err = err;
269 }
270 }
271 }
272
273 err = zio_wait(zio);
274 if (err != 0) {
275 txh->txh_tx->tx_err = err;
276 }
277 }
278 }
279
280 static void
281 dmu_tx_count_dnode(dmu_tx_hold_t *txh)
282 {
283 (void) zfs_refcount_add_many(&txh->txh_space_towrite, DNODE_MIN_SIZE,
284 FTAG);
285 }
286
287 void
288 dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
289 {
290 dmu_tx_hold_t *txh;
291
292 ASSERT0(tx->tx_txg);
293 ASSERT3U(len, <=, DMU_MAX_ACCESS);
294 ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
295
296 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
297 object, THT_WRITE, off, len);
298 if (txh != NULL) {
299 dmu_tx_count_write(txh, off, len);
300 dmu_tx_count_dnode(txh);
301 }
302 }
303
304 void
305 dmu_tx_hold_remap_l1indirect(dmu_tx_t *tx, uint64_t object)
306 {
307 dmu_tx_hold_t *txh;
308
309 ASSERT(tx->tx_txg == 0);
310 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
311 object, THT_WRITE, 0, 0);
312 if (txh == NULL)
313 return;
314
315 dnode_t *dn = txh->txh_dnode;
316 (void) zfs_refcount_add_many(&txh->txh_space_towrite,
317 1ULL << dn->dn_indblkshift, FTAG);
318 dmu_tx_count_dnode(txh);
319 }
320
321 void
322 dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
323 {
324 dmu_tx_hold_t *txh;
325
326 ASSERT0(tx->tx_txg);
327 ASSERT3U(len, <=, DMU_MAX_ACCESS);
328 ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
329
330 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
331 if (txh != NULL) {
332 dmu_tx_count_write(txh, off, len);
333 dmu_tx_count_dnode(txh);
334 }
335 }
336
337 /*
338 * This function marks the transaction as being a "net free". The end
339 * result is that refquotas will be disabled for this transaction, and
340 * this transaction will be able to use half of the pool space overhead
341 * (see dsl_pool_adjustedsize()). Therefore this function should only
342 * be called for transactions that we expect will not cause a net increase
343 * in the amount of space used (but it's OK if that is occasionally not true).
344 */
345 void
346 dmu_tx_mark_netfree(dmu_tx_t *tx)
347 {
348 tx->tx_netfree = B_TRUE;
349 }
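
/*
 * Illustrative sketch (not part of this file): a caller freeing a range that
 * it expects to be a net free would typically combine the free hold with
 * dmu_tx_mark_netfree(), roughly:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	if ((err = dmu_tx_assign(tx, TXG_WAIT)) != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	... perform the free ...
 *	dmu_tx_commit(tx);
 */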
350
351 static void
352 dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
353 {
354 dmu_tx_t *tx;
355 dnode_t *dn;
356 int err;
357
358 tx = txh->txh_tx;
359 ASSERT(tx->tx_txg == 0);
360
361 dn = txh->txh_dnode;
362 dmu_tx_count_dnode(txh);
363
364 if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
365 return;
366 if (len == DMU_OBJECT_END)
367 len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;
368
369 /*
370 * For i/o error checking, we read the first and last level-0
371 * blocks if they are not aligned, and all the level-1 blocks.
372 *
373 * Note: dbuf_free_range() assumes that we have not instantiated
374 * any level-0 dbufs that will be completely freed. Therefore we must
375 * exercise care to not read or count the first and last blocks
376 * if they are blocksize-aligned.
377 */
378 if (dn->dn_datablkshift == 0) {
379 if (off != 0 || len < dn->dn_datablksz)
380 dmu_tx_count_write(txh, 0, dn->dn_datablksz);
381 } else {
382 /* first block will be modified if it is not aligned */
383 if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
384 dmu_tx_count_write(txh, off, 1);
385 /* last block will be modified if it is not aligned */
386 if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
387 dmu_tx_count_write(txh, off + len, 1);
388 }
389
390 /*
391 * Check level-1 blocks.
392 */
393 if (dn->dn_nlevels > 1) {
394 int shift = dn->dn_datablkshift + dn->dn_indblkshift -
395 SPA_BLKPTRSHIFT;
396 uint64_t start = off >> shift;
397 uint64_t end = (off + len) >> shift;
398
399 ASSERT(dn->dn_indblkshift != 0);
400
401 /*
402 * dnode_reallocate() can result in an object with indirect
403 * blocks having an odd data block size. In this case,
404 * just check the single block.
405 */
406 if (dn->dn_datablkshift == 0)
407 start = end = 0;
408
409 zio_t *zio = zio_root(tx->tx_pool->dp_spa,
410 NULL, NULL, ZIO_FLAG_CANFAIL);
411 for (uint64_t i = start; i <= end; i++) {
412 uint64_t ibyte = i << shift;
413 err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
414 i = ibyte >> shift;
415 if (err == ESRCH || i > end)
416 break;
417 if (err != 0) {
418 tx->tx_err = err;
419 (void) zio_wait(zio);
420 return;
421 }
422
423 (void) zfs_refcount_add_many(&txh->txh_memory_tohold,
424 1 << dn->dn_indblkshift, FTAG);
425
426 err = dmu_tx_check_ioerr(zio, dn, 1, i);
427 if (err != 0) {
428 tx->tx_err = err;
429 (void) zio_wait(zio);
430 return;
431 }
432 }
433 err = zio_wait(zio);
434 if (err != 0) {
435 tx->tx_err = err;
436 return;
437 }
438 }
439 }
440
441 void
442 dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
443 {
444 dmu_tx_hold_t *txh;
445
446 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
447 object, THT_FREE, off, len);
448 if (txh != NULL)
449 (void) dmu_tx_hold_free_impl(txh, off, len);
450 }
451
452 void
453 dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
454 {
455 dmu_tx_hold_t *txh;
456
457 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
458 if (txh != NULL)
459 (void) dmu_tx_hold_free_impl(txh, off, len);
460 }
461
462 static void
463 dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
464 {
465 dmu_tx_t *tx = txh->txh_tx;
466 dnode_t *dn;
467 int err;
468
469 ASSERT(tx->tx_txg == 0);
470
471 dn = txh->txh_dnode;
472
473 dmu_tx_count_dnode(txh);
474
475 /*
476 * Modifying an almost-full microzap is around the worst case (128KB)
477 *
478 * If it is a fat zap, the worst case would be 7*16KB=112KB:
479 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
480 * - 4 new blocks written if adding:
481 * - 2 blocks for possibly split leaves,
482 * - 2 grown ptrtbl blocks
483 */
484 (void) zfs_refcount_add_many(&txh->txh_space_towrite,
485 MZAP_MAX_BLKSZ, FTAG);
486
487 if (dn == NULL)
488 return;
489
490 ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);
491
492 if (dn->dn_maxblkid == 0 || name == NULL) {
493 /*
494 * This is a microzap (only one block), or we don't know
495 * the name. Check the first block for i/o errors.
496 */
497 err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
498 if (err != 0) {
499 tx->tx_err = err;
500 }
501 } else {
502 /*
503 * Access the name so that we'll check for i/o errors to
504 * the leaf blocks, etc. We ignore ENOENT, as this name
505 * may not yet exist.
506 */
507 err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
508 if (err == EIO || err == ECKSUM || err == ENXIO) {
509 tx->tx_err = err;
510 }
511 }
512 }
513
514 void
515 dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
516 {
517 dmu_tx_hold_t *txh;
518
519 ASSERT0(tx->tx_txg);
520
521 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
522 object, THT_ZAP, add, (uintptr_t)name);
523 if (txh != NULL)
524 dmu_tx_hold_zap_impl(txh, name);
525 }
526
527 void
528 dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
529 {
530 dmu_tx_hold_t *txh;
531
532 ASSERT0(tx->tx_txg);
533 ASSERT(dn != NULL);
534
535 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
536 if (txh != NULL)
537 dmu_tx_hold_zap_impl(txh, name);
538 }
539
540 void
541 dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
542 {
543 dmu_tx_hold_t *txh;
544
545 ASSERT(tx->tx_txg == 0);
546
547 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
548 object, THT_BONUS, 0, 0);
549 if (txh)
550 dmu_tx_count_dnode(txh);
551 }
552
553 void
554 dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
555 {
556 dmu_tx_hold_t *txh;
557
558 ASSERT0(tx->tx_txg);
559
560 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
561 if (txh)
562 dmu_tx_count_dnode(txh);
563 }
564
565 void
566 dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
567 {
568 dmu_tx_hold_t *txh;
569 ASSERT(tx->tx_txg == 0);
570
571 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
572 DMU_NEW_OBJECT, THT_SPACE, space, 0);
573
574 (void) zfs_refcount_add_many(&txh->txh_space_towrite, space, FTAG);
575 }
576
577 #ifdef ZFS_DEBUG
578 void
579 dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
580 {
581 boolean_t match_object = B_FALSE;
582 boolean_t match_offset = B_FALSE;
583
584 DB_DNODE_ENTER(db);
585 dnode_t *dn = DB_DNODE(db);
586 ASSERT(tx->tx_txg != 0);
587 ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
588 ASSERT3U(dn->dn_object, ==, db->db.db_object);
589
590 if (tx->tx_anyobj) {
591 DB_DNODE_EXIT(db);
592 return;
593 }
594
595 /* XXX No checking on the meta dnode for now */
596 if (db->db.db_object == DMU_META_DNODE_OBJECT) {
597 DB_DNODE_EXIT(db);
598 return;
599 }
600
601 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
602 txh = list_next(&tx->tx_holds, txh)) {
603 ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
604 if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
605 match_object = TRUE;
606 if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
607 int datablkshift = dn->dn_datablkshift ?
608 dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
609 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
610 int shift = datablkshift + epbs * db->db_level;
611 uint64_t beginblk = shift >= 64 ? 0 :
612 (txh->txh_arg1 >> shift);
613 uint64_t endblk = shift >= 64 ? 0 :
614 ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
615 uint64_t blkid = db->db_blkid;
616
617 /* XXX txh_arg2 better not be zero... */
618
619 dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
620 txh->txh_type, beginblk, endblk);
621
622 switch (txh->txh_type) {
623 case THT_WRITE:
624 if (blkid >= beginblk && blkid <= endblk)
625 match_offset = TRUE;
626 /*
627 * We will let this hold work for the bonus
628 * or spill buffer so that we don't need to
629 * hold it when creating a new object.
630 */
631 if (blkid == DMU_BONUS_BLKID ||
632 blkid == DMU_SPILL_BLKID)
633 match_offset = TRUE;
634 /*
635 * They might have to increase nlevels,
636 * thus dirtying the new TLIBs. Or they
637 * might have to change the block size,
638 * thus dirtying the new lvl=0 blk=0.
639 */
640 if (blkid == 0)
641 match_offset = TRUE;
642 break;
643 case THT_FREE:
644 /*
645 * We will dirty all the level 1 blocks in
646 * the free range and perhaps the first and
647 * last level 0 block.
648 */
649 if (blkid >= beginblk && (blkid <= endblk ||
650 txh->txh_arg2 == DMU_OBJECT_END))
651 match_offset = TRUE;
652 break;
653 case THT_SPILL:
654 if (blkid == DMU_SPILL_BLKID)
655 match_offset = TRUE;
656 break;
657 case THT_BONUS:
658 if (blkid == DMU_BONUS_BLKID)
659 match_offset = TRUE;
660 break;
661 case THT_ZAP:
662 match_offset = TRUE;
663 break;
664 case THT_NEWOBJECT:
665 match_object = TRUE;
666 break;
667 default:
668 ASSERT(!"bad txh_type");
669 }
670 }
671 if (match_object && match_offset) {
672 DB_DNODE_EXIT(db);
673 return;
674 }
675 }
676 DB_DNODE_EXIT(db);
677 panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
678 (u_longlong_t)db->db.db_object, db->db_level,
679 (u_longlong_t)db->db_blkid);
680 }
681 #endif
682
683 /*
684 * If we can't do 10 iops, something is wrong. Let us go ahead
685 * and hit zfs_dirty_data_max.
686 */
687 hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
688 int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */
689
690 /*
691 * We delay transactions when we've determined that the backend storage
692 * isn't able to accommodate the rate of incoming writes.
693 *
694 * If there is already a transaction waiting, we delay relative to when
695 * that transaction finishes waiting. This way the calculated min_time
696 * is independent of the number of threads concurrently executing
697 * transactions.
698 *
699 * If we are the only waiter, wait relative to when the transaction
700 * started, rather than the current time. This credits the transaction for
701 * "time already served", e.g. reading indirect blocks.
702 *
703 * The minimum time for a transaction to take is calculated as:
704 * min_time = scale * (dirty - min) / (max - dirty)
705 * min_time is then capped at zfs_delay_max_ns.
706 *
707 * The delay has two degrees of freedom that can be adjusted via tunables.
708 * The percentage of dirty data at which we start to delay is defined by
709 * zfs_delay_min_dirty_percent. This should typically be at or above
710 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
711 * delay after writing at full speed has failed to keep up with the incoming
712 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
713 * speaking, this variable determines the amount of delay at the midpoint of
714 * the curve.
715 *
716 * delay
717 * 10ms +-------------------------------------------------------------*+
718 * | *|
719 * 9ms + *+
720 * | *|
721 * 8ms + *+
722 * | * |
723 * 7ms + * +
724 * | * |
725 * 6ms + * +
726 * | * |
727 * 5ms + * +
728 * | * |
729 * 4ms + * +
730 * | * |
731 * 3ms + * +
732 * | * |
733 * 2ms + (midpoint) * +
734 * | | ** |
735 * 1ms + v *** +
736 * | zfs_delay_scale ----------> ******** |
737 * 0 +-------------------------------------*********----------------+
738 * 0% <- zfs_dirty_data_max -> 100%
739 *
740 * Note that since the delay is added to the outstanding time remaining on the
741 * most recent transaction, the delay is effectively the inverse of IOPS.
742 * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
743 * was chosen such that small changes in the amount of accumulated dirty data
744 * in the first 3/4 of the curve yield relatively small differences in the
745 * amount of delay.
746 *
747 * The effects can be easier to understand when the amount of delay is
748 * represented on a log scale:
749 *
750 * delay
751 * 100ms +-------------------------------------------------------------++
752 * + +
753 * | |
754 * + *+
755 * 10ms + *+
756 * + ** +
757 * | (midpoint) ** |
758 * + | ** +
759 * 1ms + v **** +
760 * + zfs_delay_scale ----------> ***** +
761 * | **** |
762 * + **** +
763 * 100us + ** +
764 * + * +
765 * | * |
766 * + * +
767 * 10us + * +
768 * + +
769 * | |
770 * + +
771 * +--------------------------------------------------------------+
772 * 0% <- zfs_dirty_data_max -> 100%
773 *
774 * Note here that only as the amount of dirty data approaches its limit does
775 * the delay start to increase rapidly. The goal of a properly tuned system
776 * should be to keep the amount of dirty data out of that range by first
777 * ensuring that the appropriate limits are set for the I/O scheduler to reach
778 * optimal throughput on the backend storage, and then by changing the value
779 * of zfs_delay_scale to increase the steepness of the curve.
780 */
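/*
 * Worked example (illustrative values near the common defaults, not part of
 * this file): with zfs_dirty_data_max = 4GB, zfs_delay_min_dirty_percent = 60
 * and zfs_delay_scale = 500,000ns, delay begins at 2.4GB of dirty data.  At
 * 3.2GB dirty (the midpoint between 2.4GB and 4GB):
 *
 *	min_tx_time = 500,000 * (3.2G - 2.4G) / (4G - 3.2G) = 500,000ns = 500us
 *
 * which is the 500us midpoint (2000 IOPS) referred to above.
 */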
781 static void
782 dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
783 {
784 dsl_pool_t *dp = tx->tx_pool;
785 uint64_t delay_min_bytes =
786 zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
787 hrtime_t wakeup, min_tx_time, now;
788
789 if (dirty <= delay_min_bytes)
790 return;
791
792 /*
793 * The caller has already waited until we are under the max.
794 * We make them pass us the amount of dirty data so we don't
795 * have to handle the case of it being >= the max, which could
796 * cause a divide-by-zero if it's == the max.
797 */
798 ASSERT3U(dirty, <, zfs_dirty_data_max);
799
800 now = gethrtime();
801 min_tx_time = zfs_delay_scale *
802 (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
803 if (now > tx->tx_start + min_tx_time)
804 return;
805
806 min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);
807
808 DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
809 uint64_t, min_tx_time);
810
811 mutex_enter(&dp->dp_lock);
812 wakeup = MAX(tx->tx_start + min_tx_time,
813 dp->dp_last_wakeup + min_tx_time);
814 dp->dp_last_wakeup = wakeup;
815 mutex_exit(&dp->dp_lock);
816
817 #ifdef _KERNEL
818 mutex_enter(&curthread->t_delay_lock);
819 while (cv_timedwait_hires(&curthread->t_delay_cv,
820 &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
821 CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
822 continue;
823 mutex_exit(&curthread->t_delay_lock);
824 #else
825 hrtime_t delta = wakeup - gethrtime();
826 struct timespec ts;
827 ts.tv_sec = delta / NANOSEC;
828 ts.tv_nsec = delta % NANOSEC;
829 (void) nanosleep(&ts, NULL);
830 #endif
831 }
832
833 /*
834 * This routine attempts to assign the transaction to a transaction group.
835 * To do so, we must determine if there is sufficient free space on disk.
836 *
837 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
838 * on it), then it is assumed that there is sufficient free space,
839 * unless there's insufficient slop space in the pool (see the comment
840 * above spa_slop_shift in spa_misc.c).
841 *
842 * If it is not a "netfree" transaction, then if the data already on disk
843 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
844 * ENOSPC. Otherwise, if the current rough estimate of pending changes,
845 * plus the rough estimate of this transaction's changes, may exceed the
846 * allowed usage, then this will fail with ERESTART, which will cause the
847 * caller to wait for the pending changes to be written to disk (by waiting
848 * for the next TXG to open), and then check the space usage again.
849 *
850 * The rough estimate of pending changes is comprised of the sum of:
851 *
852 * - this transaction's holds' txh_space_towrite
853 *
854 * - dd_tempreserved[], which is the sum of in-flight transactions'
855 * holds' txh_space_towrite (i.e. those transactions that have called
856 * dmu_tx_assign() but not yet called dmu_tx_commit()).
857 *
858 * - dd_space_towrite[], which is the amount of dirtied dbufs.
859 *
860 * Note that all of these values are inflated by spa_get_worst_case_asize(),
861 * which means that we may get ERESTART well before we are actually in danger
862 * of running out of space, but this also mitigates any small inaccuracies
863 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
864 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
865 * to the MOS).
866 *
867 * Note that due to this algorithm, it is possible to exceed the allowed
868 * usage by one transaction. Also, as we approach the allowed usage,
869 * we will allow a very limited amount of changes into each TXG, thus
870 * decreasing performance.
871 */
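/*
 * Illustrative restatement of the check above (not exact code): the
 * pending-change estimate handed to dsl_dir_tempreserve_space() is
 * conceptually
 *
 *	asize(this tx's txh_space_towrite) + dd_tempreserved[] +
 *	    dd_space_towrite[]  <=  allowed usage
 *
 * where asize() is the spa_get_worst_case_asize() inflation noted above.
 * EDQUOT/ENOSPC is returned when the on-disk usage already exceeds the
 * limit, and ERESTART when this estimate may push it over.
 */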
872 static int
873 dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
874 {
875 spa_t *spa = tx->tx_pool->dp_spa;
876
877 ASSERT0(tx->tx_txg);
878
879 if (tx->tx_err)
880 return (tx->tx_err);
881
882 if (spa_suspended(spa)) {
883 /*
884 * If the user has indicated a blocking failure mode
885 * then return ERESTART which will block in dmu_tx_wait().
886 * Otherwise, return EIO so that an error can get
887 * propagated back to the VOP calls.
888 *
889 * Note that we always honor the txg_how flag regardless
890 * of the failuremode setting.
891 */
892 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
893 !(txg_how & TXG_WAIT))
894 return (SET_ERROR(EIO));
895
896 return (SET_ERROR(ERESTART));
897 }
898
899 if (!tx->tx_dirty_delayed &&
900 dsl_pool_need_dirty_delay(tx->tx_pool)) {
901 tx->tx_wait_dirty = B_TRUE;
902 return (SET_ERROR(ERESTART));
903 }
904
905 tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
906 tx->tx_needassign_txh = NULL;
907
908 /*
909 * NB: No error returns are allowed after txg_hold_open, but
910 * before processing the dnode holds, due to the
911 * dmu_tx_unassign() logic.
912 */
913
914 uint64_t towrite = 0;
915 uint64_t tohold = 0;
916 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
917 txh = list_next(&tx->tx_holds, txh)) {
918 dnode_t *dn = txh->txh_dnode;
919 if (dn != NULL) {
920 mutex_enter(&dn->dn_mtx);
921 if (dn->dn_assigned_txg == tx->tx_txg - 1) {
922 mutex_exit(&dn->dn_mtx);
923 tx->tx_needassign_txh = txh;
924 return (SET_ERROR(ERESTART));
925 }
926 if (dn->dn_assigned_txg == 0)
927 dn->dn_assigned_txg = tx->tx_txg;
928 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
929 (void) zfs_refcount_add(&dn->dn_tx_holds, tx);
930 mutex_exit(&dn->dn_mtx);
931 }
932 towrite += zfs_refcount_count(&txh->txh_space_towrite);
933 tohold += zfs_refcount_count(&txh->txh_memory_tohold);
934 }
935
936 /* needed allocation: worst-case estimate of write space */
937 uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
938 /* calculate memory footprint estimate */
939 uint64_t memory = towrite + tohold;
940
941 if (tx->tx_dir != NULL && asize != 0) {
942 int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
943 asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
944 if (err != 0)
945 return (err);
946 }
947
948 return (0);
949 }
950
951 static void
952 dmu_tx_unassign(dmu_tx_t *tx)
953 {
954 if (tx->tx_txg == 0)
955 return;
956
957 txg_rele_to_quiesce(&tx->tx_txgh);
958
959 /*
960 * Walk the transaction's hold list, removing the hold on the
961 * associated dnode, and notifying waiters if the refcount drops to 0.
962 */
963 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
964 txh != tx->tx_needassign_txh;
965 txh = list_next(&tx->tx_holds, txh)) {
966 dnode_t *dn = txh->txh_dnode;
967
968 if (dn == NULL)
969 continue;
970 mutex_enter(&dn->dn_mtx);
971 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
972
973 if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
974 dn->dn_assigned_txg = 0;
975 cv_broadcast(&dn->dn_notxholds);
976 }
977 mutex_exit(&dn->dn_mtx);
978 }
979
980 txg_rele_to_sync(&tx->tx_txgh);
981
982 tx->tx_lasttried_txg = tx->tx_txg;
983 tx->tx_txg = 0;
984 }
985
986 /*
987 * Assign tx to a transaction group; txg_how is a bitmask:
988 *
989 * If TXG_WAIT is set and the currently open txg is full, this function
990 * will wait until there's a new txg. This should be used when no locks
991 * are being held. With this bit set, this function will only fail if
992 * we're truly out of space (or over quota).
993 *
994 * If TXG_WAIT is *not* set and we can't assign into the currently open
995 * txg without blocking, this function will return immediately with
996 * ERESTART. This should be used whenever locks are being held. On an
997 * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
998 * and try again.
999 *
1000 * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
1001 * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
1002 * details on the throttle). This is used by the VFS operations, after
1003 * they have already called dmu_tx_wait() (though most likely on a
1004 * different tx).
1005 */
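/*
 * Illustrative sketch of the typical calling conventions (not part of this
 * file; "err", "waited", and "buf" are hypothetical locals).  With no locks
 * held, TXG_WAIT is the simple form:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	if ((err = dmu_tx_assign(tx, TXG_WAIT)) != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 *
 * When locks must be dropped before waiting, callers instead omit TXG_WAIT
 * and retry on ERESTART, passing TXG_NOTHROTTLE once they have waited:
 *
 * top:
 *	... take locks, create tx, declare holds ...
 *	err = dmu_tx_assign(tx, waited ? TXG_NOTHROTTLE : 0);
 *	if (err == ERESTART) {
 *		waited = B_TRUE;
 *		... drop locks ...
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		goto top;
 *	}
 */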
1006 int
1007 dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
1008 {
1009 int err;
1010
1011 ASSERT(tx->tx_txg == 0);
1012 ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
1013 ASSERT(!dsl_pool_sync_context(tx->tx_pool));
1014
1015 /* If we might wait, we must not hold the config lock. */
1016 IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));
1017
1018 if ((txg_how & TXG_NOTHROTTLE))
1019 tx->tx_dirty_delayed = B_TRUE;
1020
1021 while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
1022 dmu_tx_unassign(tx);
1023
1024 if (err != ERESTART || !(txg_how & TXG_WAIT))
1025 return (err);
1026
1027 dmu_tx_wait(tx);
1028 }
1029
1030 txg_rele_to_quiesce(&tx->tx_txgh);
1031
1032 return (0);
1033 }
1034
1035 void
1036 dmu_tx_wait(dmu_tx_t *tx)
1037 {
1038 spa_t *spa = tx->tx_pool->dp_spa;
1039 dsl_pool_t *dp = tx->tx_pool;
1040
1041 ASSERT(tx->tx_txg == 0);
1042 ASSERT(!dsl_pool_config_held(tx->tx_pool));
1043
1044 if (tx->tx_wait_dirty) {
1045 /*
1046 * dmu_tx_try_assign() has determined that we need to wait
1047 * because we've consumed much or all of the dirty buffer
1048 * space.
1049 */
1050 mutex_enter(&dp->dp_lock);
1051 while (dp->dp_dirty_total >= zfs_dirty_data_max)
1052 cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
1053 uint64_t dirty = dp->dp_dirty_total;
1054 mutex_exit(&dp->dp_lock);
1055
1056 dmu_tx_delay(tx, dirty);
1057
1058 tx->tx_wait_dirty = B_FALSE;
1059
1060 /*
1061 * Note: setting tx_dirty_delayed only has effect if the
1062 * caller used TXG_WAIT. Otherwise they are going to
1063 * destroy this tx and try again. The common case,
1064 * zfs_write(), uses TXG_WAIT.
1065 */
1066 tx->tx_dirty_delayed = B_TRUE;
1067 } else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
1068 /*
1069 * If the pool is suspended we need to wait until it
1070 * is resumed. Note that it's possible that the pool
1071 * has become active after this thread has tried to
1072 * obtain a tx. If that's the case then tx_lasttried_txg
1073 * would not have been set.
1074 */
1075 txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
1076 } else if (tx->tx_needassign_txh) {
1077 /*
1078 * A dnode is assigned to the quiescing txg. Wait for its
1079 * transaction to complete.
1080 */
1081 dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
1082
1083 mutex_enter(&dn->dn_mtx);
1084 while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
1085 cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
1086 mutex_exit(&dn->dn_mtx);
1087 tx->tx_needassign_txh = NULL;
1088 } else {
1089 /*
1090 * If we have a lot of dirty data just wait until we sync
1091 * out a TXG at which point we'll hopefully have synced
1092 * a portion of the changes.
1093 */
1094 txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
1095 }
1096 }
1097
1098 static void
1099 dmu_tx_destroy(dmu_tx_t *tx)
1100 {
1101 dmu_tx_hold_t *txh;
1102
1103 while ((txh = list_head(&tx->tx_holds)) != NULL) {
1104 dnode_t *dn = txh->txh_dnode;
1105
1106 list_remove(&tx->tx_holds, txh);
1107 zfs_refcount_destroy_many(&txh->txh_space_towrite,
1108 zfs_refcount_count(&txh->txh_space_towrite));
1109 zfs_refcount_destroy_many(&txh->txh_memory_tohold,
1110 zfs_refcount_count(&txh->txh_memory_tohold));
1111 kmem_free(txh, sizeof (dmu_tx_hold_t));
1112 if (dn != NULL)
1113 dnode_rele(dn, tx);
1114 }
1115
1116 list_destroy(&tx->tx_callbacks);
1117 list_destroy(&tx->tx_holds);
1118 kmem_free(tx, sizeof (dmu_tx_t));
1119 }
1120
1121 void
1122 dmu_tx_commit(dmu_tx_t *tx)
1123 {
1124 ASSERT(tx->tx_txg != 0);
1125
1126 /*
1127 * Go through the transaction's hold list and remove holds on
1128 * associated dnodes, notifying waiters if no holds remain.
1129 */
1130 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
1131 txh = list_next(&tx->tx_holds, txh)) {
1132 dnode_t *dn = txh->txh_dnode;
1133
1134 if (dn == NULL)
1135 continue;
1136
1137 mutex_enter(&dn->dn_mtx);
1138 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1139
1140 if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1141 dn->dn_assigned_txg = 0;
1142 cv_broadcast(&dn->dn_notxholds);
1143 }
1144 mutex_exit(&dn->dn_mtx);
1145 }
1146
1147 if (tx->tx_tempreserve_cookie)
1148 dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);
1149
1150 if (!list_is_empty(&tx->tx_callbacks))
1151 txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);
1152
1153 if (tx->tx_anyobj == FALSE)
1154 txg_rele_to_sync(&tx->tx_txgh);
1155
1156 dmu_tx_destroy(tx);
1157 }
1158
1159 void
1160 dmu_tx_abort(dmu_tx_t *tx)
1161 {
1162 ASSERT(tx->tx_txg == 0);
1163
1164 /*
1165 * Call any registered callbacks with an error code.
1166 */
1167 if (!list_is_empty(&tx->tx_callbacks))
1168 dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);
1169
1170 dmu_tx_destroy(tx);
1171 }
1172
1173 uint64_t
1174 dmu_tx_get_txg(dmu_tx_t *tx)
1175 {
1176 ASSERT(tx->tx_txg != 0);
1177 return (tx->tx_txg);
1178 }
1179
1180 dsl_pool_t *
1181 dmu_tx_pool(dmu_tx_t *tx)
1182 {
1183 ASSERT(tx->tx_pool != NULL);
1184 return (tx->tx_pool);
1185 }
1186
1187 void
1188 dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
1189 {
1190 dmu_tx_callback_t *dcb;
1191
1192 dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
1193
1194 dcb->dcb_func = func;
1195 dcb->dcb_data = data;
1196
1197 list_insert_tail(&tx->tx_callbacks, dcb);
1198 }
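
/*
 * Illustrative sketch (not part of this file; "my_cb", "my_arg", and
 * "my_state_t" are hypothetical): a registered callback runs once the txg
 * containing this tx has been committed to disk, or with an error (e.g.
 * ECANCELED) if the tx is aborted:
 *
 *	static void
 *	my_cb(void *my_arg, int error)
 *	{
 *		if (error != 0)
 *			... the tx did not make it to disk ...
 *		kmem_free(my_arg, sizeof (my_state_t));
 *	}
 *
 *	dmu_tx_callback_register(tx, my_cb, my_arg);
 */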
1199
1200 /*
1201 * Call all the commit callbacks on a list, with a given error code.
1202 */
1203 void
1204 dmu_tx_do_callbacks(list_t *cb_list, int error)
1205 {
1206 dmu_tx_callback_t *dcb;
1207
1208 while ((dcb = list_head(cb_list)) != NULL) {
1209 list_remove(cb_list, dcb);
1210 dcb->dcb_func(dcb->dcb_data, error);
1211 kmem_free(dcb, sizeof (dmu_tx_callback_t));
1212 }
1213 }
1214
1215 /*
1216 * Interface to hold a bunch of attributes, used for creating new files.
1218 * attrsize is the total size of all attributes
1219 * to be added during object creation
1220 *
1221 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
1222 */
1223
1224 /*
1225 * Hold the necessary attribute name for attribute registration.
1226 * This should be a very rare case where it is needed.  If it does
1227 * happen it would only happen on the first write to the file system.
1228 */
1229 static void
1230 dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
1231 {
1232 if (!sa->sa_need_attr_registration)
1233 return;
1234
1235 for (int i = 0; i != sa->sa_num_attrs; i++) {
1236 if (!sa->sa_attr_table[i].sa_registered) {
1237 if (sa->sa_reg_attr_obj)
1238 dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
1239 B_TRUE, sa->sa_attr_table[i].sa_name);
1240 else
1241 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
1242 B_TRUE, sa->sa_attr_table[i].sa_name);
1243 }
1244 }
1245 }
1246
1247 void
1248 dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
1249 {
1250 dmu_tx_hold_t *txh;
1251
1252 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
1253 THT_SPILL, 0, 0);
1254 if (txh != NULL)
1255 (void) zfs_refcount_add_many(&txh->txh_space_towrite,
1256 SPA_OLD_MAXBLOCKSIZE, FTAG);
1257 }
1258
1259 void
1260 dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
1261 {
1262 sa_os_t *sa = tx->tx_objset->os_sa;
1263
1264 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1265
1266 if (tx->tx_objset->os_sa->sa_master_obj == 0)
1267 return;
1268
1269 if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
1270 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1271 } else {
1272 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1273 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1274 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1275 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1276 }
1277
1278 dmu_tx_sa_registration_hold(sa, tx);
1279
1280 if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
1281 return;
1282
1283 (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
1284 THT_SPILL, 0, 0);
1285 }
1286
1287 /*
1288 * Hold SA attribute
1289 *
1290 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1291 *
1292 * may_grow indicates that this change may grow the set of stored
1293 * attributes, in which case the layout-attr ZAP and the spill block
1294 * must also be held.
1295 */
1296 void
1297 dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1298 {
1299 uint64_t object;
1300 sa_os_t *sa = tx->tx_objset->os_sa;
1301
1302 ASSERT(hdl != NULL);
1303
1304 object = sa_handle_object(hdl);
1305
1306 dmu_tx_hold_bonus(tx, object);
1307
1308 if (tx->tx_objset->os_sa->sa_master_obj == 0)
1309 return;
1310
1311 if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
1312 tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
1313 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1314 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1315 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1316 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1317 }
1318
1319 dmu_tx_sa_registration_hold(sa, tx);
1320
1321 if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
1322 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1323
1324 if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
1325 ASSERT(tx->tx_txg == 0);
1326 dmu_tx_hold_spill(tx, object);
1327 } else {
1328 dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1329 dnode_t *dn;
1330
1331 DB_DNODE_ENTER(db);
1332 dn = DB_DNODE(db);
1333 if (dn->dn_have_spill) {
1334 ASSERT(tx->tx_txg == 0);
1335 dmu_tx_hold_spill(tx, object);
1336 }
1337 DB_DNODE_EXIT(db);
1338 }
1339 }
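
/*
 * Illustrative sketch (not part of this file; "zp" stands for a ZPL znode):
 * a caller updating an existing attribute that cannot grow (e.g. a
 * timestamp) would typically declare the hold with may_grow == B_FALSE:
 *
 *	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 *
 * and pass B_TRUE when adding or enlarging attributes, so that the spill
 * block and layout-attr ZAP are also held.
 */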
1340