xref: /freebsd/sys/contrib/openzfs/module/zfs/dmu.c (revision f9693bef8dc83284e7ac905adc346f7d866b5245)
1eda14cbcSMatt Macy /*
2eda14cbcSMatt Macy  * CDDL HEADER START
3eda14cbcSMatt Macy  *
4eda14cbcSMatt Macy  * The contents of this file are subject to the terms of the
5eda14cbcSMatt Macy  * Common Development and Distribution License (the "License").
6eda14cbcSMatt Macy  * You may not use this file except in compliance with the License.
7eda14cbcSMatt Macy  *
8eda14cbcSMatt Macy  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9eda14cbcSMatt Macy  * or http://www.opensolaris.org/os/licensing.
10eda14cbcSMatt Macy  * See the License for the specific language governing permissions
11eda14cbcSMatt Macy  * and limitations under the License.
12eda14cbcSMatt Macy  *
13eda14cbcSMatt Macy  * When distributing Covered Code, include this CDDL HEADER in each
14eda14cbcSMatt Macy  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15eda14cbcSMatt Macy  * If applicable, add the following below this CDDL HEADER, with the
16eda14cbcSMatt Macy  * fields enclosed by brackets "[]" replaced with your own identifying
17eda14cbcSMatt Macy  * information: Portions Copyright [yyyy] [name of copyright owner]
18eda14cbcSMatt Macy  *
19eda14cbcSMatt Macy  * CDDL HEADER END
20eda14cbcSMatt Macy  */
21eda14cbcSMatt Macy /*
22eda14cbcSMatt Macy  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23eda14cbcSMatt Macy  * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
24eda14cbcSMatt Macy  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25eda14cbcSMatt Macy  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
26eda14cbcSMatt Macy  * Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
27eda14cbcSMatt Macy  * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
28eda14cbcSMatt Macy  * Copyright (c) 2019 Datto Inc.
29eda14cbcSMatt Macy  * Copyright (c) 2019, Klara Inc.
30eda14cbcSMatt Macy  * Copyright (c) 2019, Allan Jude
31eda14cbcSMatt Macy  */
32eda14cbcSMatt Macy 
33eda14cbcSMatt Macy #include <sys/dmu.h>
34eda14cbcSMatt Macy #include <sys/dmu_impl.h>
35eda14cbcSMatt Macy #include <sys/dmu_tx.h>
36eda14cbcSMatt Macy #include <sys/dbuf.h>
37eda14cbcSMatt Macy #include <sys/dnode.h>
38eda14cbcSMatt Macy #include <sys/zfs_context.h>
39eda14cbcSMatt Macy #include <sys/dmu_objset.h>
40eda14cbcSMatt Macy #include <sys/dmu_traverse.h>
41eda14cbcSMatt Macy #include <sys/dsl_dataset.h>
42eda14cbcSMatt Macy #include <sys/dsl_dir.h>
43eda14cbcSMatt Macy #include <sys/dsl_pool.h>
44eda14cbcSMatt Macy #include <sys/dsl_synctask.h>
45eda14cbcSMatt Macy #include <sys/dsl_prop.h>
46eda14cbcSMatt Macy #include <sys/dmu_zfetch.h>
47eda14cbcSMatt Macy #include <sys/zfs_ioctl.h>
48eda14cbcSMatt Macy #include <sys/zap.h>
49eda14cbcSMatt Macy #include <sys/zio_checksum.h>
50eda14cbcSMatt Macy #include <sys/zio_compress.h>
51eda14cbcSMatt Macy #include <sys/sa.h>
52eda14cbcSMatt Macy #include <sys/zfeature.h>
53eda14cbcSMatt Macy #include <sys/abd.h>
54eda14cbcSMatt Macy #include <sys/trace_zfs.h>
55ba27dd8bSMartin Matuska #include <sys/zfs_racct.h>
56eda14cbcSMatt Macy #include <sys/zfs_rlock.h>
57eda14cbcSMatt Macy #ifdef _KERNEL
58eda14cbcSMatt Macy #include <sys/vmsystm.h>
59eda14cbcSMatt Macy #include <sys/zfs_znode.h>
60eda14cbcSMatt Macy #endif
61eda14cbcSMatt Macy 
62eda14cbcSMatt Macy /*
63eda14cbcSMatt Macy  * Enable/disable nopwrite feature.
64eda14cbcSMatt Macy  */
65eda14cbcSMatt Macy int zfs_nopwrite_enabled = 1;
66eda14cbcSMatt Macy 
67eda14cbcSMatt Macy /*
68eda14cbcSMatt Macy  * Tunable to control percentage of dirtied L1 blocks from frees allowed into
69eda14cbcSMatt Macy  * one TXG. After this threshold is crossed, additional dirty blocks from frees
70eda14cbcSMatt Macy  * will wait until the next TXG.
71eda14cbcSMatt Macy  * A value of zero will disable this throttle.
72eda14cbcSMatt Macy  */
73eda14cbcSMatt Macy unsigned long zfs_per_txg_dirty_frees_percent = 5;
74eda14cbcSMatt Macy 
75eda14cbcSMatt Macy /*
76eda14cbcSMatt Macy  * Enable/disable forcing txg sync when dirty in dmu_offset_next.
77eda14cbcSMatt Macy  */
78eda14cbcSMatt Macy int zfs_dmu_offset_next_sync = 0;
79eda14cbcSMatt Macy 
80eda14cbcSMatt Macy /*
81eda14cbcSMatt Macy  * Limit how much data a single call may prefetch.  This helps to bound
82eda14cbcSMatt Macy  * the amount of memory that prefetching can consume; larger objects
83eda14cbcSMatt Macy  * should be prefetched a bit at a time.
84eda14cbcSMatt Macy  */
85eda14cbcSMatt Macy int dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
86eda14cbcSMatt Macy 
87eda14cbcSMatt Macy const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
88eda14cbcSMatt Macy 	{DMU_BSWAP_UINT8,  TRUE,  FALSE, FALSE, "unallocated"		},
89eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "object directory"	},
90eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  TRUE,  FALSE, "object array"		},
91eda14cbcSMatt Macy 	{DMU_BSWAP_UINT8,  TRUE,  FALSE, FALSE, "packed nvlist"		},
92eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "packed nvlist size"	},
93eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "bpobj"			},
94eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "bpobj header"		},
95eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "SPA space map header"	},
96eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "SPA space map"		},
97eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  FALSE, TRUE,  "ZIL intent log"	},
98eda14cbcSMatt Macy 	{DMU_BSWAP_DNODE,  TRUE,  FALSE, TRUE,  "DMU dnode"		},
99eda14cbcSMatt Macy 	{DMU_BSWAP_OBJSET, TRUE,  TRUE,  FALSE, "DMU objset"		},
100eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  TRUE,  FALSE, "DSL directory"		},
101eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL directory child map"},
102eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL dataset snap map"	},
103eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL props"		},
104eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  TRUE,  FALSE, "DSL dataset"		},
105eda14cbcSMatt Macy 	{DMU_BSWAP_ZNODE,  TRUE,  FALSE, FALSE, "ZFS znode"		},
106eda14cbcSMatt Macy 	{DMU_BSWAP_OLDACL, TRUE,  FALSE, TRUE,  "ZFS V0 ACL"		},
107eda14cbcSMatt Macy 	{DMU_BSWAP_UINT8,  FALSE, FALSE, TRUE,  "ZFS plain file"	},
108eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,  "ZFS directory"		},
109eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "ZFS master node"	},
110eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,  "ZFS delete queue"	},
111eda14cbcSMatt Macy 	{DMU_BSWAP_UINT8,  FALSE, FALSE, TRUE,  "zvol object"		},
112eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "zvol prop"		},
113eda14cbcSMatt Macy 	{DMU_BSWAP_UINT8,  FALSE, FALSE, TRUE,  "other uint8[]"		},
114eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, FALSE, FALSE, TRUE,  "other uint64[]"	},
115eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "other ZAP"		},
116eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "persistent error log"	},
117eda14cbcSMatt Macy 	{DMU_BSWAP_UINT8,  TRUE,  FALSE, FALSE, "SPA history"		},
118eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "SPA history offsets"	},
119eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "Pool properties"	},
120eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL permissions"	},
121eda14cbcSMatt Macy 	{DMU_BSWAP_ACL,    TRUE,  FALSE, TRUE,  "ZFS ACL"		},
122eda14cbcSMatt Macy 	{DMU_BSWAP_UINT8,  TRUE,  FALSE, TRUE,  "ZFS SYSACL"		},
123eda14cbcSMatt Macy 	{DMU_BSWAP_UINT8,  TRUE,  FALSE, TRUE,  "FUID table"		},
124eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "FUID table size"	},
125eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL dataset next clones"},
126eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "scan work queue"	},
127eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,  "ZFS user/group/project used" },
128eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,  "ZFS user/group/project quota"},
129eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "snapshot refcount tags"},
130eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "DDT ZAP algorithm"	},
131eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "DDT statistics"	},
132eda14cbcSMatt Macy 	{DMU_BSWAP_UINT8,  TRUE,  FALSE, TRUE,	"System attributes"	},
133eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,	"SA master node"	},
134eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,	"SA attr registration"	},
135eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,	"SA attr layouts"	},
136eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "scan translations"	},
137eda14cbcSMatt Macy 	{DMU_BSWAP_UINT8,  FALSE, FALSE, TRUE,  "deduplicated block"	},
138eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL deadlist map"	},
139eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  TRUE,  FALSE, "DSL deadlist map hdr"	},
140eda14cbcSMatt Macy 	{DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL dir clones"	},
141eda14cbcSMatt Macy 	{DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "bpobj subobj"		}
142eda14cbcSMatt Macy };
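
/*
 * The table above is indexed by dmu_object_type_t.  As an illustrative
 * sketch (assuming the field layout declared for dmu_object_type_info_t
 * in dmu.h), a legacy type such as DMU_OT_PLAIN_FILE_CONTENTS resolves
 * to the "ZFS plain file" row:
 *
 *	const dmu_object_type_info_t *oti = &dmu_ot[DMU_OT_PLAIN_FILE_CONTENTS];
 *	// byteswap class DMU_BSWAP_UINT8, not metadata, may be encrypted
 *
 * Newer DMU_OTN_* types encode equivalent properties in the type value
 * itself instead of using this table.
 */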
143eda14cbcSMatt Macy 
144eda14cbcSMatt Macy const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
145eda14cbcSMatt Macy 	{	byteswap_uint8_array,	"uint8"		},
146eda14cbcSMatt Macy 	{	byteswap_uint16_array,	"uint16"	},
147eda14cbcSMatt Macy 	{	byteswap_uint32_array,	"uint32"	},
148eda14cbcSMatt Macy 	{	byteswap_uint64_array,	"uint64"	},
149eda14cbcSMatt Macy 	{	zap_byteswap,		"zap"		},
150eda14cbcSMatt Macy 	{	dnode_buf_byteswap,	"dnode"		},
151eda14cbcSMatt Macy 	{	dmu_objset_byteswap,	"objset"	},
152eda14cbcSMatt Macy 	{	zfs_znode_byteswap,	"znode"		},
153eda14cbcSMatt Macy 	{	zfs_oldacl_byteswap,	"oldacl"	},
154eda14cbcSMatt Macy 	{	zfs_acl_byteswap,	"acl"		}
155eda14cbcSMatt Macy };
156eda14cbcSMatt Macy 
157eda14cbcSMatt Macy static int
158eda14cbcSMatt Macy dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
159eda14cbcSMatt Macy     void *tag, dmu_buf_t **dbp)
160eda14cbcSMatt Macy {
161eda14cbcSMatt Macy 	uint64_t blkid;
162eda14cbcSMatt Macy 	dmu_buf_impl_t *db;
163eda14cbcSMatt Macy 
164eda14cbcSMatt Macy 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
165eda14cbcSMatt Macy 	blkid = dbuf_whichblock(dn, 0, offset);
166eda14cbcSMatt Macy 	db = dbuf_hold(dn, blkid, tag);
167eda14cbcSMatt Macy 	rw_exit(&dn->dn_struct_rwlock);
168eda14cbcSMatt Macy 
169eda14cbcSMatt Macy 	if (db == NULL) {
170eda14cbcSMatt Macy 		*dbp = NULL;
171eda14cbcSMatt Macy 		return (SET_ERROR(EIO));
172eda14cbcSMatt Macy 	}
173eda14cbcSMatt Macy 
174eda14cbcSMatt Macy 	*dbp = &db->db;
175eda14cbcSMatt Macy 	return (0);
176eda14cbcSMatt Macy }

177eda14cbcSMatt Macy int
178eda14cbcSMatt Macy dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
179eda14cbcSMatt Macy     void *tag, dmu_buf_t **dbp)
180eda14cbcSMatt Macy {
181eda14cbcSMatt Macy 	dnode_t *dn;
182eda14cbcSMatt Macy 	uint64_t blkid;
183eda14cbcSMatt Macy 	dmu_buf_impl_t *db;
184eda14cbcSMatt Macy 	int err;
185eda14cbcSMatt Macy 
186eda14cbcSMatt Macy 	err = dnode_hold(os, object, FTAG, &dn);
187eda14cbcSMatt Macy 	if (err)
188eda14cbcSMatt Macy 		return (err);
189eda14cbcSMatt Macy 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
190eda14cbcSMatt Macy 	blkid = dbuf_whichblock(dn, 0, offset);
191eda14cbcSMatt Macy 	db = dbuf_hold(dn, blkid, tag);
192eda14cbcSMatt Macy 	rw_exit(&dn->dn_struct_rwlock);
193eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
194eda14cbcSMatt Macy 
195eda14cbcSMatt Macy 	if (db == NULL) {
196eda14cbcSMatt Macy 		*dbp = NULL;
197eda14cbcSMatt Macy 		return (SET_ERROR(EIO));
198eda14cbcSMatt Macy 	}
199eda14cbcSMatt Macy 
200eda14cbcSMatt Macy 	*dbp = &db->db;
201eda14cbcSMatt Macy 	return (err);
202eda14cbcSMatt Macy }
203eda14cbcSMatt Macy 
204eda14cbcSMatt Macy int
205eda14cbcSMatt Macy dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
206eda14cbcSMatt Macy     void *tag, dmu_buf_t **dbp, int flags)
207eda14cbcSMatt Macy {
208eda14cbcSMatt Macy 	int err;
209eda14cbcSMatt Macy 	int db_flags = DB_RF_CANFAIL;
210eda14cbcSMatt Macy 
211eda14cbcSMatt Macy 	if (flags & DMU_READ_NO_PREFETCH)
212eda14cbcSMatt Macy 		db_flags |= DB_RF_NOPREFETCH;
213eda14cbcSMatt Macy 	if (flags & DMU_READ_NO_DECRYPT)
214eda14cbcSMatt Macy 		db_flags |= DB_RF_NO_DECRYPT;
215eda14cbcSMatt Macy 
216eda14cbcSMatt Macy 	err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
217eda14cbcSMatt Macy 	if (err == 0) {
218eda14cbcSMatt Macy 		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
219eda14cbcSMatt Macy 		err = dbuf_read(db, NULL, db_flags);
220eda14cbcSMatt Macy 		if (err != 0) {
221eda14cbcSMatt Macy 			dbuf_rele(db, tag);
222eda14cbcSMatt Macy 			*dbp = NULL;
223eda14cbcSMatt Macy 		}
224eda14cbcSMatt Macy 	}
225eda14cbcSMatt Macy 
226eda14cbcSMatt Macy 	return (err);
227eda14cbcSMatt Macy }
228eda14cbcSMatt Macy 
229eda14cbcSMatt Macy int
230eda14cbcSMatt Macy dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
231eda14cbcSMatt Macy     void *tag, dmu_buf_t **dbp, int flags)
232eda14cbcSMatt Macy {
233eda14cbcSMatt Macy 	int err;
234eda14cbcSMatt Macy 	int db_flags = DB_RF_CANFAIL;
235eda14cbcSMatt Macy 
236eda14cbcSMatt Macy 	if (flags & DMU_READ_NO_PREFETCH)
237eda14cbcSMatt Macy 		db_flags |= DB_RF_NOPREFETCH;
238eda14cbcSMatt Macy 	if (flags & DMU_READ_NO_DECRYPT)
239eda14cbcSMatt Macy 		db_flags |= DB_RF_NO_DECRYPT;
240eda14cbcSMatt Macy 
241eda14cbcSMatt Macy 	err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
242eda14cbcSMatt Macy 	if (err == 0) {
243eda14cbcSMatt Macy 		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
244eda14cbcSMatt Macy 		err = dbuf_read(db, NULL, db_flags);
245eda14cbcSMatt Macy 		if (err != 0) {
246eda14cbcSMatt Macy 			dbuf_rele(db, tag);
247eda14cbcSMatt Macy 			*dbp = NULL;
248eda14cbcSMatt Macy 		}
249eda14cbcSMatt Macy 	}
250eda14cbcSMatt Macy 
251eda14cbcSMatt Macy 	return (err);
252eda14cbcSMatt Macy }
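
/*
 * A minimal usage sketch for the hold interfaces above (illustrative
 * only; it assumes the caller already has a held objset "os", a valid
 * object number "obj", and a byte offset "off" within that object):
 *
 *	dmu_buf_t *db;
 *	int err = dmu_buf_hold(os, obj, off, FTAG, &db, DMU_READ_PREFETCH);
 *	if (err == 0) {
 *		// db->db_data points at the cached block contents
 *		dmu_buf_rele(db, FTAG);
 *	}
 *
 * dmu_buf_hold_noread() returns the same hold without forcing the block
 * to be read in, and the *_by_dnode() variants skip the dnode lookup
 * when the caller already holds the dnode.
 */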
253eda14cbcSMatt Macy 
254eda14cbcSMatt Macy int
255eda14cbcSMatt Macy dmu_bonus_max(void)
256eda14cbcSMatt Macy {
257eda14cbcSMatt Macy 	return (DN_OLD_MAX_BONUSLEN);
258eda14cbcSMatt Macy }
259eda14cbcSMatt Macy 
260eda14cbcSMatt Macy int
261eda14cbcSMatt Macy dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
262eda14cbcSMatt Macy {
263eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
264eda14cbcSMatt Macy 	dnode_t *dn;
265eda14cbcSMatt Macy 	int error;
266eda14cbcSMatt Macy 
267eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
268eda14cbcSMatt Macy 	dn = DB_DNODE(db);
269eda14cbcSMatt Macy 
270eda14cbcSMatt Macy 	if (dn->dn_bonus != db) {
271eda14cbcSMatt Macy 		error = SET_ERROR(EINVAL);
272eda14cbcSMatt Macy 	} else if (newsize < 0 || newsize > db_fake->db_size) {
273eda14cbcSMatt Macy 		error = SET_ERROR(EINVAL);
274eda14cbcSMatt Macy 	} else {
275eda14cbcSMatt Macy 		dnode_setbonuslen(dn, newsize, tx);
276eda14cbcSMatt Macy 		error = 0;
277eda14cbcSMatt Macy 	}
278eda14cbcSMatt Macy 
279eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
280eda14cbcSMatt Macy 	return (error);
281eda14cbcSMatt Macy }
282eda14cbcSMatt Macy 
283eda14cbcSMatt Macy int
284eda14cbcSMatt Macy dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
285eda14cbcSMatt Macy {
286eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
287eda14cbcSMatt Macy 	dnode_t *dn;
288eda14cbcSMatt Macy 	int error;
289eda14cbcSMatt Macy 
290eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
291eda14cbcSMatt Macy 	dn = DB_DNODE(db);
292eda14cbcSMatt Macy 
293eda14cbcSMatt Macy 	if (!DMU_OT_IS_VALID(type)) {
294eda14cbcSMatt Macy 		error = SET_ERROR(EINVAL);
295eda14cbcSMatt Macy 	} else if (dn->dn_bonus != db) {
296eda14cbcSMatt Macy 		error = SET_ERROR(EINVAL);
297eda14cbcSMatt Macy 	} else {
298eda14cbcSMatt Macy 		dnode_setbonus_type(dn, type, tx);
299eda14cbcSMatt Macy 		error = 0;
300eda14cbcSMatt Macy 	}
301eda14cbcSMatt Macy 
302eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
303eda14cbcSMatt Macy 	return (error);
304eda14cbcSMatt Macy }
305eda14cbcSMatt Macy 
306eda14cbcSMatt Macy dmu_object_type_t
307eda14cbcSMatt Macy dmu_get_bonustype(dmu_buf_t *db_fake)
308eda14cbcSMatt Macy {
309eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
310eda14cbcSMatt Macy 	dnode_t *dn;
311eda14cbcSMatt Macy 	dmu_object_type_t type;
312eda14cbcSMatt Macy 
313eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
314eda14cbcSMatt Macy 	dn = DB_DNODE(db);
315eda14cbcSMatt Macy 	type = dn->dn_bonustype;
316eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
317eda14cbcSMatt Macy 
318eda14cbcSMatt Macy 	return (type);
319eda14cbcSMatt Macy }
320eda14cbcSMatt Macy 
321eda14cbcSMatt Macy int
322eda14cbcSMatt Macy dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
323eda14cbcSMatt Macy {
324eda14cbcSMatt Macy 	dnode_t *dn;
325eda14cbcSMatt Macy 	int error;
326eda14cbcSMatt Macy 
327eda14cbcSMatt Macy 	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);
328eda14cbcSMatt Macy 	dbuf_rm_spill(dn, tx);
329eda14cbcSMatt Macy 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
330eda14cbcSMatt Macy 	dnode_rm_spill(dn, tx);
331eda14cbcSMatt Macy 	rw_exit(&dn->dn_struct_rwlock);
332eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
333eda14cbcSMatt Macy 	return (error);
334eda14cbcSMatt Macy }
335eda14cbcSMatt Macy 
336eda14cbcSMatt Macy /*
337eda14cbcSMatt Macy  * Lookup and hold the bonus buffer for the provided dnode.  If the dnode
338eda14cbcSMatt Macy  * has not yet been allocated a bonus dbuf, a new one will be allocated.
339eda14cbcSMatt Macy  * Returns ENOENT, EIO, or 0.
340eda14cbcSMatt Macy  */
341eda14cbcSMatt Macy int dmu_bonus_hold_by_dnode(dnode_t *dn, void *tag, dmu_buf_t **dbp,
342eda14cbcSMatt Macy     uint32_t flags)
343eda14cbcSMatt Macy {
344eda14cbcSMatt Macy 	dmu_buf_impl_t *db;
345eda14cbcSMatt Macy 	int error;
346eda14cbcSMatt Macy 	uint32_t db_flags = DB_RF_MUST_SUCCEED;
347eda14cbcSMatt Macy 
348eda14cbcSMatt Macy 	if (flags & DMU_READ_NO_PREFETCH)
349eda14cbcSMatt Macy 		db_flags |= DB_RF_NOPREFETCH;
350eda14cbcSMatt Macy 	if (flags & DMU_READ_NO_DECRYPT)
351eda14cbcSMatt Macy 		db_flags |= DB_RF_NO_DECRYPT;
352eda14cbcSMatt Macy 
353eda14cbcSMatt Macy 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
354eda14cbcSMatt Macy 	if (dn->dn_bonus == NULL) {
355eda14cbcSMatt Macy 		rw_exit(&dn->dn_struct_rwlock);
356eda14cbcSMatt Macy 		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
357eda14cbcSMatt Macy 		if (dn->dn_bonus == NULL)
358eda14cbcSMatt Macy 			dbuf_create_bonus(dn);
359eda14cbcSMatt Macy 	}
360eda14cbcSMatt Macy 	db = dn->dn_bonus;
361eda14cbcSMatt Macy 
362eda14cbcSMatt Macy 	/* as long as the bonus buf is held, the dnode will be held */
363eda14cbcSMatt Macy 	if (zfs_refcount_add(&db->db_holds, tag) == 1) {
364eda14cbcSMatt Macy 		VERIFY(dnode_add_ref(dn, db));
365eda14cbcSMatt Macy 		atomic_inc_32(&dn->dn_dbufs_count);
366eda14cbcSMatt Macy 	}
367eda14cbcSMatt Macy 
368eda14cbcSMatt Macy 	/*
369eda14cbcSMatt Macy 	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
370eda14cbcSMatt Macy 	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
371eda14cbcSMatt Macy 	 * a dnode hold for every dbuf.
372eda14cbcSMatt Macy 	 */
373eda14cbcSMatt Macy 	rw_exit(&dn->dn_struct_rwlock);
374eda14cbcSMatt Macy 
375eda14cbcSMatt Macy 	error = dbuf_read(db, NULL, db_flags);
376eda14cbcSMatt Macy 	if (error) {
377eda14cbcSMatt Macy 		dnode_evict_bonus(dn);
378eda14cbcSMatt Macy 		dbuf_rele(db, tag);
379eda14cbcSMatt Macy 		*dbp = NULL;
380eda14cbcSMatt Macy 		return (error);
381eda14cbcSMatt Macy 	}
382eda14cbcSMatt Macy 
383eda14cbcSMatt Macy 	*dbp = &db->db;
384eda14cbcSMatt Macy 	return (0);
385eda14cbcSMatt Macy }
386eda14cbcSMatt Macy 
387eda14cbcSMatt Macy int
388eda14cbcSMatt Macy dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
389eda14cbcSMatt Macy {
390eda14cbcSMatt Macy 	dnode_t *dn;
391eda14cbcSMatt Macy 	int error;
392eda14cbcSMatt Macy 
393eda14cbcSMatt Macy 	error = dnode_hold(os, object, FTAG, &dn);
394eda14cbcSMatt Macy 	if (error)
395eda14cbcSMatt Macy 		return (error);
396eda14cbcSMatt Macy 
397eda14cbcSMatt Macy 	error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
398eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
399eda14cbcSMatt Macy 
400eda14cbcSMatt Macy 	return (error);
401eda14cbcSMatt Macy }
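
/*
 * Sketch of typical bonus buffer access (illustrative; assumes a held
 * objset "os" and an object "obj" that carries bonus data, e.g. SA or
 * znode attributes):
 *
 *	dmu_buf_t *db;
 *	int err = dmu_bonus_hold(os, obj, FTAG, &db);
 *	if (err == 0) {
 *		// db->db_data and db->db_size describe the bonus area
 *		dmu_buf_rele(db, FTAG);
 *	}
 */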
402eda14cbcSMatt Macy 
403eda14cbcSMatt Macy /*
404eda14cbcSMatt Macy  * Returns ENOENT, EIO, or 0.
405eda14cbcSMatt Macy  *
406eda14cbcSMatt Macy  * This interface will allocate a blank spill dbuf when a spill block
407eda14cbcSMatt Macy  * doesn't already exist on the dnode.
408eda14cbcSMatt Macy  *
409eda14cbcSMatt Macy  * If you only want to find an already-existing spill dbuf, then
410eda14cbcSMatt Macy  * dmu_spill_hold_existing() should be used.
411eda14cbcSMatt Macy  */
412eda14cbcSMatt Macy int
413eda14cbcSMatt Macy dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
414eda14cbcSMatt Macy {
415eda14cbcSMatt Macy 	dmu_buf_impl_t *db = NULL;
416eda14cbcSMatt Macy 	int err;
417eda14cbcSMatt Macy 
418eda14cbcSMatt Macy 	if ((flags & DB_RF_HAVESTRUCT) == 0)
419eda14cbcSMatt Macy 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
420eda14cbcSMatt Macy 
421eda14cbcSMatt Macy 	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
422eda14cbcSMatt Macy 
423eda14cbcSMatt Macy 	if ((flags & DB_RF_HAVESTRUCT) == 0)
424eda14cbcSMatt Macy 		rw_exit(&dn->dn_struct_rwlock);
425eda14cbcSMatt Macy 
426eda14cbcSMatt Macy 	if (db == NULL) {
427eda14cbcSMatt Macy 		*dbp = NULL;
428eda14cbcSMatt Macy 		return (SET_ERROR(EIO));
429eda14cbcSMatt Macy 	}
430eda14cbcSMatt Macy 	err = dbuf_read(db, NULL, flags);
431eda14cbcSMatt Macy 	if (err == 0)
432eda14cbcSMatt Macy 		*dbp = &db->db;
433eda14cbcSMatt Macy 	else {
434eda14cbcSMatt Macy 		dbuf_rele(db, tag);
435eda14cbcSMatt Macy 		*dbp = NULL;
436eda14cbcSMatt Macy 	}
437eda14cbcSMatt Macy 	return (err);
438eda14cbcSMatt Macy }
439eda14cbcSMatt Macy 
440eda14cbcSMatt Macy int
441eda14cbcSMatt Macy dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
442eda14cbcSMatt Macy {
443eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
444eda14cbcSMatt Macy 	dnode_t *dn;
445eda14cbcSMatt Macy 	int err;
446eda14cbcSMatt Macy 
447eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
448eda14cbcSMatt Macy 	dn = DB_DNODE(db);
449eda14cbcSMatt Macy 
450eda14cbcSMatt Macy 	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
451eda14cbcSMatt Macy 		err = SET_ERROR(EINVAL);
452eda14cbcSMatt Macy 	} else {
453eda14cbcSMatt Macy 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
454eda14cbcSMatt Macy 
455eda14cbcSMatt Macy 		if (!dn->dn_have_spill) {
456eda14cbcSMatt Macy 			err = SET_ERROR(ENOENT);
457eda14cbcSMatt Macy 		} else {
458eda14cbcSMatt Macy 			err = dmu_spill_hold_by_dnode(dn,
459eda14cbcSMatt Macy 			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
460eda14cbcSMatt Macy 		}
461eda14cbcSMatt Macy 
462eda14cbcSMatt Macy 		rw_exit(&dn->dn_struct_rwlock);
463eda14cbcSMatt Macy 	}
464eda14cbcSMatt Macy 
465eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
466eda14cbcSMatt Macy 	return (err);
467eda14cbcSMatt Macy }
468eda14cbcSMatt Macy 
469eda14cbcSMatt Macy int
470eda14cbcSMatt Macy dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, void *tag,
471eda14cbcSMatt Macy     dmu_buf_t **dbp)
472eda14cbcSMatt Macy {
473eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
474eda14cbcSMatt Macy 	dnode_t *dn;
475eda14cbcSMatt Macy 	int err;
476eda14cbcSMatt Macy 	uint32_t db_flags = DB_RF_CANFAIL;
477eda14cbcSMatt Macy 
478eda14cbcSMatt Macy 	if (flags & DMU_READ_NO_DECRYPT)
479eda14cbcSMatt Macy 		db_flags |= DB_RF_NO_DECRYPT;
480eda14cbcSMatt Macy 
481eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
482eda14cbcSMatt Macy 	dn = DB_DNODE(db);
483eda14cbcSMatt Macy 	err = dmu_spill_hold_by_dnode(dn, db_flags, tag, dbp);
484eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
485eda14cbcSMatt Macy 
486eda14cbcSMatt Macy 	return (err);
487eda14cbcSMatt Macy }
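
/*
 * Illustrative sketch tying the spill interfaces together (assumes
 * "bonus" is a bonus dbuf obtained via dmu_bonus_hold()):
 *
 *	dmu_buf_t *spill_db;
 *	int err = dmu_spill_hold_existing(bonus, FTAG, &spill_db);
 *	if (err == 0) {
 *		// a spill block already existed; use spill_db->db_data
 *		dmu_buf_rele(spill_db, FTAG);
 *	} else if (err == ENOENT) {
 *		// no spill block yet; dmu_spill_hold_by_bonus() would
 *		// allocate a blank one instead of failing
 *	}
 */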
488eda14cbcSMatt Macy 
489eda14cbcSMatt Macy /*
490eda14cbcSMatt Macy  * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
491eda14cbcSMatt Macy  * to take a held dnode rather than <os, object> -- the lookup is wasteful,
492eda14cbcSMatt Macy  * and can induce severe lock contention when writing to several files
493eda14cbcSMatt Macy  * whose dnodes are in the same block.
494eda14cbcSMatt Macy  */
495eda14cbcSMatt Macy int
496eda14cbcSMatt Macy dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
497eda14cbcSMatt Macy     boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
498eda14cbcSMatt Macy {
499eda14cbcSMatt Macy 	dmu_buf_t **dbp;
500*f9693befSMartin Matuska 	zstream_t *zs = NULL;
501eda14cbcSMatt Macy 	uint64_t blkid, nblks, i;
502eda14cbcSMatt Macy 	uint32_t dbuf_flags;
503eda14cbcSMatt Macy 	int err;
5047877fdebSMatt Macy 	zio_t *zio = NULL;
505*f9693befSMartin Matuska 	boolean_t missed = B_FALSE;
506eda14cbcSMatt Macy 
507eda14cbcSMatt Macy 	ASSERT(length <= DMU_MAX_ACCESS);
508eda14cbcSMatt Macy 
509eda14cbcSMatt Macy 	/*
510eda14cbcSMatt Macy 	 * Note: We directly notify the prefetch code of this read, so that
511eda14cbcSMatt Macy 	 * we can tell it about the multi-block read.  dbuf_read() only knows
512eda14cbcSMatt Macy 	 * about the one block it is accessing.
513eda14cbcSMatt Macy 	 */
514eda14cbcSMatt Macy 	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
515eda14cbcSMatt Macy 	    DB_RF_NOPREFETCH;
516eda14cbcSMatt Macy 
517eda14cbcSMatt Macy 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
518eda14cbcSMatt Macy 	if (dn->dn_datablkshift) {
519eda14cbcSMatt Macy 		int blkshift = dn->dn_datablkshift;
520eda14cbcSMatt Macy 		nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
521eda14cbcSMatt Macy 		    P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
522eda14cbcSMatt Macy 	} else {
523eda14cbcSMatt Macy 		if (offset + length > dn->dn_datablksz) {
524eda14cbcSMatt Macy 			zfs_panic_recover("zfs: accessing past end of object "
525eda14cbcSMatt Macy 			    "%llx/%llx (size=%u access=%llu+%llu)",
526eda14cbcSMatt Macy 			    (longlong_t)dn->dn_objset->
527eda14cbcSMatt Macy 			    os_dsl_dataset->ds_object,
528eda14cbcSMatt Macy 			    (longlong_t)dn->dn_object, dn->dn_datablksz,
529eda14cbcSMatt Macy 			    (longlong_t)offset, (longlong_t)length);
530eda14cbcSMatt Macy 			rw_exit(&dn->dn_struct_rwlock);
531eda14cbcSMatt Macy 			return (SET_ERROR(EIO));
532eda14cbcSMatt Macy 		}
533eda14cbcSMatt Macy 		nblks = 1;
534eda14cbcSMatt Macy 	}
535eda14cbcSMatt Macy 	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);
536eda14cbcSMatt Macy 
5377877fdebSMatt Macy 	if (read)
5387877fdebSMatt Macy 		zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
5397877fdebSMatt Macy 		    ZIO_FLAG_CANFAIL);
540eda14cbcSMatt Macy 	blkid = dbuf_whichblock(dn, 0, offset);
541*f9693befSMartin Matuska 	if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
542*f9693befSMartin Matuska 	    DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) {
543*f9693befSMartin Matuska 		/*
544*f9693befSMartin Matuska 		 * Prepare the zfetch before initiating the demand reads, so
545*f9693befSMartin Matuska 		 * that if multiple threads block on same indirect block, we
546*f9693befSMartin Matuska 		 * base predictions on the original less racy request order.
547*f9693befSMartin Matuska 		 */
548*f9693befSMartin Matuska 		zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks,
549*f9693befSMartin Matuska 		    read && DNODE_IS_CACHEABLE(dn), B_TRUE);
550*f9693befSMartin Matuska 	}
551eda14cbcSMatt Macy 	for (i = 0; i < nblks; i++) {
552eda14cbcSMatt Macy 		dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
553eda14cbcSMatt Macy 		if (db == NULL) {
554*f9693befSMartin Matuska 			if (zs)
555*f9693befSMartin Matuska 				dmu_zfetch_run(zs, missed, B_TRUE);
556eda14cbcSMatt Macy 			rw_exit(&dn->dn_struct_rwlock);
557eda14cbcSMatt Macy 			dmu_buf_rele_array(dbp, nblks, tag);
5587877fdebSMatt Macy 			if (read)
559eda14cbcSMatt Macy 				zio_nowait(zio);
560eda14cbcSMatt Macy 			return (SET_ERROR(EIO));
561eda14cbcSMatt Macy 		}
562eda14cbcSMatt Macy 
563*f9693befSMartin Matuska 		/*
564*f9693befSMartin Matuska 		 * Initiate async demand data read.
565*f9693befSMartin Matuska 		 * We check the db_state after calling dbuf_read() because
566*f9693befSMartin Matuska 		 * (1) dbuf_read() may change the state to CACHED due to a
567*f9693befSMartin Matuska 		 * hit in the ARC, and (2) on a cache miss, a child will
568*f9693befSMartin Matuska 		 * have been added to "zio" but not yet completed, so the
569*f9693befSMartin Matuska 		 * state will not yet be CACHED.
570*f9693befSMartin Matuska 		 */
571*f9693befSMartin Matuska 		if (read) {
572eda14cbcSMatt Macy 			(void) dbuf_read(db, zio, dbuf_flags);
573*f9693befSMartin Matuska 			if (db->db_state != DB_CACHED)
574*f9693befSMartin Matuska 				missed = B_TRUE;
575*f9693befSMartin Matuska 		}
576eda14cbcSMatt Macy 		dbp[i] = &db->db;
577eda14cbcSMatt Macy 	}
578eda14cbcSMatt Macy 
579ba27dd8bSMartin Matuska 	if (!read)
580ba27dd8bSMartin Matuska 		zfs_racct_write(length, nblks);
581ba27dd8bSMartin Matuska 
582*f9693befSMartin Matuska 	if (zs)
583*f9693befSMartin Matuska 		dmu_zfetch_run(zs, missed, B_TRUE);
584eda14cbcSMatt Macy 	rw_exit(&dn->dn_struct_rwlock);
585eda14cbcSMatt Macy 
5867877fdebSMatt Macy 	if (read) {
5877877fdebSMatt Macy 		/* wait for async read i/o */
588eda14cbcSMatt Macy 		err = zio_wait(zio);
589eda14cbcSMatt Macy 		if (err) {
590eda14cbcSMatt Macy 			dmu_buf_rele_array(dbp, nblks, tag);
591eda14cbcSMatt Macy 			return (err);
592eda14cbcSMatt Macy 		}
593eda14cbcSMatt Macy 
594eda14cbcSMatt Macy 		/* wait for other io to complete */
595eda14cbcSMatt Macy 		for (i = 0; i < nblks; i++) {
596eda14cbcSMatt Macy 			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
597eda14cbcSMatt Macy 			mutex_enter(&db->db_mtx);
598eda14cbcSMatt Macy 			while (db->db_state == DB_READ ||
599eda14cbcSMatt Macy 			    db->db_state == DB_FILL)
600eda14cbcSMatt Macy 				cv_wait(&db->db_changed, &db->db_mtx);
601eda14cbcSMatt Macy 			if (db->db_state == DB_UNCACHED)
602eda14cbcSMatt Macy 				err = SET_ERROR(EIO);
603eda14cbcSMatt Macy 			mutex_exit(&db->db_mtx);
604eda14cbcSMatt Macy 			if (err) {
605eda14cbcSMatt Macy 				dmu_buf_rele_array(dbp, nblks, tag);
606eda14cbcSMatt Macy 				return (err);
607eda14cbcSMatt Macy 			}
608eda14cbcSMatt Macy 		}
609eda14cbcSMatt Macy 	}
610eda14cbcSMatt Macy 
611eda14cbcSMatt Macy 	*numbufsp = nblks;
612eda14cbcSMatt Macy 	*dbpp = dbp;
613eda14cbcSMatt Macy 	return (0);
614eda14cbcSMatt Macy }
615eda14cbcSMatt Macy 
616eda14cbcSMatt Macy static int
617eda14cbcSMatt Macy dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
618eda14cbcSMatt Macy     uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
619eda14cbcSMatt Macy {
620eda14cbcSMatt Macy 	dnode_t *dn;
621eda14cbcSMatt Macy 	int err;
622eda14cbcSMatt Macy 
623eda14cbcSMatt Macy 	err = dnode_hold(os, object, FTAG, &dn);
624eda14cbcSMatt Macy 	if (err)
625eda14cbcSMatt Macy 		return (err);
626eda14cbcSMatt Macy 
627eda14cbcSMatt Macy 	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
628eda14cbcSMatt Macy 	    numbufsp, dbpp, DMU_READ_PREFETCH);
629eda14cbcSMatt Macy 
630eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
631eda14cbcSMatt Macy 
632eda14cbcSMatt Macy 	return (err);
633eda14cbcSMatt Macy }
634eda14cbcSMatt Macy 
635eda14cbcSMatt Macy int
636eda14cbcSMatt Macy dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
637eda14cbcSMatt Macy     uint64_t length, boolean_t read, void *tag, int *numbufsp,
638eda14cbcSMatt Macy     dmu_buf_t ***dbpp)
639eda14cbcSMatt Macy {
640eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
641eda14cbcSMatt Macy 	dnode_t *dn;
642eda14cbcSMatt Macy 	int err;
643eda14cbcSMatt Macy 
644eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
645eda14cbcSMatt Macy 	dn = DB_DNODE(db);
646eda14cbcSMatt Macy 	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
647eda14cbcSMatt Macy 	    numbufsp, dbpp, DMU_READ_PREFETCH);
648eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
649eda14cbcSMatt Macy 
650eda14cbcSMatt Macy 	return (err);
651eda14cbcSMatt Macy }
652eda14cbcSMatt Macy 
653eda14cbcSMatt Macy void
654eda14cbcSMatt Macy dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
655eda14cbcSMatt Macy {
656eda14cbcSMatt Macy 	int i;
657eda14cbcSMatt Macy 	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
658eda14cbcSMatt Macy 
659eda14cbcSMatt Macy 	if (numbufs == 0)
660eda14cbcSMatt Macy 		return;
661eda14cbcSMatt Macy 
662eda14cbcSMatt Macy 	for (i = 0; i < numbufs; i++) {
663eda14cbcSMatt Macy 		if (dbp[i])
664eda14cbcSMatt Macy 			dbuf_rele(dbp[i], tag);
665eda14cbcSMatt Macy 	}
666eda14cbcSMatt Macy 
667eda14cbcSMatt Macy 	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
668eda14cbcSMatt Macy }
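
/*
 * Sketch of the array hold/release pattern used by the bulk read and
 * write paths (illustrative; assumes a held dnode "dn" and a range
 * "off"/"len" within DMU_MAX_ACCESS):
 *
 *	dmu_buf_t **dbp;
 *	int numbufs;
 *	int err = dmu_buf_hold_array_by_dnode(dn, off, len, TRUE, FTAG,
 *	    &numbufs, &dbp, DMU_READ_PREFETCH);
 *	if (err == 0) {
 *		// dbp[0 .. numbufs - 1] cover [off, off + len) and have
 *		// been read in (read == TRUE waits for the I/O)
 *		dmu_buf_rele_array(dbp, numbufs, FTAG);
 *	}
 */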
669eda14cbcSMatt Macy 
670eda14cbcSMatt Macy /*
671eda14cbcSMatt Macy  * Issue prefetch i/os for the given blocks.  If level is greater than 0, the
672eda14cbcSMatt Macy  * indirect blocks prefetched will be those that point to the blocks containing
673eda14cbcSMatt Macy  * the data starting at offset, and continuing to offset + len.
674eda14cbcSMatt Macy  *
675eda14cbcSMatt Macy  * Note that if the indirect blocks above the blocks being prefetched are not
676eda14cbcSMatt Macy  * in cache, they will be asynchronously read in.
677eda14cbcSMatt Macy  */
678eda14cbcSMatt Macy void
679eda14cbcSMatt Macy dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
680eda14cbcSMatt Macy     uint64_t len, zio_priority_t pri)
681eda14cbcSMatt Macy {
682eda14cbcSMatt Macy 	dnode_t *dn;
683eda14cbcSMatt Macy 	uint64_t blkid;
684eda14cbcSMatt Macy 	int nblks, err;
685eda14cbcSMatt Macy 
686eda14cbcSMatt Macy 	if (len == 0) {  /* they're interested in the bonus buffer */
687eda14cbcSMatt Macy 		dn = DMU_META_DNODE(os);
688eda14cbcSMatt Macy 
689eda14cbcSMatt Macy 		if (object == 0 || object >= DN_MAX_OBJECT)
690eda14cbcSMatt Macy 			return;
691eda14cbcSMatt Macy 
692eda14cbcSMatt Macy 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
693eda14cbcSMatt Macy 		blkid = dbuf_whichblock(dn, level,
694eda14cbcSMatt Macy 		    object * sizeof (dnode_phys_t));
695eda14cbcSMatt Macy 		dbuf_prefetch(dn, level, blkid, pri, 0);
696eda14cbcSMatt Macy 		rw_exit(&dn->dn_struct_rwlock);
697eda14cbcSMatt Macy 		return;
698eda14cbcSMatt Macy 	}
699eda14cbcSMatt Macy 
700eda14cbcSMatt Macy 	/*
701eda14cbcSMatt Macy 	 * See comment before the definition of dmu_prefetch_max.
702eda14cbcSMatt Macy 	 */
703eda14cbcSMatt Macy 	len = MIN(len, dmu_prefetch_max);
704eda14cbcSMatt Macy 
705eda14cbcSMatt Macy 	/*
706eda14cbcSMatt Macy 	 * XXX - Note, if the dnode for the requested object is not
707eda14cbcSMatt Macy 	 * already cached, we will do a *synchronous* read in the
708eda14cbcSMatt Macy 	 * dnode_hold() call.  The same is true for any indirects.
709eda14cbcSMatt Macy 	 */
710eda14cbcSMatt Macy 	err = dnode_hold(os, object, FTAG, &dn);
711eda14cbcSMatt Macy 	if (err != 0)
712eda14cbcSMatt Macy 		return;
713eda14cbcSMatt Macy 
714eda14cbcSMatt Macy 	/*
715eda14cbcSMatt Macy 	 * offset + len - 1 is the last byte we want to prefetch for, and offset
716eda14cbcSMatt Macy  * is the first.  Then dbuf_whichblock(dn, level, offset + len - 1) is the
717eda14cbcSMatt Macy  * last block we want to prefetch, and dbuf_whichblock(dn, level,
718eda14cbcSMatt Macy  * offset) is the first.  Then the number we need to prefetch is the
719eda14cbcSMatt Macy 	 * last - first + 1.
720eda14cbcSMatt Macy 	 */
721eda14cbcSMatt Macy 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
722eda14cbcSMatt Macy 	if (level > 0 || dn->dn_datablkshift != 0) {
723eda14cbcSMatt Macy 		nblks = dbuf_whichblock(dn, level, offset + len - 1) -
724eda14cbcSMatt Macy 		    dbuf_whichblock(dn, level, offset) + 1;
725eda14cbcSMatt Macy 	} else {
726eda14cbcSMatt Macy 		nblks = (offset < dn->dn_datablksz);
727eda14cbcSMatt Macy 	}
728eda14cbcSMatt Macy 
729eda14cbcSMatt Macy 	if (nblks != 0) {
730eda14cbcSMatt Macy 		blkid = dbuf_whichblock(dn, level, offset);
731eda14cbcSMatt Macy 		for (int i = 0; i < nblks; i++)
732eda14cbcSMatt Macy 			dbuf_prefetch(dn, level, blkid + i, pri, 0);
733eda14cbcSMatt Macy 	}
734eda14cbcSMatt Macy 	rw_exit(&dn->dn_struct_rwlock);
735eda14cbcSMatt Macy 
736eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
737eda14cbcSMatt Macy }
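
/*
 * Sketch of a typical prefetch call (illustrative; assumes a held
 * objset "os" and object "obj"):
 *
 *	// asynchronously warm the level-0 blocks covering the next 1MB
 *	dmu_prefetch(os, obj, 0, off, 1024 * 1024, ZIO_PRIORITY_ASYNC_READ);
 *
 * Passing len == 0 instead prefetches the meta-dnode block holding the
 * object's dnode, and level > 0 prefetches the indirect blocks that
 * point at the given data range.
 */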
738eda14cbcSMatt Macy 
739eda14cbcSMatt Macy /*
740eda14cbcSMatt Macy  * Get the next "chunk" of file data to free.  We traverse the file from
741eda14cbcSMatt Macy  * the end so that the file gets shorter over time (if we crash in the
742eda14cbcSMatt Macy  * middle, this will leave us in a better state).  We find allocated file
743eda14cbcSMatt Macy  * data by simply searching the allocated level 1 indirects.
744eda14cbcSMatt Macy  *
745eda14cbcSMatt Macy  * On input, *start should be the first offset that does not need to be
746eda14cbcSMatt Macy  * freed (e.g. "offset + length").  On return, *start will be the first
747eda14cbcSMatt Macy  * offset that should be freed and l1blks is set to the number of level 1
748eda14cbcSMatt Macy  * indirect blocks found within the chunk.
749eda14cbcSMatt Macy  */
750eda14cbcSMatt Macy static int
751eda14cbcSMatt Macy get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
752eda14cbcSMatt Macy {
753eda14cbcSMatt Macy 	uint64_t blks;
754eda14cbcSMatt Macy 	uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
755eda14cbcSMatt Macy 	/* bytes of data covered by a level-1 indirect block */
756eda14cbcSMatt Macy 	uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
757eda14cbcSMatt Macy 	    EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
758eda14cbcSMatt Macy 
759eda14cbcSMatt Macy 	ASSERT3U(minimum, <=, *start);
760eda14cbcSMatt Macy 
761eda14cbcSMatt Macy 	/*
762eda14cbcSMatt Macy 	 * Check if we can free the entire range assuming that all of the
763eda14cbcSMatt Macy 	 * L1 blocks in this range have data. If we can, we use this
764eda14cbcSMatt Macy 	 * worst case value as an estimate so we can avoid having to look
765eda14cbcSMatt Macy 	 * at the object's actual data.
766eda14cbcSMatt Macy 	 */
767eda14cbcSMatt Macy 	uint64_t total_l1blks =
768eda14cbcSMatt Macy 	    (roundup(*start, iblkrange) - (minimum / iblkrange * iblkrange)) /
769eda14cbcSMatt Macy 	    iblkrange;
770eda14cbcSMatt Macy 	if (total_l1blks <= maxblks) {
771eda14cbcSMatt Macy 		*l1blks = total_l1blks;
772eda14cbcSMatt Macy 		*start = minimum;
773eda14cbcSMatt Macy 		return (0);
774eda14cbcSMatt Macy 	}
775eda14cbcSMatt Macy 	ASSERT(ISP2(iblkrange));
776eda14cbcSMatt Macy 
777eda14cbcSMatt Macy 	for (blks = 0; *start > minimum && blks < maxblks; blks++) {
778eda14cbcSMatt Macy 		int err;
779eda14cbcSMatt Macy 
780eda14cbcSMatt Macy 		/*
781eda14cbcSMatt Macy 		 * dnode_next_offset(BACKWARDS) will find an allocated L1
782eda14cbcSMatt Macy 		 * indirect block at or before the input offset.  We must
783eda14cbcSMatt Macy 		 * decrement *start so that it is at the end of the region
784eda14cbcSMatt Macy 		 * to search.
785eda14cbcSMatt Macy 		 */
786eda14cbcSMatt Macy 		(*start)--;
787eda14cbcSMatt Macy 
788eda14cbcSMatt Macy 		err = dnode_next_offset(dn,
789eda14cbcSMatt Macy 		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);
790eda14cbcSMatt Macy 
791eda14cbcSMatt Macy 		/* if there are no indirect blocks before start, we are done */
792eda14cbcSMatt Macy 		if (err == ESRCH) {
793eda14cbcSMatt Macy 			*start = minimum;
794eda14cbcSMatt Macy 			break;
795eda14cbcSMatt Macy 		} else if (err != 0) {
796eda14cbcSMatt Macy 			*l1blks = blks;
797eda14cbcSMatt Macy 			return (err);
798eda14cbcSMatt Macy 		}
799eda14cbcSMatt Macy 
800eda14cbcSMatt Macy 		/* set start to the beginning of this L1 indirect */
801eda14cbcSMatt Macy 		*start = P2ALIGN(*start, iblkrange);
802eda14cbcSMatt Macy 	}
803eda14cbcSMatt Macy 	if (*start < minimum)
804eda14cbcSMatt Macy 		*start = minimum;
805eda14cbcSMatt Macy 	*l1blks = blks;
806eda14cbcSMatt Macy 
807eda14cbcSMatt Macy 	return (0);
808eda14cbcSMatt Macy }
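
/*
 * Worked example of the sizing above, with illustrative numbers: for a
 * 128K data block size and a 128K (2^17) indirect block size holding
 * 128-byte block pointers, EPB() yields 1024 pointers per L1 block, so
 * iblkrange = 128K * 1024 = 128M of file data per L1 indirect.  maxblks
 * then bounds how many such L1 blocks one chunk may dirty, which keeps
 * each free transaction's footprint within DMU_MAX_ACCESS.
 */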
809eda14cbcSMatt Macy 
810eda14cbcSMatt Macy /*
811eda14cbcSMatt Macy  * If this objset is of type DMU_OST_ZFS, return true if the vfs's unmounted
812eda14cbcSMatt Macy  * flag is set; otherwise return false.
813eda14cbcSMatt Macy  * Used below in dmu_free_long_range_impl() to allow aborting when unmounting.
814eda14cbcSMatt Macy  */
815eda14cbcSMatt Macy /*ARGSUSED*/
816eda14cbcSMatt Macy static boolean_t
817eda14cbcSMatt Macy dmu_objset_zfs_unmounting(objset_t *os)
818eda14cbcSMatt Macy {
819eda14cbcSMatt Macy #ifdef _KERNEL
820eda14cbcSMatt Macy 	if (dmu_objset_type(os) == DMU_OST_ZFS)
821eda14cbcSMatt Macy 		return (zfs_get_vfs_flag_unmounted(os));
822eda14cbcSMatt Macy #endif
823eda14cbcSMatt Macy 	return (B_FALSE);
824eda14cbcSMatt Macy }
825eda14cbcSMatt Macy 
826eda14cbcSMatt Macy static int
827eda14cbcSMatt Macy dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
828eda14cbcSMatt Macy     uint64_t length)
829eda14cbcSMatt Macy {
830eda14cbcSMatt Macy 	uint64_t object_size;
831eda14cbcSMatt Macy 	int err;
832eda14cbcSMatt Macy 	uint64_t dirty_frees_threshold;
833eda14cbcSMatt Macy 	dsl_pool_t *dp = dmu_objset_pool(os);
834eda14cbcSMatt Macy 
835eda14cbcSMatt Macy 	if (dn == NULL)
836eda14cbcSMatt Macy 		return (SET_ERROR(EINVAL));
837eda14cbcSMatt Macy 
838eda14cbcSMatt Macy 	object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
839eda14cbcSMatt Macy 	if (offset >= object_size)
840eda14cbcSMatt Macy 		return (0);
841eda14cbcSMatt Macy 
842eda14cbcSMatt Macy 	if (zfs_per_txg_dirty_frees_percent <= 100)
843eda14cbcSMatt Macy 		dirty_frees_threshold =
844eda14cbcSMatt Macy 		    zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
845eda14cbcSMatt Macy 	else
846eda14cbcSMatt Macy 		dirty_frees_threshold = zfs_dirty_data_max / 20;
847eda14cbcSMatt Macy 
848eda14cbcSMatt Macy 	if (length == DMU_OBJECT_END || offset + length > object_size)
849eda14cbcSMatt Macy 		length = object_size - offset;
850eda14cbcSMatt Macy 
851eda14cbcSMatt Macy 	while (length != 0) {
852eda14cbcSMatt Macy 		uint64_t chunk_end, chunk_begin, chunk_len;
853eda14cbcSMatt Macy 		uint64_t l1blks;
854eda14cbcSMatt Macy 		dmu_tx_t *tx;
855eda14cbcSMatt Macy 
856eda14cbcSMatt Macy 		if (dmu_objset_zfs_unmounting(dn->dn_objset))
857eda14cbcSMatt Macy 			return (SET_ERROR(EINTR));
858eda14cbcSMatt Macy 
859eda14cbcSMatt Macy 		chunk_end = chunk_begin = offset + length;
860eda14cbcSMatt Macy 
861eda14cbcSMatt Macy 		/* move chunk_begin backwards to the beginning of this chunk */
862eda14cbcSMatt Macy 		err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
863eda14cbcSMatt Macy 		if (err)
864eda14cbcSMatt Macy 			return (err);
865eda14cbcSMatt Macy 		ASSERT3U(chunk_begin, >=, offset);
866eda14cbcSMatt Macy 		ASSERT3U(chunk_begin, <=, chunk_end);
867eda14cbcSMatt Macy 
868eda14cbcSMatt Macy 		chunk_len = chunk_end - chunk_begin;
869eda14cbcSMatt Macy 
870eda14cbcSMatt Macy 		tx = dmu_tx_create(os);
871eda14cbcSMatt Macy 		dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
872eda14cbcSMatt Macy 
873eda14cbcSMatt Macy 		/*
874eda14cbcSMatt Macy 		 * Mark this transaction as typically resulting in a net
875eda14cbcSMatt Macy 		 * reduction in space used.
876eda14cbcSMatt Macy 		 */
877eda14cbcSMatt Macy 		dmu_tx_mark_netfree(tx);
878eda14cbcSMatt Macy 		err = dmu_tx_assign(tx, TXG_WAIT);
879eda14cbcSMatt Macy 		if (err) {
880eda14cbcSMatt Macy 			dmu_tx_abort(tx);
881eda14cbcSMatt Macy 			return (err);
882eda14cbcSMatt Macy 		}
883eda14cbcSMatt Macy 
884eda14cbcSMatt Macy 		uint64_t txg = dmu_tx_get_txg(tx);
885eda14cbcSMatt Macy 
886eda14cbcSMatt Macy 		mutex_enter(&dp->dp_lock);
887eda14cbcSMatt Macy 		uint64_t long_free_dirty =
888eda14cbcSMatt Macy 		    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];
889eda14cbcSMatt Macy 		mutex_exit(&dp->dp_lock);
890eda14cbcSMatt Macy 
891eda14cbcSMatt Macy 		/*
892eda14cbcSMatt Macy 		 * To avoid filling up a TXG with just frees, wait for
893eda14cbcSMatt Macy 		 * the next TXG to open before freeing more chunks if
894eda14cbcSMatt Macy 		 * we have reached the threshold of frees.
895eda14cbcSMatt Macy 		 */
896eda14cbcSMatt Macy 		if (dirty_frees_threshold != 0 &&
897eda14cbcSMatt Macy 		    long_free_dirty >= dirty_frees_threshold) {
898eda14cbcSMatt Macy 			DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
899eda14cbcSMatt Macy 			dmu_tx_commit(tx);
900eda14cbcSMatt Macy 			txg_wait_open(dp, 0, B_TRUE);
901eda14cbcSMatt Macy 			continue;
902eda14cbcSMatt Macy 		}
903eda14cbcSMatt Macy 
904eda14cbcSMatt Macy 		/*
905eda14cbcSMatt Macy 		 * In order to prevent unnecessary write throttling, for each
906eda14cbcSMatt Macy 		 * TXG, we track the cumulative size of L1 blocks being dirtied
907eda14cbcSMatt Macy 		 * in dnode_free_range() below. We compare this number to a
908eda14cbcSMatt Macy 		 * tunable threshold, past which we prevent new L1 dirty freeing
909eda14cbcSMatt Macy 		 * blocks from being added into the open TXG. See
910eda14cbcSMatt Macy 		 * dmu_free_long_range_impl() for details. The threshold
911eda14cbcSMatt Macy 		 * prevents write throttle activation due to dirty freeing L1
912eda14cbcSMatt Macy 		 * blocks taking up a large percentage of zfs_dirty_data_max.
913eda14cbcSMatt Macy 		 */
914eda14cbcSMatt Macy 		mutex_enter(&dp->dp_lock);
915eda14cbcSMatt Macy 		dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=
916eda14cbcSMatt Macy 		    l1blks << dn->dn_indblkshift;
917eda14cbcSMatt Macy 		mutex_exit(&dp->dp_lock);
918eda14cbcSMatt Macy 		DTRACE_PROBE3(free__long__range,
919eda14cbcSMatt Macy 		    uint64_t, long_free_dirty, uint64_t, chunk_len,
920eda14cbcSMatt Macy 		    uint64_t, txg);
921eda14cbcSMatt Macy 		dnode_free_range(dn, chunk_begin, chunk_len, tx);
922eda14cbcSMatt Macy 
923eda14cbcSMatt Macy 		dmu_tx_commit(tx);
924eda14cbcSMatt Macy 
925eda14cbcSMatt Macy 		length -= chunk_len;
926eda14cbcSMatt Macy 	}
927eda14cbcSMatt Macy 	return (0);
928eda14cbcSMatt Macy }
929eda14cbcSMatt Macy 
930eda14cbcSMatt Macy int
931eda14cbcSMatt Macy dmu_free_long_range(objset_t *os, uint64_t object,
932eda14cbcSMatt Macy     uint64_t offset, uint64_t length)
933eda14cbcSMatt Macy {
934eda14cbcSMatt Macy 	dnode_t *dn;
935eda14cbcSMatt Macy 	int err;
936eda14cbcSMatt Macy 
937eda14cbcSMatt Macy 	err = dnode_hold(os, object, FTAG, &dn);
938eda14cbcSMatt Macy 	if (err != 0)
939eda14cbcSMatt Macy 		return (err);
940eda14cbcSMatt Macy 	err = dmu_free_long_range_impl(os, dn, offset, length);
941eda14cbcSMatt Macy 
942eda14cbcSMatt Macy 	/*
943eda14cbcSMatt Macy 	 * It is important to zero out the maxblkid when freeing the entire
944eda14cbcSMatt Macy 	 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
945eda14cbcSMatt Macy 	 * will take the fast path, and (b) dnode_reallocate() can verify
946eda14cbcSMatt Macy 	 * that the entire file has been freed.
947eda14cbcSMatt Macy 	 */
948eda14cbcSMatt Macy 	if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
949eda14cbcSMatt Macy 		dn->dn_maxblkid = 0;
950eda14cbcSMatt Macy 
951eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
952eda14cbcSMatt Macy 	return (err);
953eda14cbcSMatt Macy }
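
/*
 * Sketch of freeing an entire object's data through the chunked path
 * above (illustrative; assumes a held objset "os"):
 *
 *	// punch out everything; the loop in dmu_free_long_range_impl()
 *	// keeps each transaction's dirty L1 footprint bounded
 *	int err = dmu_free_long_range(os, obj, 0, DMU_OBJECT_END);
 *
 * dmu_free_long_object() below additionally frees the object itself in
 * a final transaction.
 */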
954eda14cbcSMatt Macy 
955eda14cbcSMatt Macy int
956eda14cbcSMatt Macy dmu_free_long_object(objset_t *os, uint64_t object)
957eda14cbcSMatt Macy {
958eda14cbcSMatt Macy 	dmu_tx_t *tx;
959eda14cbcSMatt Macy 	int err;
960eda14cbcSMatt Macy 
961eda14cbcSMatt Macy 	err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
962eda14cbcSMatt Macy 	if (err != 0)
963eda14cbcSMatt Macy 		return (err);
964eda14cbcSMatt Macy 
965eda14cbcSMatt Macy 	tx = dmu_tx_create(os);
966eda14cbcSMatt Macy 	dmu_tx_hold_bonus(tx, object);
967eda14cbcSMatt Macy 	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
968eda14cbcSMatt Macy 	dmu_tx_mark_netfree(tx);
969eda14cbcSMatt Macy 	err = dmu_tx_assign(tx, TXG_WAIT);
970eda14cbcSMatt Macy 	if (err == 0) {
971eda14cbcSMatt Macy 		if (err == 0)
972eda14cbcSMatt Macy 		err = dmu_object_free(os, object, tx);
974eda14cbcSMatt Macy 		dmu_tx_commit(tx);
975eda14cbcSMatt Macy 	} else {
976eda14cbcSMatt Macy 		dmu_tx_abort(tx);
977eda14cbcSMatt Macy 	}
978eda14cbcSMatt Macy 
979eda14cbcSMatt Macy 	return (err);
980eda14cbcSMatt Macy }
981eda14cbcSMatt Macy 
982eda14cbcSMatt Macy int
983eda14cbcSMatt Macy dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
984eda14cbcSMatt Macy     uint64_t size, dmu_tx_t *tx)
985eda14cbcSMatt Macy {
986eda14cbcSMatt Macy 	dnode_t *dn;
987eda14cbcSMatt Macy 	int err = dnode_hold(os, object, FTAG, &dn);
988eda14cbcSMatt Macy 	if (err)
989eda14cbcSMatt Macy 		return (err);
990eda14cbcSMatt Macy 	ASSERT(offset < UINT64_MAX);
991eda14cbcSMatt Macy 	ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
992eda14cbcSMatt Macy 	dnode_free_range(dn, offset, size, tx);
993eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
994eda14cbcSMatt Macy 	return (0);
995eda14cbcSMatt Macy }
996eda14cbcSMatt Macy 
997eda14cbcSMatt Macy static int
998eda14cbcSMatt Macy dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
999eda14cbcSMatt Macy     void *buf, uint32_t flags)
1000eda14cbcSMatt Macy {
1001eda14cbcSMatt Macy 	dmu_buf_t **dbp;
1002eda14cbcSMatt Macy 	int numbufs, err = 0;
1003eda14cbcSMatt Macy 
1004eda14cbcSMatt Macy 	/*
1005eda14cbcSMatt Macy 	 * Deal with odd block sizes, where there can't be data past the first
1006eda14cbcSMatt Macy 	 * block.  If we ever do the tail block optimization, we will need to
1007eda14cbcSMatt Macy 	 * handle that here as well.
1008eda14cbcSMatt Macy 	 */
1009eda14cbcSMatt Macy 	if (dn->dn_maxblkid == 0) {
1010eda14cbcSMatt Macy 		uint64_t newsz = offset > dn->dn_datablksz ? 0 :
1011eda14cbcSMatt Macy 		    MIN(size, dn->dn_datablksz - offset);
1012eda14cbcSMatt Macy 		bzero((char *)buf + newsz, size - newsz);
1013eda14cbcSMatt Macy 		size = newsz;
1014eda14cbcSMatt Macy 	}
1015eda14cbcSMatt Macy 
1016eda14cbcSMatt Macy 	while (size > 0) {
1017eda14cbcSMatt Macy 		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
1018eda14cbcSMatt Macy 		int i;
1019eda14cbcSMatt Macy 
1020eda14cbcSMatt Macy 		/*
1021eda14cbcSMatt Macy 		 * NB: we could do this block-at-a-time, but it's nice
1022eda14cbcSMatt Macy 		 * to be reading in parallel.
1023eda14cbcSMatt Macy 		 */
1024eda14cbcSMatt Macy 		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
1025eda14cbcSMatt Macy 		    TRUE, FTAG, &numbufs, &dbp, flags);
1026eda14cbcSMatt Macy 		if (err)
1027eda14cbcSMatt Macy 			break;
1028eda14cbcSMatt Macy 
1029eda14cbcSMatt Macy 		for (i = 0; i < numbufs; i++) {
1030eda14cbcSMatt Macy 			uint64_t tocpy;
1031eda14cbcSMatt Macy 			int64_t bufoff;
1032eda14cbcSMatt Macy 			dmu_buf_t *db = dbp[i];
1033eda14cbcSMatt Macy 
1034eda14cbcSMatt Macy 			ASSERT(size > 0);
1035eda14cbcSMatt Macy 
1036eda14cbcSMatt Macy 			bufoff = offset - db->db_offset;
1037eda14cbcSMatt Macy 			tocpy = MIN(db->db_size - bufoff, size);
1038eda14cbcSMatt Macy 
1039eda14cbcSMatt Macy 			(void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);
1040eda14cbcSMatt Macy 
1041eda14cbcSMatt Macy 			offset += tocpy;
1042eda14cbcSMatt Macy 			size -= tocpy;
1043eda14cbcSMatt Macy 			buf = (char *)buf + tocpy;
1044eda14cbcSMatt Macy 		}
1045eda14cbcSMatt Macy 		dmu_buf_rele_array(dbp, numbufs, FTAG);
1046eda14cbcSMatt Macy 	}
1047eda14cbcSMatt Macy 	return (err);
1048eda14cbcSMatt Macy }
1049eda14cbcSMatt Macy 
1050eda14cbcSMatt Macy int
1051eda14cbcSMatt Macy dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1052eda14cbcSMatt Macy     void *buf, uint32_t flags)
1053eda14cbcSMatt Macy {
1054eda14cbcSMatt Macy 	dnode_t *dn;
1055eda14cbcSMatt Macy 	int err;
1056eda14cbcSMatt Macy 
1057eda14cbcSMatt Macy 	err = dnode_hold(os, object, FTAG, &dn);
1058eda14cbcSMatt Macy 	if (err != 0)
1059eda14cbcSMatt Macy 		return (err);
1060eda14cbcSMatt Macy 
1061eda14cbcSMatt Macy 	err = dmu_read_impl(dn, offset, size, buf, flags);
1062eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
1063eda14cbcSMatt Macy 	return (err);
1064eda14cbcSMatt Macy }
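
/*
 * Sketch of a simple synchronous read into a kernel buffer
 * (illustrative; assumes a held objset "os" and a caller-supplied
 * buffer "buf" of at least "size" bytes):
 *
 *	int err = dmu_read(os, obj, off, size, buf, DMU_READ_PREFETCH);
 *
 * Per dmu_read_impl() above, a request that extends past the single
 * block of a small object is zero-filled rather than failing.
 */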
1065eda14cbcSMatt Macy 
1066eda14cbcSMatt Macy int
1067eda14cbcSMatt Macy dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
1068eda14cbcSMatt Macy     uint32_t flags)
1069eda14cbcSMatt Macy {
1070eda14cbcSMatt Macy 	return (dmu_read_impl(dn, offset, size, buf, flags));
1071eda14cbcSMatt Macy }
1072eda14cbcSMatt Macy 
1073eda14cbcSMatt Macy static void
1074eda14cbcSMatt Macy dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
1075eda14cbcSMatt Macy     const void *buf, dmu_tx_t *tx)
1076eda14cbcSMatt Macy {
1077eda14cbcSMatt Macy 	int i;
1078eda14cbcSMatt Macy 
1079eda14cbcSMatt Macy 	for (i = 0; i < numbufs; i++) {
1080eda14cbcSMatt Macy 		uint64_t tocpy;
1081eda14cbcSMatt Macy 		int64_t bufoff;
1082eda14cbcSMatt Macy 		dmu_buf_t *db = dbp[i];
1083eda14cbcSMatt Macy 
1084eda14cbcSMatt Macy 		ASSERT(size > 0);
1085eda14cbcSMatt Macy 
1086eda14cbcSMatt Macy 		bufoff = offset - db->db_offset;
1087eda14cbcSMatt Macy 		tocpy = MIN(db->db_size - bufoff, size);
1088eda14cbcSMatt Macy 
1089eda14cbcSMatt Macy 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1090eda14cbcSMatt Macy 
1091eda14cbcSMatt Macy 		if (tocpy == db->db_size)
1092eda14cbcSMatt Macy 			dmu_buf_will_fill(db, tx);
1093eda14cbcSMatt Macy 		else
1094eda14cbcSMatt Macy 			dmu_buf_will_dirty(db, tx);
1095eda14cbcSMatt Macy 
1096eda14cbcSMatt Macy 		(void) memcpy((char *)db->db_data + bufoff, buf, tocpy);
1097eda14cbcSMatt Macy 
1098eda14cbcSMatt Macy 		if (tocpy == db->db_size)
1099eda14cbcSMatt Macy 			dmu_buf_fill_done(db, tx);
1100eda14cbcSMatt Macy 
1101eda14cbcSMatt Macy 		offset += tocpy;
1102eda14cbcSMatt Macy 		size -= tocpy;
1103eda14cbcSMatt Macy 		buf = (char *)buf + tocpy;
1104eda14cbcSMatt Macy 	}
1105eda14cbcSMatt Macy }
1106eda14cbcSMatt Macy 
1107eda14cbcSMatt Macy void
1108eda14cbcSMatt Macy dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1109eda14cbcSMatt Macy     const void *buf, dmu_tx_t *tx)
1110eda14cbcSMatt Macy {
1111eda14cbcSMatt Macy 	dmu_buf_t **dbp;
1112eda14cbcSMatt Macy 	int numbufs;
1113eda14cbcSMatt Macy 
1114eda14cbcSMatt Macy 	if (size == 0)
1115eda14cbcSMatt Macy 		return;
1116eda14cbcSMatt Macy 
1117eda14cbcSMatt Macy 	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
1118eda14cbcSMatt Macy 	    FALSE, FTAG, &numbufs, &dbp));
1119eda14cbcSMatt Macy 	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
1120eda14cbcSMatt Macy 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1121eda14cbcSMatt Macy }
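
/*
 * Sketch of a buffered write, including the transaction bookkeeping the
 * caller is responsible for (illustrative; assumes a held objset "os"):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, obj, off, size);
 *	int err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *	} else {
 *		dmu_write(os, obj, off, size, buf, tx);
 *		dmu_tx_commit(tx);
 *	}
 */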
1122eda14cbcSMatt Macy 
1123eda14cbcSMatt Macy /*
1124eda14cbcSMatt Macy  * Note: Lustre is an external consumer of this interface.
1125eda14cbcSMatt Macy  */
1126eda14cbcSMatt Macy void
1127eda14cbcSMatt Macy dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
1128eda14cbcSMatt Macy     const void *buf, dmu_tx_t *tx)
1129eda14cbcSMatt Macy {
1130eda14cbcSMatt Macy 	dmu_buf_t **dbp;
1131eda14cbcSMatt Macy 	int numbufs;
1132eda14cbcSMatt Macy 
1133eda14cbcSMatt Macy 	if (size == 0)
1134eda14cbcSMatt Macy 		return;
1135eda14cbcSMatt Macy 
1136eda14cbcSMatt Macy 	VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
1137eda14cbcSMatt Macy 	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
1138eda14cbcSMatt Macy 	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
1139eda14cbcSMatt Macy 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1140eda14cbcSMatt Macy }
1141eda14cbcSMatt Macy 
1142eda14cbcSMatt Macy void
1143eda14cbcSMatt Macy dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1144eda14cbcSMatt Macy     dmu_tx_t *tx)
1145eda14cbcSMatt Macy {
1146eda14cbcSMatt Macy 	dmu_buf_t **dbp;
1147eda14cbcSMatt Macy 	int numbufs, i;
1148eda14cbcSMatt Macy 
1149eda14cbcSMatt Macy 	if (size == 0)
1150eda14cbcSMatt Macy 		return;
1151eda14cbcSMatt Macy 
1152eda14cbcSMatt Macy 	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
1153eda14cbcSMatt Macy 	    FALSE, FTAG, &numbufs, &dbp));
1154eda14cbcSMatt Macy 
1155eda14cbcSMatt Macy 	for (i = 0; i < numbufs; i++) {
1156eda14cbcSMatt Macy 		dmu_buf_t *db = dbp[i];
1157eda14cbcSMatt Macy 
1158eda14cbcSMatt Macy 		dmu_buf_will_not_fill(db, tx);
1159eda14cbcSMatt Macy 	}
1160eda14cbcSMatt Macy 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1161eda14cbcSMatt Macy }
1162eda14cbcSMatt Macy 
1163eda14cbcSMatt Macy void
1164eda14cbcSMatt Macy dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
1165eda14cbcSMatt Macy     void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
1166eda14cbcSMatt Macy     int compressed_size, int byteorder, dmu_tx_t *tx)
1167eda14cbcSMatt Macy {
1168eda14cbcSMatt Macy 	dmu_buf_t *db;
1169eda14cbcSMatt Macy 
1170eda14cbcSMatt Macy 	ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
1171eda14cbcSMatt Macy 	ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
1172eda14cbcSMatt Macy 	VERIFY0(dmu_buf_hold_noread(os, object, offset,
1173eda14cbcSMatt Macy 	    FTAG, &db));
1174eda14cbcSMatt Macy 
1175eda14cbcSMatt Macy 	dmu_buf_write_embedded(db,
1176eda14cbcSMatt Macy 	    data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
1177eda14cbcSMatt Macy 	    uncompressed_size, compressed_size, byteorder, tx);
1178eda14cbcSMatt Macy 
1179eda14cbcSMatt Macy 	dmu_buf_rele(db, FTAG);
1180eda14cbcSMatt Macy }
1181eda14cbcSMatt Macy 
1182eda14cbcSMatt Macy void
1183eda14cbcSMatt Macy dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1184eda14cbcSMatt Macy     dmu_tx_t *tx)
1185eda14cbcSMatt Macy {
1186eda14cbcSMatt Macy 	int numbufs, i;
1187eda14cbcSMatt Macy 	dmu_buf_t **dbp;
1188eda14cbcSMatt Macy 
1189eda14cbcSMatt Macy 	VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
1190eda14cbcSMatt Macy 	    &numbufs, &dbp));
1191eda14cbcSMatt Macy 	for (i = 0; i < numbufs; i++)
1192eda14cbcSMatt Macy 		dmu_buf_redact(dbp[i], tx);
1193eda14cbcSMatt Macy 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1194eda14cbcSMatt Macy }
1195eda14cbcSMatt Macy 
1196eda14cbcSMatt Macy #ifdef _KERNEL
1197eda14cbcSMatt Macy int
1198184c1b94SMartin Matuska dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size)
1199eda14cbcSMatt Macy {
1200eda14cbcSMatt Macy 	dmu_buf_t **dbp;
1201eda14cbcSMatt Macy 	int numbufs, i, err;
1202eda14cbcSMatt Macy 
1203eda14cbcSMatt Macy 	/*
1204eda14cbcSMatt Macy 	 * NB: we could do this block-at-a-time, but it's nice
1205eda14cbcSMatt Macy 	 * to be reading in parallel.
1206eda14cbcSMatt Macy 	 */
1207184c1b94SMartin Matuska 	err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
1208eda14cbcSMatt Macy 	    TRUE, FTAG, &numbufs, &dbp, 0);
1209eda14cbcSMatt Macy 	if (err)
1210eda14cbcSMatt Macy 		return (err);
1211eda14cbcSMatt Macy 
1212eda14cbcSMatt Macy 	for (i = 0; i < numbufs; i++) {
1213eda14cbcSMatt Macy 		uint64_t tocpy;
1214eda14cbcSMatt Macy 		int64_t bufoff;
1215eda14cbcSMatt Macy 		dmu_buf_t *db = dbp[i];
1216eda14cbcSMatt Macy 
1217eda14cbcSMatt Macy 		ASSERT(size > 0);
1218eda14cbcSMatt Macy 
1219184c1b94SMartin Matuska 		bufoff = zfs_uio_offset(uio) - db->db_offset;
1220eda14cbcSMatt Macy 		tocpy = MIN(db->db_size - bufoff, size);
1221eda14cbcSMatt Macy 
1222184c1b94SMartin Matuska 		err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy,
1223eda14cbcSMatt Macy 		    UIO_READ, uio);
1224184c1b94SMartin Matuska 
1225eda14cbcSMatt Macy 		if (err)
1226eda14cbcSMatt Macy 			break;
1227eda14cbcSMatt Macy 
1228eda14cbcSMatt Macy 		size -= tocpy;
1229eda14cbcSMatt Macy 	}
1230eda14cbcSMatt Macy 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1231eda14cbcSMatt Macy 
1232eda14cbcSMatt Macy 	return (err);
1233eda14cbcSMatt Macy }
1234eda14cbcSMatt Macy 
1235eda14cbcSMatt Macy /*
1236eda14cbcSMatt Macy  * Read 'size' bytes into the uio buffer,
1237eda14cbcSMatt Macy  * from object zdb->db_object,
1238184c1b94SMartin Matuska  * starting at zfs_uio_offset(uio).
1239eda14cbcSMatt Macy  *
1240eda14cbcSMatt Macy  * If the caller already has a dbuf in the target object
1241eda14cbcSMatt Macy  * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
1242eda14cbcSMatt Macy  * because we don't have to find the dnode_t for the object.
1243eda14cbcSMatt Macy  */
1244eda14cbcSMatt Macy int
1245184c1b94SMartin Matuska dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size)
1246eda14cbcSMatt Macy {
1247eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1248eda14cbcSMatt Macy 	dnode_t *dn;
1249eda14cbcSMatt Macy 	int err;
1250eda14cbcSMatt Macy 
1251eda14cbcSMatt Macy 	if (size == 0)
1252eda14cbcSMatt Macy 		return (0);
1253eda14cbcSMatt Macy 
1254eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
1255eda14cbcSMatt Macy 	dn = DB_DNODE(db);
1256eda14cbcSMatt Macy 	err = dmu_read_uio_dnode(dn, uio, size);
1257eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
1258eda14cbcSMatt Macy 
1259eda14cbcSMatt Macy 	return (err);
1260eda14cbcSMatt Macy }
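
/*
 * Illustrative sketch (hypothetical caller, not compiled): when a dbuf
 * in the target object is already held -- e.g. a bonus buffer -- the
 * dbuf variant above avoids the dnode lookup; otherwise fall back to
 * dmu_read_uio() with the object number.
 */
#if 0
static int
example_uio_read(objset_t *os, uint64_t object, dmu_buf_t *bonus_db,
    zfs_uio_t *uio, uint64_t nbytes)
{
	/* The held dbuf supplies the dnode; no dnode_hold() is needed. */
	if (bonus_db != NULL)
		return (dmu_read_uio_dbuf(bonus_db, uio, nbytes));

	/* Otherwise look the dnode up by object number. */
	return (dmu_read_uio(os, object, uio, nbytes));
}
#endif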
1261eda14cbcSMatt Macy 
1262eda14cbcSMatt Macy /*
1263eda14cbcSMatt Macy  * Read 'size' bytes into the uio buffer,
1264eda14cbcSMatt Macy  * from the specified object,
1265184c1b94SMartin Matuska  * starting at offset zfs_uio_offset(uio).
1266eda14cbcSMatt Macy  */
1267eda14cbcSMatt Macy int
1268184c1b94SMartin Matuska dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size)
1269eda14cbcSMatt Macy {
1270eda14cbcSMatt Macy 	dnode_t *dn;
1271eda14cbcSMatt Macy 	int err;
1272eda14cbcSMatt Macy 
1273eda14cbcSMatt Macy 	if (size == 0)
1274eda14cbcSMatt Macy 		return (0);
1275eda14cbcSMatt Macy 
1276eda14cbcSMatt Macy 	err = dnode_hold(os, object, FTAG, &dn);
1277eda14cbcSMatt Macy 	if (err)
1278eda14cbcSMatt Macy 		return (err);
1279eda14cbcSMatt Macy 
1280eda14cbcSMatt Macy 	err = dmu_read_uio_dnode(dn, uio, size);
1281eda14cbcSMatt Macy 
1282eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
1283eda14cbcSMatt Macy 
1284eda14cbcSMatt Macy 	return (err);
1285eda14cbcSMatt Macy }
1286eda14cbcSMatt Macy 
1287eda14cbcSMatt Macy int
1288184c1b94SMartin Matuska dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
1289eda14cbcSMatt Macy {
1290eda14cbcSMatt Macy 	dmu_buf_t **dbp;
1291eda14cbcSMatt Macy 	int numbufs;
1292eda14cbcSMatt Macy 	int err = 0;
1293eda14cbcSMatt Macy 	int i;
1294eda14cbcSMatt Macy 
1295184c1b94SMartin Matuska 	err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
1296eda14cbcSMatt Macy 	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
1297eda14cbcSMatt Macy 	if (err)
1298eda14cbcSMatt Macy 		return (err);
1299eda14cbcSMatt Macy 
1300eda14cbcSMatt Macy 	for (i = 0; i < numbufs; i++) {
1301eda14cbcSMatt Macy 		uint64_t tocpy;
1302eda14cbcSMatt Macy 		int64_t bufoff;
1303eda14cbcSMatt Macy 		dmu_buf_t *db = dbp[i];
1304eda14cbcSMatt Macy 
1305eda14cbcSMatt Macy 		ASSERT(size > 0);
1306eda14cbcSMatt Macy 
1307184c1b94SMartin Matuska 		bufoff = zfs_uio_offset(uio) - db->db_offset;
1308eda14cbcSMatt Macy 		tocpy = MIN(db->db_size - bufoff, size);
1309eda14cbcSMatt Macy 
1310eda14cbcSMatt Macy 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1311eda14cbcSMatt Macy 
1312eda14cbcSMatt Macy 		if (tocpy == db->db_size)
1313eda14cbcSMatt Macy 			dmu_buf_will_fill(db, tx);
1314eda14cbcSMatt Macy 		else
1315eda14cbcSMatt Macy 			dmu_buf_will_dirty(db, tx);
1316eda14cbcSMatt Macy 
1317eda14cbcSMatt Macy 		/*
1318184c1b94SMartin Matuska 		 * XXX zfs_uiomove could block forever (e.g. nfs-backed
1319eda14cbcSMatt Macy 		 * pages).  There needs to be a uiolockdown() function
1320184c1b94SMartin Matuska 		 * to lock the pages in memory, so that zfs_uiomove won't
1321eda14cbcSMatt Macy 		 * block.
1322eda14cbcSMatt Macy 		 */
1323184c1b94SMartin Matuska 		err = zfs_uio_fault_move((char *)db->db_data + bufoff,
1324184c1b94SMartin Matuska 		    tocpy, UIO_WRITE, uio);
1325184c1b94SMartin Matuska 
1326eda14cbcSMatt Macy 		if (tocpy == db->db_size)
1327eda14cbcSMatt Macy 			dmu_buf_fill_done(db, tx);
1328eda14cbcSMatt Macy 
1329eda14cbcSMatt Macy 		if (err)
1330eda14cbcSMatt Macy 			break;
1331eda14cbcSMatt Macy 
1332eda14cbcSMatt Macy 		size -= tocpy;
1333eda14cbcSMatt Macy 	}
1334eda14cbcSMatt Macy 
1335eda14cbcSMatt Macy 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1336eda14cbcSMatt Macy 	return (err);
1337eda14cbcSMatt Macy }
1338eda14cbcSMatt Macy 
1339eda14cbcSMatt Macy /*
1340eda14cbcSMatt Macy  * Write 'size' bytes from the uio buffer,
1341eda14cbcSMatt Macy  * to object zdb->db_object,
1342184c1b94SMartin Matuska  * starting at offset zfs_uio_offset(uio).
1343eda14cbcSMatt Macy  *
1344eda14cbcSMatt Macy  * If the caller already has a dbuf in the target object
1345eda14cbcSMatt Macy  * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
1346eda14cbcSMatt Macy  * because we don't have to find the dnode_t for the object.
1347eda14cbcSMatt Macy  */
1348eda14cbcSMatt Macy int
1349184c1b94SMartin Matuska dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
1350eda14cbcSMatt Macy     dmu_tx_t *tx)
1351eda14cbcSMatt Macy {
1352eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1353eda14cbcSMatt Macy 	dnode_t *dn;
1354eda14cbcSMatt Macy 	int err;
1355eda14cbcSMatt Macy 
1356eda14cbcSMatt Macy 	if (size == 0)
1357eda14cbcSMatt Macy 		return (0);
1358eda14cbcSMatt Macy 
1359eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
1360eda14cbcSMatt Macy 	dn = DB_DNODE(db);
1361eda14cbcSMatt Macy 	err = dmu_write_uio_dnode(dn, uio, size, tx);
1362eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
1363eda14cbcSMatt Macy 
1364eda14cbcSMatt Macy 	return (err);
1365eda14cbcSMatt Macy }
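
/*
 * Illustrative sketch (hypothetical caller, not compiled): as with any
 * DMU write, the range named by the uio's current offset must be held
 * in an assigned transaction before dmu_write_uio_dbuf() is called.
 */
#if 0
static int
example_uio_write(objset_t *os, dmu_buf_t *bonus_db, zfs_uio_t *uio,
    uint64_t nbytes)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	/* Hold the byte range starting at the uio's current offset. */
	dmu_tx_hold_write(tx, bonus_db->db_object, zfs_uio_offset(uio),
	    nbytes);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	err = dmu_write_uio_dbuf(bonus_db, uio, nbytes, tx);
	dmu_tx_commit(tx);
	return (err);
}
#endif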
1366eda14cbcSMatt Macy 
1367eda14cbcSMatt Macy /*
1368eda14cbcSMatt Macy  * Write 'size' bytes from the uio buffer,
1369eda14cbcSMatt Macy  * to the specified object,
1370184c1b94SMartin Matuska  * starting at offset zfs_uio_offset(uio).
1371eda14cbcSMatt Macy  */
1372eda14cbcSMatt Macy int
1373184c1b94SMartin Matuska dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
1374eda14cbcSMatt Macy     dmu_tx_t *tx)
1375eda14cbcSMatt Macy {
1376eda14cbcSMatt Macy 	dnode_t *dn;
1377eda14cbcSMatt Macy 	int err;
1378eda14cbcSMatt Macy 
1379eda14cbcSMatt Macy 	if (size == 0)
1380eda14cbcSMatt Macy 		return (0);
1381eda14cbcSMatt Macy 
1382eda14cbcSMatt Macy 	err = dnode_hold(os, object, FTAG, &dn);
1383eda14cbcSMatt Macy 	if (err)
1384eda14cbcSMatt Macy 		return (err);
1385eda14cbcSMatt Macy 
1386eda14cbcSMatt Macy 	err = dmu_write_uio_dnode(dn, uio, size, tx);
1387eda14cbcSMatt Macy 
1388eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
1389eda14cbcSMatt Macy 
1390eda14cbcSMatt Macy 	return (err);
1391eda14cbcSMatt Macy }
1392eda14cbcSMatt Macy #endif /* _KERNEL */
1393eda14cbcSMatt Macy 
1394eda14cbcSMatt Macy /*
1395eda14cbcSMatt Macy  * Allocate a loaned anonymous arc buffer.
1396eda14cbcSMatt Macy  */
1397eda14cbcSMatt Macy arc_buf_t *
1398eda14cbcSMatt Macy dmu_request_arcbuf(dmu_buf_t *handle, int size)
1399eda14cbcSMatt Macy {
1400eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
1401eda14cbcSMatt Macy 
1402eda14cbcSMatt Macy 	return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
1403eda14cbcSMatt Macy }
1404eda14cbcSMatt Macy 
1405eda14cbcSMatt Macy /*
1406eda14cbcSMatt Macy  * Free a loaned arc buffer.
1407eda14cbcSMatt Macy  */
1408eda14cbcSMatt Macy void
1409eda14cbcSMatt Macy dmu_return_arcbuf(arc_buf_t *buf)
1410eda14cbcSMatt Macy {
1411eda14cbcSMatt Macy 	arc_return_buf(buf, FTAG);
1412eda14cbcSMatt Macy 	arc_buf_destroy(buf, FTAG);
1413eda14cbcSMatt Macy }
1414eda14cbcSMatt Macy 
1415eda14cbcSMatt Macy /*
14167877fdebSMatt Macy  * A "lightweight" write is faster than a regular write (e.g.
14177877fdebSMatt Macy  * dmu_write_by_dnode() or dmu_assign_arcbuf_by_dnode()), because it avoids the
14187877fdebSMatt Macy  * CPU cost of creating a dmu_buf_impl_t and arc_buf_[hdr]_t.  However, the
14197877fdebSMatt Macy  * data cannot be read or overwritten until the transaction's txg has been
14207877fdebSMatt Macy  * synced.  This makes it appropriate for workloads that are known to be
14217877fdebSMatt Macy  * (temporarily) write-only, like "zfs receive".
14227877fdebSMatt Macy  *
14237877fdebSMatt Macy  * A single block is written, starting at the specified offset in bytes.  If
14247877fdebSMatt Macy  * the call is successful, it returns 0 and the provided abd has been
14257877fdebSMatt Macy  * consumed (the caller should not free it).
14267877fdebSMatt Macy  */
14277877fdebSMatt Macy int
14287877fdebSMatt Macy dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
14297877fdebSMatt Macy     const zio_prop_t *zp, enum zio_flag flags, dmu_tx_t *tx)
14307877fdebSMatt Macy {
14317877fdebSMatt Macy 	dbuf_dirty_record_t *dr =
14327877fdebSMatt Macy 	    dbuf_dirty_lightweight(dn, dbuf_whichblock(dn, 0, offset), tx);
14337877fdebSMatt Macy 	if (dr == NULL)
14347877fdebSMatt Macy 		return (SET_ERROR(EIO));
14357877fdebSMatt Macy 	dr->dt.dll.dr_abd = abd;
14367877fdebSMatt Macy 	dr->dt.dll.dr_props = *zp;
14377877fdebSMatt Macy 	dr->dt.dll.dr_flags = flags;
14387877fdebSMatt Macy 	return (0);
14397877fdebSMatt Macy }
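
/*
 * Illustrative sketch (hypothetical helper, not compiled): a lightweight
 * write wraps the caller's block-sized buffer in an abd, derives a write
 * policy with dmu_write_policy(), and hands both to
 * dmu_lightweight_write_by_dnode().  On success the abd is consumed and
 * written when the txg syncs.  The zero zio flags are an assumption.
 */
#if 0
static int
example_lightweight_write(dnode_t *dn, uint64_t offset, void *buf,
    uint64_t size, dmu_tx_t *tx)
{
	abd_t *abd = abd_get_from_buf(buf, size);
	zio_prop_t zp;
	int err;

	/* Derive the write policy for a level-0 data block of this dnode. */
	dmu_write_policy(dn->dn_objset, dn, 0, 0, &zp);

	err = dmu_lightweight_write_by_dnode(dn, offset, abd, &zp, 0, tx);
	if (err != 0)
		abd_free(abd);	/* not consumed on failure */
	return (err);
}
#endif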
14407877fdebSMatt Macy 
14417877fdebSMatt Macy /*
1442eda14cbcSMatt Macy  * When possible, directly assign the passed loaned arc buffer to a dbuf.
1443eda14cbcSMatt Macy  * If this is not possible, copy the contents of the passed arc buf via
1444eda14cbcSMatt Macy  * dmu_write().
1445eda14cbcSMatt Macy  */
1446eda14cbcSMatt Macy int
1447eda14cbcSMatt Macy dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
1448eda14cbcSMatt Macy     dmu_tx_t *tx)
1449eda14cbcSMatt Macy {
1450eda14cbcSMatt Macy 	dmu_buf_impl_t *db;
1451eda14cbcSMatt Macy 	objset_t *os = dn->dn_objset;
1452eda14cbcSMatt Macy 	uint64_t object = dn->dn_object;
1453eda14cbcSMatt Macy 	uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
1454eda14cbcSMatt Macy 	uint64_t blkid;
1455eda14cbcSMatt Macy 
1456eda14cbcSMatt Macy 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
1457eda14cbcSMatt Macy 	blkid = dbuf_whichblock(dn, 0, offset);
1458eda14cbcSMatt Macy 	db = dbuf_hold(dn, blkid, FTAG);
1459eda14cbcSMatt Macy 	if (db == NULL)
1460eda14cbcSMatt Macy 		return (SET_ERROR(EIO));
1461eda14cbcSMatt Macy 	rw_exit(&dn->dn_struct_rwlock);
1462eda14cbcSMatt Macy 
1463eda14cbcSMatt Macy 	/*
14647877fdebSMatt Macy 	 * We can only assign if the offset is aligned and the arc buf is the
14657877fdebSMatt Macy 	 * same size as the dbuf.
1466eda14cbcSMatt Macy 	 */
1467eda14cbcSMatt Macy 	if (offset == db->db.db_offset && blksz == db->db.db_size) {
1468ba27dd8bSMartin Matuska 		zfs_racct_write(blksz, 1);
1469eda14cbcSMatt Macy 		dbuf_assign_arcbuf(db, buf, tx);
1470eda14cbcSMatt Macy 		dbuf_rele(db, FTAG);
1471eda14cbcSMatt Macy 	} else {
1472eda14cbcSMatt Macy 		/* compressed bufs must always be assignable to their dbuf */
1473eda14cbcSMatt Macy 		ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
1474eda14cbcSMatt Macy 		ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
1475eda14cbcSMatt Macy 
1476eda14cbcSMatt Macy 		dbuf_rele(db, FTAG);
1477eda14cbcSMatt Macy 		dmu_write(os, object, offset, blksz, buf->b_data, tx);
1478eda14cbcSMatt Macy 		dmu_return_arcbuf(buf);
1479eda14cbcSMatt Macy 	}
1480eda14cbcSMatt Macy 
1481eda14cbcSMatt Macy 	return (0);
1482eda14cbcSMatt Macy }
1483eda14cbcSMatt Macy 
1484eda14cbcSMatt Macy int
1485eda14cbcSMatt Macy dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
1486eda14cbcSMatt Macy     dmu_tx_t *tx)
1487eda14cbcSMatt Macy {
1488eda14cbcSMatt Macy 	int err;
1489eda14cbcSMatt Macy 	dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
1490eda14cbcSMatt Macy 
1491eda14cbcSMatt Macy 	DB_DNODE_ENTER(dbuf);
1492eda14cbcSMatt Macy 	err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
1493eda14cbcSMatt Macy 	DB_DNODE_EXIT(dbuf);
1494eda14cbcSMatt Macy 
1495eda14cbcSMatt Macy 	return (err);
1496eda14cbcSMatt Macy }
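
/*
 * Illustrative sketch (hypothetical caller, not compiled) of the loaned
 * arc buffer path tying together dmu_request_arcbuf(),
 * dmu_assign_arcbuf_by_dbuf() and dmu_return_arcbuf(): the loaned buffer
 * is filled in place and, when block-aligned and block-sized, assigned
 * to the dbuf without another copy.
 */
#if 0
static int
example_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, const void *src,
    int blksz, dmu_tx_t *tx)
{
	arc_buf_t *buf = dmu_request_arcbuf(handle, blksz);
	int err;

	/* Fill the loaned buffer directly, avoiding an extra copy. */
	memcpy(buf->b_data, src, blksz);

	err = dmu_assign_arcbuf_by_dbuf(handle, offset, buf, tx);
	if (err != 0)
		dmu_return_arcbuf(buf);	/* loan not consumed on failure */
	return (err);
}
#endif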
1497eda14cbcSMatt Macy 
1498eda14cbcSMatt Macy typedef struct {
1499eda14cbcSMatt Macy 	dbuf_dirty_record_t	*dsa_dr;
1500eda14cbcSMatt Macy 	dmu_sync_cb_t		*dsa_done;
1501eda14cbcSMatt Macy 	zgd_t			*dsa_zgd;
1502eda14cbcSMatt Macy 	dmu_tx_t		*dsa_tx;
1503eda14cbcSMatt Macy } dmu_sync_arg_t;
1504eda14cbcSMatt Macy 
1505eda14cbcSMatt Macy /* ARGSUSED */
1506eda14cbcSMatt Macy static void
1507eda14cbcSMatt Macy dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
1508eda14cbcSMatt Macy {
1509eda14cbcSMatt Macy 	dmu_sync_arg_t *dsa = varg;
1510eda14cbcSMatt Macy 	dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
1511eda14cbcSMatt Macy 	blkptr_t *bp = zio->io_bp;
1512eda14cbcSMatt Macy 
1513eda14cbcSMatt Macy 	if (zio->io_error == 0) {
1514eda14cbcSMatt Macy 		if (BP_IS_HOLE(bp)) {
1515eda14cbcSMatt Macy 			/*
1516eda14cbcSMatt Macy 			 * A block of zeros may compress to a hole, but the
1517eda14cbcSMatt Macy 			 * block size still needs to be known for replay.
1518eda14cbcSMatt Macy 			 */
1519eda14cbcSMatt Macy 			BP_SET_LSIZE(bp, db->db_size);
1520eda14cbcSMatt Macy 		} else if (!BP_IS_EMBEDDED(bp)) {
1521eda14cbcSMatt Macy 			ASSERT(BP_GET_LEVEL(bp) == 0);
1522eda14cbcSMatt Macy 			BP_SET_FILL(bp, 1);
1523eda14cbcSMatt Macy 		}
1524eda14cbcSMatt Macy 	}
1525eda14cbcSMatt Macy }
1526eda14cbcSMatt Macy 
1527eda14cbcSMatt Macy static void
1528eda14cbcSMatt Macy dmu_sync_late_arrival_ready(zio_t *zio)
1529eda14cbcSMatt Macy {
1530eda14cbcSMatt Macy 	dmu_sync_ready(zio, NULL, zio->io_private);
1531eda14cbcSMatt Macy }
1532eda14cbcSMatt Macy 
1533eda14cbcSMatt Macy /* ARGSUSED */
1534eda14cbcSMatt Macy static void
1535eda14cbcSMatt Macy dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
1536eda14cbcSMatt Macy {
1537eda14cbcSMatt Macy 	dmu_sync_arg_t *dsa = varg;
1538eda14cbcSMatt Macy 	dbuf_dirty_record_t *dr = dsa->dsa_dr;
1539eda14cbcSMatt Macy 	dmu_buf_impl_t *db = dr->dr_dbuf;
1540eda14cbcSMatt Macy 	zgd_t *zgd = dsa->dsa_zgd;
1541eda14cbcSMatt Macy 
1542eda14cbcSMatt Macy 	/*
1543eda14cbcSMatt Macy 	 * Record the vdev(s) backing this blkptr so they can be flushed after
1544eda14cbcSMatt Macy 	 * the writes for the lwb have completed.
1545eda14cbcSMatt Macy 	 */
1546eda14cbcSMatt Macy 	if (zio->io_error == 0) {
1547eda14cbcSMatt Macy 		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1548eda14cbcSMatt Macy 	}
1549eda14cbcSMatt Macy 
1550eda14cbcSMatt Macy 	mutex_enter(&db->db_mtx);
1551eda14cbcSMatt Macy 	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
1552eda14cbcSMatt Macy 	if (zio->io_error == 0) {
1553eda14cbcSMatt Macy 		dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
1554eda14cbcSMatt Macy 		if (dr->dt.dl.dr_nopwrite) {
1555eda14cbcSMatt Macy 			blkptr_t *bp = zio->io_bp;
1556eda14cbcSMatt Macy 			blkptr_t *bp_orig = &zio->io_bp_orig;
1557eda14cbcSMatt Macy 			uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
1558eda14cbcSMatt Macy 
1559eda14cbcSMatt Macy 			ASSERT(BP_EQUAL(bp, bp_orig));
1560eda14cbcSMatt Macy 			VERIFY(BP_EQUAL(bp, db->db_blkptr));
1561eda14cbcSMatt Macy 			ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
1562eda14cbcSMatt Macy 			VERIFY(zio_checksum_table[chksum].ci_flags &
1563eda14cbcSMatt Macy 			    ZCHECKSUM_FLAG_NOPWRITE);
1564eda14cbcSMatt Macy 		}
1565eda14cbcSMatt Macy 		dr->dt.dl.dr_overridden_by = *zio->io_bp;
1566eda14cbcSMatt Macy 		dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
1567eda14cbcSMatt Macy 		dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
1568eda14cbcSMatt Macy 
1569eda14cbcSMatt Macy 		/*
1570eda14cbcSMatt Macy 		 * Old style holes are filled with all zeros, whereas
1571eda14cbcSMatt Macy 		 * new-style holes maintain their lsize, type, level,
1572eda14cbcSMatt Macy 		 * and birth time (see zio_write_compress). While we
1573eda14cbcSMatt Macy 		 * need to reset the BP_SET_LSIZE() call that happened
1574eda14cbcSMatt Macy 		 * in dmu_sync_ready for old style holes, we do *not*
1575eda14cbcSMatt Macy 		 * want to wipe out the information contained in new
1576eda14cbcSMatt Macy 		 * style holes. Thus, only zero out the block pointer if
1577eda14cbcSMatt Macy 		 * it's an old style hole.
1578eda14cbcSMatt Macy 		 */
1579eda14cbcSMatt Macy 		if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
1580eda14cbcSMatt Macy 		    dr->dt.dl.dr_overridden_by.blk_birth == 0)
1581eda14cbcSMatt Macy 			BP_ZERO(&dr->dt.dl.dr_overridden_by);
1582eda14cbcSMatt Macy 	} else {
1583eda14cbcSMatt Macy 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1584eda14cbcSMatt Macy 	}
1585eda14cbcSMatt Macy 	cv_broadcast(&db->db_changed);
1586eda14cbcSMatt Macy 	mutex_exit(&db->db_mtx);
1587eda14cbcSMatt Macy 
1588eda14cbcSMatt Macy 	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1589eda14cbcSMatt Macy 
1590eda14cbcSMatt Macy 	kmem_free(dsa, sizeof (*dsa));
1591eda14cbcSMatt Macy }
1592eda14cbcSMatt Macy 
1593eda14cbcSMatt Macy static void
1594eda14cbcSMatt Macy dmu_sync_late_arrival_done(zio_t *zio)
1595eda14cbcSMatt Macy {
1596eda14cbcSMatt Macy 	blkptr_t *bp = zio->io_bp;
1597eda14cbcSMatt Macy 	dmu_sync_arg_t *dsa = zio->io_private;
1598eda14cbcSMatt Macy 	zgd_t *zgd = dsa->dsa_zgd;
1599eda14cbcSMatt Macy 
1600eda14cbcSMatt Macy 	if (zio->io_error == 0) {
1601eda14cbcSMatt Macy 		/*
1602eda14cbcSMatt Macy 		 * Record the vdev(s) backing this blkptr so they can be
1603eda14cbcSMatt Macy 		 * flushed after the writes for the lwb have completed.
1604eda14cbcSMatt Macy 		 */
1605eda14cbcSMatt Macy 		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1606eda14cbcSMatt Macy 
1607eda14cbcSMatt Macy 		if (!BP_IS_HOLE(bp)) {
1608eda14cbcSMatt Macy 			blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig;
1609eda14cbcSMatt Macy 			ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
1610eda14cbcSMatt Macy 			ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
1611eda14cbcSMatt Macy 			ASSERT(zio->io_bp->blk_birth == zio->io_txg);
1612eda14cbcSMatt Macy 			ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
1613eda14cbcSMatt Macy 			zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
1614eda14cbcSMatt Macy 		}
1615eda14cbcSMatt Macy 	}
1616eda14cbcSMatt Macy 
1617eda14cbcSMatt Macy 	dmu_tx_commit(dsa->dsa_tx);
1618eda14cbcSMatt Macy 
1619eda14cbcSMatt Macy 	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1620eda14cbcSMatt Macy 
1621184c1b94SMartin Matuska 	abd_free(zio->io_abd);
1622eda14cbcSMatt Macy 	kmem_free(dsa, sizeof (*dsa));
1623eda14cbcSMatt Macy }
1624eda14cbcSMatt Macy 
1625eda14cbcSMatt Macy static int
1626eda14cbcSMatt Macy dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
1627eda14cbcSMatt Macy     zio_prop_t *zp, zbookmark_phys_t *zb)
1628eda14cbcSMatt Macy {
1629eda14cbcSMatt Macy 	dmu_sync_arg_t *dsa;
1630eda14cbcSMatt Macy 	dmu_tx_t *tx;
1631eda14cbcSMatt Macy 
1632eda14cbcSMatt Macy 	tx = dmu_tx_create(os);
1633eda14cbcSMatt Macy 	dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
1634eda14cbcSMatt Macy 	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
1635eda14cbcSMatt Macy 		dmu_tx_abort(tx);
1636eda14cbcSMatt Macy 		/* Make zl_get_data do txg_wait_synced() */
1637eda14cbcSMatt Macy 		return (SET_ERROR(EIO));
1638eda14cbcSMatt Macy 	}
1639eda14cbcSMatt Macy 
1640eda14cbcSMatt Macy 	/*
1641eda14cbcSMatt Macy 	 * In order to prevent the zgd's lwb from being free'd prior to
1642eda14cbcSMatt Macy 	 * dmu_sync_late_arrival_done() being called, we have to ensure
1643eda14cbcSMatt Macy 	 * the lwb's "max txg" takes this tx's txg into account.
1644eda14cbcSMatt Macy 	 */
1645eda14cbcSMatt Macy 	zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
1646eda14cbcSMatt Macy 
1647eda14cbcSMatt Macy 	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1648eda14cbcSMatt Macy 	dsa->dsa_dr = NULL;
1649eda14cbcSMatt Macy 	dsa->dsa_done = done;
1650eda14cbcSMatt Macy 	dsa->dsa_zgd = zgd;
1651eda14cbcSMatt Macy 	dsa->dsa_tx = tx;
1652eda14cbcSMatt Macy 
1653eda14cbcSMatt Macy 	/*
1654eda14cbcSMatt Macy 	 * Since we are currently syncing this txg, it's nontrivial to
1655eda14cbcSMatt Macy 	 * determine what BP to nopwrite against, so we disable nopwrite.
1656eda14cbcSMatt Macy 	 *
1657eda14cbcSMatt Macy 	 * When syncing, the db_blkptr is initially the BP of the previous
1658eda14cbcSMatt Macy 	 * txg.  We can not nopwrite against it because it will be changed
1659eda14cbcSMatt Macy 	 * (this is similar to the non-late-arrival case where the dbuf is
1660eda14cbcSMatt Macy 	 * dirty in a future txg).
1661eda14cbcSMatt Macy 	 *
1662eda14cbcSMatt Macy 	 * Then dbuf_write_ready() sets db_blkptr to the location we will write.
1663eda14cbcSMatt Macy 	 * We can not nopwrite against it because although the BP will not
1664eda14cbcSMatt Macy 	 * (typically) be changed, the data has not yet been persisted to this
1665eda14cbcSMatt Macy 	 * location.
1666eda14cbcSMatt Macy 	 *
1667eda14cbcSMatt Macy 	 * Finally, when dbuf_write_done() is called, it is theoretically
1668eda14cbcSMatt Macy 	 * possible to always nopwrite, because the data that was written in
1669eda14cbcSMatt Macy 	 * this txg is the same data that we are trying to write.  However we
1670eda14cbcSMatt Macy 	 * would need to check that this dbuf is not dirty in any future
1671eda14cbcSMatt Macy 	 * txg's (as we do in the normal dmu_sync() path). For simplicity, we
1672eda14cbcSMatt Macy 	 * don't nopwrite in this case.
1673eda14cbcSMatt Macy 	 */
1674eda14cbcSMatt Macy 	zp->zp_nopwrite = B_FALSE;
1675eda14cbcSMatt Macy 
1676eda14cbcSMatt Macy 	zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
1677eda14cbcSMatt Macy 	    abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
1678eda14cbcSMatt Macy 	    zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
1679eda14cbcSMatt Macy 	    dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done,
1680eda14cbcSMatt Macy 	    dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
1681eda14cbcSMatt Macy 
1682eda14cbcSMatt Macy 	return (0);
1683eda14cbcSMatt Macy }
1684eda14cbcSMatt Macy 
1685eda14cbcSMatt Macy /*
1686eda14cbcSMatt Macy  * Intent log support: sync the block associated with db to disk.
1687eda14cbcSMatt Macy  * N.B. and XXX: the caller is responsible for making sure that the
1688eda14cbcSMatt Macy  * data isn't changing while dmu_sync() is writing it.
1689eda14cbcSMatt Macy  *
1690eda14cbcSMatt Macy  * Return values:
1691eda14cbcSMatt Macy  *
1692eda14cbcSMatt Macy  *	EEXIST: this txg has already been synced, so there's nothing to do.
1693eda14cbcSMatt Macy  *		The caller should not log the write.
1694eda14cbcSMatt Macy  *
1695eda14cbcSMatt Macy  *	ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
1696eda14cbcSMatt Macy  *		The caller should not log the write.
1697eda14cbcSMatt Macy  *
1698eda14cbcSMatt Macy  *	EALREADY: this block is already in the process of being synced.
1699eda14cbcSMatt Macy  *		The caller should track its progress (somehow).
1700eda14cbcSMatt Macy  *
1701eda14cbcSMatt Macy  *	EIO: could not do the I/O.
1702eda14cbcSMatt Macy  *		The caller should do a txg_wait_synced().
1703eda14cbcSMatt Macy  *
1704eda14cbcSMatt Macy  *	0: the I/O has been initiated.
1705eda14cbcSMatt Macy  *		The caller should log this blkptr in the done callback.
1706eda14cbcSMatt Macy  *		It is possible that the I/O will fail, in which case
1707eda14cbcSMatt Macy  *		the error will be reported to the done callback and
1708eda14cbcSMatt Macy  *		propagated to pio from zio_done().
1709eda14cbcSMatt Macy  */
1710eda14cbcSMatt Macy int
1711eda14cbcSMatt Macy dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
1712eda14cbcSMatt Macy {
1713eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
1714eda14cbcSMatt Macy 	objset_t *os = db->db_objset;
1715eda14cbcSMatt Macy 	dsl_dataset_t *ds = os->os_dsl_dataset;
1716eda14cbcSMatt Macy 	dbuf_dirty_record_t *dr, *dr_next;
1717eda14cbcSMatt Macy 	dmu_sync_arg_t *dsa;
1718eda14cbcSMatt Macy 	zbookmark_phys_t zb;
1719eda14cbcSMatt Macy 	zio_prop_t zp;
1720eda14cbcSMatt Macy 	dnode_t *dn;
1721eda14cbcSMatt Macy 
1722eda14cbcSMatt Macy 	ASSERT(pio != NULL);
1723eda14cbcSMatt Macy 	ASSERT(txg != 0);
1724eda14cbcSMatt Macy 
1725eda14cbcSMatt Macy 	SET_BOOKMARK(&zb, ds->ds_object,
1726eda14cbcSMatt Macy 	    db->db.db_object, db->db_level, db->db_blkid);
1727eda14cbcSMatt Macy 
1728eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
1729eda14cbcSMatt Macy 	dn = DB_DNODE(db);
1730eda14cbcSMatt Macy 	dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
1731eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
1732eda14cbcSMatt Macy 
1733eda14cbcSMatt Macy 	/*
1734eda14cbcSMatt Macy 	 * If we're frozen (running ziltest), we always need to generate a bp.
1735eda14cbcSMatt Macy 	 */
1736eda14cbcSMatt Macy 	if (txg > spa_freeze_txg(os->os_spa))
1737eda14cbcSMatt Macy 		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1738eda14cbcSMatt Macy 
1739eda14cbcSMatt Macy 	/*
1740eda14cbcSMatt Macy 	 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
1741eda14cbcSMatt Macy 	 * and us.  If we determine that this txg is not yet syncing,
1742eda14cbcSMatt Macy 	 * but it begins to sync a moment later, that's OK because the
1743eda14cbcSMatt Macy 	 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
1744eda14cbcSMatt Macy 	 */
1745eda14cbcSMatt Macy 	mutex_enter(&db->db_mtx);
1746eda14cbcSMatt Macy 
1747eda14cbcSMatt Macy 	if (txg <= spa_last_synced_txg(os->os_spa)) {
1748eda14cbcSMatt Macy 		/*
1749eda14cbcSMatt Macy 		 * This txg has already synced.  There's nothing to do.
1750eda14cbcSMatt Macy 		 */
1751eda14cbcSMatt Macy 		mutex_exit(&db->db_mtx);
1752eda14cbcSMatt Macy 		return (SET_ERROR(EEXIST));
1753eda14cbcSMatt Macy 	}
1754eda14cbcSMatt Macy 
1755eda14cbcSMatt Macy 	if (txg <= spa_syncing_txg(os->os_spa)) {
1756eda14cbcSMatt Macy 		/*
1757eda14cbcSMatt Macy 		 * This txg is currently syncing, so we can't mess with
1758eda14cbcSMatt Macy 		 * the dirty record anymore; just write a new log block.
1759eda14cbcSMatt Macy 		 */
1760eda14cbcSMatt Macy 		mutex_exit(&db->db_mtx);
1761eda14cbcSMatt Macy 		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1762eda14cbcSMatt Macy 	}
1763eda14cbcSMatt Macy 
1764eda14cbcSMatt Macy 	dr = dbuf_find_dirty_eq(db, txg);
1765eda14cbcSMatt Macy 
1766eda14cbcSMatt Macy 	if (dr == NULL) {
1767eda14cbcSMatt Macy 		/*
1768eda14cbcSMatt Macy 		 * There's no dr for this dbuf, so it must have been freed.
1769eda14cbcSMatt Macy 		 * There's no need to log writes to freed blocks, so we're done.
1770eda14cbcSMatt Macy 		 */
1771eda14cbcSMatt Macy 		mutex_exit(&db->db_mtx);
1772eda14cbcSMatt Macy 		return (SET_ERROR(ENOENT));
1773eda14cbcSMatt Macy 	}
1774eda14cbcSMatt Macy 
1775eda14cbcSMatt Macy 	dr_next = list_next(&db->db_dirty_records, dr);
1776eda14cbcSMatt Macy 	ASSERT(dr_next == NULL || dr_next->dr_txg < txg);
1777eda14cbcSMatt Macy 
1778eda14cbcSMatt Macy 	if (db->db_blkptr != NULL) {
1779eda14cbcSMatt Macy 		/*
1780eda14cbcSMatt Macy 		 * We need to fill in zgd_bp with the current blkptr so that
1781eda14cbcSMatt Macy 		 * the nopwrite code can check if we're writing the same
1782eda14cbcSMatt Macy 		 * data that's already on disk.  We can only nopwrite if we
1783eda14cbcSMatt Macy 		 * are sure that after making the copy, db_blkptr will not
1784eda14cbcSMatt Macy 		 * change until our i/o completes.  We ensure this by
1785eda14cbcSMatt Macy 		 * holding the db_mtx, and only allowing nopwrite if the
1786eda14cbcSMatt Macy 		 * block is not already dirty (see below).  This is verified
1787eda14cbcSMatt Macy 		 * by dmu_sync_done(), which VERIFYs that the db_blkptr has
1788eda14cbcSMatt Macy 		 * not changed.
1789eda14cbcSMatt Macy 		 */
1790eda14cbcSMatt Macy 		*zgd->zgd_bp = *db->db_blkptr;
1791eda14cbcSMatt Macy 	}
1792eda14cbcSMatt Macy 
1793eda14cbcSMatt Macy 	/*
1794eda14cbcSMatt Macy 	 * Assume the on-disk data is X, the current syncing data (in
1795eda14cbcSMatt Macy 	 * txg - 1) is Y, and the current in-memory data is Z (currently
1796eda14cbcSMatt Macy 	 * in dmu_sync).
1797eda14cbcSMatt Macy 	 *
1798eda14cbcSMatt Macy 	 * We usually want to perform a nopwrite if X and Z are the
1799eda14cbcSMatt Macy 	 * same.  However, if Y is different (i.e. the BP is going to
1800eda14cbcSMatt Macy 	 * change before this write takes effect), then a nopwrite will
1801eda14cbcSMatt Macy 	 * be incorrect - we would override with X, which could have
1802eda14cbcSMatt Macy 	 * been freed when Y was written.
1803eda14cbcSMatt Macy 	 *
1804eda14cbcSMatt Macy 	 * (Note that this is not a concern when we are nop-writing from
1805eda14cbcSMatt Macy 	 * syncing context, because X and Y must be identical, because
1806eda14cbcSMatt Macy 	 * all previous txgs have been synced.)
1807eda14cbcSMatt Macy 	 *
1808eda14cbcSMatt Macy 	 * Therefore, we disable nopwrite if the current BP could change
1809eda14cbcSMatt Macy 	 * before this TXG.  There are two ways it could change: by
1810eda14cbcSMatt Macy 	 * being dirty (dr_next is non-NULL), or by being freed
1811eda14cbcSMatt Macy 	 * (dnode_block_freed()).  This behavior is verified by
1812eda14cbcSMatt Macy 	 * zio_done(), which VERIFYs that the override BP is identical
1813eda14cbcSMatt Macy 	 * to the on-disk BP.
1814eda14cbcSMatt Macy 	 */
1815eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
1816eda14cbcSMatt Macy 	dn = DB_DNODE(db);
1817eda14cbcSMatt Macy 	if (dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
1818eda14cbcSMatt Macy 		zp.zp_nopwrite = B_FALSE;
1819eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
1820eda14cbcSMatt Macy 
1821eda14cbcSMatt Macy 	ASSERT(dr->dr_txg == txg);
1822eda14cbcSMatt Macy 	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
1823eda14cbcSMatt Macy 	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
1824eda14cbcSMatt Macy 		/*
1825eda14cbcSMatt Macy 		 * We have already issued a sync write for this buffer,
1826eda14cbcSMatt Macy 		 * or this buffer has already been synced.  It could not
1827eda14cbcSMatt Macy 		 * have been dirtied since, or we would have cleared the state.
1828eda14cbcSMatt Macy 		 */
1829eda14cbcSMatt Macy 		mutex_exit(&db->db_mtx);
1830eda14cbcSMatt Macy 		return (SET_ERROR(EALREADY));
1831eda14cbcSMatt Macy 	}
1832eda14cbcSMatt Macy 
1833eda14cbcSMatt Macy 	ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
1834eda14cbcSMatt Macy 	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
1835eda14cbcSMatt Macy 	mutex_exit(&db->db_mtx);
1836eda14cbcSMatt Macy 
1837eda14cbcSMatt Macy 	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1838eda14cbcSMatt Macy 	dsa->dsa_dr = dr;
1839eda14cbcSMatt Macy 	dsa->dsa_done = done;
1840eda14cbcSMatt Macy 	dsa->dsa_zgd = zgd;
1841eda14cbcSMatt Macy 	dsa->dsa_tx = NULL;
1842eda14cbcSMatt Macy 
1843eda14cbcSMatt Macy 	zio_nowait(arc_write(pio, os->os_spa, txg,
1844eda14cbcSMatt Macy 	    zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
1845eda14cbcSMatt Macy 	    &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
1846eda14cbcSMatt Macy 	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
1847eda14cbcSMatt Macy 
1848eda14cbcSMatt Macy 	return (0);
1849eda14cbcSMatt Macy }
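
/*
 * Illustrative sketch (hypothetical caller and callback, not compiled)
 * of how an intent-log get_data path might dispatch on the dmu_sync()
 * return values documented above.  The callback signature follows the
 * dsa_done usage in this file.
 */
#if 0
static void
example_sync_done(zgd_t *zgd, int error)
{
	/* On success the caller would log zgd->zgd_bp for the lwb here. */
}

static int
example_log_block(zio_t *pio, objset_t *os, uint64_t txg, zgd_t *zgd)
{
	int err = dmu_sync(pio, txg, example_sync_done, zgd);

	switch (err) {
	case 0:		/* I/O initiated; log the bp from the callback. */
	case EEXIST:	/* Txg already synced; nothing to log. */
	case ENOENT:	/* Block was freed; nothing to log. */
		return (0);
	case EALREADY:	/* Already being synced; track its progress. */
		return (err);
	case EIO:	/* Could not do the I/O; wait for the txg. */
	default:
		txg_wait_synced(dmu_objset_pool(os), txg);
		return (err);
	}
}
#endif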
1850eda14cbcSMatt Macy 
1851eda14cbcSMatt Macy int
1852eda14cbcSMatt Macy dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx)
1853eda14cbcSMatt Macy {
1854eda14cbcSMatt Macy 	dnode_t *dn;
1855eda14cbcSMatt Macy 	int err;
1856eda14cbcSMatt Macy 
1857eda14cbcSMatt Macy 	err = dnode_hold(os, object, FTAG, &dn);
1858eda14cbcSMatt Macy 	if (err)
1859eda14cbcSMatt Macy 		return (err);
1860eda14cbcSMatt Macy 	err = dnode_set_nlevels(dn, nlevels, tx);
1861eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
1862eda14cbcSMatt Macy 	return (err);
1863eda14cbcSMatt Macy }
1864eda14cbcSMatt Macy 
1865eda14cbcSMatt Macy int
1866eda14cbcSMatt Macy dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
1867eda14cbcSMatt Macy     dmu_tx_t *tx)
1868eda14cbcSMatt Macy {
1869eda14cbcSMatt Macy 	dnode_t *dn;
1870eda14cbcSMatt Macy 	int err;
1871eda14cbcSMatt Macy 
1872eda14cbcSMatt Macy 	err = dnode_hold(os, object, FTAG, &dn);
1873eda14cbcSMatt Macy 	if (err)
1874eda14cbcSMatt Macy 		return (err);
1875eda14cbcSMatt Macy 	err = dnode_set_blksz(dn, size, ibs, tx);
1876eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
1877eda14cbcSMatt Macy 	return (err);
1878eda14cbcSMatt Macy }
1879eda14cbcSMatt Macy 
1880eda14cbcSMatt Macy int
1881eda14cbcSMatt Macy dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
1882eda14cbcSMatt Macy     dmu_tx_t *tx)
1883eda14cbcSMatt Macy {
1884eda14cbcSMatt Macy 	dnode_t *dn;
1885eda14cbcSMatt Macy 	int err;
1886eda14cbcSMatt Macy 
1887eda14cbcSMatt Macy 	err = dnode_hold(os, object, FTAG, &dn);
1888eda14cbcSMatt Macy 	if (err)
1889eda14cbcSMatt Macy 		return (err);
1890eda14cbcSMatt Macy 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1891eda14cbcSMatt Macy 	dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE);
1892eda14cbcSMatt Macy 	rw_exit(&dn->dn_struct_rwlock);
1893eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
1894eda14cbcSMatt Macy 	return (0);
1895eda14cbcSMatt Macy }
1896eda14cbcSMatt Macy 
1897eda14cbcSMatt Macy void
1898eda14cbcSMatt Macy dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
1899eda14cbcSMatt Macy     dmu_tx_t *tx)
1900eda14cbcSMatt Macy {
1901eda14cbcSMatt Macy 	dnode_t *dn;
1902eda14cbcSMatt Macy 
1903eda14cbcSMatt Macy 	/*
1904eda14cbcSMatt Macy 	 * Send streams include each object's checksum function.  This
1905eda14cbcSMatt Macy 	 * check ensures that the receiving system can understand the
1906eda14cbcSMatt Macy 	 * checksum function transmitted.
1907eda14cbcSMatt Macy 	 */
1908eda14cbcSMatt Macy 	ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);
1909eda14cbcSMatt Macy 
1910eda14cbcSMatt Macy 	VERIFY0(dnode_hold(os, object, FTAG, &dn));
1911eda14cbcSMatt Macy 	ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
1912eda14cbcSMatt Macy 	dn->dn_checksum = checksum;
1913eda14cbcSMatt Macy 	dnode_setdirty(dn, tx);
1914eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
1915eda14cbcSMatt Macy }
1916eda14cbcSMatt Macy 
1917eda14cbcSMatt Macy void
1918eda14cbcSMatt Macy dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
1919eda14cbcSMatt Macy     dmu_tx_t *tx)
1920eda14cbcSMatt Macy {
1921eda14cbcSMatt Macy 	dnode_t *dn;
1922eda14cbcSMatt Macy 
1923eda14cbcSMatt Macy 	/*
1924eda14cbcSMatt Macy 	 * Send streams include each object's compression function.  This
1925eda14cbcSMatt Macy 	 * check ensures that the receiving system can understand the
1926eda14cbcSMatt Macy 	 * compression function transmitted.
1927eda14cbcSMatt Macy 	 */
1928eda14cbcSMatt Macy 	ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);
1929eda14cbcSMatt Macy 
1930eda14cbcSMatt Macy 	VERIFY0(dnode_hold(os, object, FTAG, &dn));
1931eda14cbcSMatt Macy 	dn->dn_compress = compress;
1932eda14cbcSMatt Macy 	dnode_setdirty(dn, tx);
1933eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
1934eda14cbcSMatt Macy }
1935eda14cbcSMatt Macy 
1936eda14cbcSMatt Macy /*
1937eda14cbcSMatt Macy  * When the "redundant_metadata" property is set to "most", only indirect
1938eda14cbcSMatt Macy  * blocks of this level and higher will have an additional ditto block.
1939eda14cbcSMatt Macy  */
1940eda14cbcSMatt Macy int zfs_redundant_metadata_most_ditto_level = 2;
1941eda14cbcSMatt Macy 
1942eda14cbcSMatt Macy void
1943eda14cbcSMatt Macy dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
1944eda14cbcSMatt Macy {
1945eda14cbcSMatt Macy 	dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
1946eda14cbcSMatt Macy 	boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
1947eda14cbcSMatt Macy 	    (wp & WP_SPILL));
1948eda14cbcSMatt Macy 	enum zio_checksum checksum = os->os_checksum;
1949eda14cbcSMatt Macy 	enum zio_compress compress = os->os_compress;
1950eda14cbcSMatt Macy 	uint8_t complevel = os->os_complevel;
1951eda14cbcSMatt Macy 	enum zio_checksum dedup_checksum = os->os_dedup_checksum;
1952eda14cbcSMatt Macy 	boolean_t dedup = B_FALSE;
1953eda14cbcSMatt Macy 	boolean_t nopwrite = B_FALSE;
1954eda14cbcSMatt Macy 	boolean_t dedup_verify = os->os_dedup_verify;
1955eda14cbcSMatt Macy 	boolean_t encrypt = B_FALSE;
1956eda14cbcSMatt Macy 	int copies = os->os_copies;
1957eda14cbcSMatt Macy 
1958eda14cbcSMatt Macy 	/*
1959eda14cbcSMatt Macy 	 * We maintain different write policies for each of the following
1960eda14cbcSMatt Macy 	 * types of data:
1961eda14cbcSMatt Macy 	 *	 1. metadata
1962eda14cbcSMatt Macy 	 *	 2. preallocated blocks (i.e. level-0 blocks of a dump device)
1963eda14cbcSMatt Macy 	 *	 3. all other level 0 blocks
1964eda14cbcSMatt Macy 	 */
1965eda14cbcSMatt Macy 	if (ismd) {
1966eda14cbcSMatt Macy 		/*
1967eda14cbcSMatt Macy 		 * XXX -- we should design a compression algorithm
1968eda14cbcSMatt Macy 		 * that specializes in arrays of bps.
1969eda14cbcSMatt Macy 		 */
1970eda14cbcSMatt Macy 		compress = zio_compress_select(os->os_spa,
1971eda14cbcSMatt Macy 		    ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);
1972eda14cbcSMatt Macy 
1973eda14cbcSMatt Macy 		/*
1974eda14cbcSMatt Macy 		 * Metadata always gets checksummed.  If the data
1975eda14cbcSMatt Macy 		 * checksum is multi-bit correctable, and it's not a
1976eda14cbcSMatt Macy 		 * ZBT-style checksum, then it's suitable for metadata
1977eda14cbcSMatt Macy 		 * as well.  Otherwise, the metadata checksum defaults
1978eda14cbcSMatt Macy 		 * to fletcher4.
1979eda14cbcSMatt Macy 		 */
1980eda14cbcSMatt Macy 		if (!(zio_checksum_table[checksum].ci_flags &
1981eda14cbcSMatt Macy 		    ZCHECKSUM_FLAG_METADATA) ||
1982eda14cbcSMatt Macy 		    (zio_checksum_table[checksum].ci_flags &
1983eda14cbcSMatt Macy 		    ZCHECKSUM_FLAG_EMBEDDED))
1984eda14cbcSMatt Macy 			checksum = ZIO_CHECKSUM_FLETCHER_4;
1985eda14cbcSMatt Macy 
1986eda14cbcSMatt Macy 		if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
1987eda14cbcSMatt Macy 		    (os->os_redundant_metadata ==
1988eda14cbcSMatt Macy 		    ZFS_REDUNDANT_METADATA_MOST &&
1989eda14cbcSMatt Macy 		    (level >= zfs_redundant_metadata_most_ditto_level ||
1990eda14cbcSMatt Macy 		    DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
1991eda14cbcSMatt Macy 			copies++;
1992eda14cbcSMatt Macy 	} else if (wp & WP_NOFILL) {
1993eda14cbcSMatt Macy 		ASSERT(level == 0);
1994eda14cbcSMatt Macy 
1995eda14cbcSMatt Macy 		/*
1996eda14cbcSMatt Macy 		 * If we're writing preallocated blocks, we aren't actually
1997eda14cbcSMatt Macy 		 * writing them so don't set any policy properties.  These
1998eda14cbcSMatt Macy 		 * blocks are currently only used by an external subsystem
1999eda14cbcSMatt Macy 		 * outside of zfs (i.e. dump) and not written by the zio
2000eda14cbcSMatt Macy 		 * pipeline.
2001eda14cbcSMatt Macy 		 */
2002eda14cbcSMatt Macy 		compress = ZIO_COMPRESS_OFF;
2003eda14cbcSMatt Macy 		checksum = ZIO_CHECKSUM_OFF;
2004eda14cbcSMatt Macy 	} else {
2005eda14cbcSMatt Macy 		compress = zio_compress_select(os->os_spa, dn->dn_compress,
2006eda14cbcSMatt Macy 		    compress);
2007eda14cbcSMatt Macy 		complevel = zio_complevel_select(os->os_spa, compress,
2008eda14cbcSMatt Macy 		    complevel, complevel);
2009eda14cbcSMatt Macy 
2010eda14cbcSMatt Macy 		checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
2011eda14cbcSMatt Macy 		    zio_checksum_select(dn->dn_checksum, checksum) :
2012eda14cbcSMatt Macy 		    dedup_checksum;
2013eda14cbcSMatt Macy 
2014eda14cbcSMatt Macy 		/*
2015eda14cbcSMatt Macy 		 * Determine dedup setting.  If we are in dmu_sync(),
2016eda14cbcSMatt Macy 		 * we won't actually dedup now because that's all
2017eda14cbcSMatt Macy 		 * done in syncing context; but we do want to use the
2018eda14cbcSMatt Macy 		 * dedup checksum.  If the checksum is not strong
2019eda14cbcSMatt Macy 		 * enough to ensure unique signatures, force
2020eda14cbcSMatt Macy 		 * dedup_verify.
2021eda14cbcSMatt Macy 		 */
2022eda14cbcSMatt Macy 		if (dedup_checksum != ZIO_CHECKSUM_OFF) {
2023eda14cbcSMatt Macy 			dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
2024eda14cbcSMatt Macy 			if (!(zio_checksum_table[checksum].ci_flags &
2025eda14cbcSMatt Macy 			    ZCHECKSUM_FLAG_DEDUP))
2026eda14cbcSMatt Macy 				dedup_verify = B_TRUE;
2027eda14cbcSMatt Macy 		}
2028eda14cbcSMatt Macy 
2029eda14cbcSMatt Macy 		/*
2030eda14cbcSMatt Macy 		 * Enable nopwrite if we have a secure enough checksum
2031eda14cbcSMatt Macy 		 * algorithm (see comment in zio_nop_write) and
2032eda14cbcSMatt Macy 		 * compression is enabled.  We don't enable nopwrite if
2033eda14cbcSMatt Macy 		 * dedup is enabled as the two features are mutually
2034eda14cbcSMatt Macy 		 * exclusive.
2035eda14cbcSMatt Macy 		 */
2036eda14cbcSMatt Macy 		nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
2037eda14cbcSMatt Macy 		    ZCHECKSUM_FLAG_NOPWRITE) &&
2038eda14cbcSMatt Macy 		    compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
2039eda14cbcSMatt Macy 	}
2040eda14cbcSMatt Macy 
2041eda14cbcSMatt Macy 	/*
2042eda14cbcSMatt Macy 	 * All objects in an encrypted objset are protected from modification
2043eda14cbcSMatt Macy 	 * via a MAC. Encrypted objects store their IV and salt in the last DVA
2044eda14cbcSMatt Macy 	 * in the bp, so we cannot use all copies. Encrypted objects are also
2045eda14cbcSMatt Macy 	 * not subject to nopwrite since writing the same data will still
2046eda14cbcSMatt Macy 	 * result in a new ciphertext. Only encrypted blocks can be dedup'd
2047eda14cbcSMatt Macy 	 * to avoid ambiguity in the dedup code since the DDT does not store
2048eda14cbcSMatt Macy 	 * object types.
2049eda14cbcSMatt Macy 	 */
2050eda14cbcSMatt Macy 	if (os->os_encrypted && (wp & WP_NOFILL) == 0) {
2051eda14cbcSMatt Macy 		encrypt = B_TRUE;
2052eda14cbcSMatt Macy 
2053eda14cbcSMatt Macy 		if (DMU_OT_IS_ENCRYPTED(type)) {
2054eda14cbcSMatt Macy 			copies = MIN(copies, SPA_DVAS_PER_BP - 1);
2055eda14cbcSMatt Macy 			nopwrite = B_FALSE;
2056eda14cbcSMatt Macy 		} else {
2057eda14cbcSMatt Macy 			dedup = B_FALSE;
2058eda14cbcSMatt Macy 		}
2059eda14cbcSMatt Macy 
2060eda14cbcSMatt Macy 		if (level <= 0 &&
2061eda14cbcSMatt Macy 		    (type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) {
2062eda14cbcSMatt Macy 			compress = ZIO_COMPRESS_EMPTY;
2063eda14cbcSMatt Macy 		}
2064eda14cbcSMatt Macy 	}
2065eda14cbcSMatt Macy 
2066eda14cbcSMatt Macy 	zp->zp_compress = compress;
2067eda14cbcSMatt Macy 	zp->zp_complevel = complevel;
2068eda14cbcSMatt Macy 	zp->zp_checksum = checksum;
2069eda14cbcSMatt Macy 	zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
2070eda14cbcSMatt Macy 	zp->zp_level = level;
2071eda14cbcSMatt Macy 	zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
2072eda14cbcSMatt Macy 	zp->zp_dedup = dedup;
2073eda14cbcSMatt Macy 	zp->zp_dedup_verify = dedup && dedup_verify;
2074eda14cbcSMatt Macy 	zp->zp_nopwrite = nopwrite;
2075eda14cbcSMatt Macy 	zp->zp_encrypt = encrypt;
2076eda14cbcSMatt Macy 	zp->zp_byteorder = ZFS_HOST_BYTEORDER;
2077eda14cbcSMatt Macy 	bzero(zp->zp_salt, ZIO_DATA_SALT_LEN);
2078eda14cbcSMatt Macy 	bzero(zp->zp_iv, ZIO_DATA_IV_LEN);
2079eda14cbcSMatt Macy 	bzero(zp->zp_mac, ZIO_DATA_MAC_LEN);
2080eda14cbcSMatt Macy 	zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ?
2081eda14cbcSMatt Macy 	    os->os_zpl_special_smallblock : 0;
2082eda14cbcSMatt Macy 
2083eda14cbcSMatt Macy 	ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
2084eda14cbcSMatt Macy }
2085eda14cbcSMatt Macy 
2086eda14cbcSMatt Macy /*
2087eda14cbcSMatt Macy  * This function is only called from zfs_holey_common() for zpl_llseek()
2088eda14cbcSMatt Macy  * in order to determine the location of holes.  In order to accurately
2089eda14cbcSMatt Macy  * report holes, all dirty data must be synced to disk.  This causes extremely
2090eda14cbcSMatt Macy  * poor performance when seeking for holes in a dirty file.  As a compromise,
2091eda14cbcSMatt Macy  * only provide hole data when the dnode is clean.  When a dnode is dirty,
2092eda14cbcSMatt Macy  * report the dnode as having no holes, which is always a safe thing to do.
2093eda14cbcSMatt Macy  */
2094eda14cbcSMatt Macy int
2095eda14cbcSMatt Macy dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
2096eda14cbcSMatt Macy {
2097eda14cbcSMatt Macy 	dnode_t *dn;
2098eda14cbcSMatt Macy 	int i, err;
2099eda14cbcSMatt Macy 	boolean_t clean = B_TRUE;
2100eda14cbcSMatt Macy 
2101eda14cbcSMatt Macy 	err = dnode_hold(os, object, FTAG, &dn);
2102eda14cbcSMatt Macy 	if (err)
2103eda14cbcSMatt Macy 		return (err);
2104eda14cbcSMatt Macy 
2105eda14cbcSMatt Macy 	/*
2106eda14cbcSMatt Macy 	 * Check if dnode is dirty
2107eda14cbcSMatt Macy 	 */
2108eda14cbcSMatt Macy 	for (i = 0; i < TXG_SIZE; i++) {
2109eda14cbcSMatt Macy 		if (multilist_link_active(&dn->dn_dirty_link[i])) {
2110eda14cbcSMatt Macy 			clean = B_FALSE;
2111eda14cbcSMatt Macy 			break;
2112eda14cbcSMatt Macy 		}
2113eda14cbcSMatt Macy 	}
2114eda14cbcSMatt Macy 
2115eda14cbcSMatt Macy 	/*
2116eda14cbcSMatt Macy 	 * If the compatibility option is on, sync any current changes before
2117eda14cbcSMatt Macy 	 * we go trundling through the block pointers.
2118eda14cbcSMatt Macy 	 */
2119eda14cbcSMatt Macy 	if (!clean && zfs_dmu_offset_next_sync) {
2120eda14cbcSMatt Macy 		clean = B_TRUE;
2121eda14cbcSMatt Macy 		dnode_rele(dn, FTAG);
2122eda14cbcSMatt Macy 		txg_wait_synced(dmu_objset_pool(os), 0);
2123eda14cbcSMatt Macy 		err = dnode_hold(os, object, FTAG, &dn);
2124eda14cbcSMatt Macy 		if (err)
2125eda14cbcSMatt Macy 			return (err);
2126eda14cbcSMatt Macy 	}
2127eda14cbcSMatt Macy 
2128eda14cbcSMatt Macy 	if (clean)
2129eda14cbcSMatt Macy 		err = dnode_next_offset(dn,
2130eda14cbcSMatt Macy 		    (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
2131eda14cbcSMatt Macy 	else
2132eda14cbcSMatt Macy 		err = SET_ERROR(EBUSY);
2133eda14cbcSMatt Macy 
2134eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
2135eda14cbcSMatt Macy 
2136eda14cbcSMatt Macy 	return (err);
2137eda14cbcSMatt Macy }
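
/*
 * Illustrative sketch (hypothetical helper, not compiled): a seek-hole
 * style consumer asks for the next hole and treats any failure,
 * including EBUSY for a dirty dnode, as "no hole found", matching the
 * conservative behavior described above.
 */
#if 0
static boolean_t
example_has_hole_after(objset_t *os, uint64_t object, uint64_t off)
{
	/* On success 'off' is advanced to the start of the next hole. */
	return (dmu_offset_next(os, object, B_TRUE /* hole */, &off) == 0);
}
#endif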
2138eda14cbcSMatt Macy 
2139eda14cbcSMatt Macy void
2140eda14cbcSMatt Macy __dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
2141eda14cbcSMatt Macy {
2142eda14cbcSMatt Macy 	dnode_phys_t *dnp = dn->dn_phys;
2143eda14cbcSMatt Macy 
2144eda14cbcSMatt Macy 	doi->doi_data_block_size = dn->dn_datablksz;
2145eda14cbcSMatt Macy 	doi->doi_metadata_block_size = dn->dn_indblkshift ?
2146eda14cbcSMatt Macy 	    1ULL << dn->dn_indblkshift : 0;
2147eda14cbcSMatt Macy 	doi->doi_type = dn->dn_type;
2148eda14cbcSMatt Macy 	doi->doi_bonus_type = dn->dn_bonustype;
2149eda14cbcSMatt Macy 	doi->doi_bonus_size = dn->dn_bonuslen;
2150eda14cbcSMatt Macy 	doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
2151eda14cbcSMatt Macy 	doi->doi_indirection = dn->dn_nlevels;
2152eda14cbcSMatt Macy 	doi->doi_checksum = dn->dn_checksum;
2153eda14cbcSMatt Macy 	doi->doi_compress = dn->dn_compress;
2154eda14cbcSMatt Macy 	doi->doi_nblkptr = dn->dn_nblkptr;
2155eda14cbcSMatt Macy 	doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
2156eda14cbcSMatt Macy 	doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
2157eda14cbcSMatt Macy 	doi->doi_fill_count = 0;
2158eda14cbcSMatt Macy 	for (int i = 0; i < dnp->dn_nblkptr; i++)
2159eda14cbcSMatt Macy 		doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
2160eda14cbcSMatt Macy }
2161eda14cbcSMatt Macy 
2162eda14cbcSMatt Macy void
2163eda14cbcSMatt Macy dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
2164eda14cbcSMatt Macy {
2165eda14cbcSMatt Macy 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
2166eda14cbcSMatt Macy 	mutex_enter(&dn->dn_mtx);
2167eda14cbcSMatt Macy 
2168eda14cbcSMatt Macy 	__dmu_object_info_from_dnode(dn, doi);
2169eda14cbcSMatt Macy 
2170eda14cbcSMatt Macy 	mutex_exit(&dn->dn_mtx);
2171eda14cbcSMatt Macy 	rw_exit(&dn->dn_struct_rwlock);
2172eda14cbcSMatt Macy }
2173eda14cbcSMatt Macy 
2174eda14cbcSMatt Macy /*
2175eda14cbcSMatt Macy  * Get information on a DMU object.
2176eda14cbcSMatt Macy  * If doi is NULL, just indicates whether the object exists.
2177eda14cbcSMatt Macy  */
2178eda14cbcSMatt Macy int
2179eda14cbcSMatt Macy dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
2180eda14cbcSMatt Macy {
2181eda14cbcSMatt Macy 	dnode_t *dn;
2182eda14cbcSMatt Macy 	int err = dnode_hold(os, object, FTAG, &dn);
2183eda14cbcSMatt Macy 
2184eda14cbcSMatt Macy 	if (err)
2185eda14cbcSMatt Macy 		return (err);
2186eda14cbcSMatt Macy 
2187eda14cbcSMatt Macy 	if (doi != NULL)
2188eda14cbcSMatt Macy 		dmu_object_info_from_dnode(dn, doi);
2189eda14cbcSMatt Macy 
2190eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
2191eda14cbcSMatt Macy 	return (0);
2192eda14cbcSMatt Macy }
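
/*
 * Illustrative sketch (hypothetical helper, not compiled): passing a
 * NULL doi probes only for existence, while a non-NULL doi also reports
 * the object's geometry, e.g. doi_max_offset below.
 */
#if 0
static boolean_t
example_object_exists(objset_t *os, uint64_t object, uint64_t *max_offset)
{
	dmu_object_info_t doi;

	if (dmu_object_info(os, object, &doi) != 0)
		return (B_FALSE);

	/* Upper bound on the object's logical size. */
	if (max_offset != NULL)
		*max_offset = doi.doi_max_offset;
	return (B_TRUE);
}
#endif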
2193eda14cbcSMatt Macy 
2194eda14cbcSMatt Macy /*
2195eda14cbcSMatt Macy  * As above, but faster; can be used when you have a held dbuf in hand.
2196eda14cbcSMatt Macy  */
2197eda14cbcSMatt Macy void
2198eda14cbcSMatt Macy dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
2199eda14cbcSMatt Macy {
2200eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2201eda14cbcSMatt Macy 
2202eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
2203eda14cbcSMatt Macy 	dmu_object_info_from_dnode(DB_DNODE(db), doi);
2204eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
2205eda14cbcSMatt Macy }
2206eda14cbcSMatt Macy 
2207eda14cbcSMatt Macy /*
2208eda14cbcSMatt Macy  * Faster still when you only care about the size.
2209eda14cbcSMatt Macy  */
2210eda14cbcSMatt Macy void
2211eda14cbcSMatt Macy dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
2212eda14cbcSMatt Macy     u_longlong_t *nblk512)
2213eda14cbcSMatt Macy {
2214eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2215eda14cbcSMatt Macy 	dnode_t *dn;
2216eda14cbcSMatt Macy 
2217eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
2218eda14cbcSMatt Macy 	dn = DB_DNODE(db);
2219eda14cbcSMatt Macy 
2220eda14cbcSMatt Macy 	*blksize = dn->dn_datablksz;
2221eda14cbcSMatt Macy 	/* add in number of slots used for the dnode itself */
2222eda14cbcSMatt Macy 	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
2223eda14cbcSMatt Macy 	    SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
2224eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
2225eda14cbcSMatt Macy }
2226eda14cbcSMatt Macy 
2227eda14cbcSMatt Macy void
2228eda14cbcSMatt Macy dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
2229eda14cbcSMatt Macy {
2230eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2231eda14cbcSMatt Macy 	dnode_t *dn;
2232eda14cbcSMatt Macy 
2233eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
2234eda14cbcSMatt Macy 	dn = DB_DNODE(db);
2235eda14cbcSMatt Macy 	*dnsize = dn->dn_num_slots << DNODE_SHIFT;
2236eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
2237eda14cbcSMatt Macy }
2238eda14cbcSMatt Macy 
2239eda14cbcSMatt Macy void
2240eda14cbcSMatt Macy byteswap_uint64_array(void *vbuf, size_t size)
2241eda14cbcSMatt Macy {
2242eda14cbcSMatt Macy 	uint64_t *buf = vbuf;
2243eda14cbcSMatt Macy 	size_t count = size >> 3;
2244eda14cbcSMatt Macy 	int i;
2245eda14cbcSMatt Macy 
2246eda14cbcSMatt Macy 	ASSERT((size & 7) == 0);
2247eda14cbcSMatt Macy 
2248eda14cbcSMatt Macy 	for (i = 0; i < count; i++)
2249eda14cbcSMatt Macy 		buf[i] = BSWAP_64(buf[i]);
2250eda14cbcSMatt Macy }
2251eda14cbcSMatt Macy 
2252eda14cbcSMatt Macy void
2253eda14cbcSMatt Macy byteswap_uint32_array(void *vbuf, size_t size)
2254eda14cbcSMatt Macy {
2255eda14cbcSMatt Macy 	uint32_t *buf = vbuf;
2256eda14cbcSMatt Macy 	size_t count = size >> 2;
2257eda14cbcSMatt Macy 	int i;
2258eda14cbcSMatt Macy 
2259eda14cbcSMatt Macy 	ASSERT((size & 3) == 0);
2260eda14cbcSMatt Macy 
2261eda14cbcSMatt Macy 	for (i = 0; i < count; i++)
2262eda14cbcSMatt Macy 		buf[i] = BSWAP_32(buf[i]);
2263eda14cbcSMatt Macy }
2264eda14cbcSMatt Macy 
2265eda14cbcSMatt Macy void
2266eda14cbcSMatt Macy byteswap_uint16_array(void *vbuf, size_t size)
2267eda14cbcSMatt Macy {
2268eda14cbcSMatt Macy 	uint16_t *buf = vbuf;
2269eda14cbcSMatt Macy 	size_t count = size >> 1;
2270eda14cbcSMatt Macy 	int i;
2271eda14cbcSMatt Macy 
2272eda14cbcSMatt Macy 	ASSERT((size & 1) == 0);
2273eda14cbcSMatt Macy 
2274eda14cbcSMatt Macy 	for (i = 0; i < count; i++)
2275eda14cbcSMatt Macy 		buf[i] = BSWAP_16(buf[i]);
2276eda14cbcSMatt Macy }
2277eda14cbcSMatt Macy 
2278eda14cbcSMatt Macy /* ARGSUSED */
2279eda14cbcSMatt Macy void
2280eda14cbcSMatt Macy byteswap_uint8_array(void *vbuf, size_t size)
2281eda14cbcSMatt Macy {
2282eda14cbcSMatt Macy }
2283eda14cbcSMatt Macy 
2284eda14cbcSMatt Macy void
2285eda14cbcSMatt Macy dmu_init(void)
2286eda14cbcSMatt Macy {
2287eda14cbcSMatt Macy 	abd_init();
2288eda14cbcSMatt Macy 	zfs_dbgmsg_init();
2289eda14cbcSMatt Macy 	sa_cache_init();
2290eda14cbcSMatt Macy 	dmu_objset_init();
2291eda14cbcSMatt Macy 	dnode_init();
2292eda14cbcSMatt Macy 	zfetch_init();
2293eda14cbcSMatt Macy 	dmu_tx_init();
2294eda14cbcSMatt Macy 	l2arc_init();
2295eda14cbcSMatt Macy 	arc_init();
2296eda14cbcSMatt Macy 	dbuf_init();
2297eda14cbcSMatt Macy }
2298eda14cbcSMatt Macy 
2299eda14cbcSMatt Macy void
2300eda14cbcSMatt Macy dmu_fini(void)
2301eda14cbcSMatt Macy {
2302eda14cbcSMatt Macy 	arc_fini(); /* arc depends on l2arc, so arc must go first */
2303eda14cbcSMatt Macy 	l2arc_fini();
2304eda14cbcSMatt Macy 	dmu_tx_fini();
2305eda14cbcSMatt Macy 	zfetch_fini();
2306eda14cbcSMatt Macy 	dbuf_fini();
2307eda14cbcSMatt Macy 	dnode_fini();
2308eda14cbcSMatt Macy 	dmu_objset_fini();
2309eda14cbcSMatt Macy 	sa_cache_fini();
2310eda14cbcSMatt Macy 	zfs_dbgmsg_fini();
2311eda14cbcSMatt Macy 	abd_fini();
2312eda14cbcSMatt Macy }
2313eda14cbcSMatt Macy 
2314eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_bonus_hold);
2315eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_bonus_hold_by_dnode);
2316eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus);
2317eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_buf_rele_array);
2318eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_prefetch);
2319eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_free_range);
2320eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_free_long_range);
2321eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_free_long_object);
2322eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_read);
2323eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_read_by_dnode);
2324eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_write);
2325eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_write_by_dnode);
2326eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_prealloc);
2327eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_object_info);
2328eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_object_info_from_dnode);
2329eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_object_info_from_db);
2330eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_object_size_from_db);
2331eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_object_dnsize_from_db);
2332eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_object_set_nlevels);
2333eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_object_set_blocksize);
2334eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_object_set_maxblkid);
2335eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_object_set_checksum);
2336eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_object_set_compress);
2337ac0bf12eSMatt Macy EXPORT_SYMBOL(dmu_offset_next);
2338eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_write_policy);
2339eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_sync);
2340eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_request_arcbuf);
2341eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_return_arcbuf);
2342eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode);
2343eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf);
2344eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_buf_hold);
2345eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_ot);
2346eda14cbcSMatt Macy 
2347eda14cbcSMatt Macy /* BEGIN CSTYLED */
2348eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs, zfs_, nopwrite_enabled, INT, ZMOD_RW,
2349eda14cbcSMatt Macy 	"Enable NOP writes");
2350eda14cbcSMatt Macy 
2351eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs, zfs_, per_txg_dirty_frees_percent, ULONG, ZMOD_RW,
2352eda14cbcSMatt Macy 	"Percentage of dirtied blocks from frees in one TXG");
2353eda14cbcSMatt Macy 
2354eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW,
2355eda14cbcSMatt Macy 	"Enable forcing txg sync to find holes");
2356eda14cbcSMatt Macy 
2357eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, INT, ZMOD_RW,
2358eda14cbcSMatt Macy 	"Limit one prefetch call to this size");
2359eda14cbcSMatt Macy /* END CSTYLED */
2360