xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu.c (revision 54811da5ac6b517992fdc173df5d605e4e61fdc0)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
21fa9e4066Sahrens /*
2294d1a210STim Haley  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23fa9e4066Sahrens  */
24857c96d2SIgor Kozhukhov /*
25857c96d2SIgor Kozhukhov  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26857c96d2SIgor Kozhukhov  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
27857c96d2SIgor Kozhukhov  * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
28857c96d2SIgor Kozhukhov  * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
29857c96d2SIgor Kozhukhov  * Copyright (c) 2018 DilOS
30857c96d2SIgor Kozhukhov  */
31aad02571SSaso Kiselkov 
32fa9e4066Sahrens #include <sys/dmu.h>
33fa9e4066Sahrens #include <sys/dmu_impl.h>
34fa9e4066Sahrens #include <sys/dmu_tx.h>
35fa9e4066Sahrens #include <sys/dbuf.h>
36fa9e4066Sahrens #include <sys/dnode.h>
37fa9e4066Sahrens #include <sys/zfs_context.h>
38fa9e4066Sahrens #include <sys/dmu_objset.h>
39fa9e4066Sahrens #include <sys/dmu_traverse.h>
40fa9e4066Sahrens #include <sys/dsl_dataset.h>
41fa9e4066Sahrens #include <sys/dsl_dir.h>
42fa9e4066Sahrens #include <sys/dsl_pool.h>
431d452cf5Sahrens #include <sys/dsl_synctask.h>
44a2eea2e1Sahrens #include <sys/dsl_prop.h>
45fa9e4066Sahrens #include <sys/dmu_zfetch.h>
46fa9e4066Sahrens #include <sys/zfs_ioctl.h>
47fa9e4066Sahrens #include <sys/zap.h>
48ea8dc4b6Seschrock #include <sys/zio_checksum.h>
4980901aeaSGeorge Wilson #include <sys/zio_compress.h>
500a586ceaSMark Shellenbaum #include <sys/sa.h>
51b8289d24SDaniil Lunev #include <sys/zfeature.h>
52770499e1SDan Kimmel #include <sys/abd.h>
5344eda4d7Smaybee #ifdef _KERNEL
5444eda4d7Smaybee #include <sys/vmsystm.h>
550fab61baSJonathan W Adams #include <sys/zfs_znode.h>
5644eda4d7Smaybee #endif
57fa9e4066Sahrens 
58857c96d2SIgor Kozhukhov static xuio_stats_t xuio_stats = {
59857c96d2SIgor Kozhukhov 	{ "onloan_read_buf",	KSTAT_DATA_UINT64 },
60857c96d2SIgor Kozhukhov 	{ "onloan_write_buf",	KSTAT_DATA_UINT64 },
61857c96d2SIgor Kozhukhov 	{ "read_buf_copied",	KSTAT_DATA_UINT64 },
62857c96d2SIgor Kozhukhov 	{ "read_buf_nocopy",	KSTAT_DATA_UINT64 },
63857c96d2SIgor Kozhukhov 	{ "write_buf_copied",	KSTAT_DATA_UINT64 },
64857c96d2SIgor Kozhukhov 	{ "write_buf_nocopy",	KSTAT_DATA_UINT64 }
65857c96d2SIgor Kozhukhov };
66857c96d2SIgor Kozhukhov 
67857c96d2SIgor Kozhukhov #define	XUIOSTAT_INCR(stat, val)	\
68857c96d2SIgor Kozhukhov 	atomic_add_64(&xuio_stats.stat.value.ui64, (val))
69857c96d2SIgor Kozhukhov #define	XUIOSTAT_BUMP(stat)	XUIOSTAT_INCR(stat, 1)
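
/*
 * Usage sketch (member name assumed; the xuio_stats_t layout is declared
 * in the DMU headers): each initializer above is a kstat_named_t, so a
 * read path that loans a buffer out would account for it lock-free with,
 * e.g., a hypothetical "xuiostat_onloan_rbuf" member:
 *
 *	XUIOSTAT_BUMP(xuiostat_onloan_rbuf);
 *
 * which atomically adds 1 to that counter via atomic_add_64().
 */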
70857c96d2SIgor Kozhukhov 
7180901aeaSGeorge Wilson /*
7280901aeaSGeorge Wilson  * Enable/disable nopwrite feature.
7380901aeaSGeorge Wilson  */
7480901aeaSGeorge Wilson int zfs_nopwrite_enabled = 1;
7580901aeaSGeorge Wilson 
76ff5177eeSAlek Pinchuk /*
77ff5177eeSAlek Pinchuk  * Tunable to control percentage of dirtied blocks from frees in one TXG.
78ff5177eeSAlek Pinchuk  * After this threshold is crossed, additional dirty blocks from frees
79ff5177eeSAlek Pinchuk  * wait until the next TXG.
80ff5177eeSAlek Pinchuk  * A value of zero will disable this throttle.
81ff5177eeSAlek Pinchuk  */
82ff5177eeSAlek Pinchuk uint32_t zfs_per_txg_dirty_frees_percent = 30;
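
/*
 * Worked example (assuming zfs_dirty_data_max is 4 GiB): at the 30%
 * default above, dmu_free_long_range_impl() stops dirtying further frees
 * once 30 * 4 GiB / 100 = 1.2 GiB of free-derived dirty data is
 * outstanding across all TXGs, and waits for the next TXG to open.
 */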
83ff5177eeSAlek Pinchuk 
845cabbc6bSPrashanth Sreenivasa /*
855cabbc6bSPrashanth Sreenivasa  * This can be used for testing, to ensure that certain actions happen
865cabbc6bSPrashanth Sreenivasa  * while in the middle of a remap (which might otherwise complete too
875cabbc6bSPrashanth Sreenivasa  * quickly).
885cabbc6bSPrashanth Sreenivasa  */
895cabbc6bSPrashanth Sreenivasa int zfs_object_remap_one_indirect_delay_ticks = 0;
905cabbc6bSPrashanth Sreenivasa 
91fa9e4066Sahrens const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
92adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT8,  TRUE,  FALSE,  "unallocated"		},
93adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "object directory"		},
94adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  TRUE,   "object array"		},
95adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT8,  TRUE,  FALSE,  "packed nvlist"		},
96adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "packed nvlist size"		},
97adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "bpobj"			},
98adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "bpobj header"		},
99adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "SPA space map header"	},
100adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "SPA space map"		},
101adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "ZIL intent log"		},
102adb52d92SMatthew Ahrens 	{ DMU_BSWAP_DNODE,  TRUE,  FALSE,  "DMU dnode"			},
103adb52d92SMatthew Ahrens 	{ DMU_BSWAP_OBJSET, TRUE,  TRUE,   "DMU objset"			},
104adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  TRUE,   "DSL directory"		},
105adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL directory child map"	},
106adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL dataset snap map"	},
107adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL props"			},
108adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  TRUE,   "DSL dataset"		},
109adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZNODE,  TRUE,  FALSE,  "ZFS znode"			},
110adb52d92SMatthew Ahrens 	{ DMU_BSWAP_OLDACL, TRUE,  FALSE,  "ZFS V0 ACL"			},
111adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT8,  FALSE, FALSE,  "ZFS plain file"		},
112adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS directory"		},
113adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS master node"		},
114adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS delete queue"		},
115adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT8,  FALSE, FALSE,  "zvol object"		},
116adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "zvol prop"			},
117adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT8,  FALSE, FALSE,  "other uint8[]"		},
118adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, FALSE, FALSE,  "other uint64[]"		},
119adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "other ZAP"			},
120adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "persistent error log"	},
121adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT8,  TRUE,  FALSE,  "SPA history"		},
122adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "SPA history offsets"	},
123adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "Pool properties"		},
124adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL permissions"		},
125adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ACL,    TRUE,  FALSE,  "ZFS ACL"			},
126adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT8,  TRUE,  FALSE,  "ZFS SYSACL"			},
127adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT8,  TRUE,  FALSE,  "FUID table"			},
128adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "FUID table size"		},
129adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL dataset next clones"	},
130adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "scan work queue"		},
131adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS user/group used"	},
132adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS user/group quota"	},
133adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "snapshot refcount tags"	},
134adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "DDT ZAP algorithm"		},
135adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "DDT statistics"		},
136adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT8,  TRUE,  FALSE,  "System attributes"		},
137adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "SA master node"		},
138adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "SA attr registration"	},
139adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "SA attr layouts"		},
140adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "scan translations"		},
141adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT8,  FALSE, FALSE,  "deduplicated block"		},
142adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL deadlist map"		},
143adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  TRUE,   "DSL deadlist map hdr"	},
144adb52d92SMatthew Ahrens 	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL dir clones"		},
145adb52d92SMatthew Ahrens 	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "bpobj subobj"		}
146ad135b5dSChristopher Siden };
147ad135b5dSChristopher Siden 
148ad135b5dSChristopher Siden const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
149ad135b5dSChristopher Siden 	{	byteswap_uint8_array,	"uint8"		},
150ad135b5dSChristopher Siden 	{	byteswap_uint16_array,	"uint16"	},
151ad135b5dSChristopher Siden 	{	byteswap_uint32_array,	"uint32"	},
152ad135b5dSChristopher Siden 	{	byteswap_uint64_array,	"uint64"	},
153ad135b5dSChristopher Siden 	{	zap_byteswap,		"zap"		},
154ad135b5dSChristopher Siden 	{	dnode_buf_byteswap,	"dnode"		},
155ad135b5dSChristopher Siden 	{	dmu_objset_byteswap,	"objset"	},
156ad135b5dSChristopher Siden 	{	zfs_znode_byteswap,	"znode"		},
157ad135b5dSChristopher Siden 	{	zfs_oldacl_byteswap,	"oldacl"	},
158ad135b5dSChristopher Siden 	{	zfs_acl_byteswap,	"acl"		}
1593f9d6ad7SLin Ling };
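
/*
 * Lookup sketch (accessor names assumed from the dmu.h declarations):
 * given an object type "ot", the two tables above are indexed as
 *
 *	dmu_ot[ot].ot_name				(printable name)
 *	dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func	(swap routine)
 *
 * where DMU_OT_BYTESWAP() yields the DMU_BSWAP_* index for the type.
 */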
160fa9e4066Sahrens 
161fa9e4066Sahrens int
16279d72832SMatthew Ahrens dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
16379d72832SMatthew Ahrens     void *tag, dmu_buf_t **dbp)
16479d72832SMatthew Ahrens {
16579d72832SMatthew Ahrens 	uint64_t blkid;
16679d72832SMatthew Ahrens 	dmu_buf_impl_t *db;
16779d72832SMatthew Ahrens 
16879d72832SMatthew Ahrens 	blkid = dbuf_whichblock(dn, 0, offset);
16979d72832SMatthew Ahrens 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
17079d72832SMatthew Ahrens 	db = dbuf_hold(dn, blkid, tag);
17179d72832SMatthew Ahrens 	rw_exit(&dn->dn_struct_rwlock);
17279d72832SMatthew Ahrens 
17379d72832SMatthew Ahrens 	if (db == NULL) {
17479d72832SMatthew Ahrens 		*dbp = NULL;
17579d72832SMatthew Ahrens 		return (SET_ERROR(EIO));
17679d72832SMatthew Ahrens 	}
17779d72832SMatthew Ahrens 
17879d72832SMatthew Ahrens 	*dbp = &db->db;
17979d72832SMatthew Ahrens 	return (0);
18079d72832SMatthew Ahrens }

18179d72832SMatthew Ahrens int
1825d7b4d43SMatthew Ahrens dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
1835d7b4d43SMatthew Ahrens     void *tag, dmu_buf_t **dbp)
184fa9e4066Sahrens {
185fa9e4066Sahrens 	dnode_t *dn;
186fa9e4066Sahrens 	uint64_t blkid;
187fa9e4066Sahrens 	dmu_buf_impl_t *db;
188ea8dc4b6Seschrock 	int err;
189fa9e4066Sahrens 
190503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
191ea8dc4b6Seschrock 	if (err)
192ea8dc4b6Seschrock 		return (err);
193a2cdcdd2SPaul Dagnelie 	blkid = dbuf_whichblock(dn, 0, offset);
194fa9e4066Sahrens 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
195ea8dc4b6Seschrock 	db = dbuf_hold(dn, blkid, tag);
196fa9e4066Sahrens 	rw_exit(&dn->dn_struct_rwlock);
1975d7b4d43SMatthew Ahrens 	dnode_rele(dn, FTAG);
1985d7b4d43SMatthew Ahrens 
199ea8dc4b6Seschrock 	if (db == NULL) {
2005d7b4d43SMatthew Ahrens 		*dbp = NULL;
2015d7b4d43SMatthew Ahrens 		return (SET_ERROR(EIO));
2025d7b4d43SMatthew Ahrens 	}
2035d7b4d43SMatthew Ahrens 
2045d7b4d43SMatthew Ahrens 	*dbp = &db->db;
2055d7b4d43SMatthew Ahrens 	return (err);
2065d7b4d43SMatthew Ahrens }
2075d7b4d43SMatthew Ahrens 
2085d7b4d43SMatthew Ahrens int
20979d72832SMatthew Ahrens dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
21079d72832SMatthew Ahrens     void *tag, dmu_buf_t **dbp, int flags)
21179d72832SMatthew Ahrens {
21279d72832SMatthew Ahrens 	int err;
21379d72832SMatthew Ahrens 	int db_flags = DB_RF_CANFAIL;
21479d72832SMatthew Ahrens 
21579d72832SMatthew Ahrens 	if (flags & DMU_READ_NO_PREFETCH)
21679d72832SMatthew Ahrens 		db_flags |= DB_RF_NOPREFETCH;
21779d72832SMatthew Ahrens 
21879d72832SMatthew Ahrens 	err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
21979d72832SMatthew Ahrens 	if (err == 0) {
22079d72832SMatthew Ahrens 		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
22179d72832SMatthew Ahrens 		err = dbuf_read(db, NULL, db_flags);
22279d72832SMatthew Ahrens 		if (err != 0) {
22379d72832SMatthew Ahrens 			dbuf_rele(db, tag);
22479d72832SMatthew Ahrens 			*dbp = NULL;
22579d72832SMatthew Ahrens 		}
22679d72832SMatthew Ahrens 	}
22779d72832SMatthew Ahrens 
22879d72832SMatthew Ahrens 	return (err);
22979d72832SMatthew Ahrens }
23079d72832SMatthew Ahrens 
23179d72832SMatthew Ahrens int
2325d7b4d43SMatthew Ahrens dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
2335d7b4d43SMatthew Ahrens     void *tag, dmu_buf_t **dbp, int flags)
2345d7b4d43SMatthew Ahrens {
2355d7b4d43SMatthew Ahrens 	int err;
2365d7b4d43SMatthew Ahrens 	int db_flags = DB_RF_CANFAIL;
2375d7b4d43SMatthew Ahrens 
2385d7b4d43SMatthew Ahrens 	if (flags & DMU_READ_NO_PREFETCH)
2395d7b4d43SMatthew Ahrens 		db_flags |= DB_RF_NOPREFETCH;
2405d7b4d43SMatthew Ahrens 
2415d7b4d43SMatthew Ahrens 	err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
2425d7b4d43SMatthew Ahrens 	if (err == 0) {
2435d7b4d43SMatthew Ahrens 		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
24447cb52daSJeff Bonwick 		err = dbuf_read(db, NULL, db_flags);
2455d7b4d43SMatthew Ahrens 		if (err != 0) {
246ea8dc4b6Seschrock 			dbuf_rele(db, tag);
2475d7b4d43SMatthew Ahrens 			*dbp = NULL;
248ea8dc4b6Seschrock 		}
249fa9e4066Sahrens 	}
250fa9e4066Sahrens 
251ea8dc4b6Seschrock 	return (err);
252fa9e4066Sahrens }
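
/*
 * Illustrative caller sketch (hypothetical helper, not part of the
 * original interface set): hold one block, inspect its contents, and
 * release it.  Assumes <os, object> name an existing object.
 */
static int
dmu_buf_peek_example(objset_t *os, uint64_t object, uint64_t offset)
{
	dmu_buf_t *db;
	int err;

	err = dmu_buf_hold(os, object, offset, FTAG, &db,
	    DMU_READ_PREFETCH);
	if (err != 0)
		return (err);
	/* db->db_data now points at the read-in block contents. */
	dmu_buf_rele(db, FTAG);
	return (0);
}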
253fa9e4066Sahrens 
254fa9e4066Sahrens int
255fa9e4066Sahrens dmu_bonus_max(void)
256fa9e4066Sahrens {
257*54811da5SToomas Soome 	return (DN_OLD_MAX_BONUSLEN);
258fa9e4066Sahrens }
259fa9e4066Sahrens 
2601934e92fSmaybee int
261744947dcSTom Erickson dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
2621934e92fSmaybee {
263744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
264744947dcSTom Erickson 	dnode_t *dn;
265744947dcSTom Erickson 	int error;
2661934e92fSmaybee 
267744947dcSTom Erickson 	DB_DNODE_ENTER(db);
268744947dcSTom Erickson 	dn = DB_DNODE(db);
269744947dcSTom Erickson 
270744947dcSTom Erickson 	if (dn->dn_bonus != db) {
271be6fd75aSMatthew Ahrens 		error = SET_ERROR(EINVAL);
272744947dcSTom Erickson 	} else if (newsize < 0 || newsize > db_fake->db_size) {
273be6fd75aSMatthew Ahrens 		error = SET_ERROR(EINVAL);
274744947dcSTom Erickson 	} else {
2751934e92fSmaybee 		dnode_setbonuslen(dn, newsize, tx);
276744947dcSTom Erickson 		error = 0;
277744947dcSTom Erickson 	}
278744947dcSTom Erickson 
279744947dcSTom Erickson 	DB_DNODE_EXIT(db);
280744947dcSTom Erickson 	return (error);
2811934e92fSmaybee }
2821934e92fSmaybee 
2830a586ceaSMark Shellenbaum int
284744947dcSTom Erickson dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
2850a586ceaSMark Shellenbaum {
286744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
287744947dcSTom Erickson 	dnode_t *dn;
288744947dcSTom Erickson 	int error;
2890a586ceaSMark Shellenbaum 
290744947dcSTom Erickson 	DB_DNODE_ENTER(db);
291744947dcSTom Erickson 	dn = DB_DNODE(db);
2920a586ceaSMark Shellenbaum 
293ad135b5dSChristopher Siden 	if (!DMU_OT_IS_VALID(type)) {
294be6fd75aSMatthew Ahrens 		error = SET_ERROR(EINVAL);
295744947dcSTom Erickson 	} else if (dn->dn_bonus != db) {
296be6fd75aSMatthew Ahrens 		error = SET_ERROR(EINVAL);
297744947dcSTom Erickson 	} else {
2980a586ceaSMark Shellenbaum 		dnode_setbonus_type(dn, type, tx);
299744947dcSTom Erickson 		error = 0;
300744947dcSTom Erickson 	}
301744947dcSTom Erickson 
302744947dcSTom Erickson 	DB_DNODE_EXIT(db);
303744947dcSTom Erickson 	return (error);
304744947dcSTom Erickson }
305744947dcSTom Erickson 
306744947dcSTom Erickson dmu_object_type_t
307744947dcSTom Erickson dmu_get_bonustype(dmu_buf_t *db_fake)
308744947dcSTom Erickson {
309744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
310744947dcSTom Erickson 	dnode_t *dn;
311744947dcSTom Erickson 	dmu_object_type_t type;
312744947dcSTom Erickson 
313744947dcSTom Erickson 	DB_DNODE_ENTER(db);
314744947dcSTom Erickson 	dn = DB_DNODE(db);
315744947dcSTom Erickson 	type = dn->dn_bonustype;
316744947dcSTom Erickson 	DB_DNODE_EXIT(db);
317744947dcSTom Erickson 
318744947dcSTom Erickson 	return (type);
3190a586ceaSMark Shellenbaum }
3200a586ceaSMark Shellenbaum 
3210a586ceaSMark Shellenbaum int
3220a586ceaSMark Shellenbaum dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
3230a586ceaSMark Shellenbaum {
3240a586ceaSMark Shellenbaum 	dnode_t *dn;
3250a586ceaSMark Shellenbaum 	int error;
3260a586ceaSMark Shellenbaum 
3270a586ceaSMark Shellenbaum 	error = dnode_hold(os, object, FTAG, &dn);
	if (error != 0)
		return (error);
3280a586ceaSMark Shellenbaum 	dbuf_rm_spill(dn, tx);
32906e0070dSMark Shellenbaum 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
33006e0070dSMark Shellenbaum 	dnode_rm_spill(dn, tx);
33106e0070dSMark Shellenbaum 	rw_exit(&dn->dn_struct_rwlock);
3320a586ceaSMark Shellenbaum 	dnode_rele(dn, FTAG);
3330a586ceaSMark Shellenbaum 	return (error);
3340a586ceaSMark Shellenbaum }
3350a586ceaSMark Shellenbaum 
336fa9e4066Sahrens /*
337ea8dc4b6Seschrock  * returns ENOENT, EIO, or 0.
338fa9e4066Sahrens  */
339ea8dc4b6Seschrock int
340ea8dc4b6Seschrock dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
341fa9e4066Sahrens {
342ea8dc4b6Seschrock 	dnode_t *dn;
343fa9e4066Sahrens 	dmu_buf_impl_t *db;
3441934e92fSmaybee 	int error;
345fa9e4066Sahrens 
346503ad85cSMatthew Ahrens 	error = dnode_hold(os, object, FTAG, &dn);
3471934e92fSmaybee 	if (error)
3481934e92fSmaybee 		return (error);
349fa9e4066Sahrens 
350fa9e4066Sahrens 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
351ea8dc4b6Seschrock 	if (dn->dn_bonus == NULL) {
352fa9e4066Sahrens 		rw_exit(&dn->dn_struct_rwlock);
353ea8dc4b6Seschrock 		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
354ea8dc4b6Seschrock 		if (dn->dn_bonus == NULL)
3551934e92fSmaybee 			dbuf_create_bonus(dn);
356fa9e4066Sahrens 	}
357ea8dc4b6Seschrock 	db = dn->dn_bonus;
3581934e92fSmaybee 
3591934e92fSmaybee 	/* as long as the bonus buf is held, the dnode will be held */
360744947dcSTom Erickson 	if (refcount_add(&db->db_holds, tag) == 1) {
3611934e92fSmaybee 		VERIFY(dnode_add_ref(dn, db));
362640c1670SJosef 'Jeff' Sipek 		atomic_inc_32(&dn->dn_dbufs_count);
363744947dcSTom Erickson 	}
364744947dcSTom Erickson 
365744947dcSTom Erickson 	/*
366744947dcSTom Erickson 	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
367744947dcSTom Erickson 	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
368744947dcSTom Erickson 	 * a dnode hold for every dbuf.
369744947dcSTom Erickson 	 */
370744947dcSTom Erickson 	rw_exit(&dn->dn_struct_rwlock);
3711934e92fSmaybee 
372fa9e4066Sahrens 	dnode_rele(dn, FTAG);
373ea8dc4b6Seschrock 
37447cb52daSJeff Bonwick 	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));
375ea8dc4b6Seschrock 
376ea8dc4b6Seschrock 	*dbp = &db->db;
377ea8dc4b6Seschrock 	return (0);
378fa9e4066Sahrens }
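
/*
 * Illustrative sketch (hypothetical helper): copy out up to "len" bytes
 * of an object's bonus buffer.  The bonus dbuf hold also holds the
 * dnode, so dmu_buf_rele() is the only cleanup required.
 */
static int
dmu_bonus_copy_example(objset_t *os, uint64_t object, void *buf, int len)
{
	dmu_buf_t *db;
	int err;

	err = dmu_bonus_hold(os, object, FTAG, &db);
	if (err != 0)
		return (err);
	if (len > db->db_size)
		len = (int)db->db_size;
	bcopy(db->db_data, buf, len);
	dmu_buf_rele(db, FTAG);
	return (0);
}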
379fa9e4066Sahrens 
38013506d1eSmaybee /*
3810a586ceaSMark Shellenbaum  * returns ENOENT, EIO, or 0.
3820a586ceaSMark Shellenbaum  *
3830a586ceaSMark Shellenbaum  * This interface will allocate a blank spill dbuf when a spill blk
3840a586ceaSMark Shellenbaum  * doesn't already exist on the dnode.
3850a586ceaSMark Shellenbaum  *
3860a586ceaSMark Shellenbaum  * if you only want to find an already existing spill db, then
3870a586ceaSMark Shellenbaum  * dmu_spill_hold_existing() should be used.
3880a586ceaSMark Shellenbaum  */
3890a586ceaSMark Shellenbaum int
3900a586ceaSMark Shellenbaum dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
3910a586ceaSMark Shellenbaum {
3920a586ceaSMark Shellenbaum 	dmu_buf_impl_t *db = NULL;
3930a586ceaSMark Shellenbaum 	int err;
3940a586ceaSMark Shellenbaum 
3950a586ceaSMark Shellenbaum 	if ((flags & DB_RF_HAVESTRUCT) == 0)
3960a586ceaSMark Shellenbaum 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
3970a586ceaSMark Shellenbaum 
3980a586ceaSMark Shellenbaum 	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
3990a586ceaSMark Shellenbaum 
4000a586ceaSMark Shellenbaum 	if ((flags & DB_RF_HAVESTRUCT) == 0)
4010a586ceaSMark Shellenbaum 		rw_exit(&dn->dn_struct_rwlock);
4020a586ceaSMark Shellenbaum 
4030a586ceaSMark Shellenbaum 	ASSERT(db != NULL);
4041d8ccc7bSMark Shellenbaum 	err = dbuf_read(db, NULL, flags);
4051d8ccc7bSMark Shellenbaum 	if (err == 0)
4060a586ceaSMark Shellenbaum 		*dbp = &db->db;
4071d8ccc7bSMark Shellenbaum 	else
4081d8ccc7bSMark Shellenbaum 		dbuf_rele(db, tag);
4090a586ceaSMark Shellenbaum 	return (err);
4100a586ceaSMark Shellenbaum }
4110a586ceaSMark Shellenbaum 
4120a586ceaSMark Shellenbaum int
4130a586ceaSMark Shellenbaum dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
4140a586ceaSMark Shellenbaum {
415744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
416744947dcSTom Erickson 	dnode_t *dn;
4170a586ceaSMark Shellenbaum 	int err;
4180a586ceaSMark Shellenbaum 
419744947dcSTom Erickson 	DB_DNODE_ENTER(db);
420744947dcSTom Erickson 	dn = DB_DNODE(db);
421744947dcSTom Erickson 
422744947dcSTom Erickson 	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
423be6fd75aSMatthew Ahrens 		err = SET_ERROR(EINVAL);
424744947dcSTom Erickson 	} else {
4250a586ceaSMark Shellenbaum 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
4260a586ceaSMark Shellenbaum 
4270a586ceaSMark Shellenbaum 		if (!dn->dn_have_spill) {
428be6fd75aSMatthew Ahrens 			err = SET_ERROR(ENOENT);
429744947dcSTom Erickson 		} else {
4301d8ccc7bSMark Shellenbaum 			err = dmu_spill_hold_by_dnode(dn,
4311d8ccc7bSMark Shellenbaum 			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
432744947dcSTom Erickson 		}
433744947dcSTom Erickson 
4340a586ceaSMark Shellenbaum 		rw_exit(&dn->dn_struct_rwlock);
435744947dcSTom Erickson 	}
436744947dcSTom Erickson 
437744947dcSTom Erickson 	DB_DNODE_EXIT(db);
4380a586ceaSMark Shellenbaum 	return (err);
4390a586ceaSMark Shellenbaum }
4400a586ceaSMark Shellenbaum 
4410a586ceaSMark Shellenbaum int
4420a586ceaSMark Shellenbaum dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
4430a586ceaSMark Shellenbaum {
444744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
445744947dcSTom Erickson 	dnode_t *dn;
446744947dcSTom Erickson 	int err;
447744947dcSTom Erickson 
448744947dcSTom Erickson 	DB_DNODE_ENTER(db);
449744947dcSTom Erickson 	dn = DB_DNODE(db);
450744947dcSTom Erickson 	err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
451744947dcSTom Erickson 	DB_DNODE_EXIT(db);
452744947dcSTom Erickson 
453744947dcSTom Erickson 	return (err);
4540a586ceaSMark Shellenbaum }
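
/*
 * Illustrative call (assumed SA-aware caller): with a held bonus dbuf,
 * fetch the spill block only if one already exists --
 *
 *	err = dmu_spill_hold_existing(bonus, FTAG, &spill_db);
 *
 * where ENOENT means the object has no spill block, whereas the
 * dmu_spill_hold_by_dnode() path above would create a blank spill dbuf.
 */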
4550a586ceaSMark Shellenbaum 
4560a586ceaSMark Shellenbaum /*
45713506d1eSmaybee  * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
45813506d1eSmaybee  * to take a held dnode rather than <os, object> -- the lookup is wasteful,
45913506d1eSmaybee  * and can induce severe lock contention when writing to several files
46013506d1eSmaybee  * whose dnodes are in the same block.
46113506d1eSmaybee  */
4628dfe5547SRichard Yao int
4637bfdf011SNeil Perrin dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
464cf6106c8SMatthew Ahrens     boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
465fa9e4066Sahrens {
466fa9e4066Sahrens 	dmu_buf_t **dbp;
467fa9e4066Sahrens 	uint64_t blkid, nblks, i;
4687bfdf011SNeil Perrin 	uint32_t dbuf_flags;
469ea8dc4b6Seschrock 	int err;
470ea8dc4b6Seschrock 	zio_t *zio;
471ea8dc4b6Seschrock 
472ea8dc4b6Seschrock 	ASSERT(length <= DMU_MAX_ACCESS);
473fa9e4066Sahrens 
474cf6106c8SMatthew Ahrens 	/*
475cf6106c8SMatthew Ahrens 	 * Note: We directly notify the prefetch code of this read, so that
476cf6106c8SMatthew Ahrens 	 * we can tell it about the multi-block read.  dbuf_read() only knows
477cf6106c8SMatthew Ahrens 	 * about the one block it is accessing.
478cf6106c8SMatthew Ahrens 	 */
479cf6106c8SMatthew Ahrens 	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
480cf6106c8SMatthew Ahrens 	    DB_RF_NOPREFETCH;
481ea8dc4b6Seschrock 
482fa9e4066Sahrens 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
483fa9e4066Sahrens 	if (dn->dn_datablkshift) {
484fa9e4066Sahrens 		int blkshift = dn->dn_datablkshift;
485fa9e4066Sahrens 		nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
486fa9e4066Sahrens 		    P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
487fa9e4066Sahrens 	} else {
4880125049cSahrens 		if (offset + length > dn->dn_datablksz) {
4890125049cSahrens 			zfs_panic_recover("zfs: accessing past end of object "
4900125049cSahrens 			    "%llx/%llx (size=%u access=%llu+%llu)",
4910125049cSahrens 			    (longlong_t)dn->dn_objset->
4920125049cSahrens 			    os_dsl_dataset->ds_object,
4930125049cSahrens 			    (longlong_t)dn->dn_object, dn->dn_datablksz,
4940125049cSahrens 			    (longlong_t)offset, (longlong_t)length);
495c87b8fc5SMark J Musante 			rw_exit(&dn->dn_struct_rwlock);
496be6fd75aSMatthew Ahrens 			return (SET_ERROR(EIO));
4970125049cSahrens 		}
498fa9e4066Sahrens 		nblks = 1;
499fa9e4066Sahrens 	}
500ea8dc4b6Seschrock 	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);
501fa9e4066Sahrens 
502e14bb325SJeff Bonwick 	zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
503a2cdcdd2SPaul Dagnelie 	blkid = dbuf_whichblock(dn, 0, offset);
504fa9e4066Sahrens 	for (i = 0; i < nblks; i++) {
505ea8dc4b6Seschrock 		dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
506ea8dc4b6Seschrock 		if (db == NULL) {
507ea8dc4b6Seschrock 			rw_exit(&dn->dn_struct_rwlock);
508ea8dc4b6Seschrock 			dmu_buf_rele_array(dbp, nblks, tag);
509ea8dc4b6Seschrock 			zio_nowait(zio);
510be6fd75aSMatthew Ahrens 			return (SET_ERROR(EIO));
511ea8dc4b6Seschrock 		}
512cf6106c8SMatthew Ahrens 
513ea8dc4b6Seschrock 		/* initiate async i/o */
514cf6106c8SMatthew Ahrens 		if (read)
5157bfdf011SNeil Perrin 			(void) dbuf_read(db, zio, dbuf_flags);
516ea8dc4b6Seschrock 		dbp[i] = &db->db;
517fa9e4066Sahrens 	}
518cf6106c8SMatthew Ahrens 
519cb92f413SAlexander Motin 	if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
520cb92f413SAlexander Motin 	    DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) {
521cb92f413SAlexander Motin 		dmu_zfetch(&dn->dn_zfetch, blkid, nblks,
522cb92f413SAlexander Motin 		    read && DNODE_IS_CACHEABLE(dn));
523cf6106c8SMatthew Ahrens 	}
524fa9e4066Sahrens 	rw_exit(&dn->dn_struct_rwlock);
525fa9e4066Sahrens 
526ea8dc4b6Seschrock 	/* wait for async i/o */
527ea8dc4b6Seschrock 	err = zio_wait(zio);
528ea8dc4b6Seschrock 	if (err) {
529ea8dc4b6Seschrock 		dmu_buf_rele_array(dbp, nblks, tag);
530ea8dc4b6Seschrock 		return (err);
531ea8dc4b6Seschrock 	}
532ea8dc4b6Seschrock 
533ea8dc4b6Seschrock 	/* wait for other io to complete */
534ea8dc4b6Seschrock 	if (read) {
535ea8dc4b6Seschrock 		for (i = 0; i < nblks; i++) {
536ea8dc4b6Seschrock 			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
537ea8dc4b6Seschrock 			mutex_enter(&db->db_mtx);
538ea8dc4b6Seschrock 			while (db->db_state == DB_READ ||
539ea8dc4b6Seschrock 			    db->db_state == DB_FILL)
540ea8dc4b6Seschrock 				cv_wait(&db->db_changed, &db->db_mtx);
541ea8dc4b6Seschrock 			if (db->db_state == DB_UNCACHED)
542be6fd75aSMatthew Ahrens 				err = SET_ERROR(EIO);
543ea8dc4b6Seschrock 			mutex_exit(&db->db_mtx);
544ea8dc4b6Seschrock 			if (err) {
545ea8dc4b6Seschrock 				dmu_buf_rele_array(dbp, nblks, tag);
546ea8dc4b6Seschrock 				return (err);
547ea8dc4b6Seschrock 			}
548ea8dc4b6Seschrock 		}
549ea8dc4b6Seschrock 	}
550ea8dc4b6Seschrock 
551ea8dc4b6Seschrock 	*numbufsp = nblks;
552ea8dc4b6Seschrock 	*dbpp = dbp;
553ea8dc4b6Seschrock 	return (0);
554fa9e4066Sahrens }
555fa9e4066Sahrens 
556a2eea2e1Sahrens static int
55713506d1eSmaybee dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
55813506d1eSmaybee     uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
55913506d1eSmaybee {
56013506d1eSmaybee 	dnode_t *dn;
56113506d1eSmaybee 	int err;
56213506d1eSmaybee 
563503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
56413506d1eSmaybee 	if (err)
56513506d1eSmaybee 		return (err);
56613506d1eSmaybee 
56713506d1eSmaybee 	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
5687bfdf011SNeil Perrin 	    numbufsp, dbpp, DMU_READ_PREFETCH);
56913506d1eSmaybee 
57013506d1eSmaybee 	dnode_rele(dn, FTAG);
57113506d1eSmaybee 
57213506d1eSmaybee 	return (err);
57313506d1eSmaybee }
57413506d1eSmaybee 
57513506d1eSmaybee int
576744947dcSTom Erickson dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
577cf6106c8SMatthew Ahrens     uint64_t length, boolean_t read, void *tag, int *numbufsp,
578cf6106c8SMatthew Ahrens     dmu_buf_t ***dbpp)
57913506d1eSmaybee {
580744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
581744947dcSTom Erickson 	dnode_t *dn;
58213506d1eSmaybee 	int err;
58313506d1eSmaybee 
584744947dcSTom Erickson 	DB_DNODE_ENTER(db);
585744947dcSTom Erickson 	dn = DB_DNODE(db);
58613506d1eSmaybee 	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
5877bfdf011SNeil Perrin 	    numbufsp, dbpp, DMU_READ_PREFETCH);
588744947dcSTom Erickson 	DB_DNODE_EXIT(db);
58913506d1eSmaybee 
59013506d1eSmaybee 	return (err);
59113506d1eSmaybee }
59213506d1eSmaybee 
593fa9e4066Sahrens void
594ea8dc4b6Seschrock dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
595fa9e4066Sahrens {
596fa9e4066Sahrens 	int i;
597fa9e4066Sahrens 	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
598fa9e4066Sahrens 
599fa9e4066Sahrens 	if (numbufs == 0)
600fa9e4066Sahrens 		return;
601fa9e4066Sahrens 
602ea8dc4b6Seschrock 	for (i = 0; i < numbufs; i++) {
603ea8dc4b6Seschrock 		if (dbp[i])
604ea8dc4b6Seschrock 			dbuf_rele(dbp[i], tag);
605ea8dc4b6Seschrock 	}
606fa9e4066Sahrens 
607fa9e4066Sahrens 	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
608fa9e4066Sahrens }
609fa9e4066Sahrens 
61069962b56SMatthew Ahrens /*
611a2cdcdd2SPaul Dagnelie  * Issue prefetch i/os for the given blocks.  If level is greater than 0, the
612a2cdcdd2SPaul Dagnelie  * indirect blocks prefetched will be those that point to the blocks containing
613a2cdcdd2SPaul Dagnelie  * the data starting at offset, and continuing to offset + len.
61469962b56SMatthew Ahrens  *
615a2cdcdd2SPaul Dagnelie  * Note that if the indirect blocks above the blocks being prefetched are not in
616a2cdcdd2SPaul Dagnelie  * cache, they will be asynchronously read in.
61769962b56SMatthew Ahrens  */
618fa9e4066Sahrens void
619a2cdcdd2SPaul Dagnelie dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
620a2cdcdd2SPaul Dagnelie     uint64_t len, zio_priority_t pri)
621fa9e4066Sahrens {
622fa9e4066Sahrens 	dnode_t *dn;
623fa9e4066Sahrens 	uint64_t blkid;
62469962b56SMatthew Ahrens 	int nblks, err;
625fa9e4066Sahrens 
626fa9e4066Sahrens 	if (len == 0) {  /* they're interested in the bonus buffer */
627744947dcSTom Erickson 		dn = DMU_META_DNODE(os);
628fa9e4066Sahrens 
629fa9e4066Sahrens 		if (object == 0 || object >= DN_MAX_OBJECT)
630fa9e4066Sahrens 			return;
631fa9e4066Sahrens 
632fa9e4066Sahrens 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
633a2cdcdd2SPaul Dagnelie 		blkid = dbuf_whichblock(dn, level,
634a2cdcdd2SPaul Dagnelie 		    object * sizeof (dnode_phys_t));
635a2cdcdd2SPaul Dagnelie 		dbuf_prefetch(dn, level, blkid, pri, 0);
636fa9e4066Sahrens 		rw_exit(&dn->dn_struct_rwlock);
637fa9e4066Sahrens 		return;
638fa9e4066Sahrens 	}
639fa9e4066Sahrens 
640fa9e4066Sahrens 	/*
641fa9e4066Sahrens 	 * XXX - Note, if the dnode for the requested object is not
642fa9e4066Sahrens 	 * already cached, we will do a *synchronous* read in the
643fa9e4066Sahrens 	 * dnode_hold() call.  The same is true for any indirects.
644fa9e4066Sahrens 	 */
645503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
646ea8dc4b6Seschrock 	if (err != 0)
647fa9e4066Sahrens 		return;
648fa9e4066Sahrens 
649fa9e4066Sahrens 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
650a2cdcdd2SPaul Dagnelie 	/*
651a2cdcdd2SPaul Dagnelie 	 * offset + len - 1 is the last byte we want to prefetch for, and offset
652a2cdcdd2SPaul Dagnelie 	 * is the first.  Then dbuf_whichblock(dn, level, offset + len - 1) is the
653a2cdcdd2SPaul Dagnelie 	 * last block we want to prefetch, and dbuf_whichblock(dn, level,
654a2cdcdd2SPaul Dagnelie 	 * offset) is the first.  Then the number we need to prefetch is the
655a2cdcdd2SPaul Dagnelie 	 * last - first + 1.
656a2cdcdd2SPaul Dagnelie 	 */
657a2cdcdd2SPaul Dagnelie 	if (level > 0 || dn->dn_datablkshift != 0) {
658a2cdcdd2SPaul Dagnelie 		nblks = dbuf_whichblock(dn, level, offset + len - 1) -
659a2cdcdd2SPaul Dagnelie 		    dbuf_whichblock(dn, level, offset) + 1;
660fa9e4066Sahrens 	} else {
661fa9e4066Sahrens 		nblks = (offset < dn->dn_datablksz);
662fa9e4066Sahrens 	}
663fa9e4066Sahrens 
664fa9e4066Sahrens 	if (nblks != 0) {
665a2cdcdd2SPaul Dagnelie 		blkid = dbuf_whichblock(dn, level, offset);
66669962b56SMatthew Ahrens 		for (int i = 0; i < nblks; i++)
667a2cdcdd2SPaul Dagnelie 			dbuf_prefetch(dn, level, blkid + i, pri, 0);
668fa9e4066Sahrens 	}
669fa9e4066Sahrens 
670fa9e4066Sahrens 	rw_exit(&dn->dn_struct_rwlock);
671fa9e4066Sahrens 
672fa9e4066Sahrens 	dnode_rele(dn, FTAG);
673fa9e4066Sahrens }
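
/*
 * Illustrative call (assumed caller context):
 *
 *	dmu_prefetch(os, object, 0, off, 1ULL << 20, ZIO_PRIORITY_ASYNC_READ);
 *
 * warms 1 MiB of level-0 data ahead of a sequential reader, while passing
 * level = 1 instead prefetches the L1 indirects covering the same range.
 */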
674fa9e4066Sahrens 
67576256205SMark Maybee /*
67676256205SMark Maybee  * Get the next "chunk" of file data to free.  We traverse the file from
67776256205SMark Maybee  * the end so that the file gets shorter over time (if we crash in the
67876256205SMark Maybee  * middle, this will leave us in a better state).  We find allocated file
67976256205SMark Maybee  * data by simply searching the allocated level 1 indirects.
680713d6c20SMatthew Ahrens  *
681713d6c20SMatthew Ahrens  * On input, *start should be the first offset that does not need to be
682713d6c20SMatthew Ahrens  * freed (e.g. "offset + length").  On return, *start will be the first
683713d6c20SMatthew Ahrens  * offset that should be freed.
68476256205SMark Maybee  */
685cdb0ab79Smaybee static int
686713d6c20SMatthew Ahrens get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
687cdb0ab79Smaybee {
688713d6c20SMatthew Ahrens 	uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
689713d6c20SMatthew Ahrens 	/* bytes of data covered by a level-1 indirect block */
69076256205SMark Maybee 	uint64_t iblkrange =
6911c8564a7SMark Maybee 	    dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
692cdb0ab79Smaybee 
693713d6c20SMatthew Ahrens 	ASSERT3U(minimum, <=, *start);
694cdb0ab79Smaybee 
695713d6c20SMatthew Ahrens 	if (*start - minimum <= iblkrange * maxblks) {
696713d6c20SMatthew Ahrens 		*start = minimum;
697cdb0ab79Smaybee 		return (0);
698cdb0ab79Smaybee 	}
69976256205SMark Maybee 	ASSERT(ISP2(iblkrange));
700cdb0ab79Smaybee 
701713d6c20SMatthew Ahrens 	for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
7021c8564a7SMark Maybee 		int err;
703cdb0ab79Smaybee 
704713d6c20SMatthew Ahrens 		/*
705713d6c20SMatthew Ahrens 		 * dnode_next_offset(BACKWARDS) will find an allocated L1
706713d6c20SMatthew Ahrens 		 * indirect block at or before the input offset.  We must
707713d6c20SMatthew Ahrens 		 * decrement *start so that it is at the end of the region
708713d6c20SMatthew Ahrens 		 * to search.
709713d6c20SMatthew Ahrens 		 */
710713d6c20SMatthew Ahrens 		(*start)--;
711cdb0ab79Smaybee 		err = dnode_next_offset(dn,
71276256205SMark Maybee 		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);
713cdb0ab79Smaybee 
714713d6c20SMatthew Ahrens 		/* if there are no indirect blocks before start, we are done */
71576256205SMark Maybee 		if (err == ESRCH) {
716713d6c20SMatthew Ahrens 			*start = minimum;
717713d6c20SMatthew Ahrens 			break;
718713d6c20SMatthew Ahrens 		} else if (err != 0) {
719cdb0ab79Smaybee 			return (err);
72076256205SMark Maybee 		}
721cdb0ab79Smaybee 
722713d6c20SMatthew Ahrens 		/* set start to the beginning of this L1 indirect */
72376256205SMark Maybee 		*start = P2ALIGN(*start, iblkrange);
724cdb0ab79Smaybee 	}
725713d6c20SMatthew Ahrens 	if (*start < minimum)
726713d6c20SMatthew Ahrens 		*start = minimum;
727cdb0ab79Smaybee 	return (0);
728cdb0ab79Smaybee }
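
/*
 * Worked example for iblkrange above (assumed typical geometry): with
 * dn_datablksz = 128 KiB and dn_indblkshift = 17, EPB(17, SPA_BLKPTRSHIFT)
 * is 2^(17 - 7) = 1024 block pointers per L1 indirect, so each L1 spans
 * 128 KiB * 1024 = 128 MiB of file data.
 */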
729cdb0ab79Smaybee 
730eb721827SAlek Pinchuk /*
731eb721827SAlek Pinchuk  * If this objset is of type DMU_OST_ZFS, return true if the vfs's unmounted
732eb721827SAlek Pinchuk  * flag is set; otherwise return false.
733eb721827SAlek Pinchuk  * Used below in dmu_free_long_range_impl() to enable an abort when unmounting.
734eb721827SAlek Pinchuk  */
735eb721827SAlek Pinchuk /*ARGSUSED*/
736eb721827SAlek Pinchuk static boolean_t
737eb721827SAlek Pinchuk dmu_objset_zfs_unmounting(objset_t *os)
738eb721827SAlek Pinchuk {
739eb721827SAlek Pinchuk #ifdef _KERNEL
740eb721827SAlek Pinchuk 	if (dmu_objset_type(os) == DMU_OST_ZFS)
741eb721827SAlek Pinchuk 		return (zfs_get_vfs_flag_unmounted(os));
742eb721827SAlek Pinchuk #endif
743eb721827SAlek Pinchuk 	return (B_FALSE);
744eb721827SAlek Pinchuk }
745eb721827SAlek Pinchuk 
746cdb0ab79Smaybee static int
747cdb0ab79Smaybee dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
748713d6c20SMatthew Ahrens     uint64_t length)
749cdb0ab79Smaybee {
750713d6c20SMatthew Ahrens 	uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
751713d6c20SMatthew Ahrens 	int err;
752ff5177eeSAlek Pinchuk 	uint64_t dirty_frees_threshold;
753ff5177eeSAlek Pinchuk 	dsl_pool_t *dp = dmu_objset_pool(os);
754cdb0ab79Smaybee 
755713d6c20SMatthew Ahrens 	if (offset >= object_size)
756cdb0ab79Smaybee 		return (0);
757cdb0ab79Smaybee 
758ff5177eeSAlek Pinchuk 	if (zfs_per_txg_dirty_frees_percent <= 100)
759ff5177eeSAlek Pinchuk 		dirty_frees_threshold =
760ff5177eeSAlek Pinchuk 		    zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
761ff5177eeSAlek Pinchuk 	else
762ff5177eeSAlek Pinchuk 		dirty_frees_threshold = zfs_dirty_data_max / 4;
763ff5177eeSAlek Pinchuk 
764713d6c20SMatthew Ahrens 	if (length == DMU_OBJECT_END || offset + length > object_size)
765713d6c20SMatthew Ahrens 		length = object_size - offset;
766713d6c20SMatthew Ahrens 
767713d6c20SMatthew Ahrens 	while (length != 0) {
768ff5177eeSAlek Pinchuk 		uint64_t chunk_end, chunk_begin, chunk_len;
769ff5177eeSAlek Pinchuk 		uint64_t long_free_dirty_all_txgs = 0;
770ff5177eeSAlek Pinchuk 		dmu_tx_t *tx;
771713d6c20SMatthew Ahrens 
772eb721827SAlek Pinchuk 		if (dmu_objset_zfs_unmounting(dn->dn_objset))
773eb721827SAlek Pinchuk 			return (SET_ERROR(EINTR));
774eb721827SAlek Pinchuk 
775713d6c20SMatthew Ahrens 		chunk_end = chunk_begin = offset + length;
776713d6c20SMatthew Ahrens 
777713d6c20SMatthew Ahrens 		/* move chunk_begin backwards to the beginning of this chunk */
778713d6c20SMatthew Ahrens 		err = get_next_chunk(dn, &chunk_begin, offset);
779cdb0ab79Smaybee 		if (err)
780cdb0ab79Smaybee 			return (err);
781713d6c20SMatthew Ahrens 		ASSERT3U(chunk_begin, >=, offset);
782713d6c20SMatthew Ahrens 		ASSERT3U(chunk_begin, <=, chunk_end);
783cdb0ab79Smaybee 
784ff5177eeSAlek Pinchuk 		chunk_len = chunk_end - chunk_begin;
785ff5177eeSAlek Pinchuk 
786ff5177eeSAlek Pinchuk 		mutex_enter(&dp->dp_lock);
787ff5177eeSAlek Pinchuk 		for (int t = 0; t < TXG_SIZE; t++) {
788ff5177eeSAlek Pinchuk 			long_free_dirty_all_txgs +=
789ff5177eeSAlek Pinchuk 			    dp->dp_long_free_dirty_pertxg[t];
790ff5177eeSAlek Pinchuk 		}
791ff5177eeSAlek Pinchuk 		mutex_exit(&dp->dp_lock);
792ff5177eeSAlek Pinchuk 
793ff5177eeSAlek Pinchuk 		/*
794ff5177eeSAlek Pinchuk 		 * To avoid filling up a TXG with just frees, wait for
795ff5177eeSAlek Pinchuk 		 * the next TXG to open before freeing more chunks if
796ff5177eeSAlek Pinchuk 		 * we have reached the threshold of frees.
797ff5177eeSAlek Pinchuk 		 */
798ff5177eeSAlek Pinchuk 		if (dirty_frees_threshold != 0 &&
799ff5177eeSAlek Pinchuk 		    long_free_dirty_all_txgs >= dirty_frees_threshold) {
800ff5177eeSAlek Pinchuk 			txg_wait_open(dp, 0);
801ff5177eeSAlek Pinchuk 			continue;
802ff5177eeSAlek Pinchuk 		}
803ff5177eeSAlek Pinchuk 
804ff5177eeSAlek Pinchuk 		tx = dmu_tx_create(os);
805ff5177eeSAlek Pinchuk 		dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
8064bb73804SMatthew Ahrens 
8074bb73804SMatthew Ahrens 		/*
8084bb73804SMatthew Ahrens 		 * Mark this transaction as typically resulting in a net
8094bb73804SMatthew Ahrens 		 * reduction in space used.
8104bb73804SMatthew Ahrens 		 */
8114bb73804SMatthew Ahrens 		dmu_tx_mark_netfree(tx);
812cdb0ab79Smaybee 		err = dmu_tx_assign(tx, TXG_WAIT);
813cdb0ab79Smaybee 		if (err) {
814cdb0ab79Smaybee 			dmu_tx_abort(tx);
815cdb0ab79Smaybee 			return (err);
816cdb0ab79Smaybee 		}
817ff5177eeSAlek Pinchuk 
818ff5177eeSAlek Pinchuk 		mutex_enter(&dp->dp_lock);
819ff5177eeSAlek Pinchuk 		dp->dp_long_free_dirty_pertxg[dmu_tx_get_txg(tx) & TXG_MASK] +=
820ff5177eeSAlek Pinchuk 		    chunk_len;
821ff5177eeSAlek Pinchuk 		mutex_exit(&dp->dp_lock);
822ff5177eeSAlek Pinchuk 		DTRACE_PROBE3(free__long__range,
823ff5177eeSAlek Pinchuk 		    uint64_t, long_free_dirty_all_txgs, uint64_t, chunk_len,
824ff5177eeSAlek Pinchuk 		    uint64_t, dmu_tx_get_txg(tx));
825ff5177eeSAlek Pinchuk 		dnode_free_range(dn, chunk_begin, chunk_len, tx);
826cdb0ab79Smaybee 		dmu_tx_commit(tx);
827713d6c20SMatthew Ahrens 
828ff5177eeSAlek Pinchuk 		length -= chunk_len;
829cdb0ab79Smaybee 	}
830cdb0ab79Smaybee 	return (0);
831cdb0ab79Smaybee }
832cdb0ab79Smaybee 
833cdb0ab79Smaybee int
834cdb0ab79Smaybee dmu_free_long_range(objset_t *os, uint64_t object,
835cdb0ab79Smaybee     uint64_t offset, uint64_t length)
836cdb0ab79Smaybee {
837cdb0ab79Smaybee 	dnode_t *dn;
838cdb0ab79Smaybee 	int err;
839cdb0ab79Smaybee 
840503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
841cdb0ab79Smaybee 	if (err != 0)
842cdb0ab79Smaybee 		return (err);
843713d6c20SMatthew Ahrens 	err = dmu_free_long_range_impl(os, dn, offset, length);
8445253393bSMatthew Ahrens 
8455253393bSMatthew Ahrens 	/*
8465253393bSMatthew Ahrens 	 * It is important to zero out the maxblkid when freeing the entire
8475253393bSMatthew Ahrens 	 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
8485253393bSMatthew Ahrens 	 * will take the fast path, and (b) dnode_reallocate() can verify
8495253393bSMatthew Ahrens 	 * that the entire file has been freed.
8505253393bSMatthew Ahrens 	 */
85143466aaeSMax Grossman 	if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
8525253393bSMatthew Ahrens 		dn->dn_maxblkid = 0;
8535253393bSMatthew Ahrens 
854cdb0ab79Smaybee 	dnode_rele(dn, FTAG);
855cdb0ab79Smaybee 	return (err);
856cdb0ab79Smaybee }
857cdb0ab79Smaybee 
858cdb0ab79Smaybee int
859713d6c20SMatthew Ahrens dmu_free_long_object(objset_t *os, uint64_t object)
860cdb0ab79Smaybee {
861cdb0ab79Smaybee 	dmu_tx_t *tx;
862cdb0ab79Smaybee 	int err;
863cdb0ab79Smaybee 
864713d6c20SMatthew Ahrens 	err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
865cdb0ab79Smaybee 	if (err != 0)
866cdb0ab79Smaybee 		return (err);
867713d6c20SMatthew Ahrens 
868cdb0ab79Smaybee 	tx = dmu_tx_create(os);
869cdb0ab79Smaybee 	dmu_tx_hold_bonus(tx, object);
870713d6c20SMatthew Ahrens 	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
8714bb73804SMatthew Ahrens 	dmu_tx_mark_netfree(tx);
872cdb0ab79Smaybee 	err = dmu_tx_assign(tx, TXG_WAIT);
873cdb0ab79Smaybee 	if (err == 0) {
874713d6c20SMatthew Ahrens 		err = dmu_object_free(os, object, tx);
875cdb0ab79Smaybee 		dmu_tx_commit(tx);
876cdb0ab79Smaybee 	} else {
877cdb0ab79Smaybee 		dmu_tx_abort(tx);
878cdb0ab79Smaybee 	}
879713d6c20SMatthew Ahrens 
880cdb0ab79Smaybee 	return (err);
881cdb0ab79Smaybee }
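
/*
 * Illustrative call (assumed VOP-level caller): truncating a file to
 * "newlen" bytes frees everything past that point in TXG-sized chunks --
 *
 *	err = dmu_free_long_range(os, object, newlen, DMU_OBJECT_END);
 */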
882cdb0ab79Smaybee 
883ea8dc4b6Seschrock int
884fa9e4066Sahrens dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
885fa9e4066Sahrens     uint64_t size, dmu_tx_t *tx)
886fa9e4066Sahrens {
887ea8dc4b6Seschrock 	dnode_t *dn;
888503ad85cSMatthew Ahrens 	int err = dnode_hold(os, object, FTAG, &dn);
889ea8dc4b6Seschrock 	if (err)
890ea8dc4b6Seschrock 		return (err);
891fa9e4066Sahrens 	ASSERT(offset < UINT64_MAX);
892fa9e4066Sahrens 	ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
893fa9e4066Sahrens 	dnode_free_range(dn, offset, size, tx);
894fa9e4066Sahrens 	dnode_rele(dn, FTAG);
895ea8dc4b6Seschrock 	return (0);
896fa9e4066Sahrens }
897fa9e4066Sahrens 
898b0c42cd4Sbzzz77 static int
899b0c42cd4Sbzzz77 dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
9007bfdf011SNeil Perrin     void *buf, uint32_t flags)
901fa9e4066Sahrens {
902fa9e4066Sahrens 	dmu_buf_t **dbp;
903b0c42cd4Sbzzz77 	int numbufs, err = 0;
904feb08c6bSbillm 
905feb08c6bSbillm 	/*
906feb08c6bSbillm 	 * Deal with odd block sizes, where there can't be data past the first
907feb08c6bSbillm 	 * block.  If we ever do the tail block optimization, we will need to
908feb08c6bSbillm 	 * handle that here as well.
909feb08c6bSbillm 	 */
910c87b8fc5SMark J Musante 	if (dn->dn_maxblkid == 0) {
911fa9e4066Sahrens 		int newsz = offset > dn->dn_datablksz ? 0 :
912fa9e4066Sahrens 		    MIN(size, dn->dn_datablksz - offset);
913fa9e4066Sahrens 		bzero((char *)buf + newsz, size - newsz);
914fa9e4066Sahrens 		size = newsz;
915fa9e4066Sahrens 	}
916fa9e4066Sahrens 
917fa9e4066Sahrens 	while (size > 0) {
918fa9e4066Sahrens 		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
919c87b8fc5SMark J Musante 		int i;
920fa9e4066Sahrens 
921fa9e4066Sahrens 		/*
922fa9e4066Sahrens 		 * NB: we could do this block-at-a-time, but it's nice
923fa9e4066Sahrens 		 * to be reading in parallel.
924fa9e4066Sahrens 		 */
925a2eea2e1Sahrens 		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
9267bfdf011SNeil Perrin 		    TRUE, FTAG, &numbufs, &dbp, flags);
927ea8dc4b6Seschrock 		if (err)
9281934e92fSmaybee 			break;
929fa9e4066Sahrens 
930fa9e4066Sahrens 		for (i = 0; i < numbufs; i++) {
931fa9e4066Sahrens 			int tocpy;
932fa9e4066Sahrens 			int bufoff;
933fa9e4066Sahrens 			dmu_buf_t *db = dbp[i];
934fa9e4066Sahrens 
935fa9e4066Sahrens 			ASSERT(size > 0);
936fa9e4066Sahrens 
937fa9e4066Sahrens 			bufoff = offset - db->db_offset;
938fa9e4066Sahrens 			tocpy = (int)MIN(db->db_size - bufoff, size);
939fa9e4066Sahrens 
940fa9e4066Sahrens 			bcopy((char *)db->db_data + bufoff, buf, tocpy);
941fa9e4066Sahrens 
942fa9e4066Sahrens 			offset += tocpy;
943fa9e4066Sahrens 			size -= tocpy;
944fa9e4066Sahrens 			buf = (char *)buf + tocpy;
945fa9e4066Sahrens 		}
946ea8dc4b6Seschrock 		dmu_buf_rele_array(dbp, numbufs, FTAG);
947fa9e4066Sahrens 	}
948b0c42cd4Sbzzz77 	return (err);
949b0c42cd4Sbzzz77 }
950b0c42cd4Sbzzz77 
951b0c42cd4Sbzzz77 int
952b0c42cd4Sbzzz77 dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
953b0c42cd4Sbzzz77     void *buf, uint32_t flags)
954b0c42cd4Sbzzz77 {
955b0c42cd4Sbzzz77 	dnode_t *dn;
956b0c42cd4Sbzzz77 	int err;
957b0c42cd4Sbzzz77 
958b0c42cd4Sbzzz77 	err = dnode_hold(os, object, FTAG, &dn);
959b0c42cd4Sbzzz77 	if (err != 0)
960b0c42cd4Sbzzz77 		return (err);
961b0c42cd4Sbzzz77 
962b0c42cd4Sbzzz77 	err = dmu_read_impl(dn, offset, size, buf, flags);
963a2eea2e1Sahrens 	dnode_rele(dn, FTAG);
9641934e92fSmaybee 	return (err);
965fa9e4066Sahrens }
966fa9e4066Sahrens 
967b0c42cd4Sbzzz77 int
968b0c42cd4Sbzzz77 dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
969b0c42cd4Sbzzz77     uint32_t flags)
970b0c42cd4Sbzzz77 {
971b0c42cd4Sbzzz77 	return (dmu_read_impl(dn, offset, size, buf, flags));
972b0c42cd4Sbzzz77 }
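
/*
 * Illustrative sketch (hypothetical helper): a random-access reader that
 * does not want to train the prefetcher can pass DMU_READ_NO_PREFETCH.
 */
static int
dmu_read_random_example(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, void *buf)
{
	return (dmu_read(os, object, offset, size, buf,
	    DMU_READ_NO_PREFETCH));
}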
973b0c42cd4Sbzzz77 
974b0c42cd4Sbzzz77 static void
975b0c42cd4Sbzzz77 dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
976fa9e4066Sahrens     const void *buf, dmu_tx_t *tx)
977fa9e4066Sahrens {
978b0c42cd4Sbzzz77 	int i;
979fa9e4066Sahrens 
980fa9e4066Sahrens 	for (i = 0; i < numbufs; i++) {
981fa9e4066Sahrens 		int tocpy;
982fa9e4066Sahrens 		int bufoff;
983fa9e4066Sahrens 		dmu_buf_t *db = dbp[i];
984fa9e4066Sahrens 
985fa9e4066Sahrens 		ASSERT(size > 0);
986fa9e4066Sahrens 
987fa9e4066Sahrens 		bufoff = offset - db->db_offset;
988fa9e4066Sahrens 		tocpy = (int)MIN(db->db_size - bufoff, size);
989fa9e4066Sahrens 
990fa9e4066Sahrens 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
991fa9e4066Sahrens 
992fa9e4066Sahrens 		if (tocpy == db->db_size)
993fa9e4066Sahrens 			dmu_buf_will_fill(db, tx);
994fa9e4066Sahrens 		else
995fa9e4066Sahrens 			dmu_buf_will_dirty(db, tx);
996fa9e4066Sahrens 
997fa9e4066Sahrens 		bcopy(buf, (char *)db->db_data + bufoff, tocpy);
998fa9e4066Sahrens 
999fa9e4066Sahrens 		if (tocpy == db->db_size)
1000fa9e4066Sahrens 			dmu_buf_fill_done(db, tx);
1001fa9e4066Sahrens 
1002fa9e4066Sahrens 		offset += tocpy;
1003fa9e4066Sahrens 		size -= tocpy;
1004fa9e4066Sahrens 		buf = (char *)buf + tocpy;
1005fa9e4066Sahrens 	}
1006b0c42cd4Sbzzz77 }
1007b0c42cd4Sbzzz77 
1008b0c42cd4Sbzzz77 void
1009b0c42cd4Sbzzz77 dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1010b0c42cd4Sbzzz77     const void *buf, dmu_tx_t *tx)
1011b0c42cd4Sbzzz77 {
1012b0c42cd4Sbzzz77 	dmu_buf_t **dbp;
1013b0c42cd4Sbzzz77 	int numbufs;
1014b0c42cd4Sbzzz77 
1015b0c42cd4Sbzzz77 	if (size == 0)
1016b0c42cd4Sbzzz77 		return;
1017b0c42cd4Sbzzz77 
1018b0c42cd4Sbzzz77 	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
1019b0c42cd4Sbzzz77 	    FALSE, FTAG, &numbufs, &dbp));
1020b0c42cd4Sbzzz77 	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
1021b0c42cd4Sbzzz77 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1022b0c42cd4Sbzzz77 }
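
/*
 * Illustrative sketch (hypothetical helper, following the usual DMU
 * transaction discipline): a self-contained write that creates, assigns,
 * and commits its own transaction around dmu_write().
 */
static int
dmu_write_sync_example(objset_t *os, uint64_t object, uint64_t offset,
    int size, const void *buf)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_write(tx, object, offset, size);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(os, object, offset, size, buf, tx);
	dmu_tx_commit(tx);
	return (0);
}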
1023b0c42cd4Sbzzz77 
1024b0c42cd4Sbzzz77 void
1025b0c42cd4Sbzzz77 dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
1026b0c42cd4Sbzzz77     const void *buf, dmu_tx_t *tx)
1027b0c42cd4Sbzzz77 {
1028b0c42cd4Sbzzz77 	dmu_buf_t **dbp;
1029b0c42cd4Sbzzz77 	int numbufs;
1030b0c42cd4Sbzzz77 
1031b0c42cd4Sbzzz77 	if (size == 0)
1032b0c42cd4Sbzzz77 		return;
1033b0c42cd4Sbzzz77 
1034b0c42cd4Sbzzz77 	VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
1035b0c42cd4Sbzzz77 	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
1036b0c42cd4Sbzzz77 	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
1037ea8dc4b6Seschrock 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1038fa9e4066Sahrens }
1039fa9e4066Sahrens 
10405cabbc6bSPrashanth Sreenivasa static int
10415cabbc6bSPrashanth Sreenivasa dmu_object_remap_one_indirect(objset_t *os, dnode_t *dn,
10425cabbc6bSPrashanth Sreenivasa     uint64_t last_removal_txg, uint64_t offset)
10435cabbc6bSPrashanth Sreenivasa {
10445cabbc6bSPrashanth Sreenivasa 	uint64_t l1blkid = dbuf_whichblock(dn, 1, offset);
10455cabbc6bSPrashanth Sreenivasa 	int err = 0;
10465cabbc6bSPrashanth Sreenivasa 
10475cabbc6bSPrashanth Sreenivasa 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
10485cabbc6bSPrashanth Sreenivasa 	dmu_buf_impl_t *dbuf = dbuf_hold_level(dn, 1, l1blkid, FTAG);
10495cabbc6bSPrashanth Sreenivasa 	ASSERT3P(dbuf, !=, NULL);
10505cabbc6bSPrashanth Sreenivasa 
10515cabbc6bSPrashanth Sreenivasa 	/*
10525cabbc6bSPrashanth Sreenivasa 	 * If the block hasn't been written yet, this default will ensure
10535cabbc6bSPrashanth Sreenivasa 	 * we don't try to remap it.
10545cabbc6bSPrashanth Sreenivasa 	 */
10555cabbc6bSPrashanth Sreenivasa 	uint64_t birth = UINT64_MAX;
10565cabbc6bSPrashanth Sreenivasa 	ASSERT3U(last_removal_txg, !=, UINT64_MAX);
10575cabbc6bSPrashanth Sreenivasa 	if (dbuf->db_blkptr != NULL)
10585cabbc6bSPrashanth Sreenivasa 		birth = dbuf->db_blkptr->blk_birth;
10595cabbc6bSPrashanth Sreenivasa 	rw_exit(&dn->dn_struct_rwlock);
10605cabbc6bSPrashanth Sreenivasa 
10615cabbc6bSPrashanth Sreenivasa 	/*
10625cabbc6bSPrashanth Sreenivasa 	 * If this L1 was already written after the last removal, then we've
10635cabbc6bSPrashanth Sreenivasa 	 * already tried to remap it.
10645cabbc6bSPrashanth Sreenivasa 	 */
10655cabbc6bSPrashanth Sreenivasa 	if (birth <= last_removal_txg &&
10665cabbc6bSPrashanth Sreenivasa 	    dbuf_read(dbuf, NULL, DB_RF_MUST_SUCCEED) == 0 &&
10675cabbc6bSPrashanth Sreenivasa 	    dbuf_can_remap(dbuf)) {
10685cabbc6bSPrashanth Sreenivasa 		dmu_tx_t *tx = dmu_tx_create(os);
10695cabbc6bSPrashanth Sreenivasa 		dmu_tx_hold_remap_l1indirect(tx, dn->dn_object);
10705cabbc6bSPrashanth Sreenivasa 		err = dmu_tx_assign(tx, TXG_WAIT);
10715cabbc6bSPrashanth Sreenivasa 		if (err == 0) {
10725cabbc6bSPrashanth Sreenivasa 			(void) dbuf_dirty(dbuf, tx);
10735cabbc6bSPrashanth Sreenivasa 			dmu_tx_commit(tx);
10745cabbc6bSPrashanth Sreenivasa 		} else {
10755cabbc6bSPrashanth Sreenivasa 			dmu_tx_abort(tx);
10765cabbc6bSPrashanth Sreenivasa 		}
10775cabbc6bSPrashanth Sreenivasa 	}
10785cabbc6bSPrashanth Sreenivasa 
10795cabbc6bSPrashanth Sreenivasa 	dbuf_rele(dbuf, FTAG);
10805cabbc6bSPrashanth Sreenivasa 
10815cabbc6bSPrashanth Sreenivasa 	delay(zfs_object_remap_one_indirect_delay_ticks);
10825cabbc6bSPrashanth Sreenivasa 
10835cabbc6bSPrashanth Sreenivasa 	return (err);
10845cabbc6bSPrashanth Sreenivasa }
10855cabbc6bSPrashanth Sreenivasa 
10865cabbc6bSPrashanth Sreenivasa /*
10875cabbc6bSPrashanth Sreenivasa  * Remap all blockpointers in the object, if possible, so that they reference
10885cabbc6bSPrashanth Sreenivasa  * only concrete vdevs.
10895cabbc6bSPrashanth Sreenivasa  *
10905cabbc6bSPrashanth Sreenivasa  * To do this, iterate over the L0 blockpointers and remap any that reference
10915cabbc6bSPrashanth Sreenivasa  * an indirect vdev. Note that we only examine L0 blockpointers; since we
10925cabbc6bSPrashanth Sreenivasa  * cannot guarantee that we can remap all blockpointers anyway (due to split
10935cabbc6bSPrashanth Sreenivasa  * blocks), we do not want to make the code unnecessarily complicated to
10945cabbc6bSPrashanth Sreenivasa  * catch the unlikely case that there is an L1 block on an indirect vdev that
10955cabbc6bSPrashanth Sreenivasa  * contains no indirect blockpointers.
10965cabbc6bSPrashanth Sreenivasa  */
10975cabbc6bSPrashanth Sreenivasa int
10985cabbc6bSPrashanth Sreenivasa dmu_object_remap_indirects(objset_t *os, uint64_t object,
10995cabbc6bSPrashanth Sreenivasa     uint64_t last_removal_txg)
11005cabbc6bSPrashanth Sreenivasa {
11015cabbc6bSPrashanth Sreenivasa 	uint64_t offset, l1span;
11025cabbc6bSPrashanth Sreenivasa 	int err;
11035cabbc6bSPrashanth Sreenivasa 	dnode_t *dn;
11045cabbc6bSPrashanth Sreenivasa 
11055cabbc6bSPrashanth Sreenivasa 	err = dnode_hold(os, object, FTAG, &dn);
11065cabbc6bSPrashanth Sreenivasa 	if (err != 0) {
11075cabbc6bSPrashanth Sreenivasa 		return (err);
11085cabbc6bSPrashanth Sreenivasa 	}
11095cabbc6bSPrashanth Sreenivasa 
11105cabbc6bSPrashanth Sreenivasa 	if (dn->dn_nlevels <= 1) {
11115cabbc6bSPrashanth Sreenivasa 		if (issig(JUSTLOOKING) && issig(FORREAL)) {
11125cabbc6bSPrashanth Sreenivasa 			err = SET_ERROR(EINTR);
11135cabbc6bSPrashanth Sreenivasa 		}
11145cabbc6bSPrashanth Sreenivasa 
11155cabbc6bSPrashanth Sreenivasa 		/*
11165cabbc6bSPrashanth Sreenivasa 		 * If the dnode has no indirect blocks, we cannot dirty them.
11175cabbc6bSPrashanth Sreenivasa 		 * We still want to remap the blkptr(s) in the dnode if
11185cabbc6bSPrashanth Sreenivasa 		 * appropriate, so mark it as dirty.
11195cabbc6bSPrashanth Sreenivasa 		 */
11205cabbc6bSPrashanth Sreenivasa 		if (err == 0 && dnode_needs_remap(dn)) {
11215cabbc6bSPrashanth Sreenivasa 			dmu_tx_t *tx = dmu_tx_create(os);
11225cabbc6bSPrashanth Sreenivasa 			dmu_tx_hold_bonus(tx, dn->dn_object);
11235cabbc6bSPrashanth Sreenivasa 			if ((err = dmu_tx_assign(tx, TXG_WAIT)) == 0) {
11245cabbc6bSPrashanth Sreenivasa 				dnode_setdirty(dn, tx);
11255cabbc6bSPrashanth Sreenivasa 				dmu_tx_commit(tx);
11265cabbc6bSPrashanth Sreenivasa 			} else {
11275cabbc6bSPrashanth Sreenivasa 				dmu_tx_abort(tx);
11285cabbc6bSPrashanth Sreenivasa 			}
11295cabbc6bSPrashanth Sreenivasa 		}
11305cabbc6bSPrashanth Sreenivasa 
11315cabbc6bSPrashanth Sreenivasa 		dnode_rele(dn, FTAG);
11325cabbc6bSPrashanth Sreenivasa 		return (err);
11335cabbc6bSPrashanth Sreenivasa 	}
11345cabbc6bSPrashanth Sreenivasa 
11355cabbc6bSPrashanth Sreenivasa 	offset = 0;
11365cabbc6bSPrashanth Sreenivasa 	l1span = 1ULL << (dn->dn_indblkshift - SPA_BLKPTRSHIFT +
11375cabbc6bSPrashanth Sreenivasa 	    dn->dn_datablkshift);
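	/*
	 * Worked example (illustrative figures): with 128K data blocks
	 * (dn_datablkshift == 17) and 128K indirect blocks
	 * (dn_indblkshift == 17), each L1 holds
	 * 1 << (17 - SPA_BLKPTRSHIFT) == 1024 blkptrs, so l1span covers
	 * 1024 * 128K == 128M of object offset per iteration below.
	 */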
11385cabbc6bSPrashanth Sreenivasa 	/*
11395cabbc6bSPrashanth Sreenivasa 	 * Find the next L1 indirect that is not a hole.
11405cabbc6bSPrashanth Sreenivasa 	 */
11415cabbc6bSPrashanth Sreenivasa 	while (dnode_next_offset(dn, 0, &offset, 2, 1, 0) == 0) {
11425cabbc6bSPrashanth Sreenivasa 		if (issig(JUSTLOOKING) && issig(FORREAL)) {
11435cabbc6bSPrashanth Sreenivasa 			err = SET_ERROR(EINTR);
11445cabbc6bSPrashanth Sreenivasa 			break;
11455cabbc6bSPrashanth Sreenivasa 		}
11465cabbc6bSPrashanth Sreenivasa 		if ((err = dmu_object_remap_one_indirect(os, dn,
11475cabbc6bSPrashanth Sreenivasa 		    last_removal_txg, offset)) != 0) {
11485cabbc6bSPrashanth Sreenivasa 			break;
11495cabbc6bSPrashanth Sreenivasa 		}
11505cabbc6bSPrashanth Sreenivasa 		offset += l1span;
11515cabbc6bSPrashanth Sreenivasa 	}
11525cabbc6bSPrashanth Sreenivasa 
11535cabbc6bSPrashanth Sreenivasa 	dnode_rele(dn, FTAG);
11545cabbc6bSPrashanth Sreenivasa 	return (err);
11555cabbc6bSPrashanth Sreenivasa }
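
/*
 * Illustrative sketch (not part of the original source, not compiled):
 * how a caller might walk an objset and remap every object, in the
 * spirit of the remap ioctl path.  example_remap_objset() is a
 * hypothetical name; dmu_object_next() returns ESRCH once the object
 * space is exhausted.
 */
#if 0
static int
example_remap_objset(objset_t *os, uint64_t last_removal_txg)
{
	uint64_t object = 0;
	int err;

	while ((err = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
		if ((err = dmu_object_remap_indirects(os, object,
		    last_removal_txg)) != 0)
			break;
	}
	return (err == ESRCH ? 0 : err);
}
#endif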
11565cabbc6bSPrashanth Sreenivasa 
115782c9918fSTim Haley void
115882c9918fSTim Haley dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
115982c9918fSTim Haley     dmu_tx_t *tx)
116082c9918fSTim Haley {
116182c9918fSTim Haley 	dmu_buf_t **dbp;
116282c9918fSTim Haley 	int numbufs, i;
116382c9918fSTim Haley 
116482c9918fSTim Haley 	if (size == 0)
116582c9918fSTim Haley 		return;
116682c9918fSTim Haley 
116782c9918fSTim Haley 	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
116882c9918fSTim Haley 	    FALSE, FTAG, &numbufs, &dbp));
116982c9918fSTim Haley 
117082c9918fSTim Haley 	for (i = 0; i < numbufs; i++) {
117182c9918fSTim Haley 		dmu_buf_t *db = dbp[i];
117282c9918fSTim Haley 
117382c9918fSTim Haley 		dmu_buf_will_not_fill(db, tx);
117482c9918fSTim Haley 	}
117582c9918fSTim Haley 	dmu_buf_rele_array(dbp, numbufs, FTAG);
117682c9918fSTim Haley }
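
/*
 * Illustrative sketch (not compiled): a hypothetical caller of
 * dmu_prealloc(), holding the range in a tx the way the dump-device
 * path does.  example_prealloc() and its error handling are
 * assumptions, not the actual zvol code.
 */
#if 0
static int
example_prealloc(objset_t *os, uint64_t object, uint64_t off, int len)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_write(tx, object, off, len);
	if ((err = dmu_tx_assign(tx, TXG_WAIT)) != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_prealloc(os, object, off, len, tx);
	dmu_tx_commit(tx);
	return (0);
}
#endif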
117782c9918fSTim Haley 
11785d7b4d43SMatthew Ahrens void
11795d7b4d43SMatthew Ahrens dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
11805d7b4d43SMatthew Ahrens     void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
11815d7b4d43SMatthew Ahrens     int compressed_size, int byteorder, dmu_tx_t *tx)
11825d7b4d43SMatthew Ahrens {
11835d7b4d43SMatthew Ahrens 	dmu_buf_t *db;
11845d7b4d43SMatthew Ahrens 
11855d7b4d43SMatthew Ahrens 	ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
11865d7b4d43SMatthew Ahrens 	ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
11875d7b4d43SMatthew Ahrens 	VERIFY0(dmu_buf_hold_noread(os, object, offset,
11885d7b4d43SMatthew Ahrens 	    FTAG, &db));
11895d7b4d43SMatthew Ahrens 
11905d7b4d43SMatthew Ahrens 	dmu_buf_write_embedded(db,
11915d7b4d43SMatthew Ahrens 	    data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
11925d7b4d43SMatthew Ahrens 	    uncompressed_size, compressed_size, byteorder, tx);
11935d7b4d43SMatthew Ahrens 
11945d7b4d43SMatthew Ahrens 	dmu_buf_rele(db, FTAG);
11955d7b4d43SMatthew Ahrens }
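
/*
 * Illustrative sketch (not compiled): embedding a small, already
 * compressed payload directly in a block pointer.  The buffer and
 * sizes are hypothetical; dmu_write_embedded() performs no
 * compression itself, so "comp" must describe how "data" was produced
 * (lsize = uncompressed length, psize = compressed length).
 */
#if 0
static void
example_write_embedded(objset_t *os, uint64_t object, void *data,
    int lsize, int psize, dmu_tx_t *tx)
{
	dmu_write_embedded(os, object, 0, data,
	    BP_EMBEDDED_TYPE_DATA, ZIO_COMPRESS_LZ4,
	    lsize, psize, ZFS_HOST_BYTEORDER, tx);
}
#endif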
11965d7b4d43SMatthew Ahrens 
1197c242f9a0Schunli zhang - Sun Microsystems - Irvine United States /*
1198c242f9a0Schunli zhang - Sun Microsystems - Irvine United States  * DMU support for xuio
1199c242f9a0Schunli zhang - Sun Microsystems - Irvine United States  */
1200c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kstat_t *xuio_ksp = NULL;
1201c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1202c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int
1203c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_init(xuio_t *xuio, int nblk)
1204c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1205c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	dmu_xuio_t *priv;
1206c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	uio_t *uio = &xuio->xu_uio;
1207c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1208c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	uio->uio_iovcnt = nblk;
1209c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);
1210c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1211c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
1212c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	priv->cnt = nblk;
1213c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
1214c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	priv->iovp = uio->uio_iov;
1215c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	XUIO_XUZC_PRIV(xuio) = priv;
1216c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1217c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	if (XUIO_XUZC_RW(xuio) == UIO_READ)
1218c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		XUIOSTAT_INCR(xuiostat_onloan_rbuf, nblk);
1219c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	else
1220c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		XUIOSTAT_INCR(xuiostat_onloan_wbuf, nblk);
1221c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1222c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	return (0);
1223c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1224c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1225c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void
1226c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_fini(xuio_t *xuio)
1227c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1228c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
1229c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	int nblk = priv->cnt;
1230c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1231c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	kmem_free(priv->iovp, nblk * sizeof (iovec_t));
1232c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	kmem_free(priv->bufs, nblk * sizeof (arc_buf_t *));
1233c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	kmem_free(priv, sizeof (dmu_xuio_t));
1234c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1235c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	if (XUIO_XUZC_RW(xuio) == UIO_READ)
1236c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk);
1237c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	else
1238c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk);
1239c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1240c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1241c242f9a0Schunli zhang - Sun Microsystems - Irvine United States /*
1242c242f9a0Schunli zhang - Sun Microsystems - Irvine United States  * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf }
1243c242f9a0Schunli zhang - Sun Microsystems - Irvine United States  * and increase priv->next by 1.
1244c242f9a0Schunli zhang - Sun Microsystems - Irvine United States  */
1245c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int
1246c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n)
1247c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1248c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	struct iovec *iov;
1249c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	uio_t *uio = &xuio->xu_uio;
1250c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
1251c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	int i = priv->next++;
1252c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1253c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	ASSERT(i < priv->cnt);
12545602294fSDan Kimmel 	ASSERT(off + n <= arc_buf_lsize(abuf));
1255c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	iov = uio->uio_iov + i;
1256c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	iov->iov_base = (char *)abuf->b_data + off;
1257c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	iov->iov_len = n;
1258c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	priv->bufs[i] = abuf;
1259c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	return (0);
1260c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1261c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1262c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int
1263c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_cnt(xuio_t *xuio)
1264c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1265c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
1266c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	return (priv->cnt);
1267c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1268c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1269c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t *
1270c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_arcbuf(xuio_t *xuio, int i)
1271c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1272c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
1273c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1274c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	ASSERT(i < priv->cnt);
1275c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	return (priv->bufs[i]);
1276c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1277c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1278c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void
1279c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_clear(xuio_t *xuio, int i)
1280c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1281c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
1282c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1283c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	ASSERT(i < priv->cnt);
1284c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	priv->bufs[i] = NULL;
1285c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
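
/*
 * Illustrative sketch (not compiled): the loaned-buffer lifecycle the
 * helpers above implement.  The xuio and arc bufs are assumed to come
 * from a zero-copy caller; example_xuio_lifecycle() is a hypothetical
 * name and error handling is omitted.
 */
#if 0
static void
example_xuio_lifecycle(xuio_t *xuio, arc_buf_t **bufs, int nblk)
{
	int i;

	VERIFY0(dmu_xuio_init(xuio, nblk));
	for (i = 0; i < nblk; i++)
		VERIFY0(dmu_xuio_add(xuio, bufs[i], 0,
		    arc_buf_lsize(bufs[i])));

	/* ... the caller consumes xuio->xu_uio here ... */

	for (i = 0; i < dmu_xuio_cnt(xuio); i++) {
		arc_buf_t *abuf = dmu_xuio_arcbuf(xuio, i);
		dmu_xuio_clear(xuio, i);
		dmu_return_arcbuf(abuf);
	}
	dmu_xuio_fini(xuio);
}
#endif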
1286c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1287c242f9a0Schunli zhang - Sun Microsystems - Irvine United States static void
1288c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_init(void)
1289c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1290c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc",
1291c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	    KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t),
1292c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	    KSTAT_FLAG_VIRTUAL);
1293c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	if (xuio_ksp != NULL) {
1294c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		xuio_ksp->ks_data = &xuio_stats;
1295c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		kstat_install(xuio_ksp);
1296c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	}
1297c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1298c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1299c242f9a0Schunli zhang - Sun Microsystems - Irvine United States static void
1300c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_fini(void)
1301c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1302c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	if (xuio_ksp != NULL) {
1303c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		kstat_delete(xuio_ksp);
1304c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		xuio_ksp = NULL;
1305c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	}
1306c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1307c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1308c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void
130999aa8b55SPrashanth Sreenivasa xuio_stat_wbuf_copied(void)
1310c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1311c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	XUIOSTAT_BUMP(xuiostat_wbuf_copied);
1312c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1313c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1314c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void
131599aa8b55SPrashanth Sreenivasa xuio_stat_wbuf_nocopy(void)
1316c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1317c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	XUIOSTAT_BUMP(xuiostat_wbuf_nocopy);
1318c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1319c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1320fa9e4066Sahrens #ifdef _KERNEL
13218dfe5547SRichard Yao int
1322f8554bb9SMatthew Ahrens dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
1323feb08c6bSbillm {
1324feb08c6bSbillm 	dmu_buf_t **dbp;
1325feb08c6bSbillm 	int numbufs, i, err;
1326c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	xuio_t *xuio = NULL;
1327feb08c6bSbillm 
1328feb08c6bSbillm 	/*
1329feb08c6bSbillm 	 * NB: we could do this block-at-a-time, but it's nice
1330feb08c6bSbillm 	 * to be reading in parallel.
1331feb08c6bSbillm 	 */
1332f8554bb9SMatthew Ahrens 	err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
1333f8554bb9SMatthew Ahrens 	    TRUE, FTAG, &numbufs, &dbp, 0);
1334feb08c6bSbillm 	if (err)
1335feb08c6bSbillm 		return (err);
1336feb08c6bSbillm 
1337c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	if (uio->uio_extflg == UIO_XUIO)
1338c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		xuio = (xuio_t *)uio;
1339c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1340feb08c6bSbillm 	for (i = 0; i < numbufs; i++) {
1341feb08c6bSbillm 		int tocpy;
1342feb08c6bSbillm 		int bufoff;
1343feb08c6bSbillm 		dmu_buf_t *db = dbp[i];
1344feb08c6bSbillm 
1345feb08c6bSbillm 		ASSERT(size > 0);
1346feb08c6bSbillm 
1347feb08c6bSbillm 		bufoff = uio->uio_loffset - db->db_offset;
1348feb08c6bSbillm 		tocpy = (int)MIN(db->db_size - bufoff, size);
1349feb08c6bSbillm 
1350c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		if (xuio) {
1351c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
1352c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			arc_buf_t *dbuf_abuf = dbi->db_buf;
1353c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			arc_buf_t *abuf = dbuf_loan_arcbuf(dbi);
1354c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			err = dmu_xuio_add(xuio, abuf, bufoff, tocpy);
1355c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			if (!err) {
1356c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 				uio->uio_resid -= tocpy;
1357c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 				uio->uio_loffset += tocpy;
1358c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			}
1359c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1360c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			if (abuf == dbuf_abuf)
1361c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 				XUIOSTAT_BUMP(xuiostat_rbuf_nocopy);
1362c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			else
1363c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 				XUIOSTAT_BUMP(xuiostat_rbuf_copied);
1364c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		} else {
1365feb08c6bSbillm 			err = uiomove((char *)db->db_data + bufoff, tocpy,
1366feb08c6bSbillm 			    UIO_READ, uio);
1367c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		}
1368feb08c6bSbillm 		if (err)
1369feb08c6bSbillm 			break;
1370feb08c6bSbillm 
1371feb08c6bSbillm 		size -= tocpy;
1372feb08c6bSbillm 	}
1373feb08c6bSbillm 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1374feb08c6bSbillm 
1375feb08c6bSbillm 	return (err);
1376feb08c6bSbillm }
1377feb08c6bSbillm 
1378f8554bb9SMatthew Ahrens /*
1379f8554bb9SMatthew Ahrens  * Read 'size' bytes into the uio buffer.
1380f8554bb9SMatthew Ahrens  * From object zdb->db_object.
1381f8554bb9SMatthew Ahrens  * Starting at offset uio->uio_loffset.
1382f8554bb9SMatthew Ahrens  *
1383f8554bb9SMatthew Ahrens  * If the caller already has a dbuf in the target object
1384f8554bb9SMatthew Ahrens  * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
1385f8554bb9SMatthew Ahrens  * because we don't have to find the dnode_t for the object.
1386f8554bb9SMatthew Ahrens  */
1387f8554bb9SMatthew Ahrens int
1388f8554bb9SMatthew Ahrens dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size)
1389f8554bb9SMatthew Ahrens {
1390f8554bb9SMatthew Ahrens 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1391f8554bb9SMatthew Ahrens 	dnode_t *dn;
1392f8554bb9SMatthew Ahrens 	int err;
1393f8554bb9SMatthew Ahrens 
1394f8554bb9SMatthew Ahrens 	if (size == 0)
1395f8554bb9SMatthew Ahrens 		return (0);
1396f8554bb9SMatthew Ahrens 
1397f8554bb9SMatthew Ahrens 	DB_DNODE_ENTER(db);
1398f8554bb9SMatthew Ahrens 	dn = DB_DNODE(db);
1399f8554bb9SMatthew Ahrens 	err = dmu_read_uio_dnode(dn, uio, size);
1400f8554bb9SMatthew Ahrens 	DB_DNODE_EXIT(db);
1401f8554bb9SMatthew Ahrens 
1402f8554bb9SMatthew Ahrens 	return (err);
1403f8554bb9SMatthew Ahrens }
1404f8554bb9SMatthew Ahrens 
1405f8554bb9SMatthew Ahrens /*
1406f8554bb9SMatthew Ahrens  * Read 'size' bytes into the uio buffer.
1407f8554bb9SMatthew Ahrens  * From the specified object.
1408f8554bb9SMatthew Ahrens  * Starting at offset uio->uio_loffset.
1409f8554bb9SMatthew Ahrens  */
1410f8554bb9SMatthew Ahrens int
1411f8554bb9SMatthew Ahrens dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
1412f8554bb9SMatthew Ahrens {
1413f8554bb9SMatthew Ahrens 	dnode_t *dn;
1414f8554bb9SMatthew Ahrens 	int err;
1415f8554bb9SMatthew Ahrens 
1416f8554bb9SMatthew Ahrens 	if (size == 0)
1417f8554bb9SMatthew Ahrens 		return (0);
1418f8554bb9SMatthew Ahrens 
1419f8554bb9SMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
1420f8554bb9SMatthew Ahrens 	if (err)
1421f8554bb9SMatthew Ahrens 		return (err);
1422f8554bb9SMatthew Ahrens 
1423f8554bb9SMatthew Ahrens 	err = dmu_read_uio_dnode(dn, uio, size);
1424f8554bb9SMatthew Ahrens 
1425f8554bb9SMatthew Ahrens 	dnode_rele(dn, FTAG);
1426f8554bb9SMatthew Ahrens 
1427f8554bb9SMatthew Ahrens 	return (err);
1428f8554bb9SMatthew Ahrens }
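
/*
 * Illustrative sketch (not compiled): reading into a kernel buffer
 * through the uio interface above.  The iovec/uio setup follows the
 * usual illumos pattern; example_read_kernel() and its arguments are
 * hypothetical.
 */
#if 0
static int
example_read_kernel(objset_t *os, uint64_t object, uint64_t off,
    char *buf, uint64_t len)
{
	iovec_t iov;
	uio_t uio;

	iov.iov_base = buf;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_loffset = off;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_resid = len;
	uio.uio_extflg = UIO_COPY_DEFAULT;

	return (dmu_read_uio(os, object, &uio, len));
}
#endif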
1429f8554bb9SMatthew Ahrens 
14308dfe5547SRichard Yao int
143194d1a210STim Haley dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
1432fa9e4066Sahrens {
1433fa9e4066Sahrens 	dmu_buf_t **dbp;
143494d1a210STim Haley 	int numbufs;
1435fa9e4066Sahrens 	int err = 0;
143694d1a210STim Haley 	int i;
1437fa9e4066Sahrens 
143894d1a210STim Haley 	err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
143994d1a210STim Haley 	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
1440ea8dc4b6Seschrock 	if (err)
1441ea8dc4b6Seschrock 		return (err);
1442fa9e4066Sahrens 
1443fa9e4066Sahrens 	for (i = 0; i < numbufs; i++) {
1444fa9e4066Sahrens 		int tocpy;
1445fa9e4066Sahrens 		int bufoff;
1446fa9e4066Sahrens 		dmu_buf_t *db = dbp[i];
1447fa9e4066Sahrens 
1448fa9e4066Sahrens 		ASSERT(size > 0);
1449fa9e4066Sahrens 
1450feb08c6bSbillm 		bufoff = uio->uio_loffset - db->db_offset;
1451fa9e4066Sahrens 		tocpy = (int)MIN(db->db_size - bufoff, size);
1452fa9e4066Sahrens 
1453fa9e4066Sahrens 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1454fa9e4066Sahrens 
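		/*
		 * A write that covers the whole block can skip reading
		 * the old contents (dmu_buf_will_fill); a partial write
		 * must read-modify-write (dmu_buf_will_dirty).
		 */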
1455fa9e4066Sahrens 		if (tocpy == db->db_size)
1456fa9e4066Sahrens 			dmu_buf_will_fill(db, tx);
1457fa9e4066Sahrens 		else
1458fa9e4066Sahrens 			dmu_buf_will_dirty(db, tx);
1459fa9e4066Sahrens 
1460fa9e4066Sahrens 		/*
1461fa9e4066Sahrens 		 * XXX uiomove could block forever (e.g. nfs-backed
1462fa9e4066Sahrens 		 * pages).  There needs to be a uiolockdown() function
1463fa9e4066Sahrens 		 * to lock the pages in memory, so that uiomove won't
1464fa9e4066Sahrens 		 * block.
1465fa9e4066Sahrens 		 */
1466fa9e4066Sahrens 		err = uiomove((char *)db->db_data + bufoff, tocpy,
1467fa9e4066Sahrens 		    UIO_WRITE, uio);
1468fa9e4066Sahrens 
1469fa9e4066Sahrens 		if (tocpy == db->db_size)
1470fa9e4066Sahrens 			dmu_buf_fill_done(db, tx);
1471fa9e4066Sahrens 
1472fa9e4066Sahrens 		if (err)
1473fa9e4066Sahrens 			break;
1474fa9e4066Sahrens 
1475fa9e4066Sahrens 		size -= tocpy;
1476fa9e4066Sahrens 	}
147794d1a210STim Haley 
1478ea8dc4b6Seschrock 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1479fa9e4066Sahrens 	return (err);
1480fa9e4066Sahrens }
148144eda4d7Smaybee 
1482f8554bb9SMatthew Ahrens /*
1483f8554bb9SMatthew Ahrens  * Write 'size' bytes from the uio buffer.
1484f8554bb9SMatthew Ahrens  * To object zdb->db_object.
1485f8554bb9SMatthew Ahrens  * Starting at offset uio->uio_loffset.
1486f8554bb9SMatthew Ahrens  *
1487f8554bb9SMatthew Ahrens  * If the caller already has a dbuf in the target object
1488f8554bb9SMatthew Ahrens  * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
1489f8554bb9SMatthew Ahrens  * because we don't have to find the dnode_t for the object.
1490f8554bb9SMatthew Ahrens  */
149144eda4d7Smaybee int
149294d1a210STim Haley dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
149394d1a210STim Haley     dmu_tx_t *tx)
149494d1a210STim Haley {
1495744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1496744947dcSTom Erickson 	dnode_t *dn;
1497744947dcSTom Erickson 	int err;
1498744947dcSTom Erickson 
149994d1a210STim Haley 	if (size == 0)
150094d1a210STim Haley 		return (0);
150194d1a210STim Haley 
1502744947dcSTom Erickson 	DB_DNODE_ENTER(db);
1503744947dcSTom Erickson 	dn = DB_DNODE(db);
1504744947dcSTom Erickson 	err = dmu_write_uio_dnode(dn, uio, size, tx);
1505744947dcSTom Erickson 	DB_DNODE_EXIT(db);
1506744947dcSTom Erickson 
1507744947dcSTom Erickson 	return (err);
150894d1a210STim Haley }
150994d1a210STim Haley 
1510f8554bb9SMatthew Ahrens /*
1511f8554bb9SMatthew Ahrens  * Write 'size' bytes from the uio buffer.
1512f8554bb9SMatthew Ahrens  * To the specified object.
1513f8554bb9SMatthew Ahrens  * Starting at offset uio->uio_loffset.
1514f8554bb9SMatthew Ahrens  */
151594d1a210STim Haley int
151694d1a210STim Haley dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
151794d1a210STim Haley     dmu_tx_t *tx)
151894d1a210STim Haley {
151994d1a210STim Haley 	dnode_t *dn;
152094d1a210STim Haley 	int err;
152194d1a210STim Haley 
152294d1a210STim Haley 	if (size == 0)
152394d1a210STim Haley 		return (0);
152494d1a210STim Haley 
152594d1a210STim Haley 	err = dnode_hold(os, object, FTAG, &dn);
152694d1a210STim Haley 	if (err)
152794d1a210STim Haley 		return (err);
152894d1a210STim Haley 
152994d1a210STim Haley 	err = dmu_write_uio_dnode(dn, uio, size, tx);
153094d1a210STim Haley 
153194d1a210STim Haley 	dnode_rele(dn, FTAG);
153294d1a210STim Haley 
153394d1a210STim Haley 	return (err);
153494d1a210STim Haley }
153594d1a210STim Haley 
153694d1a210STim Haley int
153744eda4d7Smaybee dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
153844eda4d7Smaybee     page_t *pp, dmu_tx_t *tx)
153944eda4d7Smaybee {
154044eda4d7Smaybee 	dmu_buf_t **dbp;
154144eda4d7Smaybee 	int numbufs, i;
154244eda4d7Smaybee 	int err;
154344eda4d7Smaybee 
154444eda4d7Smaybee 	if (size == 0)
154544eda4d7Smaybee 		return (0);
154644eda4d7Smaybee 
154744eda4d7Smaybee 	err = dmu_buf_hold_array(os, object, offset, size,
154844eda4d7Smaybee 	    FALSE, FTAG, &numbufs, &dbp);
154944eda4d7Smaybee 	if (err)
155044eda4d7Smaybee 		return (err);
155144eda4d7Smaybee 
155244eda4d7Smaybee 	for (i = 0; i < numbufs; i++) {
155344eda4d7Smaybee 		int tocpy, copied, thiscpy;
155444eda4d7Smaybee 		int bufoff;
155544eda4d7Smaybee 		dmu_buf_t *db = dbp[i];
155644eda4d7Smaybee 		caddr_t va;
155744eda4d7Smaybee 
155844eda4d7Smaybee 		ASSERT(size > 0);
155944eda4d7Smaybee 		ASSERT3U(db->db_size, >=, PAGESIZE);
156044eda4d7Smaybee 
156144eda4d7Smaybee 		bufoff = offset - db->db_offset;
156244eda4d7Smaybee 		tocpy = (int)MIN(db->db_size - bufoff, size);
156344eda4d7Smaybee 
156444eda4d7Smaybee 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
156544eda4d7Smaybee 
156644eda4d7Smaybee 		if (tocpy == db->db_size)
156744eda4d7Smaybee 			dmu_buf_will_fill(db, tx);
156844eda4d7Smaybee 		else
156944eda4d7Smaybee 			dmu_buf_will_dirty(db, tx);
157044eda4d7Smaybee 
157144eda4d7Smaybee 		for (copied = 0; copied < tocpy; copied += PAGESIZE) {
157244eda4d7Smaybee 			ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
157344eda4d7Smaybee 			thiscpy = MIN(PAGESIZE, tocpy - copied);
15740fab61baSJonathan W Adams 			va = zfs_map_page(pp, S_READ);
157544eda4d7Smaybee 			bcopy(va, (char *)db->db_data + bufoff, thiscpy);
15760fab61baSJonathan W Adams 			zfs_unmap_page(pp, va);
157744eda4d7Smaybee 			pp = pp->p_next;
157844eda4d7Smaybee 			bufoff += PAGESIZE;
157944eda4d7Smaybee 		}
158044eda4d7Smaybee 
158144eda4d7Smaybee 		if (tocpy == db->db_size)
158244eda4d7Smaybee 			dmu_buf_fill_done(db, tx);
158344eda4d7Smaybee 
158444eda4d7Smaybee 		offset += tocpy;
158544eda4d7Smaybee 		size -= tocpy;
158644eda4d7Smaybee 	}
158744eda4d7Smaybee 	dmu_buf_rele_array(dbp, numbufs, FTAG);
158844eda4d7Smaybee 	return (err);
158944eda4d7Smaybee }
1590fa9e4066Sahrens #endif
1591fa9e4066Sahrens 
15922fdbea25SAleksandr Guzovskiy /*
15932fdbea25SAleksandr Guzovskiy  * Allocate a loaned anonymous arc buffer.
15942fdbea25SAleksandr Guzovskiy  */
15952fdbea25SAleksandr Guzovskiy arc_buf_t *
15962fdbea25SAleksandr Guzovskiy dmu_request_arcbuf(dmu_buf_t *handle, int size)
15972fdbea25SAleksandr Guzovskiy {
1598744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
15992fdbea25SAleksandr Guzovskiy 
16005602294fSDan Kimmel 	return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
16012fdbea25SAleksandr Guzovskiy }
16022fdbea25SAleksandr Guzovskiy 
16032fdbea25SAleksandr Guzovskiy /*
16042fdbea25SAleksandr Guzovskiy  * Free a loaned arc buffer.
16052fdbea25SAleksandr Guzovskiy  */
16062fdbea25SAleksandr Guzovskiy void
16072fdbea25SAleksandr Guzovskiy dmu_return_arcbuf(arc_buf_t *buf)
16082fdbea25SAleksandr Guzovskiy {
16092fdbea25SAleksandr Guzovskiy 	arc_return_buf(buf, FTAG);
1610dcbf3bd6SGeorge Wilson 	arc_buf_destroy(buf, FTAG);
16112fdbea25SAleksandr Guzovskiy }
16122fdbea25SAleksandr Guzovskiy 
16132fdbea25SAleksandr Guzovskiy /*
16142fdbea25SAleksandr Guzovskiy  * When possible directly assign passed loaned arc buffer to a dbuf.
16152fdbea25SAleksandr Guzovskiy  * If this is not possible copy the contents of passed arc buf via
16162fdbea25SAleksandr Guzovskiy  * dmu_write().
16172fdbea25SAleksandr Guzovskiy  */
16182fdbea25SAleksandr Guzovskiy void
16198dfe5547SRichard Yao dmu_assign_arcbuf_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
16202fdbea25SAleksandr Guzovskiy     dmu_tx_t *tx)
16212fdbea25SAleksandr Guzovskiy {
16222fdbea25SAleksandr Guzovskiy 	dmu_buf_impl_t *db;
16235602294fSDan Kimmel 	uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
16242fdbea25SAleksandr Guzovskiy 	uint64_t blkid;
16252fdbea25SAleksandr Guzovskiy 
16262fdbea25SAleksandr Guzovskiy 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
1627a2cdcdd2SPaul Dagnelie 	blkid = dbuf_whichblock(dn, 0, offset);
16282fdbea25SAleksandr Guzovskiy 	VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
16292fdbea25SAleksandr Guzovskiy 	rw_exit(&dn->dn_struct_rwlock);
16302fdbea25SAleksandr Guzovskiy 
16318a904709SMatthew Ahrens 	/*
16328a904709SMatthew Ahrens 	 * We can only assign if the offset is aligned and the arc buf
16335602294fSDan Kimmel 	 * is the same size as the dbuf.
16348a904709SMatthew Ahrens 	 */
16355602294fSDan Kimmel 	if (offset == db->db.db_offset && blksz == db->db.db_size) {
16362fdbea25SAleksandr Guzovskiy 		dbuf_assign_arcbuf(db, buf, tx);
16372fdbea25SAleksandr Guzovskiy 		dbuf_rele(db, FTAG);
16382fdbea25SAleksandr Guzovskiy 	} else {
1639744947dcSTom Erickson 		objset_t *os;
1640744947dcSTom Erickson 		uint64_t object;
1641744947dcSTom Erickson 
16425602294fSDan Kimmel 		/* compressed bufs must always be assignable to their dbuf */
16435602294fSDan Kimmel 		ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
16445602294fSDan Kimmel 		ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
16455602294fSDan Kimmel 
1646744947dcSTom Erickson 		os = dn->dn_objset;
1647744947dcSTom Erickson 		object = dn->dn_object;
1648744947dcSTom Erickson 
16492fdbea25SAleksandr Guzovskiy 		dbuf_rele(db, FTAG);
1650744947dcSTom Erickson 		dmu_write(os, object, offset, blksz, buf->b_data, tx);
16512fdbea25SAleksandr Guzovskiy 		dmu_return_arcbuf(buf);
1652c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		XUIOSTAT_BUMP(xuiostat_wbuf_copied);
16532fdbea25SAleksandr Guzovskiy 	}
16542fdbea25SAleksandr Guzovskiy }
16552fdbea25SAleksandr Guzovskiy 
16568dfe5547SRichard Yao void
16578dfe5547SRichard Yao dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
16588dfe5547SRichard Yao     dmu_tx_t *tx)
16598dfe5547SRichard Yao {
16608dfe5547SRichard Yao 	dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
16618dfe5547SRichard Yao 
16628dfe5547SRichard Yao 	DB_DNODE_ENTER(dbuf);
16638dfe5547SRichard Yao 	dmu_assign_arcbuf_dnode(DB_DNODE(dbuf), offset, buf, tx);
16648dfe5547SRichard Yao 	DB_DNODE_EXIT(dbuf);
16658dfe5547SRichard Yao }
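
/*
 * Illustrative sketch (not compiled): the zero-copy write pattern the
 * functions above support.  Borrow an anonymous arc buf, fill it, and
 * hand it to the dbuf; on the aligned, full-block path no further copy
 * occurs.  example_assign_arcbuf() and its arguments are hypothetical.
 */
#if 0
static void
example_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, int blksz,
    const void *src, dmu_tx_t *tx)
{
	arc_buf_t *abuf = dmu_request_arcbuf(handle, blksz);

	bcopy(src, abuf->b_data, blksz);
	/* consumes abuf, even when it falls back to dmu_write() */
	dmu_assign_arcbuf(handle, offset, abuf, tx);
}
#endif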
16668dfe5547SRichard Yao 
1667c5c6ffa0Smaybee typedef struct {
1668b24ab676SJeff Bonwick 	dbuf_dirty_record_t	*dsa_dr;
1669b24ab676SJeff Bonwick 	dmu_sync_cb_t		*dsa_done;
1670b24ab676SJeff Bonwick 	zgd_t			*dsa_zgd;
1671b24ab676SJeff Bonwick 	dmu_tx_t		*dsa_tx;
1672c717a561Smaybee } dmu_sync_arg_t;
1673c5c6ffa0Smaybee 
1674c5c6ffa0Smaybee /* ARGSUSED */
1675c5c6ffa0Smaybee static void
1676e14bb325SJeff Bonwick dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
1677e14bb325SJeff Bonwick {
1678b24ab676SJeff Bonwick 	dmu_sync_arg_t *dsa = varg;
1679b24ab676SJeff Bonwick 	dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
1680e14bb325SJeff Bonwick 	blkptr_t *bp = zio->io_bp;
1681975c32a0SNeil Perrin 
1682b24ab676SJeff Bonwick 	if (zio->io_error == 0) {
1683b24ab676SJeff Bonwick 		if (BP_IS_HOLE(bp)) {
1684b24ab676SJeff Bonwick 			/*
1685b24ab676SJeff Bonwick 			 * A block of zeros may compress to a hole, but the
1686b24ab676SJeff Bonwick 			 * block size still needs to be known for replay.
1687b24ab676SJeff Bonwick 			 */
1688b24ab676SJeff Bonwick 			BP_SET_LSIZE(bp, db->db_size);
16895d7b4d43SMatthew Ahrens 		} else if (!BP_IS_EMBEDDED(bp)) {
1690e14bb325SJeff Bonwick 			ASSERT(BP_GET_LEVEL(bp) == 0);
1691e14bb325SJeff Bonwick 			bp->blk_fill = 1;
1692e14bb325SJeff Bonwick 		}
1693e14bb325SJeff Bonwick 	}
1694b24ab676SJeff Bonwick }
1695b24ab676SJeff Bonwick 
1696b24ab676SJeff Bonwick static void
1697b24ab676SJeff Bonwick dmu_sync_late_arrival_ready(zio_t *zio)
1698b24ab676SJeff Bonwick {
1699b24ab676SJeff Bonwick 	dmu_sync_ready(zio, NULL, zio->io_private);
1700b24ab676SJeff Bonwick }
1701e14bb325SJeff Bonwick 
1702e14bb325SJeff Bonwick /* ARGSUSED */
1703e14bb325SJeff Bonwick static void
1704c5c6ffa0Smaybee dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
1705c5c6ffa0Smaybee {
1706b24ab676SJeff Bonwick 	dmu_sync_arg_t *dsa = varg;
1707b24ab676SJeff Bonwick 	dbuf_dirty_record_t *dr = dsa->dsa_dr;
1708c717a561Smaybee 	dmu_buf_impl_t *db = dr->dr_dbuf;
1709cab3a55eSPrakash Surya 	zgd_t *zgd = dsa->dsa_zgd;
1710cab3a55eSPrakash Surya 
1711cab3a55eSPrakash Surya 	/*
1712cab3a55eSPrakash Surya 	 * Record the vdev(s) backing this blkptr so they can be flushed after
1713cab3a55eSPrakash Surya 	 * the writes for the lwb have completed.
1714cab3a55eSPrakash Surya 	 */
1715cab3a55eSPrakash Surya 	if (zio->io_error == 0) {
1716cab3a55eSPrakash Surya 		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1717cab3a55eSPrakash Surya 	}
1718c5c6ffa0Smaybee 
1719b50a0fe0SNeil Perrin 	mutex_enter(&db->db_mtx);
1720b50a0fe0SNeil Perrin 	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
1721b24ab676SJeff Bonwick 	if (zio->io_error == 0) {
172280901aeaSGeorge Wilson 		dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
172380901aeaSGeorge Wilson 		if (dr->dt.dl.dr_nopwrite) {
172480901aeaSGeorge Wilson 			blkptr_t *bp = zio->io_bp;
172580901aeaSGeorge Wilson 			blkptr_t *bp_orig = &zio->io_bp_orig;
172680901aeaSGeorge Wilson 			uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
172780901aeaSGeorge Wilson 
172880901aeaSGeorge Wilson 			ASSERT(BP_EQUAL(bp, bp_orig));
1729b7edcb94SMatthew Ahrens 			VERIFY(BP_EQUAL(bp, db->db_blkptr));
173080901aeaSGeorge Wilson 			ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
173145818ee1SMatthew Ahrens 			ASSERT(zio_checksum_table[chksum].ci_flags &
173245818ee1SMatthew Ahrens 			    ZCHECKSUM_FLAG_NOPWRITE);
173380901aeaSGeorge Wilson 		}
1734b24ab676SJeff Bonwick 		dr->dt.dl.dr_overridden_by = *zio->io_bp;
1735b24ab676SJeff Bonwick 		dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
1736b24ab676SJeff Bonwick 		dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
173770163ac5SPrakash Surya 
173870163ac5SPrakash Surya 		/*
173970163ac5SPrakash Surya 		 * Old style holes are filled with all zeros, whereas
174070163ac5SPrakash Surya 		 * new-style holes maintain their lsize, type, level,
174170163ac5SPrakash Surya 		 * and birth time (see zio_write_compress). While we
174270163ac5SPrakash Surya 		 * need to reset the BP_SET_LSIZE() call that happened
174370163ac5SPrakash Surya 		 * in dmu_sync_ready for old style holes, we do *not*
174470163ac5SPrakash Surya 		 * want to wipe out the information contained in new
174570163ac5SPrakash Surya 		 * style holes. Thus, only zero out the block pointer if
174670163ac5SPrakash Surya 		 * it's an old style hole.
174770163ac5SPrakash Surya 		 */
174870163ac5SPrakash Surya 		if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
174970163ac5SPrakash Surya 		    dr->dt.dl.dr_overridden_by.blk_birth == 0)
1750b50a0fe0SNeil Perrin 			BP_ZERO(&dr->dt.dl.dr_overridden_by);
1751b24ab676SJeff Bonwick 	} else {
1752b24ab676SJeff Bonwick 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1753b24ab676SJeff Bonwick 	}
1754c5c6ffa0Smaybee 	cv_broadcast(&db->db_changed);
1755b50a0fe0SNeil Perrin 	mutex_exit(&db->db_mtx);
1756b50a0fe0SNeil Perrin 
1757b24ab676SJeff Bonwick 	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1758c717a561Smaybee 
1759b24ab676SJeff Bonwick 	kmem_free(dsa, sizeof (*dsa));
1760b24ab676SJeff Bonwick }
1761b24ab676SJeff Bonwick 
1762b24ab676SJeff Bonwick static void
1763b24ab676SJeff Bonwick dmu_sync_late_arrival_done(zio_t *zio)
1764b24ab676SJeff Bonwick {
1765b24ab676SJeff Bonwick 	blkptr_t *bp = zio->io_bp;
1766b24ab676SJeff Bonwick 	dmu_sync_arg_t *dsa = zio->io_private;
176780901aeaSGeorge Wilson 	blkptr_t *bp_orig = &zio->io_bp_orig;
1768cab3a55eSPrakash Surya 	zgd_t *zgd = dsa->dsa_zgd;
1769b24ab676SJeff Bonwick 
1770cab3a55eSPrakash Surya 	if (zio->io_error == 0) {
1771cab3a55eSPrakash Surya 		/*
1772cab3a55eSPrakash Surya 		 * Record the vdev(s) backing this blkptr so they can be
1773cab3a55eSPrakash Surya 		 * flushed after the writes for the lwb have completed.
1774cab3a55eSPrakash Surya 		 */
1775cab3a55eSPrakash Surya 		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1776cab3a55eSPrakash Surya 
1777cab3a55eSPrakash Surya 		if (!BP_IS_HOLE(bp)) {
1778b7edcb94SMatthew Ahrens 			ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
177980901aeaSGeorge Wilson 			ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
1780b24ab676SJeff Bonwick 			ASSERT(zio->io_bp->blk_birth == zio->io_txg);
1781b24ab676SJeff Bonwick 			ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
1782b24ab676SJeff Bonwick 			zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
1783b24ab676SJeff Bonwick 		}
1784cab3a55eSPrakash Surya 	}
1785b24ab676SJeff Bonwick 
1786b24ab676SJeff Bonwick 	dmu_tx_commit(dsa->dsa_tx);
1787b24ab676SJeff Bonwick 
1788b24ab676SJeff Bonwick 	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1789b24ab676SJeff Bonwick 
1790770499e1SDan Kimmel 	abd_put(zio->io_abd);
1791b24ab676SJeff Bonwick 	kmem_free(dsa, sizeof (*dsa));
1792b24ab676SJeff Bonwick }
1793b24ab676SJeff Bonwick 
1794b24ab676SJeff Bonwick static int
1795b24ab676SJeff Bonwick dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
17967802d7bfSMatthew Ahrens     zio_prop_t *zp, zbookmark_phys_t *zb)
1797b24ab676SJeff Bonwick {
1798b24ab676SJeff Bonwick 	dmu_sync_arg_t *dsa;
1799b24ab676SJeff Bonwick 	dmu_tx_t *tx;
1800b24ab676SJeff Bonwick 
1801b24ab676SJeff Bonwick 	tx = dmu_tx_create(os);
1802b24ab676SJeff Bonwick 	dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
18036e1f5caaSNeil Perrin 	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
1804b24ab676SJeff Bonwick 		dmu_tx_abort(tx);
1805be6fd75aSMatthew Ahrens 		/* Make zl_get_data do txg_wait_synced() */
1806be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
1807b24ab676SJeff Bonwick 	}
1808b24ab676SJeff Bonwick 
18091271e4b1SPrakash Surya 	/*
18101271e4b1SPrakash Surya 	 * In order to prevent the zgd's lwb from being free'd prior to
18111271e4b1SPrakash Surya 	 * dmu_sync_late_arrival_done() being called, we have to ensure
18121271e4b1SPrakash Surya 	 * the lwb's "max txg" takes this tx's txg into account.
18131271e4b1SPrakash Surya 	 */
18141271e4b1SPrakash Surya 	zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
18151271e4b1SPrakash Surya 
1816b24ab676SJeff Bonwick 	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1817b24ab676SJeff Bonwick 	dsa->dsa_dr = NULL;
1818b24ab676SJeff Bonwick 	dsa->dsa_done = done;
1819b24ab676SJeff Bonwick 	dsa->dsa_zgd = zgd;
1820b24ab676SJeff Bonwick 	dsa->dsa_tx = tx;
1821b24ab676SJeff Bonwick 
1822b7edcb94SMatthew Ahrens 	/*
1823b7edcb94SMatthew Ahrens 	 * Since we are currently syncing this txg, it's nontrivial to
1824b7edcb94SMatthew Ahrens 	 * determine what BP to nopwrite against, so we disable nopwrite.
1825b7edcb94SMatthew Ahrens 	 *
1826b7edcb94SMatthew Ahrens 	 * When syncing, the db_blkptr is initially the BP of the previous
1827b7edcb94SMatthew Ahrens 	 * txg.  We can not nopwrite against it because it will be changed
1828b7edcb94SMatthew Ahrens 	 * (this is similar to the non-late-arrival case where the dbuf is
1829b7edcb94SMatthew Ahrens 	 * dirty in a future txg).
1830b7edcb94SMatthew Ahrens 	 *
1831b7edcb94SMatthew Ahrens 	 * Then dbuf_write_ready() sets db_blkptr to the location we will write.
1832b7edcb94SMatthew Ahrens 	 * We can not nopwrite against it because although the BP will not
1833b7edcb94SMatthew Ahrens 	 * (typically) be changed, the data has not yet been persisted to this
1834b7edcb94SMatthew Ahrens 	 * location.
1835b7edcb94SMatthew Ahrens 	 *
1836b7edcb94SMatthew Ahrens 	 * Finally, when dbuf_write_done() is called, it is theoretically
1837b7edcb94SMatthew Ahrens 	 * possible to always nopwrite, because the data that was written in
1838b7edcb94SMatthew Ahrens 	 * this txg is the same data that we are trying to write.  However we
1839b7edcb94SMatthew Ahrens 	 * would need to check that this dbuf is not dirty in any future
1840b7edcb94SMatthew Ahrens 	 * txg's (as we do in the normal dmu_sync() path). For simplicity, we
1841b7edcb94SMatthew Ahrens 	 * don't nopwrite in this case.
1842b7edcb94SMatthew Ahrens 	 */
1843b7edcb94SMatthew Ahrens 	zp->zp_nopwrite = B_FALSE;
1844b7edcb94SMatthew Ahrens 
18455602294fSDan Kimmel 	zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
1846770499e1SDan Kimmel 	    abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
1847770499e1SDan Kimmel 	    zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
1848770499e1SDan Kimmel 	    dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done,
1849770499e1SDan Kimmel 	    dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
1850b24ab676SJeff Bonwick 
1851b24ab676SJeff Bonwick 	return (0);
1852c5c6ffa0Smaybee }
1853c5c6ffa0Smaybee 
1854fa9e4066Sahrens /*
1855c5c6ffa0Smaybee  * Intent log support: sync the block associated with db to disk.
1856c5c6ffa0Smaybee  * N.B. and XXX: the caller is responsible for making sure that the
1857c5c6ffa0Smaybee  * data isn't changing while dmu_sync() is writing it.
1858fa9e4066Sahrens  *
1859fa9e4066Sahrens  * Return values:
1860fa9e4066Sahrens  *
186180901aeaSGeorge Wilson  *	EEXIST: this txg has already been synced, so there's nothing to do.
1862fa9e4066Sahrens  *		The caller should not log the write.
1863fa9e4066Sahrens  *
1864fa9e4066Sahrens  *	ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
1865fa9e4066Sahrens  *		The caller should not log the write.
1866fa9e4066Sahrens  *
1867c5c6ffa0Smaybee  *	EALREADY: this block is already in the process of being synced.
1868c5c6ffa0Smaybee  *		The caller should track its progress (somehow).
1869fa9e4066Sahrens  *
1870b24ab676SJeff Bonwick  *	EIO: could not do the I/O.
1871b24ab676SJeff Bonwick  *		The caller should do a txg_wait_synced().
1872fa9e4066Sahrens  *
1873b24ab676SJeff Bonwick  *	0: the I/O has been initiated.
1874b24ab676SJeff Bonwick  *		The caller should log this blkptr in the done callback.
1875b24ab676SJeff Bonwick  *		It is possible that the I/O will fail, in which case
1876b24ab676SJeff Bonwick  *		the error will be reported to the done callback and
1877b24ab676SJeff Bonwick  *		propagated to pio from zio_done().
1878fa9e4066Sahrens  */
1879fa9e4066Sahrens int
1880b24ab676SJeff Bonwick dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
1881fa9e4066Sahrens {
1882b24ab676SJeff Bonwick 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
1883503ad85cSMatthew Ahrens 	objset_t *os = db->db_objset;
1884b24ab676SJeff Bonwick 	dsl_dataset_t *ds = os->os_dsl_dataset;
1885c717a561Smaybee 	dbuf_dirty_record_t *dr;
1886b24ab676SJeff Bonwick 	dmu_sync_arg_t *dsa;
18877802d7bfSMatthew Ahrens 	zbookmark_phys_t zb;
1888b24ab676SJeff Bonwick 	zio_prop_t zp;
1889744947dcSTom Erickson 	dnode_t *dn;
1890fa9e4066Sahrens 
1891b24ab676SJeff Bonwick 	ASSERT(pio != NULL);
1892fa9e4066Sahrens 	ASSERT(txg != 0);
1893fa9e4066Sahrens 
1894b24ab676SJeff Bonwick 	SET_BOOKMARK(&zb, ds->ds_object,
1895b24ab676SJeff Bonwick 	    db->db.db_object, db->db_level, db->db_blkid);
1896b24ab676SJeff Bonwick 
1897744947dcSTom Erickson 	DB_DNODE_ENTER(db);
1898744947dcSTom Erickson 	dn = DB_DNODE(db);
1899adaec86aSMatthew Ahrens 	dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
1900744947dcSTom Erickson 	DB_DNODE_EXIT(db);
1901fa9e4066Sahrens 
1902fa9e4066Sahrens 	/*
1903b24ab676SJeff Bonwick 	 * If we're frozen (running ziltest), we always need to generate a bp.
1904ea8dc4b6Seschrock 	 */
1905b24ab676SJeff Bonwick 	if (txg > spa_freeze_txg(os->os_spa))
1906b24ab676SJeff Bonwick 		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1907ea8dc4b6Seschrock 
1908ea8dc4b6Seschrock 	/*
1909b24ab676SJeff Bonwick 	 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
1910b24ab676SJeff Bonwick 	 * and us.  If we determine that this txg is not yet syncing,
1911b24ab676SJeff Bonwick 	 * but it begins to sync a moment later, that's OK because the
1912b24ab676SJeff Bonwick 	 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
1913fa9e4066Sahrens 	 */
1914b24ab676SJeff Bonwick 	mutex_enter(&db->db_mtx);
1915b24ab676SJeff Bonwick 
1916b24ab676SJeff Bonwick 	if (txg <= spa_last_synced_txg(os->os_spa)) {
1917fa9e4066Sahrens 		/*
1918b24ab676SJeff Bonwick 		 * This txg has already synced.  There's nothing to do.
1919fa9e4066Sahrens 		 */
1920b24ab676SJeff Bonwick 		mutex_exit(&db->db_mtx);
1921be6fd75aSMatthew Ahrens 		return (SET_ERROR(EEXIST));
1922fa9e4066Sahrens 	}
1923fa9e4066Sahrens 
1924b24ab676SJeff Bonwick 	if (txg <= spa_syncing_txg(os->os_spa)) {
1925c5c6ffa0Smaybee 		/*
1926b24ab676SJeff Bonwick 		 * This txg is currently syncing, so we can't mess with
1927b24ab676SJeff Bonwick 		 * the dirty record anymore; just write a new log block.
1928c5c6ffa0Smaybee 		 */
192913506d1eSmaybee 		mutex_exit(&db->db_mtx);
1930b24ab676SJeff Bonwick 		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1931c5c6ffa0Smaybee 	}
1932c5c6ffa0Smaybee 
1933c717a561Smaybee 	dr = db->db_last_dirty;
1934b24ab676SJeff Bonwick 	while (dr && dr->dr_txg != txg)
1935c717a561Smaybee 		dr = dr->dr_next;
1936b24ab676SJeff Bonwick 
1937b24ab676SJeff Bonwick 	if (dr == NULL) {
1938c5c6ffa0Smaybee 		/*
1939b24ab676SJeff Bonwick 		 * There's no dr for this dbuf, so it must have been freed.
1940fa9e4066Sahrens 		 * There's no need to log writes to freed blocks, so we're done.
1941fa9e4066Sahrens 		 */
1942fa9e4066Sahrens 		mutex_exit(&db->db_mtx);
1943be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENOENT));
1944fa9e4066Sahrens 	}
1945fa9e4066Sahrens 
194680901aeaSGeorge Wilson 	ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg);
194780901aeaSGeorge Wilson 
1948b7edcb94SMatthew Ahrens 	if (db->db_blkptr != NULL) {
1949b7edcb94SMatthew Ahrens 		/*
1950b7edcb94SMatthew Ahrens 		 * We need to fill in zgd_bp with the current blkptr so that
1951b7edcb94SMatthew Ahrens 		 * the nopwrite code can check if we're writing the same
1952b7edcb94SMatthew Ahrens 		 * data that's already on disk.  We can only nopwrite if we
1953b7edcb94SMatthew Ahrens 		 * are sure that after making the copy, db_blkptr will not
1954b7edcb94SMatthew Ahrens 		 * change until our i/o completes.  We ensure this by
1955b7edcb94SMatthew Ahrens 		 * holding the db_mtx, and only allowing nopwrite if the
1956b7edcb94SMatthew Ahrens 		 * block is not already dirty (see below).  This is verified
1957b7edcb94SMatthew Ahrens 		 * by dmu_sync_done(), which VERIFYs that the db_blkptr has
1958b7edcb94SMatthew Ahrens 		 * not changed.
1959b7edcb94SMatthew Ahrens 		 */
1960b7edcb94SMatthew Ahrens 		*zgd->zgd_bp = *db->db_blkptr;
1961b7edcb94SMatthew Ahrens 	}
1962b7edcb94SMatthew Ahrens 
196380901aeaSGeorge Wilson 	/*
196434e8acefSMatthew Ahrens 	 * Assume the on-disk data is X, the current syncing data (in
196534e8acefSMatthew Ahrens 	 * txg - 1) is Y, and the current in-memory data is Z (currently
196634e8acefSMatthew Ahrens 	 * in dmu_sync).
196734e8acefSMatthew Ahrens 	 *
196834e8acefSMatthew Ahrens 	 * We usually want to perform a nopwrite if X and Z are the
196934e8acefSMatthew Ahrens 	 * same.  However, if Y is different (i.e. the BP is going to
197034e8acefSMatthew Ahrens 	 * change before this write takes effect), then a nopwrite will
197134e8acefSMatthew Ahrens 	 * be incorrect - we would override with X, which could have
197234e8acefSMatthew Ahrens 	 * been freed when Y was written.
197334e8acefSMatthew Ahrens 	 *
197434e8acefSMatthew Ahrens 	 * (Note that this is not a concern when we are nop-writing from
197534e8acefSMatthew Ahrens 	 * syncing context, because X and Y must be identical, because
197634e8acefSMatthew Ahrens 	 * all previous txgs have been synced.)
197734e8acefSMatthew Ahrens 	 *
197834e8acefSMatthew Ahrens 	 * Therefore, we disable nopwrite if the current BP could change
197934e8acefSMatthew Ahrens 	 * before this TXG.  There are two ways it could change: by
198034e8acefSMatthew Ahrens 	 * being dirty (dr_next is non-NULL), or by being freed
198134e8acefSMatthew Ahrens 	 * (dnode_block_freed()).  This behavior is verified by
198234e8acefSMatthew Ahrens 	 * zio_done(), which VERIFYs that the override BP is identical
198334e8acefSMatthew Ahrens 	 * to the on-disk BP.
198480901aeaSGeorge Wilson 	 */
198534e8acefSMatthew Ahrens 	DB_DNODE_ENTER(db);
198634e8acefSMatthew Ahrens 	dn = DB_DNODE(db);
198734e8acefSMatthew Ahrens 	if (dr->dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
198880901aeaSGeorge Wilson 		zp.zp_nopwrite = B_FALSE;
198934e8acefSMatthew Ahrens 	DB_DNODE_EXIT(db);
199080901aeaSGeorge Wilson 
1991c717a561Smaybee 	ASSERT(dr->dr_txg == txg);
1992b24ab676SJeff Bonwick 	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
1993b24ab676SJeff Bonwick 	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
1994c5c6ffa0Smaybee 		/*
1995b24ab676SJeff Bonwick 		 * We have already issued a sync write for this buffer,
1996b24ab676SJeff Bonwick 		 * or this buffer has already been synced.  It could not
1997c717a561Smaybee 		 * have been dirtied since, or we would have cleared the state.
1998c717a561Smaybee 		 */
1999c717a561Smaybee 		mutex_exit(&db->db_mtx);
2000be6fd75aSMatthew Ahrens 		return (SET_ERROR(EALREADY));
2001c717a561Smaybee 	}
2002c717a561Smaybee 
2003b24ab676SJeff Bonwick 	ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2004c717a561Smaybee 	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
2005fa9e4066Sahrens 	mutex_exit(&db->db_mtx);
2006fa9e4066Sahrens 
2007b24ab676SJeff Bonwick 	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
2008b24ab676SJeff Bonwick 	dsa->dsa_dr = dr;
2009b24ab676SJeff Bonwick 	dsa->dsa_done = done;
2010b24ab676SJeff Bonwick 	dsa->dsa_zgd = zgd;
2011b24ab676SJeff Bonwick 	dsa->dsa_tx = NULL;
2012e14bb325SJeff Bonwick 
2013b24ab676SJeff Bonwick 	zio_nowait(arc_write(pio, os->os_spa, txg,
2014b7edcb94SMatthew Ahrens 	    zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
2015dcbf3bd6SGeorge Wilson 	    &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
20168df0bcf0SPaul Dagnelie 	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
2017e14bb325SJeff Bonwick 
2018b24ab676SJeff Bonwick 	return (0);
2019fa9e4066Sahrens }
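
/*
 * Illustrative sketch (not compiled): mapping dmu_sync()'s return
 * values onto ZIL behavior, loosely modeled on the zfs_get_data()
 * path.  example_sync_done(), example_log_write(), and the zgd setup
 * are hypothetical.
 */
#if 0
static void
example_sync_done(zgd_t *zgd, int error)
{
	/* release zgd's dbuf/range-lock holds; log the bp on success */
}

static int
example_log_write(zio_t *pio, uint64_t txg, zgd_t *zgd)
{
	int error = dmu_sync(pio, txg, example_sync_done, zgd);

	switch (error) {
	case 0:		/* I/O issued; bp is logged from the callback */
	case EEXIST:	/* txg already synced; don't log the write */
	case ENOENT:	/* block was freed; don't log the write */
	case EALREADY:	/* a sync of this block is already in flight */
		return (0);
	default:	/* e.g. EIO: caller falls back to txg_wait_synced() */
		return (error);
	}
}
#endif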
2020fa9e4066Sahrens 
2021fa9e4066Sahrens int
2022fa9e4066Sahrens dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
2023fa9e4066Sahrens     dmu_tx_t *tx)
2024fa9e4066Sahrens {
2025ea8dc4b6Seschrock 	dnode_t *dn;
2026ea8dc4b6Seschrock 	int err;
2027ea8dc4b6Seschrock 
2028503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
2029ea8dc4b6Seschrock 	if (err)
2030ea8dc4b6Seschrock 		return (err);
2031ea8dc4b6Seschrock 	err = dnode_set_blksz(dn, size, ibs, tx);
2032fa9e4066Sahrens 	dnode_rele(dn, FTAG);
2033fa9e4066Sahrens 	return (err);
2034fa9e4066Sahrens }
2035fa9e4066Sahrens 
2036fa9e4066Sahrens void
2037fa9e4066Sahrens dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
2038fa9e4066Sahrens     dmu_tx_t *tx)
2039fa9e4066Sahrens {
2040ea8dc4b6Seschrock 	dnode_t *dn;
2041ea8dc4b6Seschrock 
20425d7b4d43SMatthew Ahrens 	/*
20435d7b4d43SMatthew Ahrens 	 * Send streams include each object's checksum function.  This
20445d7b4d43SMatthew Ahrens 	 * check ensures that the receiving system can understand the
20455d7b4d43SMatthew Ahrens 	 * checksum function transmitted.
20465d7b4d43SMatthew Ahrens 	 */
20475d7b4d43SMatthew Ahrens 	ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);
20485d7b4d43SMatthew Ahrens 
20495d7b4d43SMatthew Ahrens 	VERIFY0(dnode_hold(os, object, FTAG, &dn));
20505d7b4d43SMatthew Ahrens 	ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
2051fa9e4066Sahrens 	dn->dn_checksum = checksum;
2052fa9e4066Sahrens 	dnode_setdirty(dn, tx);
2053fa9e4066Sahrens 	dnode_rele(dn, FTAG);
2054fa9e4066Sahrens }
2055fa9e4066Sahrens 
2056fa9e4066Sahrens void
2057fa9e4066Sahrens dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
2058fa9e4066Sahrens     dmu_tx_t *tx)
2059fa9e4066Sahrens {
2060ea8dc4b6Seschrock 	dnode_t *dn;
2061ea8dc4b6Seschrock 
20625d7b4d43SMatthew Ahrens 	/*
20635d7b4d43SMatthew Ahrens 	 * Send streams include each object's compression function.  This
20645d7b4d43SMatthew Ahrens 	 * check ensures that the receiving system can understand the
20655d7b4d43SMatthew Ahrens 	 * compression function transmitted.
20665d7b4d43SMatthew Ahrens 	 */
20675d7b4d43SMatthew Ahrens 	ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);
20685d7b4d43SMatthew Ahrens 
20695d7b4d43SMatthew Ahrens 	VERIFY0(dnode_hold(os, object, FTAG, &dn));
2070fa9e4066Sahrens 	dn->dn_compress = compress;
2071fa9e4066Sahrens 	dnode_setdirty(dn, tx);
2072fa9e4066Sahrens 	dnode_rele(dn, FTAG);
2073fa9e4066Sahrens }
2074fa9e4066Sahrens 
2075b24ab676SJeff Bonwick int zfs_mdcomp_disable = 0;
2076b24ab676SJeff Bonwick 
2077edf345e6SMatthew Ahrens /*
2078edf345e6SMatthew Ahrens  * When the "redundant_metadata" property is set to "most", only indirect
2079edf345e6SMatthew Ahrens  * blocks of this level and higher will have an additional ditto block.
2080edf345e6SMatthew Ahrens  */
2081edf345e6SMatthew Ahrens int zfs_redundant_metadata_most_ditto_level = 2;
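
/*
 * For example, at the default of 2 with redundant_metadata=most, L1
 * indirects get only the dataset's normal copies count, while L2 and
 * higher indirects (and metadata object types and spill blocks) get
 * one extra copy; see the copies++ logic in dmu_write_policy() below.
 */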
2082edf345e6SMatthew Ahrens 
2083b24ab676SJeff Bonwick void
2084adaec86aSMatthew Ahrens dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
2085b24ab676SJeff Bonwick {
2086b24ab676SJeff Bonwick 	dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
2087ad135b5dSChristopher Siden 	boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
20881d8ccc7bSMark Shellenbaum 	    (wp & WP_SPILL));
2089b24ab676SJeff Bonwick 	enum zio_checksum checksum = os->os_checksum;
2090b24ab676SJeff Bonwick 	enum zio_compress compress = os->os_compress;
2091b24ab676SJeff Bonwick 	enum zio_checksum dedup_checksum = os->os_dedup_checksum;
20927540df39SGeorge Wilson 	boolean_t dedup = B_FALSE;
20937540df39SGeorge Wilson 	boolean_t nopwrite = B_FALSE;
2094b24ab676SJeff Bonwick 	boolean_t dedup_verify = os->os_dedup_verify;
2095b24ab676SJeff Bonwick 	int copies = os->os_copies;
2096b24ab676SJeff Bonwick 
2097b24ab676SJeff Bonwick 	/*
209880901aeaSGeorge Wilson 	 * We maintain different write policies for each of the following
209980901aeaSGeorge Wilson 	 * types of data:
210080901aeaSGeorge Wilson 	 *	 1. metadata
210180901aeaSGeorge Wilson 	 *	 2. preallocated blocks (i.e. level-0 blocks of a dump device)
210280901aeaSGeorge Wilson 	 *	 3. all other level 0 blocks
2103b24ab676SJeff Bonwick 	 */
2104b24ab676SJeff Bonwick 	if (ismd) {
2105db1741f5SJustin T. Gibbs 		if (zfs_mdcomp_disable) {
2106db1741f5SJustin T. Gibbs 			compress = ZIO_COMPRESS_EMPTY;
2107db1741f5SJustin T. Gibbs 		} else {
2108b24ab676SJeff Bonwick 			/*
210980901aeaSGeorge Wilson 			 * XXX -- we should design a compression algorithm
211080901aeaSGeorge Wilson 			 * that specializes in arrays of bps.
211180901aeaSGeorge Wilson 			 */
2112db1741f5SJustin T. Gibbs 			compress = zio_compress_select(os->os_spa,
2113db1741f5SJustin T. Gibbs 			    ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);
2114b8289d24SDaniil Lunev 		}
211580901aeaSGeorge Wilson 
211680901aeaSGeorge Wilson 		/*
2117b24ab676SJeff Bonwick 		 * Metadata always gets checksummed.  If the data
2118b24ab676SJeff Bonwick 		 * checksum is multi-bit correctable, and it's not a
2119b24ab676SJeff Bonwick 		 * ZBT-style checksum, then it's suitable for metadata
2120b24ab676SJeff Bonwick 		 * as well.  Otherwise, the metadata checksum defaults
2121b24ab676SJeff Bonwick 		 * to fletcher4.
2122b24ab676SJeff Bonwick 		 */
212345818ee1SMatthew Ahrens 		if (!(zio_checksum_table[checksum].ci_flags &
212445818ee1SMatthew Ahrens 		    ZCHECKSUM_FLAG_METADATA) ||
212545818ee1SMatthew Ahrens 		    (zio_checksum_table[checksum].ci_flags &
212645818ee1SMatthew Ahrens 		    ZCHECKSUM_FLAG_EMBEDDED))
2127b24ab676SJeff Bonwick 			checksum = ZIO_CHECKSUM_FLETCHER_4;
2128edf345e6SMatthew Ahrens 
2129edf345e6SMatthew Ahrens 		if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
2130edf345e6SMatthew Ahrens 		    (os->os_redundant_metadata ==
2131edf345e6SMatthew Ahrens 		    ZFS_REDUNDANT_METADATA_MOST &&
2132edf345e6SMatthew Ahrens 		    (level >= zfs_redundant_metadata_most_ditto_level ||
2133edf345e6SMatthew Ahrens 		    DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
2134edf345e6SMatthew Ahrens 			copies++;
213580901aeaSGeorge Wilson 	} else if (wp & WP_NOFILL) {
213680901aeaSGeorge Wilson 		ASSERT(level == 0);
2137b24ab676SJeff Bonwick 
2138b24ab676SJeff Bonwick 		/*
213980901aeaSGeorge Wilson 		 * If we're writing preallocated blocks, we aren't actually
214080901aeaSGeorge Wilson 		 * writing them, so don't set any policy properties.  These
214180901aeaSGeorge Wilson 		 * blocks are currently only used by an external subsystem
214280901aeaSGeorge Wilson 		 * outside of zfs (i.e. dump) and are not written by the zio
214380901aeaSGeorge Wilson 		 * pipeline.
2144b24ab676SJeff Bonwick 		 */
214580901aeaSGeorge Wilson 		compress = ZIO_COMPRESS_OFF;
2146810e43b2SBill Pijewski 		checksum = ZIO_CHECKSUM_NOPARITY;
2147b24ab676SJeff Bonwick 	} else {
2148db1741f5SJustin T. Gibbs 		compress = zio_compress_select(os->os_spa, dn->dn_compress,
2149db1741f5SJustin T. Gibbs 		    compress);
215080901aeaSGeorge Wilson 
215180901aeaSGeorge Wilson 		checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
215280901aeaSGeorge Wilson 		    zio_checksum_select(dn->dn_checksum, checksum) :
215380901aeaSGeorge Wilson 		    dedup_checksum;
215480901aeaSGeorge Wilson 
215580901aeaSGeorge Wilson 		/*
215680901aeaSGeorge Wilson 		 * Determine dedup setting.  If we are in dmu_sync(),
215780901aeaSGeorge Wilson 		 * we won't actually dedup now because that's all
215880901aeaSGeorge Wilson 		 * done in syncing context; but we do want to use the
215980901aeaSGeorge Wilson 		 * dedup checksum.  If the checksum is not strong
216080901aeaSGeorge Wilson 		 * enough to ensure unique signatures, force
216180901aeaSGeorge Wilson 		 * dedup_verify.
216280901aeaSGeorge Wilson 		 */
216380901aeaSGeorge Wilson 		if (dedup_checksum != ZIO_CHECKSUM_OFF) {
216480901aeaSGeorge Wilson 			dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
216545818ee1SMatthew Ahrens 			if (!(zio_checksum_table[checksum].ci_flags &
216645818ee1SMatthew Ahrens 			    ZCHECKSUM_FLAG_DEDUP))
216780901aeaSGeorge Wilson 				dedup_verify = B_TRUE;
2168b24ab676SJeff Bonwick 		}
2169b24ab676SJeff Bonwick 
2170b24ab676SJeff Bonwick 		/*
217145818ee1SMatthew Ahrens 		 * Enable nopwrite if we have a secure enough checksum
217245818ee1SMatthew Ahrens 		 * algorithm (see comment in zio_nop_write) and
217345818ee1SMatthew Ahrens 		 * compression is enabled.  We don't enable nopwrite if
217445818ee1SMatthew Ahrens 		 * dedup is enabled as the two features are mutually
217545818ee1SMatthew Ahrens 		 * exclusive.
2176b24ab676SJeff Bonwick 		 */
217745818ee1SMatthew Ahrens 		nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
217845818ee1SMatthew Ahrens 		    ZCHECKSUM_FLAG_NOPWRITE) &&
217980901aeaSGeorge Wilson 		    compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
2180b24ab676SJeff Bonwick 	}
2181b24ab676SJeff Bonwick 
2182b24ab676SJeff Bonwick 	zp->zp_checksum = checksum;
2183adaec86aSMatthew Ahrens 	zp->zp_compress = compress;
21845602294fSDan Kimmel 	ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
21855602294fSDan Kimmel 
21860a586ceaSMark Shellenbaum 	zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
2187b24ab676SJeff Bonwick 	zp->zp_level = level;
2188edf345e6SMatthew Ahrens 	zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
2189b24ab676SJeff Bonwick 	zp->zp_dedup = dedup;
2190b24ab676SJeff Bonwick 	zp->zp_dedup_verify = dedup && dedup_verify;
219180901aeaSGeorge Wilson 	zp->zp_nopwrite = nopwrite;
2192b24ab676SJeff Bonwick }
2193b24ab676SJeff Bonwick 
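/*
 * Editor's note -- an illustrative sketch, not part of the original
 * source: callers such as dmu_sync() and dbuf_write() use
 * dmu_write_policy() to fill in a zio_prop_t before handing the block
 * to the zio pipeline, roughly as follows:
 *
 *	zio_prop_t zp;
 *
 *	DB_DNODE_ENTER(db);
 *	dmu_write_policy(os, DB_DNODE(db), db->db_level, WP_DMU_SYNC, &zp);
 *	DB_DNODE_EXIT(db);
 *
 * The resulting zp_checksum, zp_compress, zp_copies, zp_dedup and
 * zp_nopwrite settings are then consumed by zio_write().
 */
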
219444cd46caSbillm int
2195fa9e4066Sahrens dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
2196fa9e4066Sahrens {
2197fa9e4066Sahrens 	dnode_t *dn;
21982bcf0248SMax Grossman 	int err;
2199fa9e4066Sahrens 
2200fa9e4066Sahrens 	/*
2201fa9e4066Sahrens 	 * Sync any current changes before
2202fa9e4066Sahrens 	 * we go trundling through the block pointers.
2203fa9e4066Sahrens 	 */
22042bcf0248SMax Grossman 	err = dmu_object_wait_synced(os, object);
22052bcf0248SMax Grossman 	if (err) {
22062bcf0248SMax Grossman 		return (err);
2207fa9e4066Sahrens 	}
22082bcf0248SMax Grossman 
2209503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
22102bcf0248SMax Grossman 	if (err) {
2211ea8dc4b6Seschrock 		return (err);
2212fa9e4066Sahrens 	}
2213fa9e4066Sahrens 
2214cdb0ab79Smaybee 	err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
2215fa9e4066Sahrens 	dnode_rele(dn, FTAG);
2216fa9e4066Sahrens 
2217fa9e4066Sahrens 	return (err);
2218fa9e4066Sahrens }
2219fa9e4066Sahrens 
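/*
 * Editor's note -- an illustrative sketch, not part of the original
 * source: dmu_offset_next() is the workhorse behind SEEK_HOLE and
 * SEEK_DATA.  zfs_holey() uses it roughly like this ("zp" being the
 * caller's znode):
 *
 *	boolean_t hole = (cmd == _FIO_SEEK_HOLE);
 *	uint64_t noff = (uint64_t)*off;
 *
 *	error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
 *
 * On success, noff is advanced to the next hole or data boundary.
 */
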
22202bcf0248SMax Grossman /*
22212bcf0248SMax Grossman  * Given a ZFS object, flush all of its dirty blocks to disk if its
22222bcf0248SMax Grossman  * dnode is dirty in any open txg.  This ensures that the on-disk DMU
22232bcf0248SMax Grossman  * object info is up to date.  A more efficient future version might
22242bcf0248SMax Grossman  * just find the highest dirty TXG and wait for that txg to be synced.
22262bcf0248SMax Grossman  */
22272bcf0248SMax Grossman int
22289a686fbcSPaul Dagnelie dmu_object_wait_synced(objset_t *os, uint64_t object)
22299a686fbcSPaul Dagnelie {
22302bcf0248SMax Grossman 	dnode_t *dn;
22312bcf0248SMax Grossman 	int error, i;
22322bcf0248SMax Grossman 
22332bcf0248SMax Grossman 	error = dnode_hold(os, object, FTAG, &dn);
22342bcf0248SMax Grossman 	if (error) {
22352bcf0248SMax Grossman 		return (error);
22362bcf0248SMax Grossman 	}
22372bcf0248SMax Grossman 
22382bcf0248SMax Grossman 	for (i = 0; i < TXG_SIZE; i++) {
22392bcf0248SMax Grossman 		if (list_link_active(&dn->dn_dirty_link[i])) {
22402bcf0248SMax Grossman 			break;
22412bcf0248SMax Grossman 		}
22422bcf0248SMax Grossman 	}
22432bcf0248SMax Grossman 	dnode_rele(dn, FTAG);
22442bcf0248SMax Grossman 	if (i != TXG_SIZE) {
22452bcf0248SMax Grossman 		txg_wait_synced(dmu_objset_pool(os), 0);
22462bcf0248SMax Grossman 	}
22472bcf0248SMax Grossman 
22482bcf0248SMax Grossman 	return (0);
22492bcf0248SMax Grossman }
22502bcf0248SMax Grossman 
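/*
 * Editor's note -- a hedged sketch of the "more efficient future
 * version" mentioned in the comment above.  It assumes a hypothetical
 * dn_dirty_txg[] field recording the txg that dirtied each slot; no
 * such field exists in this source (only the per-slot list link is
 * kept), which is why the conservative txg_wait_synced(pool, 0) is
 * used today:
 *
 *	uint64_t txg = 0;
 *	for (i = 0; i < TXG_SIZE; i++) {
 *		if (list_link_active(&dn->dn_dirty_link[i]))
 *			txg = MAX(txg, dn->dn_dirty_txg[i]);
 *	}
 *	if (txg != 0)
 *		txg_wait_synced(dmu_objset_pool(os), txg);
 */
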
2251fa9e4066Sahrens void
2252fa9e4066Sahrens dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
2253fa9e4066Sahrens {
2254b24ab676SJeff Bonwick 	dnode_phys_t *dnp;
2255b24ab676SJeff Bonwick 
2256fa9e4066Sahrens 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
2257fa9e4066Sahrens 	mutex_enter(&dn->dn_mtx);
2258fa9e4066Sahrens 
2259b24ab676SJeff Bonwick 	dnp = dn->dn_phys;
2260b24ab676SJeff Bonwick 
2261fa9e4066Sahrens 	doi->doi_data_block_size = dn->dn_datablksz;
2262fa9e4066Sahrens 	doi->doi_metadata_block_size = dn->dn_indblkshift ?
2263fa9e4066Sahrens 	    1ULL << dn->dn_indblkshift : 0;
2264b24ab676SJeff Bonwick 	doi->doi_type = dn->dn_type;
2265b24ab676SJeff Bonwick 	doi->doi_bonus_type = dn->dn_bonustype;
2266b24ab676SJeff Bonwick 	doi->doi_bonus_size = dn->dn_bonuslen;
2267*54811da5SToomas Soome 	doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
2268fa9e4066Sahrens 	doi->doi_indirection = dn->dn_nlevels;
2269fa9e4066Sahrens 	doi->doi_checksum = dn->dn_checksum;
2270fa9e4066Sahrens 	doi->doi_compress = dn->dn_compress;
2271e77d42eaSMatthew Ahrens 	doi->doi_nblkptr = dn->dn_nblkptr;
2272b24ab676SJeff Bonwick 	doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
2273d0475637SMatthew Ahrens 	doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
2274b24ab676SJeff Bonwick 	doi->doi_fill_count = 0;
2275b24ab676SJeff Bonwick 	for (int i = 0; i < dnp->dn_nblkptr; i++)
22765d7b4d43SMatthew Ahrens 		doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
2277fa9e4066Sahrens 
2278fa9e4066Sahrens 	mutex_exit(&dn->dn_mtx);
2279fa9e4066Sahrens 	rw_exit(&dn->dn_struct_rwlock);
2280fa9e4066Sahrens }
2281fa9e4066Sahrens 
2282fa9e4066Sahrens /*
2283fa9e4066Sahrens  * Get information on a DMU object.
2284fa9e4066Sahrens  * If doi is NULL, just indicates whether the object exists.
2285fa9e4066Sahrens  */
2286fa9e4066Sahrens int
2287fa9e4066Sahrens dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
2288fa9e4066Sahrens {
2289ea8dc4b6Seschrock 	dnode_t *dn;
2290503ad85cSMatthew Ahrens 	int err = dnode_hold(os, object, FTAG, &dn);
2291fa9e4066Sahrens 
2292ea8dc4b6Seschrock 	if (err)
2293ea8dc4b6Seschrock 		return (err);
2294fa9e4066Sahrens 
2295fa9e4066Sahrens 	if (doi != NULL)
2296fa9e4066Sahrens 		dmu_object_info_from_dnode(dn, doi);
2297fa9e4066Sahrens 
2298fa9e4066Sahrens 	dnode_rele(dn, FTAG);
2299fa9e4066Sahrens 	return (0);
2300fa9e4066Sahrens }
2301fa9e4066Sahrens 
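/*
 * Editor's note -- an illustrative sketch, not part of the original
 * source: with a NULL doi this doubles as a cheap existence check:
 *
 *	if (dmu_object_info(os, object, NULL) == 0) {
 *		... the object exists ...
 *	}
 *
 *	dmu_object_info_t doi;
 *	int err = dmu_object_info(os, object, &doi);
 *	if (err == 0)
 *		blksize = doi.doi_data_block_size;
 */
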
2302fa9e4066Sahrens /*
2303fa9e4066Sahrens  * As above, but faster; can be used when you have a held dbuf in hand.
2304fa9e4066Sahrens  */
2305fa9e4066Sahrens void
2306744947dcSTom Erickson dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
2307fa9e4066Sahrens {
2308744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2309744947dcSTom Erickson 
2310744947dcSTom Erickson 	DB_DNODE_ENTER(db);
2311744947dcSTom Erickson 	dmu_object_info_from_dnode(DB_DNODE(db), doi);
2312744947dcSTom Erickson 	DB_DNODE_EXIT(db);
2313fa9e4066Sahrens }
2314fa9e4066Sahrens 
2315fa9e4066Sahrens /*
2316fa9e4066Sahrens  * Faster still when you only care about the size.
2317fa9e4066Sahrens  * This is specifically optimized for zfs_getattr().
2318fa9e4066Sahrens  */
2319fa9e4066Sahrens void
2320744947dcSTom Erickson dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
2321744947dcSTom Erickson     u_longlong_t *nblk512)
2322fa9e4066Sahrens {
2323744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2324744947dcSTom Erickson 	dnode_t *dn;
2325744947dcSTom Erickson 
2326744947dcSTom Erickson 	DB_DNODE_ENTER(db);
2327744947dcSTom Erickson 	dn = DB_DNODE(db);
2328fa9e4066Sahrens 
2329fa9e4066Sahrens 	*blksize = dn->dn_datablksz;
2330*54811da5SToomas Soome 	/* add in number of slots used for the dnode itself */
233199653d4eSeschrock 	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
2332*54811da5SToomas Soome 	    SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
2333*54811da5SToomas Soome 	DB_DNODE_EXIT(db);
2334*54811da5SToomas Soome }
2335*54811da5SToomas Soome 
2336*54811da5SToomas Soome void
2337*54811da5SToomas Soome dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
2338*54811da5SToomas Soome {
2339*54811da5SToomas Soome 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2340*54811da5SToomas Soome 	dnode_t *dn;
2341*54811da5SToomas Soome 
2342*54811da5SToomas Soome 	DB_DNODE_ENTER(db);
2343*54811da5SToomas Soome 	dn = DB_DNODE(db);
2344*54811da5SToomas Soome 	*dnsize = dn->dn_num_slots << DNODE_SHIFT;
2345744947dcSTom Erickson 	DB_DNODE_EXIT(db);
2346fa9e4066Sahrens }
2347fa9e4066Sahrens 
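/*
 * Editor's note -- an illustrative sketch, not part of the original
 * source: a stat()-style caller already holding a dbuf can fill in
 * block accounting without a separate dnode_hold():
 *
 *	uint32_t blksize;
 *	u_longlong_t nblk512;
 *	int dnsize;
 *
 *	dmu_object_size_from_db(db, &blksize, &nblk512);
 *	dmu_object_dnsize_from_db(db, &dnsize);
 *
 * nblk512 is in 512-byte (SPA_MINBLOCKSIZE) units, so it maps directly
 * onto st_blocks; blksize maps onto st_blksize.
 */
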
2348fa9e4066Sahrens void
2349fa9e4066Sahrens byteswap_uint64_array(void *vbuf, size_t size)
2350fa9e4066Sahrens {
2351fa9e4066Sahrens 	uint64_t *buf = vbuf;
2352fa9e4066Sahrens 	size_t count = size >> 3;
2353fa9e4066Sahrens 	int i;
2354fa9e4066Sahrens 
2355fa9e4066Sahrens 	ASSERT((size & 7) == 0);
2356fa9e4066Sahrens 
2357fa9e4066Sahrens 	for (i = 0; i < count; i++)
2358fa9e4066Sahrens 		buf[i] = BSWAP_64(buf[i]);
2359fa9e4066Sahrens }
2360fa9e4066Sahrens 
2361fa9e4066Sahrens void
2362fa9e4066Sahrens byteswap_uint32_array(void *vbuf, size_t size)
2363fa9e4066Sahrens {
2364fa9e4066Sahrens 	uint32_t *buf = vbuf;
2365fa9e4066Sahrens 	size_t count = size >> 2;
2366fa9e4066Sahrens 	int i;
2367fa9e4066Sahrens 
2368fa9e4066Sahrens 	ASSERT((size & 3) == 0);
2369fa9e4066Sahrens 
2370fa9e4066Sahrens 	for (i = 0; i < count; i++)
2371fa9e4066Sahrens 		buf[i] = BSWAP_32(buf[i]);
2372fa9e4066Sahrens }
2373fa9e4066Sahrens 
2374fa9e4066Sahrens void
2375fa9e4066Sahrens byteswap_uint16_array(void *vbuf, size_t size)
2376fa9e4066Sahrens {
2377fa9e4066Sahrens 	uint16_t *buf = vbuf;
2378fa9e4066Sahrens 	size_t count = size >> 1;
2379fa9e4066Sahrens 	int i;
2380fa9e4066Sahrens 
2381fa9e4066Sahrens 	ASSERT((size & 1) == 0);
2382fa9e4066Sahrens 
2383fa9e4066Sahrens 	for (i = 0; i < count; i++)
2384fa9e4066Sahrens 		buf[i] = BSWAP_16(buf[i]);
2385fa9e4066Sahrens }
2386fa9e4066Sahrens 
2387fa9e4066Sahrens /* ARGSUSED */
2388fa9e4066Sahrens void
2389fa9e4066Sahrens byteswap_uint8_array(void *vbuf, size_t size)
2390fa9e4066Sahrens {
2391fa9e4066Sahrens }
2392fa9e4066Sahrens 
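/*
 * Editor's note: these routines are the DMU_BSWAP_* handlers (see
 * dmu_ot_byteswap[]) applied when a block that was written on an
 * opposite-endian machine is read.  An illustrative call, assuming a
 * 512-byte buffer of foreign-endian uint64_t values:
 *
 *	byteswap_uint64_array(buf, 512);
 *
 * The size must be a multiple of the element width, as the ASSERTs
 * above enforce.
 */
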
2393fa9e4066Sahrens void
2394fa9e4066Sahrens dmu_init(void)
2395fa9e4066Sahrens {
2396770499e1SDan Kimmel 	abd_init();
23973f9d6ad7SLin Ling 	zfs_dbgmsg_init();
2398744947dcSTom Erickson 	sa_cache_init();
2399744947dcSTom Erickson 	xuio_stat_init();
2400744947dcSTom Erickson 	dmu_objset_init();
2401fa9e4066Sahrens 	dnode_init();
24027cbf8b43SRich Morris 	zfetch_init();
2403fa94a07fSbrendan 	l2arc_init();
2404ce636f8bSMatthew Ahrens 	arc_init();
2405dcbf3bd6SGeorge Wilson 	dbuf_init();
2406fa9e4066Sahrens }
2407fa9e4066Sahrens 
2408fa9e4066Sahrens void
2409fa9e4066Sahrens dmu_fini(void)
2410fa9e4066Sahrens {
24113e30c24aSWill Andrews 	arc_fini(); /* arc depends on l2arc, so arc must go first */
2412ce636f8bSMatthew Ahrens 	l2arc_fini();
24137cbf8b43SRich Morris 	zfetch_fini();
2414fa9e4066Sahrens 	dbuf_fini();
2415744947dcSTom Erickson 	dnode_fini();
2416744947dcSTom Erickson 	dmu_objset_fini();
2417c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	xuio_stat_fini();
24180a586ceaSMark Shellenbaum 	sa_cache_fini();
24193f9d6ad7SLin Ling 	zfs_dbgmsg_fini();
2420770499e1SDan Kimmel 	abd_fini();
2421fa9e4066Sahrens }
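
/*
 * Editor's note: dmu_init() and dmu_fini() are invoked from spa_init()
 * and spa_fini() respectively; apart from the arc/l2arc ordering noted
 * above, teardown runs in roughly the reverse order of setup.
 */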
2422