/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 */
/* Copyright (c) 2013 by Saso Kiselkov. All rights reserved. */
/* Copyright (c) 2013, Joyent, Inc. All rights reserved. */
/* Copyright 2016 Nexenta Systems, Inc. All rights reserved. */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif

/*
 * Enable/disable nopwrite feature.
 */
int zfs_nopwrite_enabled = 1;

/*
 * Tunable to control percentage of dirtied blocks from frees in one TXG.
 * After this threshold is crossed, additional dirty blocks from frees
 * wait until the next TXG.
 * A value of zero will disable this throttle.
 */
uint32_t zfs_per_txg_dirty_frees_percent = 30;

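/*
 * Both tunables above are plain module globals, so on illumos they can be
 * overridden at boot time from /etc/system.  A minimal sketch (assuming the
 * "zfs" module name; the values shown are only examples):
 *
 *	set zfs:zfs_nopwrite_enabled = 0
 *	set zfs:zfs_per_txg_dirty_frees_percent = 15
 */
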
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{ DMU_BSWAP_UINT8,	TRUE,	"unallocated" },
	{ DMU_BSWAP_ZAP,	TRUE,	"object directory" },
	{ DMU_BSWAP_UINT64,	TRUE,	"object array" },
	{ DMU_BSWAP_UINT8,	TRUE,	"packed nvlist" },
	{ DMU_BSWAP_UINT64,	TRUE,	"packed nvlist size" },
	{ DMU_BSWAP_UINT64,	TRUE,	"bpobj" },
	{ DMU_BSWAP_UINT64,	TRUE,	"bpobj header" },
	{ DMU_BSWAP_UINT64,	TRUE,	"SPA space map header" },
	{ DMU_BSWAP_UINT64,	TRUE,	"SPA space map" },
	{ DMU_BSWAP_UINT64,	TRUE,	"ZIL intent log" },
	{ DMU_BSWAP_DNODE,	TRUE,	"DMU dnode" },
	{ DMU_BSWAP_OBJSET,	TRUE,	"DMU objset" },
	{ DMU_BSWAP_UINT64,	TRUE,	"DSL directory" },
	{ DMU_BSWAP_ZAP,	TRUE,	"DSL directory child map" },
	{ DMU_BSWAP_ZAP,	TRUE,	"DSL dataset snap map" },
	{ DMU_BSWAP_ZAP,	TRUE,	"DSL props" },
	{ DMU_BSWAP_UINT64,	TRUE,	"DSL dataset" },
	{ DMU_BSWAP_ZNODE,	TRUE,	"ZFS znode" },
	{ DMU_BSWAP_OLDACL,	TRUE,	"ZFS V0 ACL" },
	{ DMU_BSWAP_UINT8,	FALSE,	"ZFS plain file" },
	{ DMU_BSWAP_ZAP,	TRUE,	"ZFS directory" },
	{ DMU_BSWAP_ZAP,	TRUE,	"ZFS master node" },
	{ DMU_BSWAP_ZAP,	TRUE,	"ZFS delete queue" },
	{ DMU_BSWAP_UINT8,	FALSE,	"zvol object" },
	{ DMU_BSWAP_ZAP,	TRUE,	"zvol prop" },
	{ DMU_BSWAP_UINT8,	FALSE,	"other uint8[]" },
	{ DMU_BSWAP_UINT64,	FALSE,	"other uint64[]" },
	{ DMU_BSWAP_ZAP,	TRUE,	"other ZAP" },
	{ DMU_BSWAP_ZAP,	TRUE,	"persistent error log" },
	{ DMU_BSWAP_UINT8,	TRUE,	"SPA history" },
	{ DMU_BSWAP_UINT64,	TRUE,	"SPA history offsets" },
	{ DMU_BSWAP_ZAP,	TRUE,	"Pool properties" },
	{ DMU_BSWAP_ZAP,	TRUE,	"DSL permissions" },
	{ DMU_BSWAP_ACL,	TRUE,	"ZFS ACL" },
	{ DMU_BSWAP_UINT8,	TRUE,	"ZFS SYSACL" },
	{ DMU_BSWAP_UINT8,	TRUE,	"FUID table" },
	{ DMU_BSWAP_UINT64,	TRUE,	"FUID table size" },
	{ DMU_BSWAP_ZAP,	TRUE,	"DSL dataset next clones" },
	{ DMU_BSWAP_ZAP,	TRUE,	"scan work queue" },
	{ DMU_BSWAP_ZAP,	TRUE,	"ZFS user/group used" },
	{ DMU_BSWAP_ZAP,	TRUE,	"ZFS user/group quota" },
	{ DMU_BSWAP_ZAP,	TRUE,	"snapshot refcount tags" },
	{ DMU_BSWAP_ZAP,	TRUE,	"DDT ZAP algorithm" },
	{ DMU_BSWAP_ZAP,	TRUE,	"DDT statistics" },
	{ DMU_BSWAP_UINT8,	TRUE,	"System attributes" },
	{ DMU_BSWAP_ZAP,	TRUE,	"SA master node" },
	{ DMU_BSWAP_ZAP,	TRUE,	"SA attr registration" },
	{ DMU_BSWAP_ZAP,	TRUE,	"SA attr layouts" },
	{ DMU_BSWAP_ZAP,	TRUE,	"scan translations" },
	{ DMU_BSWAP_UINT8,	FALSE,	"deduplicated block" },
	{ DMU_BSWAP_ZAP,	TRUE,	"DSL deadlist map" },
	{ DMU_BSWAP_UINT64,	TRUE,	"DSL deadlist map hdr" },
	{ DMU_BSWAP_ZAP,	TRUE,	"DSL dir clones" },
	{ DMU_BSWAP_UINT64,	TRUE,	"bpobj subobj" }
};

const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
	{ byteswap_uint8_array,		"uint8" },
	{ byteswap_uint16_array,	"uint16" },
	{ byteswap_uint32_array,	"uint32" },
	{ byteswap_uint64_array,	"uint64" },
	{ zap_byteswap,			"zap" },
	{ dnode_buf_byteswap,		"dnode" },
	{ dmu_objset_byteswap,		"objset" },
	{ zfs_znode_byteswap,		"znode" },
	{ zfs_oldacl_byteswap,		"oldacl" },
	{ zfs_acl_byteswap,		"acl" }
};

int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	uint64_t blkid;
	dmu_buf_impl_t *db;

	blkid = dbuf_whichblock(dn, 0, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (0);
}

int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	blkid = dbuf_whichblock(dn, 0, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (err);
}

int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;

	err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;

	err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

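/*
 * A minimal consumer sketch of the hold/release pattern above (hypothetical
 * caller; "os", "object", and "offset" are assumed valid in context):
 *
 *	dmu_buf_t *db;
 *	int err = dmu_buf_hold(os, object, offset, FTAG, &db,
 *	    DMU_READ_PREFETCH);
 *	if (err == 0) {
 *		// db->db_data is now valid for db->db_size bytes
 *		dmu_buf_rele(db, FTAG);
 *	}
 */
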
int
dmu_bonus_max(void)
{
	return (DN_MAX_BONUSLEN);
}

int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else if (newsize < 0 || newsize > db_fake->db_size) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonuslen(dn, newsize, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (!DMU_OT_IS_VALID(type)) {
		error = SET_ERROR(EINVAL);
	} else if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonus_type(dn, type, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	dmu_object_type_t type;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	type = dn->dn_bonustype;
	DB_DNODE_EXIT(db);

	return (type);
}

int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error != 0)
		return (error);
	dbuf_rm_spill(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_rm_spill(dn, tx);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}

/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	dmu_buf_impl_t *db;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;

	/* as long as the bonus buf is held, the dnode will be held */
	if (refcount_add(&db->db_holds, tag) == 1) {
		VERIFY(dnode_add_ref(dn, db));
		atomic_inc_32(&dn->dn_dbufs_count);
	}

	/*
	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
	 * a dnode hold for every dbuf.
	 */
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);

	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));

	*dbp = &db->db;
	return (0);
}

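/*
 * A minimal sketch of the bonus-buffer pattern above (hypothetical caller):
 *
 *	dmu_buf_t *db;
 *	if (dmu_bonus_hold(os, object, FTAG, &db) == 0) {
 *		// db->db_data points at the dnode's bonus area
 *		dmu_buf_rele(db, FTAG);
 *	}
 */
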
/*
 * returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * if you only want to find an already existing spill db, then
 * dmu_spill_hold_existing() should be used.
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = NULL;
	int err;

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_exit(&dn->dn_struct_rwlock);

	ASSERT(db != NULL);
	err = dbuf_read(db, NULL, flags);
	if (err == 0)
		*dbp = &db->db;
	else
		dbuf_rele(db, tag);
	return (err);
}

int
dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
		err = SET_ERROR(EINVAL);
	} else {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

		if (!dn->dn_have_spill) {
			err = SET_ERROR(ENOENT);
		} else {
			err = dmu_spill_hold_by_dnode(dn,
			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
		}

		rw_exit(&dn->dn_struct_rwlock);
	}

	DB_DNODE_EXIT(db);
	return (err);
}

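/*
 * A minimal sketch of looking up an existing spill block via the bonus
 * buffer (hypothetical caller; error handling other than ENOENT omitted):
 *
 *	dmu_buf_t *bonus, *spill;
 *	VERIFY0(dmu_bonus_hold(os, object, FTAG, &bonus));
 *	err = dmu_spill_hold_existing(bonus, FTAG, &spill);
 *	if (err == 0)
 *		dmu_buf_rele(spill, FTAG);	// object had a spill block
 *	else if (err == ENOENT)
 *		;				// no spill block allocated
 *	dmu_buf_rele(bonus, FTAG);
 */
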
int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
	dmu_buf_t **dbp;
	uint64_t blkid, nblks, i;
	uint32_t dbuf_flags;
	int err;
	zio_t *zio;

	ASSERT(length <= DMU_MAX_ACCESS);

	/*
	 * Note: We directly notify the prefetch code of this read, so that
	 * we can tell it about the multi-block read.  dbuf_read() only knows
	 * about the one block it is accessing.
	 */
	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
	    DB_RF_NOPREFETCH;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
		    P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
	} else {
		if (offset + length > dn->dn_datablksz) {
			zfs_panic_recover("zfs: accessing past end of object "
			    "%llx/%llx (size=%u access=%llu+%llu)",
			    (longlong_t)dn->dn_objset->
			    os_dsl_dataset->ds_object,
			    (longlong_t)dn->dn_object, dn->dn_datablksz,
			    (longlong_t)offset, (longlong_t)length);
			rw_exit(&dn->dn_struct_rwlock);
			return (SET_ERROR(EIO));
		}
		nblks = 1;
	}
	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
	blkid = dbuf_whichblock(dn, 0, offset);
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
		if (db == NULL) {
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
			zio_nowait(zio);
			return (SET_ERROR(EIO));
		}

		/* initiate async i/o */
		if (read)
			(void) dbuf_read(db, zio, dbuf_flags);
		dbp[i] = &db->db;
	}

	if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
	    DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) {
		dmu_zfetch(&dn->dn_zfetch, blkid, nblks,
		    read && DNODE_IS_CACHEABLE(dn));
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* wait for async i/o */
	err = zio_wait(zio);
	if (err) {
		dmu_buf_rele_array(dbp, nblks, tag);
		return (err);
	}

	/* wait for other io to complete */
	if (read) {
		for (i = 0; i < nblks; i++) {
			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
			mutex_enter(&db->db_mtx);
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL)
				cv_wait(&db->db_changed, &db->db_mtx);
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
			mutex_exit(&db->db_mtx);
			if (err) {
				dmu_buf_rele_array(dbp, nblks, tag);
				return (err);
			}
		}
	}

	*numbufsp = nblks;
	*dbpp = dbp;
	return (0);
}

static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
    uint64_t length, boolean_t read, void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);
	DB_DNODE_EXIT(db);

	return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
	int i;
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	if (numbufs == 0)
		return;

	for (i = 0; i < numbufs; i++) {
		if (dbp[i])
			dbuf_rele(dbp[i], tag);
	}

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}

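/*
 * A minimal sketch of the array variant (hypothetical caller; "bonus_db" is
 * an already-held bonus dbuf): hold every dbuf overlapping
 * [offset, offset + length), use them, then release them as a group.
 *
 *	dmu_buf_t **dbp;
 *	int numbufs;
 *	VERIFY0(dmu_buf_hold_array_by_bonus(bonus_db, offset, length,
 *	    B_TRUE, FTAG, &numbufs, &dbp));
 *	for (int i = 0; i < numbufs; i++)
 *		// consume dbp[i]->db_data ... dbp[i]->db_size
 *	dmu_buf_rele_array(dbp, numbufs, FTAG);
 */
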
/*
 * Issue prefetch i/os for the given blocks.  If level is greater than 0, the
 * indirect blocks prefetched will be those that point to the blocks containing
 * the data starting at offset, and continuing to offset + len.
 *
 * Note that if the indirect blocks above the blocks being prefetched are not
 * in cache, they will be asynchronously read in.
 */
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, zio_priority_t pri)
{
	dnode_t *dn;
	uint64_t blkid;
	int nblks, err;

	if (len == 0) {		/* they're interested in the bonus buffer */
		dn = DMU_META_DNODE(os);

		if (object == 0 || object >= DN_MAX_OBJECT)
			return;

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		blkid = dbuf_whichblock(dn, level,
		    object * sizeof (dnode_phys_t));
		dbuf_prefetch(dn, level, blkid, pri, 0);
		rw_exit(&dn->dn_struct_rwlock);
		return;
	}

	/*
	 * XXX - Note, if the dnode for the requested object is not
	 * already cached, we will do a *synchronous* read in the
	 * dnode_hold() call.  The same is true for any indirects.
	 */
	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	/*
	 * offset + len - 1 is the last byte we want to prefetch for, and
	 * offset is the first.  Then dbuf_whichblock(dn, level, offset +
	 * len - 1) is the last block we want to prefetch, and
	 * dbuf_whichblock(dn, level, offset) is the first.  Then the number
	 * we need to prefetch is the last - first + 1.
	 */
	if (level > 0 || dn->dn_datablkshift != 0) {
		nblks = dbuf_whichblock(dn, level, offset + len - 1) -
		    dbuf_whichblock(dn, level, offset) + 1;
	} else {
		nblks = (offset < dn->dn_datablksz);
	}

	if (nblks != 0) {
		blkid = dbuf_whichblock(dn, level, offset);
		for (int i = 0; i < nblks; i++)
			dbuf_prefetch(dn, level, blkid + i, pri, 0);
	}

	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
}

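/*
 * A minimal sketch of warming the cache ahead of a sequential read
 * (hypothetical caller; level 0 prefetches the data blocks themselves):
 *
 *	dmu_prefetch(os, object, 0, offset, 4 * SPA_MAXBLOCKSIZE,
 *	    ZIO_PRIORITY_ASYNC_READ);
 */
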
/*
 * Get the next "chunk" of file data to free.  We traverse the file from
 * the end so that the file gets shorter over time (if we crash in the
 * middle, this will leave us in a better state).  We find allocated file
 * data by simply searching the allocated level 1 indirects.
 *
 * On input, *start should be the first offset that does not need to be
 * freed (e.g. "offset + length").  On return, *start will be the first
 * offset that should be freed.
 */
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
{
	uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
	/* bytes of data covered by a level-1 indirect block */
	uint64_t iblkrange =
	    dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

	ASSERT3U(minimum, <=, *start);

	if (*start - minimum <= iblkrange * maxblks) {
		*start = minimum;
		return (0);
	}
	ASSERT(ISP2(iblkrange));

	for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
		int err;

		/*
		 * dnode_next_offset(BACKWARDS) will find an allocated L1
		 * indirect block at or before the input offset.  We must
		 * decrement *start so that it is at the end of the region
		 * to search.
		 */
		(*start)--;
		err = dnode_next_offset(dn,
		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);

		/* if there are no indirect blocks before start, we are done */
		if (err == ESRCH) {
			*start = minimum;
			break;
		} else if (err != 0) {
			return (err);
		}

		/* set start to the beginning of this L1 indirect */
		*start = P2ALIGN(*start, iblkrange);
	}
	if (*start < minimum)
		*start = minimum;
	return (0);
}

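/*
 * A worked example of iblkrange, assuming the common case of 128K data
 * blocks and 128K indirect blocks (dn_indblkshift == 17): each L1 indirect
 * holds 1 << (17 - SPA_BLKPTRSHIFT) == 1 << (17 - 7) == 1024 block
 * pointers, so iblkrange == 128K * 1024 == 128M of file data per L1.
 */
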
/*
 * If this objset is of type DMU_OST_ZFS, return true if the vfs's unmounted
 * flag is set; otherwise return false.
 * Used below in dmu_free_long_range_impl() to enable abort when unmounting.
 */
/*ARGSUSED*/
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS)
		return (zfs_get_vfs_flag_unmounted(os));
#endif
	return (B_FALSE);
}

static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length)
{
	uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	int err;
	uint64_t dirty_frees_threshold;
	dsl_pool_t *dp = dmu_objset_pool(os);

	if (offset >= object_size)
		return (0);

	if (zfs_per_txg_dirty_frees_percent <= 100)
		dirty_frees_threshold =
		    zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
	else
		dirty_frees_threshold = zfs_dirty_data_max / 4;

	if (length == DMU_OBJECT_END || offset + length > object_size)
		length = object_size - offset;

	while (length != 0) {
		uint64_t chunk_end, chunk_begin, chunk_len;
		uint64_t long_free_dirty_all_txgs = 0;
		dmu_tx_t *tx;

		if (dmu_objset_zfs_unmounting(dn->dn_objset))
			return (SET_ERROR(EINTR));

		chunk_end = chunk_begin = offset + length;

		/* move chunk_begin backwards to the beginning of this chunk */
		err = get_next_chunk(dn, &chunk_begin, offset);
		if (err)
			return (err);
		ASSERT3U(chunk_begin, >=, offset);
		ASSERT3U(chunk_begin, <=, chunk_end);

		chunk_len = chunk_end - chunk_begin;

		mutex_enter(&dp->dp_lock);
		for (int t = 0; t < TXG_SIZE; t++) {
			long_free_dirty_all_txgs +=
			    dp->dp_long_free_dirty_pertxg[t];
		}
		mutex_exit(&dp->dp_lock);

		/*
		 * To avoid filling up a TXG with just frees, wait for
		 * the next TXG to open before freeing more chunks if
		 * we have reached the threshold of frees.
		 */
		if (dirty_frees_threshold != 0 &&
		    long_free_dirty_all_txgs >= dirty_frees_threshold) {
			txg_wait_open(dp, 0);
			continue;
		}

		tx = dmu_tx_create(os);
		dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);

		/*
		 * Mark this transaction as typically resulting in a net
		 * reduction in space used.
		 */
		dmu_tx_mark_netfree(tx);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}

		mutex_enter(&dp->dp_lock);
		dp->dp_long_free_dirty_pertxg[dmu_tx_get_txg(tx) & TXG_MASK] +=
		    chunk_len;
		mutex_exit(&dp->dp_lock);
		DTRACE_PROBE3(free__long__range,
		    uint64_t, long_free_dirty_all_txgs, uint64_t, chunk_len,
		    uint64_t, dmu_tx_get_txg(tx));
		dnode_free_range(dn, chunk_begin, chunk_len, tx);
		dmu_tx_commit(tx);

		length -= chunk_len;
	}
	return (0);
}

int
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);
	err = dmu_free_long_range_impl(os, dn, offset, length);

	/*
	 * It is important to zero out the maxblkid when freeing the entire
	 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
	 * will take the fast path, and (b) dnode_reallocate() can verify
	 * that the entire file has been freed.
	 */
	if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
		dn->dn_maxblkid = 0;

	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_free_long_object(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx;
	int err;

	err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
	dmu_tx_mark_netfree(tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err == 0) {
		err = dmu_object_free(os, object, tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}

	return (err);
}

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	ASSERT(offset < UINT64_MAX);
	ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
	return (0);
}

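/*
 * A minimal sketch contrasting the two free paths above (hypothetical
 * caller): dmu_free_long_range() creates and assigns its own transactions
 * chunk by chunk, while dmu_free_range() frees within a caller-held tx.
 *
 *	// whole-file free, safe for arbitrarily large objects:
 *	(void) dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
 *
 *	// small, bounded free inside an existing transaction:
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, offset, size);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		(void) dmu_free_range(os, object, offset, size, tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 */
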
static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dmu_buf_t **dbp;
	int numbufs, err = 0;

	/*
	 * Deal with odd block sizes, where there can't be data past the first
	 * block.  If we ever do the tail block optimization, we will need to
	 * handle that here as well.
	 */
	if (dn->dn_maxblkid == 0) {
		int newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		bzero((char *)buf + newsz, size - newsz);
		size = newsz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
		int i;

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp, flags);
		if (err)
			break;

		for (i = 0; i < numbufs; i++) {
			int tocpy;
			int bufoff;
			dmu_buf_t *db = dbp[i];

			ASSERT(size > 0);

			bufoff = offset - db->db_offset;
			tocpy = (int)MIN(db->db_size - bufoff, size);

			bcopy((char *)db->db_data + bufoff, buf, tocpy);

			offset += tocpy;
			size -= tocpy;
			buf = (char *)buf + tocpy;
		}
		dmu_buf_rele_array(dbp, numbufs, FTAG);
	}
	return (err);
}

int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	err = dmu_read_impl(dn, offset, size, buf, flags);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
    uint32_t flags)
{
	return (dmu_read_impl(dn, offset, size, buf, flags));
}

static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	int i;

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		bcopy(buf, (char *)db->db_data + bufoff, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
		buf = (char *)buf + tocpy;
	}
}

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

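/*
 * A minimal sketch of the flat read/write API above (hypothetical caller;
 * "data" is a caller-supplied buffer of "size" bytes):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		dmu_write(os, object, offset, size, data, tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 *	...
 *	(void) dmu_read(os, object, offset, size, data, DMU_READ_PREFETCH);
 */
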
void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		dmu_buf_will_not_fill(db, tx);
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx)
{
	dmu_buf_t *db;

	ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
	ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
	VERIFY0(dmu_buf_hold_noread(os, object, offset,
	    FTAG, &db));

	dmu_buf_write_embedded(db,
	    data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
	    uncompressed_size, compressed_size, byteorder, tx);

	dmu_buf_rele(db, FTAG);
}

/*
 * DMU support for xuio
 */
kstat_t *xuio_ksp = NULL;

int
dmu_xuio_init(xuio_t *xuio, int nblk)
{
	dmu_xuio_t *priv;
	uio_t *uio = &xuio->xu_uio;

	uio->uio_iovcnt = nblk;
	uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);

	priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
	priv->cnt = nblk;
	priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
	priv->iovp = uio->uio_iov;
	XUIO_XUZC_PRIV(xuio) = priv;

	if (XUIO_XUZC_RW(xuio) == UIO_READ)
		XUIOSTAT_INCR(xuiostat_onloan_rbuf, nblk);
	else
		XUIOSTAT_INCR(xuiostat_onloan_wbuf, nblk);

	return (0);
}

void
dmu_xuio_fini(xuio_t *xuio)
{
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
	int nblk = priv->cnt;

	kmem_free(priv->iovp, nblk * sizeof (iovec_t));
	kmem_free(priv->bufs, nblk * sizeof (arc_buf_t *));
	kmem_free(priv, sizeof (dmu_xuio_t));

	if (XUIO_XUZC_RW(xuio) == UIO_READ)
		XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk);
	else
		XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk);
}

/*
 * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf }
 * and increase priv->next by 1.
 */
int
dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n)
{
	struct iovec *iov;
	uio_t *uio = &xuio->xu_uio;
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
	int i = priv->next++;

	ASSERT(i < priv->cnt);
	ASSERT(off + n <= arc_buf_lsize(abuf));
	iov = uio->uio_iov + i;
	iov->iov_base = (char *)abuf->b_data + off;
	iov->iov_len = n;
	priv->bufs[i] = abuf;
	return (0);
}

int
dmu_xuio_cnt(xuio_t *xuio)
{
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
	return (priv->cnt);
}

arc_buf_t *
dmu_xuio_arcbuf(xuio_t *xuio, int i)
{
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);

	ASSERT(i < priv->cnt);
	return (priv->bufs[i]);
}

void
dmu_xuio_clear(xuio_t *xuio, int i)
{
	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);

	ASSERT(i < priv->cnt);
	priv->bufs[i] = NULL;
}
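
/*
 * A minimal sketch of the xuio helpers above (hypothetical caller doing a
 * zero-copy read into nblk loaned ARC buffers; abufs[] is assumed):
 *
 *	VERIFY0(dmu_xuio_init(xuio, nblk));
 *	for (int i = 0; i < nblk; i++)
 *		VERIFY0(dmu_xuio_add(xuio, abufs[i], 0,
 *		    arc_buf_lsize(abufs[i])));
 *	// ... consumer drains the uio, then returns the loaned buffers ...
 *	dmu_xuio_fini(xuio);
 */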
static void
xuio_stat_init(void)
{
	xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (xuio_ksp != NULL) {
		xuio_ksp->ks_data = &xuio_stats;
		kstat_install(xuio_ksp);
	}
}

static void
xuio_stat_fini(void)
{
	if (xuio_ksp != NULL) {
		kstat_delete(xuio_ksp);
		xuio_ksp = NULL;
	}
}

void
xuio_stat_wbuf_copied(void)
{
	XUIOSTAT_BUMP(xuiostat_wbuf_copied);
}

void
xuio_stat_wbuf_nocopy(void)
{
	XUIOSTAT_BUMP(xuiostat_wbuf_nocopy);
}

#ifdef _KERNEL
static int
dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
{
	dmu_buf_t **dbp;
	int numbufs, i, err;
	xuio_t *xuio = NULL;

	/*
	 * NB: we could do this block-at-a-time, but it's nice
	 * to be reading in parallel.
	 */
	err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
	    TRUE, FTAG, &numbufs, &dbp, 0);
	if (err)
		return (err);

	if (uio->uio_extflg == UIO_XUIO)
		xuio = (xuio_t *)uio;

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = uio->uio_loffset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		if (xuio) {
			dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
			arc_buf_t *dbuf_abuf = dbi->db_buf;
			arc_buf_t *abuf = dbuf_loan_arcbuf(dbi);
			err = dmu_xuio_add(xuio, abuf, bufoff, tocpy);
			if (!err) {
				uio->uio_resid -= tocpy;
				uio->uio_loffset += tocpy;
			}

			if (abuf == dbuf_abuf)
				XUIOSTAT_BUMP(xuiostat_rbuf_nocopy);
			else
				XUIOSTAT_BUMP(xuiostat_rbuf_copied);
		} else {
			err = uiomove((char *)db->db_data + bufoff, tocpy,
			    UIO_READ, uio);
		}
		if (err)
			break;

		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);

	return (err);
}

/*
 * Read 'size' bytes from object zdb->db_object into the uio buffer,
 * starting at offset uio->uio_loffset.
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_read_uio_dnode(dn, uio, size);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Read 'size' bytes from the specified object into the uio buffer,
 * starting at offset uio->uio_loffset.
 */
int
dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_read_uio_dnode(dn, uio, size);

	dnode_rele(dn, FTAG);

	return (err);
}
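/*
 * Illustrative sketch (hypothetical helper): reading into a kernel buffer
 * through dmu_read_uio().  A real consumer such as zfs_read() receives its
 * uio from the VFS layer; here we build a minimal single-iovec uio by hand.
 */
static int
example_read_uio(objset_t *os, uint64_t object, void *buf, uint64_t off,
    uint64_t len)
{
	iovec_t iov;
	uio_t uio;

	iov.iov_base = buf;
	iov.iov_len = len;

	bzero(&uio, sizeof (uio));
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_loffset = off;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;

	return (dmu_read_uio(os, object, &uio, len));
}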
static int
dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;
	int err = 0;
	int i;

	err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = uio->uio_loffset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		/*
		 * XXX uiomove could block forever (eg. nfs-backed
		 * pages).  There needs to be a uiolockdown() function
		 * to lock the pages in memory, so that uiomove won't
		 * block.
		 */
		err = uiomove((char *)db->db_data + bufoff, tocpy,
		    UIO_WRITE, uio);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		if (err)
			break;

		size -= tocpy;
	}

	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}

/*
 * Write 'size' bytes from the uio buffer to object zdb->db_object,
 * starting at offset uio->uio_loffset.
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_write_uio_dnode(dn, uio, size, tx);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Write 'size' bytes from the uio buffer to the specified object,
 * starting at offset uio->uio_loffset.
 */
int
dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	if (size == 0)
		return (0);

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_write_uio_dnode(dn, uio, size, tx);

	dnode_rele(dn, FTAG);

	return (err);
}
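/*
 * Illustrative sketch (hypothetical helper): the transactional protocol a
 * dmu_write_uio() caller follows.  Error handling is reduced to the
 * essentials; a real caller may retry or map the errors differently.
 */
static int
example_write_uio(objset_t *os, uint64_t object, uio_t *uio, int len)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_write(tx, object, uio->uio_loffset, len);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	err = dmu_write_uio(os, object, uio, len, tx);
	dmu_tx_commit(tx);
	return (err);
}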
int
dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    page_t *pp, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;
	int err;

	if (size == 0)
		return (0);

	err = dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		int tocpy, copied, thiscpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];
		caddr_t va;

		ASSERT(size > 0);
		ASSERT3U(db->db_size, >=, PAGESIZE);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		for (copied = 0; copied < tocpy; copied += PAGESIZE) {
			ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
			thiscpy = MIN(PAGESIZE, tocpy - copied);
			va = zfs_map_page(pp, S_READ);
			bcopy(va, (char *)db->db_data + bufoff, thiscpy);
			zfs_unmap_page(pp, va);
			pp = pp->p_next;
			bufoff += PAGESIZE;
		}

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}
#endif

/*
 * Allocate a loaned anonymous arc buffer.
 */
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;

	return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
}

/*
 * Free a loaned arc buffer.
 */
void
dmu_return_arcbuf(arc_buf_t *buf)
{
	arc_return_buf(buf, FTAG);
	arc_buf_destroy(buf, FTAG);
}

/*
 * When possible, directly assign the passed loaned arc buffer to a dbuf.
 * If this is not possible, copy the contents of the passed arc buf via
 * dmu_write().
 */
void
dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
	dnode_t *dn;
	dmu_buf_impl_t *db;
	uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
	uint64_t blkid;

	DB_DNODE_ENTER(dbuf);
	dn = DB_DNODE(dbuf);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
	rw_exit(&dn->dn_struct_rwlock);
	DB_DNODE_EXIT(dbuf);

	/*
	 * We can only assign if the offset is aligned, the arc buf is the
	 * same size as the dbuf, and the dbuf is not metadata.
	 */
	if (offset == db->db.db_offset && blksz == db->db.db_size) {
		dbuf_assign_arcbuf(db, buf, tx);
		dbuf_rele(db, FTAG);
	} else {
		objset_t *os;
		uint64_t object;

		/* compressed bufs must always be assignable to their dbuf */
		ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
		ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));

		DB_DNODE_ENTER(dbuf);
		dn = DB_DNODE(dbuf);
		os = dn->dn_objset;
		object = dn->dn_object;
		DB_DNODE_EXIT(dbuf);

		dbuf_rele(db, FTAG);
		dmu_write(os, object, offset, blksz, buf->b_data, tx);
		dmu_return_arcbuf(buf);
		XUIOSTAT_BUMP(xuiostat_wbuf_copied);
	}
}
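/*
 * Illustrative sketch (hypothetical helper): the loaned-buffer write path
 * built on the three routines above.  'handle' is assumed to be a held
 * dbuf in the target object and 'offset' block-aligned; dmu_assign_arcbuf()
 * consumes the buffer whether it assigns or copies.
 */
static void
example_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, void *data,
    int size, dmu_tx_t *tx)
{
	arc_buf_t *abuf = dmu_request_arcbuf(handle, size);

	bcopy(data, abuf->b_data, size);
	dmu_assign_arcbuf(handle, offset, abuf, tx);
}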
typedef struct {
	dbuf_dirty_record_t	*dsa_dr;
	dmu_sync_cb_t		*dsa_done;
	zgd_t			*dsa_zgd;
	dmu_tx_t		*dsa_tx;
} dmu_sync_arg_t;

/* ARGSUSED */
static void
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
{
	dmu_sync_arg_t *dsa = varg;
	dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
	blkptr_t *bp = zio->io_bp;

	if (zio->io_error == 0) {
		if (BP_IS_HOLE(bp)) {
			/*
			 * A block of zeros may compress to a hole, but the
			 * block size still needs to be known for replay.
			 */
			BP_SET_LSIZE(bp, db->db_size);
		} else if (!BP_IS_EMBEDDED(bp)) {
			ASSERT(BP_GET_LEVEL(bp) == 0);
			bp->blk_fill = 1;
		}
	}
}

static void
dmu_sync_late_arrival_ready(zio_t *zio)
{
	dmu_sync_ready(zio, NULL, zio->io_private);
}

/* ARGSUSED */
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
	dmu_sync_arg_t *dsa = varg;
	dbuf_dirty_record_t *dr = dsa->dsa_dr;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	mutex_enter(&db->db_mtx);
	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
	if (zio->io_error == 0) {
		dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
		if (dr->dt.dl.dr_nopwrite) {
			blkptr_t *bp = zio->io_bp;
			blkptr_t *bp_orig = &zio->io_bp_orig;
			uint8_t chksum = BP_GET_CHECKSUM(bp_orig);

			ASSERT(BP_EQUAL(bp, bp_orig));
			VERIFY(BP_EQUAL(bp, db->db_blkptr));
			ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
			ASSERT(zio_checksum_table[chksum].ci_flags &
			    ZCHECKSUM_FLAG_NOPWRITE);
		}
		dr->dt.dl.dr_overridden_by = *zio->io_bp;
		dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
		dr->dt.dl.dr_copies = zio->io_prop.zp_copies;

		/*
		 * Old style holes are filled with all zeros, whereas
		 * new-style holes maintain their lsize, type, level,
		 * and birth time (see zio_write_compress).  While we
		 * need to reset the BP_SET_LSIZE() call that happened
		 * in dmu_sync_ready for old style holes, we do *not*
		 * want to wipe out the information contained in new
		 * style holes.  Thus, only zero out the block pointer if
		 * it's an old style hole.
		 */
		if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
		    dr->dt.dl.dr_overridden_by.blk_birth == 0)
			BP_ZERO(&dr->dt.dl.dr_overridden_by);
	} else {
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	}
	cv_broadcast(&db->db_changed);
	mutex_exit(&db->db_mtx);

	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

	kmem_free(dsa, sizeof (*dsa));
}

static void
dmu_sync_late_arrival_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dmu_sync_arg_t *dsa = zio->io_private;
	blkptr_t *bp_orig = &zio->io_bp_orig;

	if (zio->io_error == 0 && !BP_IS_HOLE(bp)) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
		ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
		ASSERT(zio->io_bp->blk_birth == zio->io_txg);
		ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
		zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
	}

	dmu_tx_commit(dsa->dsa_tx);

	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

	abd_put(zio->io_abd);
	kmem_free(dsa, sizeof (*dsa));
}

static int
dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
    zio_prop_t *zp, zbookmark_phys_t *zb)
{
	dmu_sync_arg_t *dsa;
	dmu_tx_t *tx;

	tx = dmu_tx_create(os);
	dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
		dmu_tx_abort(tx);
		/* Make zl_get_data do txg_wait_synced() */
		return (SET_ERROR(EIO));
	}

	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	dsa->dsa_dr = NULL;
	dsa->dsa_done = done;
	dsa->dsa_zgd = zgd;
	dsa->dsa_tx = tx;

	/*
	 * Since we are currently syncing this txg, it's nontrivial to
	 * determine what BP to nopwrite against, so we disable nopwrite.
	 *
	 * When syncing, the db_blkptr is initially the BP of the previous
	 * txg.  We cannot nopwrite against it because it will be changed
	 * (this is similar to the non-late-arrival case where the dbuf is
	 * dirty in a future txg).
	 *
	 * Then dbuf_write_ready() sets db_blkptr to the location we will
	 * write.  We cannot nopwrite against it because although the BP
	 * will not (typically) be changed, the data has not yet been
	 * persisted to this location.
	 *
	 * Finally, when dbuf_write_done() is called, it is theoretically
	 * possible to always nopwrite, because the data that was written in
	 * this txg is the same data that we are trying to write.  However,
	 * we would need to check that this dbuf is not dirty in any future
	 * txg's (as we do in the normal dmu_sync() path).  For simplicity,
	 * we don't nopwrite in this case.
	 */
	zp->zp_nopwrite = B_FALSE;

	zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
	    abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
	    zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
	    dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done,
	    dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));

	return (0);
}

/*
 * Intent log support: sync the block associated with db to disk.
 * N.B. and XXX: the caller is responsible for making sure that the
 * data isn't changing while dmu_sync() is writing it.
 *
 * Return values:
 *
 * EEXIST: this txg has already been synced, so there's nothing to do.
 *	The caller should not log the write.
 *
 * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
 *	The caller should not log the write.
 *
 * EALREADY: this block is already in the process of being synced.
 *	The caller should track its progress (somehow).
 *
 * EIO: could not do the I/O.
 *	The caller should do a txg_wait_synced().
 *
 * 0: the I/O has been initiated.
 *	The caller should log this blkptr in the done callback.
 *	It is possible that the I/O will fail, in which case
 *	the error will be reported to the done callback and
 *	propagated to pio from zio_done().
 */
int
dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
	objset_t *os = db->db_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	dbuf_dirty_record_t *dr;
	dmu_sync_arg_t *dsa;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	dnode_t *dn;

	ASSERT(pio != NULL);
	ASSERT(txg != 0);

	SET_BOOKMARK(&zb, ds->ds_object,
	    db->db.db_object, db->db_level, db->db_blkid);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
	DB_DNODE_EXIT(db);

	/*
	 * If we're frozen (running ziltest), we always need to generate a bp.
	 */
	if (txg > spa_freeze_txg(os->os_spa))
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));

	/*
	 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
	 * and us.  If we determine that this txg is not yet syncing,
	 * but it begins to sync a moment later, that's OK because the
	 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
	 */
	mutex_enter(&db->db_mtx);

	if (txg <= spa_last_synced_txg(os->os_spa)) {
		/*
		 * This txg has already synced.  There's nothing to do.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(EEXIST));
	}

	if (txg <= spa_syncing_txg(os->os_spa)) {
		/*
		 * This txg is currently syncing, so we can't mess with
		 * the dirty record anymore; just write a new log block.
		 */
		mutex_exit(&db->db_mtx);
		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
	}

	dr = db->db_last_dirty;
	while (dr && dr->dr_txg != txg)
		dr = dr->dr_next;

	if (dr == NULL) {
		/*
		 * There's no dr for this dbuf, so it must have been freed.
		 * There's no need to log writes to freed blocks, so we're done.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(ENOENT));
	}

	ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg);

	if (db->db_blkptr != NULL) {
		/*
		 * We need to fill in zgd_bp with the current blkptr so that
		 * the nopwrite code can check if we're writing the same
		 * data that's already on disk.  We can only nopwrite if we
		 * are sure that after making the copy, db_blkptr will not
		 * change until our i/o completes.  We ensure this by
		 * holding the db_mtx, and only allowing nopwrite if the
		 * block is not already dirty (see below).  This is verified
		 * by dmu_sync_done(), which VERIFYs that the db_blkptr has
		 * not changed.
		 */
		*zgd->zgd_bp = *db->db_blkptr;
	}

	/*
	 * Assume the on-disk data is X, the current syncing data (in
	 * txg - 1) is Y, and the current in-memory data is Z (currently
	 * in dmu_sync).
	 *
	 * We usually want to perform a nopwrite if X and Z are the
	 * same.  However, if Y is different (i.e. the BP is going to
	 * change before this write takes effect), then a nopwrite will
	 * be incorrect - we would override with X, which could have
	 * been freed when Y was written.
	 *
	 * (Note that this is not a concern when we are nop-writing from
	 * syncing context, because X and Y must be identical, because
	 * all previous txgs have been synced.)
	 *
	 * Therefore, we disable nopwrite if the current BP could change
	 * before this TXG.  There are two ways it could change: by
	 * being dirty (dr_next is non-NULL), or by being freed
	 * (dnode_block_freed()).  This behavior is verified by
	 * zio_done(), which VERIFYs that the override BP is identical
	 * to the on-disk BP.
	 */
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dr->dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
		zp.zp_nopwrite = B_FALSE;
	DB_DNODE_EXIT(db);

	ASSERT(dr->dr_txg == txg);
	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * We have already issued a sync write for this buffer,
		 * or this buffer has already been synced.
		 * It could not have been dirtied since, or we would
		 * have cleared the state.
		 */
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(EALREADY));
	}

	ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
	mutex_exit(&db->db_mtx);

	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
	dsa->dsa_dr = dr;
	dsa->dsa_done = done;
	dsa->dsa_zgd = zgd;
	dsa->dsa_tx = NULL;

	zio_nowait(arc_write(pio, os->os_spa, txg,
	    zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
	    &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));

	return (0);
}
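/*
 * Illustrative sketch (hypothetical helper): how an intent-log caller
 * might dispatch on the dmu_sync() return values documented above.  The
 * zgd setup and the ZIL bookkeeping are assumed to happen elsewhere
 * (compare zfs_get_data()).
 */
static int
example_log_block(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
	int err = dmu_sync(pio, txg, done, zgd);

	switch (err) {
	case 0:
		/* I/O initiated; 'done' will log the resulting blkptr. */
		break;
	case EEXIST:
	case ENOENT:
		/* Already synced or freed; don't log the write. */
		break;
	case EALREADY:
		/* A sync write is already in flight for this block. */
		break;
	case EIO:
		/* The caller should txg_wait_synced() and retry. */
		break;
	}
	return (err);
}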
int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	err = dnode_set_blksz(dn, size, ibs, tx);
	dnode_rele(dn, FTAG);
	return (err);
}

void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/*
	 * Send streams include each object's checksum function.  This
	 * check ensures that the receiving system can understand the
	 * checksum function transmitted.
	 */
	ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);

	VERIFY0(dnode_hold(os, object, FTAG, &dn));
	ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
	dn->dn_checksum = checksum;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/*
	 * Send streams include each object's compression function.  This
	 * check ensures that the receiving system can understand the
	 * compression function transmitted.
	 */
	ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);

	VERIFY0(dnode_hold(os, object, FTAG, &dn));
	dn->dn_compress = compress;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

int zfs_mdcomp_disable = 0;

/*
 * When the "redundant_metadata" property is set to "most", only indirect
 * blocks of this level and higher will have an additional ditto block.
 */
int zfs_redundant_metadata_most_ditto_level = 2;

void
dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
{
	dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
	boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
	    (wp & WP_SPILL));
	enum zio_checksum checksum = os->os_checksum;
	enum zio_compress compress = os->os_compress;
	enum zio_checksum dedup_checksum = os->os_dedup_checksum;
	boolean_t dedup = B_FALSE;
	boolean_t nopwrite = B_FALSE;
	boolean_t dedup_verify = os->os_dedup_verify;
	int copies = os->os_copies;

	/*
	 * We maintain different write policies for each of the following
	 * types of data:
	 *	 1. metadata
	 *	 2. preallocated blocks (i.e. level-0 blocks of a dump device)
	 *	 3. all other level 0 blocks
	 */
	if (ismd) {
		if (zfs_mdcomp_disable) {
			compress = ZIO_COMPRESS_EMPTY;
		} else {
			/*
			 * XXX -- we should design a compression algorithm
			 * that specializes in arrays of bps.
			 */
			compress = zio_compress_select(os->os_spa,
			    ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);
		}

		/*
		 * Metadata always gets checksummed.  If the data
		 * checksum is multi-bit correctable, and it's not a
		 * ZBT-style checksum, then it's suitable for metadata
		 * as well.  Otherwise, the metadata checksum defaults
		 * to fletcher4.
		 */
		if (!(zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_METADATA) ||
		    (zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_EMBEDDED))
			checksum = ZIO_CHECKSUM_FLETCHER_4;

		if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
		    (os->os_redundant_metadata ==
		    ZFS_REDUNDANT_METADATA_MOST &&
		    (level >= zfs_redundant_metadata_most_ditto_level ||
		    DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
			copies++;
	} else if (wp & WP_NOFILL) {
		ASSERT(level == 0);

		/*
		 * If we're writing preallocated blocks, we aren't actually
		 * writing them so don't set any policy properties.  These
		 * blocks are currently only used by an external subsystem
		 * outside of zfs (i.e. dump) and not written by the zio
		 * pipeline.
		 */
		compress = ZIO_COMPRESS_OFF;
		checksum = ZIO_CHECKSUM_NOPARITY;
	} else {
		compress = zio_compress_select(os->os_spa, dn->dn_compress,
		    compress);

		checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
		    zio_checksum_select(dn->dn_checksum, checksum) :
		    dedup_checksum;

		/*
		 * Determine dedup setting.  If we are in dmu_sync(),
		 * we won't actually dedup now because that's all
		 * done in syncing context; but we do want to use the
		 * dedup checksum.  If the checksum is not strong
		 * enough to ensure unique signatures, force
		 * dedup_verify.
		 */
		if (dedup_checksum != ZIO_CHECKSUM_OFF) {
			dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
			if (!(zio_checksum_table[checksum].ci_flags &
			    ZCHECKSUM_FLAG_DEDUP))
				dedup_verify = B_TRUE;
		}

		/*
		 * Enable nopwrite if we have a secure enough checksum
		 * algorithm (see comment in zio_nop_write) and
		 * compression is enabled.  We don't enable nopwrite if
		 * dedup is enabled as the two features are mutually
		 * exclusive.
		 */
		nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_NOPWRITE) &&
		    compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
	}

	zp->zp_checksum = checksum;
	zp->zp_compress = compress;
	ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);

	zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
	zp->zp_level = level;
	zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
	zp->zp_dedup = dedup;
	zp->zp_dedup_verify = dedup && dedup_verify;
	zp->zp_nopwrite = nopwrite;
}
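/*
 * Illustrative sketch (hypothetical helper): how a writer obtains its
 * zio_prop_t from the policy routine above.  The held dnode and the
 * level-0, non-spill, non-dmu_sync() context are assumptions for the
 * example; dmu_sync() passes WP_DMU_SYNC instead.
 */
static void
example_write_policy(objset_t *os, dnode_t *dn, zio_prop_t *zpp)
{
	dmu_write_policy(os, dn, 0, 0, zpp);
	ASSERT3U(zpp->zp_copies, <=, spa_max_replication(os->os_spa));
}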
int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
	dnode_t *dn;
	int err;

	/*
	 * Sync any current changes before
	 * we go trundling through the block pointers.
	 */
	err = dmu_object_wait_synced(os, object);
	if (err) {
		return (err);
	}

	err = dnode_hold(os, object, FTAG, &dn);
	if (err) {
		return (err);
	}

	err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
	dnode_rele(dn, FTAG);

	return (err);
}
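/*
 * Illustrative sketch (hypothetical helper): an lseek(SEEK_HOLE)-style
 * lookup built on dmu_offset_next().  On success *off is advanced to the
 * start of the next hole; ESRCH from dnode_next_offset() means there is
 * no hole past *off.
 */
static int
example_seek_hole(objset_t *os, uint64_t object, uint64_t *off)
{
	return (dmu_offset_next(os, object, B_TRUE, off));
}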
/*
 * Given a ZFS object, flush all of its dirty blocks to disk if the object
 * contains any dirty dnodes.  This ensures that the DMU object info is
 * updated.  A more efficient future version might just find the TXG with
 * the maximum ID and wait for that to be synced.
 */
int
dmu_object_wait_synced(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	int error, i;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error) {
		return (error);
	}

	for (i = 0; i < TXG_SIZE; i++) {
		if (list_link_active(&dn->dn_dirty_link[i])) {
			break;
		}
	}
	dnode_rele(dn, FTAG);
	if (i != TXG_SIZE) {
		txg_wait_synced(dmu_objset_pool(os), 0);
	}

	return (0);
}

void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
	dnode_phys_t *dnp;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	mutex_enter(&dn->dn_mtx);

	dnp = dn->dn_phys;

	doi->doi_data_block_size = dn->dn_datablksz;
	doi->doi_metadata_block_size = dn->dn_indblkshift ?
	    1ULL << dn->dn_indblkshift : 0;
	doi->doi_type = dn->dn_type;
	doi->doi_bonus_type = dn->dn_bonustype;
	doi->doi_bonus_size = dn->dn_bonuslen;
	doi->doi_indirection = dn->dn_nlevels;
	doi->doi_checksum = dn->dn_checksum;
	doi->doi_compress = dn->dn_compress;
	doi->doi_nblkptr = dn->dn_nblkptr;
	doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
	doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	doi->doi_fill_count = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);

	mutex_exit(&dn->dn_mtx);
	rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Get information on a DMU object.
 * If doi is NULL, just indicates whether the object exists.
 */
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);

	if (err)
		return (err);

	if (doi != NULL)
		dmu_object_info_from_dnode(dn, doi);

	dnode_rele(dn, FTAG);
	return (0);
}
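/*
 * Illustrative sketch (hypothetical helper): a stat-like query built on
 * dmu_object_info().  Reporting the logical size via doi_max_offset is an
 * assumption chosen for the example.
 */
static int
example_object_size(objset_t *os, uint64_t object, uint64_t *sizep)
{
	dmu_object_info_t doi;
	int err = dmu_object_info(os, object, &doi);

	if (err == 0)
		*sizep = doi.doi_max_offset;
	return (err);
}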
/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
void
dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	DB_DNODE_ENTER(db);
	dmu_object_info_from_dnode(DB_DNODE(db), doi);
	DB_DNODE_EXIT(db);
}

/*
 * Faster still when you only care about the size.
 * This is specifically optimized for zfs_getattr().
 */
void
dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
    u_longlong_t *nblk512)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	*blksize = dn->dn_datablksz;
	/* add 1 for dnode space */
	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
	    SPA_MINBLOCKSHIFT) + 1;
	DB_DNODE_EXIT(db);
}

void
byteswap_uint64_array(void *vbuf, size_t size)
{
	uint64_t *buf = vbuf;
	size_t count = size >> 3;
	int i;

	ASSERT((size & 7) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_64(buf[i]);
}

void
byteswap_uint32_array(void *vbuf, size_t size)
{
	uint32_t *buf = vbuf;
	size_t count = size >> 2;
	int i;

	ASSERT((size & 3) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_32(buf[i]);
}

void
byteswap_uint16_array(void *vbuf, size_t size)
{
	uint16_t *buf = vbuf;
	size_t count = size >> 1;
	int i;

	ASSERT((size & 1) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_16(buf[i]);
}

/* ARGSUSED */
void
byteswap_uint8_array(void *vbuf, size_t size)
{
}
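/*
 * Illustrative sketch (hypothetical helper): the byteswap routines above
 * operate in place on a byte-counted buffer, so the size must be a
 * multiple of the element size.  The four-element array is an assumption
 * for the example.
 */
static void
example_byteswap(void)
{
	uint64_t vals[4] = { 1, 2, 3, 4 };

	byteswap_uint64_array(vals, sizeof (vals));	/* swaps all four */
}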
void
dmu_init(void)
{
	abd_init();
	zfs_dbgmsg_init();
	sa_cache_init();
	xuio_stat_init();
	dmu_objset_init();
	dnode_init();
	zfetch_init();
	l2arc_init();
	arc_init();
	dbuf_init();
}

void
dmu_fini(void)
{
	arc_fini(); /* arc depends on l2arc, so arc must go first */
	l2arc_fini();
	zfetch_fini();
	dbuf_fini();
	dnode_fini();
	dmu_objset_fini();
	xuio_stat_fini();
	sa_cache_fini();
	zfs_dbgmsg_fini();
	abd_fini();
}