/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 */
/* Copyright (c) 2013 by Saso Kiselkov. All rights reserved. */
/* Copyright (c) 2013, Joyent, Inc. All rights reserved. */
/* Copyright 2016 Nexenta Systems, Inc. All rights reserved. */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif

/*
 * Enable/disable nopwrite feature.
 */
int zfs_nopwrite_enabled = 1;

/*
 * Tunable to control percentage of dirtied blocks from frees in one TXG.
 * After this threshold is crossed, additional dirty blocks from frees
 * wait until the next TXG.
 * A value of zero will disable this throttle.
 */
uint32_t zfs_per_txg_dirty_frees_percent = 30;
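
/*
 * Example (illustrative numbers): with zfs_dirty_data_max at 4 GB, the
 * default of 30 allows roughly 1.2 GB (30% of 4 GB) of freed blocks to be
 * dirtied per open TXG before dmu_free_long_range_impl() waits for the
 * next TXG; see the threshold computation in that function below.
 */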

/*
 * This can be used for testing, to ensure that certain actions happen
 * while in the middle of a remap (which might otherwise complete too
 * quickly).
 */
int zfs_object_remap_one_indirect_delay_ticks = 0;
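
/*
 * For each object type: the byteswap function used for the type, whether
 * the type is metadata, whether its dbufs are eligible for the metadata
 * dbuf cache (best-effort description of the third flag), and its name.
 */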
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{ DMU_BSWAP_UINT8, TRUE, FALSE, "unallocated" },
	{ DMU_BSWAP_ZAP, TRUE, TRUE, "object directory" },
	{ DMU_BSWAP_UINT64, TRUE, TRUE, "object array" },
	{ DMU_BSWAP_UINT8, TRUE, FALSE, "packed nvlist" },
	{ DMU_BSWAP_UINT64, TRUE, FALSE, "packed nvlist size" },
	{ DMU_BSWAP_UINT64, TRUE, FALSE, "bpobj" },
	{ DMU_BSWAP_UINT64, TRUE, FALSE, "bpobj header" },
	{ DMU_BSWAP_UINT64, TRUE, FALSE, "SPA space map header" },
	{ DMU_BSWAP_UINT64, TRUE, FALSE, "SPA space map" },
	{ DMU_BSWAP_UINT64, TRUE, FALSE, "ZIL intent log" },
	{ DMU_BSWAP_DNODE, TRUE, FALSE, "DMU dnode" },
	{ DMU_BSWAP_OBJSET, TRUE, TRUE, "DMU objset" },
	{ DMU_BSWAP_UINT64, TRUE, TRUE, "DSL directory" },
	{ DMU_BSWAP_ZAP, TRUE, TRUE, "DSL directory child map" },
	{ DMU_BSWAP_ZAP, TRUE, TRUE, "DSL dataset snap map" },
	{ DMU_BSWAP_ZAP, TRUE, TRUE, "DSL props" },
	{ DMU_BSWAP_UINT64, TRUE, TRUE, "DSL dataset" },
	{ DMU_BSWAP_ZNODE, TRUE, FALSE, "ZFS znode" },
	{ DMU_BSWAP_OLDACL, TRUE, FALSE, "ZFS V0 ACL" },
	{ DMU_BSWAP_UINT8, FALSE, FALSE, "ZFS plain file" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "ZFS directory" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "ZFS master node" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "ZFS delete queue" },
	{ DMU_BSWAP_UINT8, FALSE, FALSE, "zvol object" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "zvol prop" },
	{ DMU_BSWAP_UINT8, FALSE, FALSE, "other uint8[]" },
	{ DMU_BSWAP_UINT64, FALSE, FALSE, "other uint64[]" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "other ZAP" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "persistent error log" },
	{ DMU_BSWAP_UINT8, TRUE, FALSE, "SPA history" },
	{ DMU_BSWAP_UINT64, TRUE, FALSE, "SPA history offsets" },
	{ DMU_BSWAP_ZAP, TRUE, TRUE, "Pool properties" },
	{ DMU_BSWAP_ZAP, TRUE, TRUE, "DSL permissions" },
	{ DMU_BSWAP_ACL, TRUE, FALSE, "ZFS ACL" },
	{ DMU_BSWAP_UINT8, TRUE, FALSE, "ZFS SYSACL" },
	{ DMU_BSWAP_UINT8, TRUE, FALSE, "FUID table" },
	{ DMU_BSWAP_UINT64, TRUE, FALSE, "FUID table size" },
	{ DMU_BSWAP_ZAP, TRUE, TRUE, "DSL dataset next clones" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "scan work queue" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "ZFS user/group used" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "ZFS user/group quota" },
	{ DMU_BSWAP_ZAP, TRUE, TRUE, "snapshot refcount tags" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "DDT ZAP algorithm" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "DDT statistics" },
	{ DMU_BSWAP_UINT8, TRUE, FALSE, "System attributes" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "SA master node" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "SA attr registration" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "SA attr layouts" },
	{ DMU_BSWAP_ZAP, TRUE, FALSE, "scan translations" },
	{ DMU_BSWAP_UINT8, FALSE, FALSE, "deduplicated block" },
	{ DMU_BSWAP_ZAP, TRUE, TRUE, "DSL deadlist map" },
	{ DMU_BSWAP_UINT64, TRUE, TRUE, "DSL deadlist map hdr" },
	{ DMU_BSWAP_ZAP, TRUE, TRUE, "DSL dir clones" },
	{ DMU_BSWAP_UINT64, TRUE, FALSE, "bpobj subobj" }
};

const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
	{ byteswap_uint8_array, "uint8" },
	{ byteswap_uint16_array, "uint16" },
	{ byteswap_uint32_array, "uint32" },
	{ byteswap_uint64_array, "uint64" },
	{ zap_byteswap, "zap" },
	{ dnode_buf_byteswap, "dnode" },
	{ dmu_objset_byteswap, "objset" },
	{ zfs_znode_byteswap, "znode" },
	{ zfs_oldacl_byteswap, "oldacl" },
	{ zfs_acl_byteswap, "acl" }
};

int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	uint64_t blkid;
	dmu_buf_impl_t *db;

	blkid = dbuf_whichblock(dn, 0, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (0);
}

int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	blkid = dbuf_whichblock(dn, 0, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (err);
}
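
/*
 * Note: the _noread variants above return a held dbuf without reading its
 * contents; the dmu_buf_hold*() wrappers below pair them with dbuf_read()
 * so that the caller sees valid data.
 */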

int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;

	err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;

	err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}
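
/*
 * Example (illustrative): a typical consumer holds a buffer, inspects
 * db->db_data, and releases it:
 *
 *	dmu_buf_t *db;
 *	int err = dmu_buf_hold(os, object, offset, FTAG, &db,
 *	    DMU_READ_PREFETCH);
 *	if (err == 0) {
 *		(use db->db_data, db->db_size bytes)
 *		dmu_buf_rele(db, FTAG);
 *	}
 */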

int
dmu_bonus_max(void)
{
	return (DN_MAX_BONUSLEN);
}

int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else if (newsize < 0 || newsize > db_fake->db_size) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonuslen(dn, newsize, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (!DMU_OT_IS_VALID(type)) {
		error = SET_ERROR(EINVAL);
	} else if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonus_type(dn, type, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	dmu_object_type_t type;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	type = dn->dn_bonustype;
	DB_DNODE_EXIT(db);

	return (type);
}

int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error != 0)
		return (error);
	dbuf_rm_spill(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_rm_spill(dn, tx);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}

/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	dmu_buf_impl_t *db;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;

	/* as long as the bonus buf is held, the dnode will be held */
	if (refcount_add(&db->db_holds, tag) == 1) {
		VERIFY(dnode_add_ref(dn, db));
		atomic_inc_32(&dn->dn_dbufs_count);
	}

	/*
	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
	 * a dnode hold for every dbuf.
	 */
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);

	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));

	*dbp = &db->db;
	return (0);
}

/*
 * returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * if you only want to find an already existing spill db, then
 * dmu_spill_hold_existing() should be used.
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = NULL;
	int err;

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_exit(&dn->dn_struct_rwlock);

	ASSERT(db != NULL);
	err = dbuf_read(db, NULL, flags);
	if (err == 0)
		*dbp = &db->db;
	else
		dbuf_rele(db, tag);
	return (err);
}

int
dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
		err = SET_ERROR(EINVAL);
	} else {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

		if (!dn->dn_have_spill) {
			err = SET_ERROR(ENOENT);
		} else {
			err = dmu_spill_hold_by_dnode(dn,
			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
		}

		rw_exit(&dn->dn_struct_rwlock);
	}

	DB_DNODE_EXIT(db);
	return (err);
}

int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
	DB_DNODE_EXIT(db);

	return (err);
}
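
/*
 * Spill blocks hold system attributes that overflow the dnode's bonus
 * buffer (see sys/sa.h).  The holds above follow the same hold/rele
 * discipline as regular dbufs.
 */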

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
	dmu_buf_t **dbp;
	uint64_t blkid, nblks, i;
	uint32_t dbuf_flags;
	int err;
	zio_t *zio;

	ASSERT(length <= DMU_MAX_ACCESS);

	/*
	 * Note: We directly notify the prefetch code of this read, so that
	 * we can tell it about the multi-block read.  dbuf_read() only knows
	 * about the one block it is accessing.
	 */
	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
	    DB_RF_NOPREFETCH;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
		    P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
	} else {
		if (offset + length > dn->dn_datablksz) {
			zfs_panic_recover("zfs: accessing past end of object "
			    "%llx/%llx (size=%u access=%llu+%llu)",
			    (longlong_t)dn->dn_objset->
			    os_dsl_dataset->ds_object,
			    (longlong_t)dn->dn_object, dn->dn_datablksz,
			    (longlong_t)offset, (longlong_t)length);
			rw_exit(&dn->dn_struct_rwlock);
			return (SET_ERROR(EIO));
		}
		nblks = 1;
	}
	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
	blkid = dbuf_whichblock(dn, 0, offset);
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
		if (db == NULL) {
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
			zio_nowait(zio);
			return (SET_ERROR(EIO));
		}

		/* initiate async i/o */
		if (read)
			(void) dbuf_read(db, zio, dbuf_flags);
		dbp[i] = &db->db;
	}

	if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
	    DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) {
		dmu_zfetch(&dn->dn_zfetch, blkid, nblks,
		    read && DNODE_IS_CACHEABLE(dn));
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* wait for async i/o */
	err = zio_wait(zio);
	if (err) {
		dmu_buf_rele_array(dbp, nblks, tag);
		return (err);
	}

	/* wait for other io to complete */
	if (read) {
		for (i = 0; i < nblks; i++) {
			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
			mutex_enter(&db->db_mtx);
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL)
				cv_wait(&db->db_changed, &db->db_mtx);
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
			mutex_exit(&db->db_mtx);
			if (err) {
				dmu_buf_rele_array(dbp, nblks, tag);
				return (err);
			}
		}
	}

	*numbufsp = nblks;
	*dbpp = dbp;
	return (0);
}

static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
    uint64_t length, boolean_t read, void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);
	DB_DNODE_EXIT(db);

	return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
	int i;
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	if (numbufs == 0)
		return;

	for (i = 0; i < numbufs; i++) {
		if (dbp[i])
			dbuf_rele(dbp[i], tag);
	}

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}
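
/*
 * Example (illustrative): reading a range through the array interface
 * with a held dnode:
 *
 *	dmu_buf_t **dbp;
 *	int numbufs;
 *	err = dmu_buf_hold_array_by_dnode(dn, offset, length, TRUE, FTAG,
 *	    &numbufs, &dbp, DMU_READ_PREFETCH);
 *	if (err == 0) {
 *		(consume dbp[0] .. dbp[numbufs - 1])
 *		dmu_buf_rele_array(dbp, numbufs, FTAG);
 *	}
 */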

/*
 * Issue prefetch i/os for the given blocks.  If level is greater than 0, the
 * indirect blocks prefetched will be those that point to the blocks containing
 * the data starting at offset, and continuing to offset + len.
 *
 * Note that if the indirect blocks above the blocks being prefetched are not
 * in cache, they will be asynchronously read in.
 */
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, zio_priority_t pri)
{
	dnode_t *dn;
	uint64_t blkid;
	int nblks, err;

	if (len == 0) {		/* they're interested in the bonus buffer */
		dn = DMU_META_DNODE(os);

		if (object == 0 || object >= DN_MAX_OBJECT)
			return;

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		blkid = dbuf_whichblock(dn, level,
		    object * sizeof (dnode_phys_t));
		dbuf_prefetch(dn, level, blkid, pri, 0);
		rw_exit(&dn->dn_struct_rwlock);
		return;
	}

	/*
	 * XXX - Note, if the dnode for the requested object is not
	 * already cached, we will do a *synchronous* read in the
	 * dnode_hold() call.  The same is true for any indirects.
	 */
	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	/*
	 * offset + len - 1 is the last byte we want to prefetch for, and offset
	 * is the first.  Then dbuf_whichblock(dn, level, off + len - 1) is the
	 * last block we want to prefetch, and dbuf_whichblock(dn, level,
	 * offset) is the first.  Then the number we need to prefetch is the
	 * last - first + 1.
	 */
	if (level > 0 || dn->dn_datablkshift != 0) {
		nblks = dbuf_whichblock(dn, level, offset + len - 1) -
		    dbuf_whichblock(dn, level, offset) + 1;
	} else {
		nblks = (offset < dn->dn_datablksz);
	}

	if (nblks != 0) {
		blkid = dbuf_whichblock(dn, level, offset);
		for (int i = 0; i < nblks; i++)
			dbuf_prefetch(dn, level, blkid + i, pri, 0);
	}

	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
}
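
/*
 * Example (illustrative): prefetch the level-0 blocks backing the first
 * megabyte of an object at asynchronous read priority:
 *
 *	dmu_prefetch(os, object, 0, 0, 1 << 20, ZIO_PRIORITY_ASYNC_READ);
 */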

/*
 * Get the next "chunk" of file data to free.  We traverse the file from
 * the end so that the file gets shorter over time (if we crash in the
 * middle, this will leave us in a better state).  We find allocated file
 * data by simply searching the allocated level 1 indirects.
 *
 * On input, *start should be the first offset that does not need to be
 * freed (e.g. "offset + length").  On return, *start will be the first
 * offset that should be freed.
 */
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
{
	uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
	/* bytes of data covered by a level-1 indirect block */
	uint64_t iblkrange =
	    dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

	ASSERT3U(minimum, <=, *start);

	if (*start - minimum <= iblkrange * maxblks) {
		*start = minimum;
		return (0);
	}
	ASSERT(ISP2(iblkrange));

	for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
		int err;

		/*
		 * dnode_next_offset(BACKWARDS) will find an allocated L1
		 * indirect block at or before the input offset.  We must
		 * decrement *start so that it is at the end of the region
		 * to search.
		 */
		(*start)--;
		err = dnode_next_offset(dn,
		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);

		/* if there are no indirect blocks before start, we are done */
		if (err == ESRCH) {
			*start = minimum;
			break;
		} else if (err != 0) {
			return (err);
		}

		/* set start to the beginning of this L1 indirect */
		*start = P2ALIGN(*start, iblkrange);
	}
	if (*start < minimum)
		*start = minimum;
	return (0);
}
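
/*
 * Worked example for iblkrange above (assuming 128K data blocks and 128K
 * indirect blocks): an indirect block holds 128K / sizeof (blkptr_t) =
 * 1024 block pointers, so each L1 indirect covers 128K * 1024 = 128 MB
 * of file data.
 */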

/*
 * If this objset is of type OST_ZFS return true if vfs's unmounted flag is set,
 * otherwise return false.
 * Used below in dmu_free_long_range_impl() to enable abort when unmounting.
 */
/*ARGSUSED*/
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS)
		return (zfs_get_vfs_flag_unmounted(os));
#endif
	return (B_FALSE);
}

static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length)
{
	uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	int err;
	uint64_t dirty_frees_threshold;
	dsl_pool_t *dp = dmu_objset_pool(os);

	if (offset >= object_size)
		return (0);

	if (zfs_per_txg_dirty_frees_percent <= 100)
		dirty_frees_threshold =
		    zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
	else
		dirty_frees_threshold = zfs_dirty_data_max / 4;

	if (length == DMU_OBJECT_END || offset + length > object_size)
		length = object_size - offset;

	while (length != 0) {
		uint64_t chunk_end, chunk_begin, chunk_len;
		uint64_t long_free_dirty_all_txgs = 0;
		dmu_tx_t *tx;

		if (dmu_objset_zfs_unmounting(dn->dn_objset))
			return (SET_ERROR(EINTR));

		chunk_end = chunk_begin = offset + length;

		/* move chunk_begin backwards to the beginning of this chunk */
		err = get_next_chunk(dn, &chunk_begin, offset);
		if (err)
			return (err);
		ASSERT3U(chunk_begin, >=, offset);
		ASSERT3U(chunk_begin, <=, chunk_end);

		chunk_len = chunk_end - chunk_begin;

		mutex_enter(&dp->dp_lock);
		for (int t = 0; t < TXG_SIZE; t++) {
			long_free_dirty_all_txgs +=
			    dp->dp_long_free_dirty_pertxg[t];
		}
		mutex_exit(&dp->dp_lock);

		/*
		 * To avoid filling up a TXG with just frees, wait for
		 * the next TXG to open before freeing more chunks if
		 * we have reached the threshold of frees.
		 */
		if (dirty_frees_threshold != 0 &&
		    long_free_dirty_all_txgs >= dirty_frees_threshold) {
			txg_wait_open(dp, 0);
			continue;
		}

		tx = dmu_tx_create(os);
		dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);

		/*
		 * Mark this transaction as typically resulting in a net
		 * reduction in space used.
		 */
		dmu_tx_mark_netfree(tx);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}

		mutex_enter(&dp->dp_lock);
		dp->dp_long_free_dirty_pertxg[dmu_tx_get_txg(tx) & TXG_MASK] +=
		    chunk_len;
		mutex_exit(&dp->dp_lock);
		DTRACE_PROBE3(free__long__range,
		    uint64_t, long_free_dirty_all_txgs, uint64_t, chunk_len,
		    uint64_t, dmu_tx_get_txg(tx));
		dnode_free_range(dn, chunk_begin, chunk_len, tx);
		dmu_tx_commit(tx);

		length -= chunk_len;
	}
	return (0);
}

int
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);
	err = dmu_free_long_range_impl(os, dn, offset, length);

	/*
	 * It is important to zero out the maxblkid when freeing the entire
	 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
	 * will take the fast path, and (b) dnode_reallocate() can verify
	 * that the entire file has been freed.
	 */
	if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
		dn->dn_maxblkid = 0;

	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_free_long_object(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx;
	int err;

	err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
	dmu_tx_mark_netfree(tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err == 0) {
		err = dmu_object_free(os, object, tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}

	return (err);
}

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	ASSERT(offset < UINT64_MAX);
	ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
	return (0);
}

static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dmu_buf_t **dbp;
	int numbufs, err = 0;

	/*
	 * Deal with odd block sizes, where there can't be data past the first
	 * block.  If we ever do the tail block optimization, we will need to
	 * handle that here as well.
	 */
	if (dn->dn_maxblkid == 0) {
		int newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		bzero((char *)buf + newsz, size - newsz);
		size = newsz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
		int i;

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp, flags);
		if (err)
			break;

		for (i = 0; i < numbufs; i++) {
			int tocpy;
			int bufoff;
			dmu_buf_t *db = dbp[i];

			ASSERT(size > 0);

			bufoff = offset - db->db_offset;
			tocpy = (int)MIN(db->db_size - bufoff, size);

			bcopy((char *)db->db_data + bufoff, buf, tocpy);

			offset += tocpy;
			size -= tocpy;
			buf = (char *)buf + tocpy;
		}
		dmu_buf_rele_array(dbp, numbufs, FTAG);
	}
	return (err);
}

int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	err = dmu_read_impl(dn, offset, size, buf, flags);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
    uint32_t flags)
{
	return (dmu_read_impl(dn, offset, size, buf, flags));
}

static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	int i;

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		bcopy(buf, (char *)db->db_data + bufoff, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
		buf = (char *)buf + tocpy;
	}
}

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}
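
/*
 * Example (illustrative): dmu_write() must be called with an assigned
 * transaction covering the range:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		dmu_write(os, object, offset, size, buf, tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 */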

static int
dmu_object_remap_one_indirect(objset_t *os, dnode_t *dn,
    uint64_t last_removal_txg, uint64_t offset)
{
	uint64_t l1blkid = dbuf_whichblock(dn, 1, offset);
	int err = 0;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	dmu_buf_impl_t *dbuf = dbuf_hold_level(dn, 1, l1blkid, FTAG);
	ASSERT3P(dbuf, !=, NULL);

	/*
	 * If the block hasn't been written yet, this default will ensure
	 * we don't try to remap it.
	 */
	uint64_t birth = UINT64_MAX;
	ASSERT3U(last_removal_txg, !=, UINT64_MAX);
	if (dbuf->db_blkptr != NULL)
		birth = dbuf->db_blkptr->blk_birth;
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * If this L1 was already written after the last removal, then we've
	 * already tried to remap it.
	 */
	if (birth <= last_removal_txg &&
	    dbuf_read(dbuf, NULL, DB_RF_MUST_SUCCEED) == 0 &&
	    dbuf_can_remap(dbuf)) {
		dmu_tx_t *tx = dmu_tx_create(os);
		dmu_tx_hold_remap_l1indirect(tx, dn->dn_object);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err == 0) {
			(void) dbuf_dirty(dbuf, tx);
			dmu_tx_commit(tx);
		} else {
			dmu_tx_abort(tx);
		}
	}

	dbuf_rele(dbuf, FTAG);

	delay(zfs_object_remap_one_indirect_delay_ticks);

	return (err);
}

/*
 * Remap all blockpointers in the object, if possible, so that they reference
 * only concrete vdevs.
 *
 * To do this, iterate over the L0 blockpointers and remap any that reference
 * an indirect vdev.  Note that we only examine L0 blockpointers; since we
 * cannot guarantee that we can remap all blockpointers anyway (due to split
 * blocks), we do not want to make the code unnecessarily complicated to
 * catch the unlikely case that there is an L1 block on an indirect vdev that
 * contains no indirect blockpointers.
 */
int
dmu_object_remap_indirects(objset_t *os, uint64_t object,
    uint64_t last_removal_txg)
{
	uint64_t offset, l1span;
	int err;
	dnode_t *dn;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0) {
		return (err);
	}

	if (dn->dn_nlevels <= 1) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			err = SET_ERROR(EINTR);
		}

		/*
		 * If the dnode has no indirect blocks, we cannot dirty them.
		 * We still want to remap the blkptr(s) in the dnode if
		 * appropriate, so mark it as dirty.
		 */
		if (err == 0 && dnode_needs_remap(dn)) {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_bonus(tx, dn->dn_object);
			if ((err = dmu_tx_assign(tx, TXG_WAIT)) == 0) {
				dnode_setdirty(dn, tx);
				dmu_tx_commit(tx);
			} else {
				dmu_tx_abort(tx);
			}
		}

		dnode_rele(dn, FTAG);
		return (err);
	}

	offset = 0;
	l1span = 1ULL << (dn->dn_indblkshift - SPA_BLKPTRSHIFT +
	    dn->dn_datablkshift);
	/*
	 * Find the next L1 indirect that is not a hole.
	 */
	while (dnode_next_offset(dn, 0, &offset, 2, 1, 0) == 0) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			err = SET_ERROR(EINTR);
			break;
		}
		if ((err = dmu_object_remap_one_indirect(os, dn,
		    last_removal_txg, offset)) != 0) {
			break;
		}
		offset += l1span;
	}

	dnode_rele(dn, FTAG);
	return (err);
}

void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		dmu_buf_will_not_fill(db, tx);
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx)
{
	dmu_buf_t *db;

	ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
	ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
	VERIFY0(dmu_buf_hold_noread(os, object, offset,
	    FTAG, &db));

	dmu_buf_write_embedded(db,
	    data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
	    uncompressed_size, compressed_size, byteorder, tx);

	dmu_buf_rele(db, FTAG);
}

/*
 * DMU support for xuio
 */
kstat_t *xuio_ksp = NULL;
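
/*
 * An xuio is an extended uio used for zero-copy I/O: rather than copying
 * data into caller-supplied buffers, loaned ARC buffers are attached to
 * the uio's iovecs.  The xuiostat_onloan_* kstats track how many buffers
 * are currently on loan.
 */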
(arc_buf_t *)); 1217c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kmem_free(priv, sizeof (dmu_xuio_t)); 1218c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1219c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (XUIO_XUZC_RW(xuio) == UIO_READ) 1220c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk); 1221c242f9a0Schunli zhang - Sun Microsystems - Irvine United States else 1222c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk); 1223c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1224c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1225c242f9a0Schunli zhang - Sun Microsystems - Irvine United States /* 1226c242f9a0Schunli zhang - Sun Microsystems - Irvine United States * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf } 1227c242f9a0Schunli zhang - Sun Microsystems - Irvine United States * and increase priv->next by 1. 1228c242f9a0Schunli zhang - Sun Microsystems - Irvine United States */ 1229c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int 1230c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n) 1231c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1232c242f9a0Schunli zhang - Sun Microsystems - Irvine United States struct iovec *iov; 1233c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio_t *uio = &xuio->xu_uio; 1234c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1235c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int i = priv->next++; 1236c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1237c242f9a0Schunli zhang - Sun Microsystems - Irvine United States ASSERT(i < priv->cnt); 12385602294fSDan Kimmel ASSERT(off + n <= arc_buf_lsize(abuf)); 1239c242f9a0Schunli zhang - Sun Microsystems - Irvine United States iov = uio->uio_iov + i; 1240c242f9a0Schunli zhang - Sun Microsystems - Irvine United States iov->iov_base = (char *)abuf->b_data + off; 1241c242f9a0Schunli zhang - Sun Microsystems - Irvine United States iov->iov_len = n; 1242c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv->bufs[i] = abuf; 1243c242f9a0Schunli zhang - Sun Microsystems - Irvine United States return (0); 1244c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1245c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1246c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int 1247c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_cnt(xuio_t *xuio) 1248c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1249c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1250c242f9a0Schunli zhang - Sun Microsystems - Irvine United States return (priv->cnt); 1251c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1252c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1253c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t * 1254c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_arcbuf(xuio_t *xuio, int i) 1255c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1256c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1257c242f9a0Schunli 
zhang - Sun Microsystems - Irvine United States 1258c242f9a0Schunli zhang - Sun Microsystems - Irvine United States ASSERT(i < priv->cnt); 1259c242f9a0Schunli zhang - Sun Microsystems - Irvine United States return (priv->bufs[i]); 1260c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1261c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1262c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void 1263c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_clear(xuio_t *xuio, int i) 1264c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1265c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1266c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1267c242f9a0Schunli zhang - Sun Microsystems - Irvine United States ASSERT(i < priv->cnt); 1268c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv->bufs[i] = NULL; 1269c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1270c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1271c242f9a0Schunli zhang - Sun Microsystems - Irvine United States static void 1272c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_init(void) 1273c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1274c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc", 1275c242f9a0Schunli zhang - Sun Microsystems - Irvine United States KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t), 1276c242f9a0Schunli zhang - Sun Microsystems - Irvine United States KSTAT_FLAG_VIRTUAL); 1277c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (xuio_ksp != NULL) { 1278c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_ksp->ks_data = &xuio_stats; 1279c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kstat_install(xuio_ksp); 1280c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1281c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1282c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1283c242f9a0Schunli zhang - Sun Microsystems - Irvine United States static void 1284c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_fini(void) 1285c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1286c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (xuio_ksp != NULL) { 1287c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kstat_delete(xuio_ksp); 1288c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_ksp = NULL; 1289c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1290c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1291c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1292c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void 129399aa8b55SPrashanth Sreenivasa xuio_stat_wbuf_copied(void) 1294c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1295c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_wbuf_copied); 1296c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1297c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1298c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void 129999aa8b55SPrashanth Sreenivasa xuio_stat_wbuf_nocopy(void) 1300c242f9a0Schunli zhang - Sun Microsystems - Irvine United 
States { 1301c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_wbuf_nocopy); 1302c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1303c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1304fa9e4066Sahrens #ifdef _KERNEL 1305*8dfe5547SRichard Yao int 1306f8554bb9SMatthew Ahrens dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) 1307feb08c6bSbillm { 1308feb08c6bSbillm dmu_buf_t **dbp; 1309feb08c6bSbillm int numbufs, i, err; 1310c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_t *xuio = NULL; 1311feb08c6bSbillm 1312feb08c6bSbillm /* 1313feb08c6bSbillm * NB: we could do this block-at-a-time, but it's nice 1314feb08c6bSbillm * to be reading in parallel. 1315feb08c6bSbillm */ 1316f8554bb9SMatthew Ahrens err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size, 1317f8554bb9SMatthew Ahrens TRUE, FTAG, &numbufs, &dbp, 0); 1318feb08c6bSbillm if (err) 1319feb08c6bSbillm return (err); 1320feb08c6bSbillm 1321c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (uio->uio_extflg == UIO_XUIO) 1322c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio = (xuio_t *)uio; 1323c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1324feb08c6bSbillm for (i = 0; i < numbufs; i++) { 1325feb08c6bSbillm int tocpy; 1326feb08c6bSbillm int bufoff; 1327feb08c6bSbillm dmu_buf_t *db = dbp[i]; 1328feb08c6bSbillm 1329feb08c6bSbillm ASSERT(size > 0); 1330feb08c6bSbillm 1331feb08c6bSbillm bufoff = uio->uio_loffset - db->db_offset; 1332feb08c6bSbillm tocpy = (int)MIN(db->db_size - bufoff, size); 1333feb08c6bSbillm 1334c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (xuio) { 1335c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 1336c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t *dbuf_abuf = dbi->db_buf; 1337c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t *abuf = dbuf_loan_arcbuf(dbi); 1338c242f9a0Schunli zhang - Sun Microsystems - Irvine United States err = dmu_xuio_add(xuio, abuf, bufoff, tocpy); 1339c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (!err) { 1340c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio->uio_resid -= tocpy; 1341c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio->uio_loffset += tocpy; 1342c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1343c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1344c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (abuf == dbuf_abuf) 1345c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_rbuf_nocopy); 1346c242f9a0Schunli zhang - Sun Microsystems - Irvine United States else 1347c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_rbuf_copied); 1348c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } else { 1349feb08c6bSbillm err = uiomove((char *)db->db_data + bufoff, tocpy, 1350feb08c6bSbillm UIO_READ, uio); 1351c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1352feb08c6bSbillm if (err) 1353feb08c6bSbillm break; 1354feb08c6bSbillm 1355feb08c6bSbillm size -= tocpy; 1356feb08c6bSbillm } 1357feb08c6bSbillm dmu_buf_rele_array(dbp, numbufs, FTAG); 1358feb08c6bSbillm 1359feb08c6bSbillm return (err); 1360feb08c6bSbillm } 1361feb08c6bSbillm 1362f8554bb9SMatthew Ahrens /* 1363f8554bb9SMatthew Ahrens * Read 'size' bytes into 
the uio buffer. 1364f8554bb9SMatthew Ahrens * From object zdb->db_object. 1365f8554bb9SMatthew Ahrens * Starting at offset uio->uio_loffset. 1366f8554bb9SMatthew Ahrens * 1367f8554bb9SMatthew Ahrens * If the caller already has a dbuf in the target object 1368f8554bb9SMatthew Ahrens * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(), 1369f8554bb9SMatthew Ahrens * because we don't have to find the dnode_t for the object. 1370f8554bb9SMatthew Ahrens */ 1371f8554bb9SMatthew Ahrens int 1372f8554bb9SMatthew Ahrens dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size) 1373f8554bb9SMatthew Ahrens { 1374f8554bb9SMatthew Ahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; 1375f8554bb9SMatthew Ahrens dnode_t *dn; 1376f8554bb9SMatthew Ahrens int err; 1377f8554bb9SMatthew Ahrens 1378f8554bb9SMatthew Ahrens if (size == 0) 1379f8554bb9SMatthew Ahrens return (0); 1380f8554bb9SMatthew Ahrens 1381f8554bb9SMatthew Ahrens DB_DNODE_ENTER(db); 1382f8554bb9SMatthew Ahrens dn = DB_DNODE(db); 1383f8554bb9SMatthew Ahrens err = dmu_read_uio_dnode(dn, uio, size); 1384f8554bb9SMatthew Ahrens DB_DNODE_EXIT(db); 1385f8554bb9SMatthew Ahrens 1386f8554bb9SMatthew Ahrens return (err); 1387f8554bb9SMatthew Ahrens } 1388f8554bb9SMatthew Ahrens 1389f8554bb9SMatthew Ahrens /* 1390f8554bb9SMatthew Ahrens * Read 'size' bytes into the uio buffer. 1391f8554bb9SMatthew Ahrens * From the specified object 1392f8554bb9SMatthew Ahrens * Starting at offset uio->uio_loffset. 1393f8554bb9SMatthew Ahrens */ 1394f8554bb9SMatthew Ahrens int 1395f8554bb9SMatthew Ahrens dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size) 1396f8554bb9SMatthew Ahrens { 1397f8554bb9SMatthew Ahrens dnode_t *dn; 1398f8554bb9SMatthew Ahrens int err; 1399f8554bb9SMatthew Ahrens 1400f8554bb9SMatthew Ahrens if (size == 0) 1401f8554bb9SMatthew Ahrens return (0); 1402f8554bb9SMatthew Ahrens 1403f8554bb9SMatthew Ahrens err = dnode_hold(os, object, FTAG, &dn); 1404f8554bb9SMatthew Ahrens if (err) 1405f8554bb9SMatthew Ahrens return (err); 1406f8554bb9SMatthew Ahrens 1407f8554bb9SMatthew Ahrens err = dmu_read_uio_dnode(dn, uio, size); 1408f8554bb9SMatthew Ahrens 1409f8554bb9SMatthew Ahrens dnode_rele(dn, FTAG); 1410f8554bb9SMatthew Ahrens 1411f8554bb9SMatthew Ahrens return (err); 1412f8554bb9SMatthew Ahrens } 1413f8554bb9SMatthew Ahrens 1414*8dfe5547SRichard Yao int 141594d1a210STim Haley dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx) 1416fa9e4066Sahrens { 1417fa9e4066Sahrens dmu_buf_t **dbp; 141894d1a210STim Haley int numbufs; 1419fa9e4066Sahrens int err = 0; 142094d1a210STim Haley int i; 1421fa9e4066Sahrens 142294d1a210STim Haley err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size, 142394d1a210STim Haley FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH); 1424ea8dc4b6Seschrock if (err) 1425ea8dc4b6Seschrock return (err); 1426fa9e4066Sahrens 1427fa9e4066Sahrens for (i = 0; i < numbufs; i++) { 1428fa9e4066Sahrens int tocpy; 1429fa9e4066Sahrens int bufoff; 1430fa9e4066Sahrens dmu_buf_t *db = dbp[i]; 1431fa9e4066Sahrens 1432fa9e4066Sahrens ASSERT(size > 0); 1433fa9e4066Sahrens 1434feb08c6bSbillm bufoff = uio->uio_loffset - db->db_offset; 1435fa9e4066Sahrens tocpy = (int)MIN(db->db_size - bufoff, size); 1436fa9e4066Sahrens 1437fa9e4066Sahrens ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); 1438fa9e4066Sahrens 1439fa9e4066Sahrens if (tocpy == db->db_size) 1440fa9e4066Sahrens dmu_buf_will_fill(db, tx); 1441fa9e4066Sahrens else 1442fa9e4066Sahrens dmu_buf_will_dirty(db, tx); 1443fa9e4066Sahrens 
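	/*
	 * Editor's note (illustrative addition, not upstream text):
	 * dmu_buf_will_fill() is the full-block overwrite path and
	 * avoids reading the old block in; a partial copy must use
	 * dmu_buf_will_dirty(), a read-modify-write that preserves the
	 * bytes outside [bufoff, bufoff + tocpy).
	 */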
1444fa9e4066Sahrens /* 1445fa9e4066Sahrens * XXX uiomove could block forever (eg. nfs-backed 1446fa9e4066Sahrens * pages). There needs to be a uiolockdown() function 1447fa9e4066Sahrens * to lock the pages in memory, so that uiomove won't 1448fa9e4066Sahrens * block. 1449fa9e4066Sahrens */ 1450fa9e4066Sahrens err = uiomove((char *)db->db_data + bufoff, tocpy, 1451fa9e4066Sahrens UIO_WRITE, uio); 1452fa9e4066Sahrens 1453fa9e4066Sahrens if (tocpy == db->db_size) 1454fa9e4066Sahrens dmu_buf_fill_done(db, tx); 1455fa9e4066Sahrens 1456fa9e4066Sahrens if (err) 1457fa9e4066Sahrens break; 1458fa9e4066Sahrens 1459fa9e4066Sahrens size -= tocpy; 1460fa9e4066Sahrens } 146194d1a210STim Haley 1462ea8dc4b6Seschrock dmu_buf_rele_array(dbp, numbufs, FTAG); 1463fa9e4066Sahrens return (err); 1464fa9e4066Sahrens } 146544eda4d7Smaybee 1466f8554bb9SMatthew Ahrens /* 1467f8554bb9SMatthew Ahrens * Write 'size' bytes from the uio buffer. 1468f8554bb9SMatthew Ahrens * To object zdb->db_object. 1469f8554bb9SMatthew Ahrens * Starting at offset uio->uio_loffset. 1470f8554bb9SMatthew Ahrens * 1471f8554bb9SMatthew Ahrens * If the caller already has a dbuf in the target object 1472f8554bb9SMatthew Ahrens * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(), 1473f8554bb9SMatthew Ahrens * because we don't have to find the dnode_t for the object. 1474f8554bb9SMatthew Ahrens */ 147544eda4d7Smaybee int 147694d1a210STim Haley dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size, 147794d1a210STim Haley dmu_tx_t *tx) 147894d1a210STim Haley { 1479744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; 1480744947dcSTom Erickson dnode_t *dn; 1481744947dcSTom Erickson int err; 1482744947dcSTom Erickson 148394d1a210STim Haley if (size == 0) 148494d1a210STim Haley return (0); 148594d1a210STim Haley 1486744947dcSTom Erickson DB_DNODE_ENTER(db); 1487744947dcSTom Erickson dn = DB_DNODE(db); 1488744947dcSTom Erickson err = dmu_write_uio_dnode(dn, uio, size, tx); 1489744947dcSTom Erickson DB_DNODE_EXIT(db); 1490744947dcSTom Erickson 1491744947dcSTom Erickson return (err); 149294d1a210STim Haley } 149394d1a210STim Haley 1494f8554bb9SMatthew Ahrens /* 1495f8554bb9SMatthew Ahrens * Write 'size' bytes from the uio buffer. 1496f8554bb9SMatthew Ahrens * To the specified object. 1497f8554bb9SMatthew Ahrens * Starting at offset uio->uio_loffset. 
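 *
 * Editor's sketch (illustrative addition, not upstream text; error
 * handling elided): a caller typically wraps this in an assigned
 * transaction, e.g.:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, uio->uio_loffset, size);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error == 0) {
 *		error = dmu_write_uio(os, object, uio, size, tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}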
1498f8554bb9SMatthew Ahrens */ 149994d1a210STim Haley int 150094d1a210STim Haley dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size, 150194d1a210STim Haley dmu_tx_t *tx) 150294d1a210STim Haley { 150394d1a210STim Haley dnode_t *dn; 150494d1a210STim Haley int err; 150594d1a210STim Haley 150694d1a210STim Haley if (size == 0) 150794d1a210STim Haley return (0); 150894d1a210STim Haley 150994d1a210STim Haley err = dnode_hold(os, object, FTAG, &dn); 151094d1a210STim Haley if (err) 151194d1a210STim Haley return (err); 151294d1a210STim Haley 151394d1a210STim Haley err = dmu_write_uio_dnode(dn, uio, size, tx); 151494d1a210STim Haley 151594d1a210STim Haley dnode_rele(dn, FTAG); 151694d1a210STim Haley 151794d1a210STim Haley return (err); 151894d1a210STim Haley } 151994d1a210STim Haley 152094d1a210STim Haley int 152144eda4d7Smaybee dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, 152244eda4d7Smaybee page_t *pp, dmu_tx_t *tx) 152344eda4d7Smaybee { 152444eda4d7Smaybee dmu_buf_t **dbp; 152544eda4d7Smaybee int numbufs, i; 152644eda4d7Smaybee int err; 152744eda4d7Smaybee 152844eda4d7Smaybee if (size == 0) 152944eda4d7Smaybee return (0); 153044eda4d7Smaybee 153144eda4d7Smaybee err = dmu_buf_hold_array(os, object, offset, size, 153244eda4d7Smaybee FALSE, FTAG, &numbufs, &dbp); 153344eda4d7Smaybee if (err) 153444eda4d7Smaybee return (err); 153544eda4d7Smaybee 153644eda4d7Smaybee for (i = 0; i < numbufs; i++) { 153744eda4d7Smaybee int tocpy, copied, thiscpy; 153844eda4d7Smaybee int bufoff; 153944eda4d7Smaybee dmu_buf_t *db = dbp[i]; 154044eda4d7Smaybee caddr_t va; 154144eda4d7Smaybee 154244eda4d7Smaybee ASSERT(size > 0); 154344eda4d7Smaybee ASSERT3U(db->db_size, >=, PAGESIZE); 154444eda4d7Smaybee 154544eda4d7Smaybee bufoff = offset - db->db_offset; 154644eda4d7Smaybee tocpy = (int)MIN(db->db_size - bufoff, size); 154744eda4d7Smaybee 154844eda4d7Smaybee ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); 154944eda4d7Smaybee 155044eda4d7Smaybee if (tocpy == db->db_size) 155144eda4d7Smaybee dmu_buf_will_fill(db, tx); 155244eda4d7Smaybee else 155344eda4d7Smaybee dmu_buf_will_dirty(db, tx); 155444eda4d7Smaybee 155544eda4d7Smaybee for (copied = 0; copied < tocpy; copied += PAGESIZE) { 155644eda4d7Smaybee ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff); 155744eda4d7Smaybee thiscpy = MIN(PAGESIZE, tocpy - copied); 15580fab61baSJonathan W Adams va = zfs_map_page(pp, S_READ); 155944eda4d7Smaybee bcopy(va, (char *)db->db_data + bufoff, thiscpy); 15600fab61baSJonathan W Adams zfs_unmap_page(pp, va); 156144eda4d7Smaybee pp = pp->p_next; 156244eda4d7Smaybee bufoff += PAGESIZE; 156344eda4d7Smaybee } 156444eda4d7Smaybee 156544eda4d7Smaybee if (tocpy == db->db_size) 156644eda4d7Smaybee dmu_buf_fill_done(db, tx); 156744eda4d7Smaybee 156844eda4d7Smaybee offset += tocpy; 156944eda4d7Smaybee size -= tocpy; 157044eda4d7Smaybee } 157144eda4d7Smaybee dmu_buf_rele_array(dbp, numbufs, FTAG); 157244eda4d7Smaybee return (err); 157344eda4d7Smaybee } 1574fa9e4066Sahrens #endif 1575fa9e4066Sahrens 15762fdbea25SAleksandr Guzovskiy /* 15772fdbea25SAleksandr Guzovskiy * Allocate a loaned anonymous arc buffer. 
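 * (Editor's note, illustrative addition: the buffer is accounted as
 * "loaned" from the ARC until the caller either consumes it with
 * dmu_assign_arcbuf() or hands it back via dmu_return_arcbuf().)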
15782fdbea25SAleksandr Guzovskiy */ 15792fdbea25SAleksandr Guzovskiy arc_buf_t * 15802fdbea25SAleksandr Guzovskiy dmu_request_arcbuf(dmu_buf_t *handle, int size) 15812fdbea25SAleksandr Guzovskiy { 1582744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle; 15832fdbea25SAleksandr Guzovskiy 15845602294fSDan Kimmel return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size)); 15852fdbea25SAleksandr Guzovskiy } 15862fdbea25SAleksandr Guzovskiy 15872fdbea25SAleksandr Guzovskiy /* 15882fdbea25SAleksandr Guzovskiy * Free a loaned arc buffer. 15892fdbea25SAleksandr Guzovskiy */ 15902fdbea25SAleksandr Guzovskiy void 15912fdbea25SAleksandr Guzovskiy dmu_return_arcbuf(arc_buf_t *buf) 15922fdbea25SAleksandr Guzovskiy { 15932fdbea25SAleksandr Guzovskiy arc_return_buf(buf, FTAG); 1594dcbf3bd6SGeorge Wilson arc_buf_destroy(buf, FTAG); 15952fdbea25SAleksandr Guzovskiy } 15962fdbea25SAleksandr Guzovskiy 15972fdbea25SAleksandr Guzovskiy /* 15982fdbea25SAleksandr Guzovskiy * When possible, directly assign the passed loaned arc buffer to a dbuf. 15992fdbea25SAleksandr Guzovskiy * If this is not possible, copy the contents of the passed arc buf via 16002fdbea25SAleksandr Guzovskiy * dmu_write(). 16012fdbea25SAleksandr Guzovskiy */ 16022fdbea25SAleksandr Guzovskiy void 1603*8dfe5547SRichard Yao dmu_assign_arcbuf_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf, 16042fdbea25SAleksandr Guzovskiy dmu_tx_t *tx) 16052fdbea25SAleksandr Guzovskiy { 16062fdbea25SAleksandr Guzovskiy dmu_buf_impl_t *db; 16075602294fSDan Kimmel uint32_t blksz = (uint32_t)arc_buf_lsize(buf); 16082fdbea25SAleksandr Guzovskiy uint64_t blkid; 16092fdbea25SAleksandr Guzovskiy 16102fdbea25SAleksandr Guzovskiy rw_enter(&dn->dn_struct_rwlock, RW_READER); 1611a2cdcdd2SPaul Dagnelie blkid = dbuf_whichblock(dn, 0, offset); 16122fdbea25SAleksandr Guzovskiy VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL); 16132fdbea25SAleksandr Guzovskiy rw_exit(&dn->dn_struct_rwlock); 16142fdbea25SAleksandr Guzovskiy 16158a904709SMatthew Ahrens /* 16168a904709SMatthew Ahrens * We can only assign if the offset is aligned, the arc buf is the 16175602294fSDan Kimmel * same size as the dbuf, and the dbuf is not metadata. 
16188a904709SMatthew Ahrens */ 16195602294fSDan Kimmel if (offset == db->db.db_offset && blksz == db->db.db_size) { 16202fdbea25SAleksandr Guzovskiy dbuf_assign_arcbuf(db, buf, tx); 16212fdbea25SAleksandr Guzovskiy dbuf_rele(db, FTAG); 16222fdbea25SAleksandr Guzovskiy } else { 1623744947dcSTom Erickson objset_t *os; 1624744947dcSTom Erickson uint64_t object; 1625744947dcSTom Erickson 16265602294fSDan Kimmel /* compressed bufs must always be assignable to their dbuf */ 16275602294fSDan Kimmel ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF); 16285602294fSDan Kimmel ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED)); 16295602294fSDan Kimmel 1630744947dcSTom Erickson os = dn->dn_objset; 1631744947dcSTom Erickson object = dn->dn_object; 1632744947dcSTom Erickson 16332fdbea25SAleksandr Guzovskiy dbuf_rele(db, FTAG); 1634744947dcSTom Erickson dmu_write(os, object, offset, blksz, buf->b_data, tx); 16352fdbea25SAleksandr Guzovskiy dmu_return_arcbuf(buf); 1636c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_wbuf_copied); 16372fdbea25SAleksandr Guzovskiy } 16382fdbea25SAleksandr Guzovskiy } 16392fdbea25SAleksandr Guzovskiy 1640*8dfe5547SRichard Yao void 1641*8dfe5547SRichard Yao dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf, 1642*8dfe5547SRichard Yao dmu_tx_t *tx) 1643*8dfe5547SRichard Yao { 1644*8dfe5547SRichard Yao dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle; 1645*8dfe5547SRichard Yao 1646*8dfe5547SRichard Yao DB_DNODE_ENTER(dbuf); 1647*8dfe5547SRichard Yao dmu_assign_arcbuf_dnode(DB_DNODE(dbuf), offset, buf, tx); 1648*8dfe5547SRichard Yao DB_DNODE_EXIT(dbuf); 1649*8dfe5547SRichard Yao } 1650*8dfe5547SRichard Yao 1651c5c6ffa0Smaybee typedef struct { 1652b24ab676SJeff Bonwick dbuf_dirty_record_t *dsa_dr; 1653b24ab676SJeff Bonwick dmu_sync_cb_t *dsa_done; 1654b24ab676SJeff Bonwick zgd_t *dsa_zgd; 1655b24ab676SJeff Bonwick dmu_tx_t *dsa_tx; 1656c717a561Smaybee } dmu_sync_arg_t; 1657c5c6ffa0Smaybee 1658c5c6ffa0Smaybee /* ARGSUSED */ 1659c5c6ffa0Smaybee static void 1660e14bb325SJeff Bonwick dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg) 1661e14bb325SJeff Bonwick { 1662b24ab676SJeff Bonwick dmu_sync_arg_t *dsa = varg; 1663b24ab676SJeff Bonwick dmu_buf_t *db = dsa->dsa_zgd->zgd_db; 1664e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 1665975c32a0SNeil Perrin 1666b24ab676SJeff Bonwick if (zio->io_error == 0) { 1667b24ab676SJeff Bonwick if (BP_IS_HOLE(bp)) { 1668b24ab676SJeff Bonwick /* 1669b24ab676SJeff Bonwick * A block of zeros may compress to a hole, but the 1670b24ab676SJeff Bonwick * block size still needs to be known for replay. 
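 * (Editor's note, illustrative addition: hence the BP_SET_LSIZE()
 * call below records db_size in the otherwise empty hole bp, where
 * ZIL replay can recover it.)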
1671b24ab676SJeff Bonwick */ 1672b24ab676SJeff Bonwick BP_SET_LSIZE(bp, db->db_size); 16735d7b4d43SMatthew Ahrens } else if (!BP_IS_EMBEDDED(bp)) { 1674e14bb325SJeff Bonwick ASSERT(BP_GET_LEVEL(bp) == 0); 1675e14bb325SJeff Bonwick bp->blk_fill = 1; 1676e14bb325SJeff Bonwick } 1677e14bb325SJeff Bonwick } 1678b24ab676SJeff Bonwick } 1679b24ab676SJeff Bonwick 1680b24ab676SJeff Bonwick static void 1681b24ab676SJeff Bonwick dmu_sync_late_arrival_ready(zio_t *zio) 1682b24ab676SJeff Bonwick { 1683b24ab676SJeff Bonwick dmu_sync_ready(zio, NULL, zio->io_private); 1684b24ab676SJeff Bonwick } 1685e14bb325SJeff Bonwick 1686e14bb325SJeff Bonwick /* ARGSUSED */ 1687e14bb325SJeff Bonwick static void 1688c5c6ffa0Smaybee dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg) 1689c5c6ffa0Smaybee { 1690b24ab676SJeff Bonwick dmu_sync_arg_t *dsa = varg; 1691b24ab676SJeff Bonwick dbuf_dirty_record_t *dr = dsa->dsa_dr; 1692c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 1693c5c6ffa0Smaybee 1694b50a0fe0SNeil Perrin mutex_enter(&db->db_mtx); 1695b50a0fe0SNeil Perrin ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC); 1696b24ab676SJeff Bonwick if (zio->io_error == 0) { 169780901aeaSGeorge Wilson dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE); 169880901aeaSGeorge Wilson if (dr->dt.dl.dr_nopwrite) { 169980901aeaSGeorge Wilson blkptr_t *bp = zio->io_bp; 170080901aeaSGeorge Wilson blkptr_t *bp_orig = &zio->io_bp_orig; 170180901aeaSGeorge Wilson uint8_t chksum = BP_GET_CHECKSUM(bp_orig); 170280901aeaSGeorge Wilson 170380901aeaSGeorge Wilson ASSERT(BP_EQUAL(bp, bp_orig)); 1704b7edcb94SMatthew Ahrens VERIFY(BP_EQUAL(bp, db->db_blkptr)); 170580901aeaSGeorge Wilson ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF); 170645818ee1SMatthew Ahrens ASSERT(zio_checksum_table[chksum].ci_flags & 170745818ee1SMatthew Ahrens ZCHECKSUM_FLAG_NOPWRITE); 170880901aeaSGeorge Wilson } 1709b24ab676SJeff Bonwick dr->dt.dl.dr_overridden_by = *zio->io_bp; 1710b24ab676SJeff Bonwick dr->dt.dl.dr_override_state = DR_OVERRIDDEN; 1711b24ab676SJeff Bonwick dr->dt.dl.dr_copies = zio->io_prop.zp_copies; 171270163ac5SPrakash Surya 171370163ac5SPrakash Surya /* 171470163ac5SPrakash Surya * Old style holes are filled with all zeros, whereas 171570163ac5SPrakash Surya * new-style holes maintain their lsize, type, level, 171670163ac5SPrakash Surya * and birth time (see zio_write_compress). While we 171770163ac5SPrakash Surya * need to reset the BP_SET_LSIZE() call that happened 171870163ac5SPrakash Surya * in dmu_sync_ready for old style holes, we do *not* 171970163ac5SPrakash Surya * want to wipe out the information contained in new 172070163ac5SPrakash Surya * style holes. Thus, only zero out the block pointer if 172170163ac5SPrakash Surya * it's an old style hole. 
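 * (Editor's note, illustrative addition: the code below recognizes an
 * old style hole by blk_birth == 0; new style holes carry a nonzero
 * birth txg along with their preserved lsize, type, and level.)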
172270163ac5SPrakash Surya */ 172370163ac5SPrakash Surya if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) && 172470163ac5SPrakash Surya dr->dt.dl.dr_overridden_by.blk_birth == 0) 1725b50a0fe0SNeil Perrin BP_ZERO(&dr->dt.dl.dr_overridden_by); 1726b24ab676SJeff Bonwick } else { 1727b24ab676SJeff Bonwick dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 1728b24ab676SJeff Bonwick } 1729c5c6ffa0Smaybee cv_broadcast(&db->db_changed); 1730b50a0fe0SNeil Perrin mutex_exit(&db->db_mtx); 1731b50a0fe0SNeil Perrin 1732b24ab676SJeff Bonwick dsa->dsa_done(dsa->dsa_zgd, zio->io_error); 1733c717a561Smaybee 1734b24ab676SJeff Bonwick kmem_free(dsa, sizeof (*dsa)); 1735b24ab676SJeff Bonwick } 1736b24ab676SJeff Bonwick 1737b24ab676SJeff Bonwick static void 1738b24ab676SJeff Bonwick dmu_sync_late_arrival_done(zio_t *zio) 1739b24ab676SJeff Bonwick { 1740b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 1741b24ab676SJeff Bonwick dmu_sync_arg_t *dsa = zio->io_private; 174280901aeaSGeorge Wilson blkptr_t *bp_orig = &zio->io_bp_orig; 1743b24ab676SJeff Bonwick 1744b24ab676SJeff Bonwick if (zio->io_error == 0 && !BP_IS_HOLE(bp)) { 1745b7edcb94SMatthew Ahrens ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE)); 174680901aeaSGeorge Wilson ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig)); 1747b24ab676SJeff Bonwick ASSERT(zio->io_bp->blk_birth == zio->io_txg); 1748b24ab676SJeff Bonwick ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa)); 1749b24ab676SJeff Bonwick zio_free(zio->io_spa, zio->io_txg, zio->io_bp); 1750b24ab676SJeff Bonwick } 1751b24ab676SJeff Bonwick 1752b24ab676SJeff Bonwick dmu_tx_commit(dsa->dsa_tx); 1753b24ab676SJeff Bonwick 1754b24ab676SJeff Bonwick dsa->dsa_done(dsa->dsa_zgd, zio->io_error); 1755b24ab676SJeff Bonwick 1756770499e1SDan Kimmel abd_put(zio->io_abd); 1757b24ab676SJeff Bonwick kmem_free(dsa, sizeof (*dsa)); 1758b24ab676SJeff Bonwick } 1759b24ab676SJeff Bonwick 1760b24ab676SJeff Bonwick static int 1761b24ab676SJeff Bonwick dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd, 17627802d7bfSMatthew Ahrens zio_prop_t *zp, zbookmark_phys_t *zb) 1763b24ab676SJeff Bonwick { 1764b24ab676SJeff Bonwick dmu_sync_arg_t *dsa; 1765b24ab676SJeff Bonwick dmu_tx_t *tx; 1766b24ab676SJeff Bonwick 1767b24ab676SJeff Bonwick tx = dmu_tx_create(os); 1768b24ab676SJeff Bonwick dmu_tx_hold_space(tx, zgd->zgd_db->db_size); 17696e1f5caaSNeil Perrin if (dmu_tx_assign(tx, TXG_WAIT) != 0) { 1770b24ab676SJeff Bonwick dmu_tx_abort(tx); 1771be6fd75aSMatthew Ahrens /* Make zl_get_data do txg_wait_synced() */ 1772be6fd75aSMatthew Ahrens return (SET_ERROR(EIO)); 1773b24ab676SJeff Bonwick } 1774b24ab676SJeff Bonwick 17751271e4b1SPrakash Surya /* 17761271e4b1SPrakash Surya * In order to prevent the zgd's lwb from being free'd prior to 17771271e4b1SPrakash Surya * dmu_sync_late_arrival_done() being called, we have to ensure 17781271e4b1SPrakash Surya * the lwb's "max txg" takes this tx's txg into account. 
17791271e4b1SPrakash Surya */ 17801271e4b1SPrakash Surya zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx)); 17811271e4b1SPrakash Surya 1782b24ab676SJeff Bonwick dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); 1783b24ab676SJeff Bonwick dsa->dsa_dr = NULL; 1784b24ab676SJeff Bonwick dsa->dsa_done = done; 1785b24ab676SJeff Bonwick dsa->dsa_zgd = zgd; 1786b24ab676SJeff Bonwick dsa->dsa_tx = tx; 1787b24ab676SJeff Bonwick 1788b7edcb94SMatthew Ahrens /* 1789b7edcb94SMatthew Ahrens * Since we are currently syncing this txg, it's nontrivial to 1790b7edcb94SMatthew Ahrens * determine what BP to nopwrite against, so we disable nopwrite. 1791b7edcb94SMatthew Ahrens * 1792b7edcb94SMatthew Ahrens * When syncing, the db_blkptr is initially the BP of the previous 1793b7edcb94SMatthew Ahrens * txg. We can not nopwrite against it because it will be changed 1794b7edcb94SMatthew Ahrens * (this is similar to the non-late-arrival case where the dbuf is 1795b7edcb94SMatthew Ahrens * dirty in a future txg). 1796b7edcb94SMatthew Ahrens * 1797b7edcb94SMatthew Ahrens * Then dbuf_write_ready() sets db_blkptr to the location we will write. 1798b7edcb94SMatthew Ahrens * We can not nopwrite against it because although the BP will not 1799b7edcb94SMatthew Ahrens * (typically) be changed, the data has not yet been persisted to this 1800b7edcb94SMatthew Ahrens * location. 1801b7edcb94SMatthew Ahrens * 1802b7edcb94SMatthew Ahrens * Finally, when dbuf_write_done() is called, it is theoretically 1803b7edcb94SMatthew Ahrens * possible to always nopwrite, because the data that was written in 1804b7edcb94SMatthew Ahrens * this txg is the same data that we are trying to write. However we 1805b7edcb94SMatthew Ahrens * would need to check that this dbuf is not dirty in any future 1806b7edcb94SMatthew Ahrens * txg's (as we do in the normal dmu_sync() path). For simplicity, we 1807b7edcb94SMatthew Ahrens * don't nopwrite in this case. 1808b7edcb94SMatthew Ahrens */ 1809b7edcb94SMatthew Ahrens zp->zp_nopwrite = B_FALSE; 1810b7edcb94SMatthew Ahrens 18115602294fSDan Kimmel zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp, 1812770499e1SDan Kimmel abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size), 1813770499e1SDan Kimmel zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp, 1814770499e1SDan Kimmel dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done, 1815770499e1SDan Kimmel dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb)); 1816b24ab676SJeff Bonwick 1817b24ab676SJeff Bonwick return (0); 1818c5c6ffa0Smaybee } 1819c5c6ffa0Smaybee 1820fa9e4066Sahrens /* 1821c5c6ffa0Smaybee * Intent log support: sync the block associated with db to disk. 1822c5c6ffa0Smaybee * N.B. and XXX: the caller is responsible for making sure that the 1823c5c6ffa0Smaybee * data isn't changing while dmu_sync() is writing it. 1824fa9e4066Sahrens * 1825fa9e4066Sahrens * Return values: 1826fa9e4066Sahrens * 182780901aeaSGeorge Wilson * EEXIST: this txg has already been synced, so there's nothing to do. 1828fa9e4066Sahrens * The caller should not log the write. 1829fa9e4066Sahrens * 1830fa9e4066Sahrens * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do. 1831fa9e4066Sahrens * The caller should not log the write. 1832fa9e4066Sahrens * 1833c5c6ffa0Smaybee * EALREADY: this block is already in the process of being synced. 1834c5c6ffa0Smaybee * The caller should track its progress (somehow). 1835fa9e4066Sahrens * 1836b24ab676SJeff Bonwick * EIO: could not do the I/O. 
1837b24ab676SJeff Bonwick * The caller should do a txg_wait_synced(). 1838fa9e4066Sahrens * 1839b24ab676SJeff Bonwick * 0: the I/O has been initiated. 1840b24ab676SJeff Bonwick * The caller should log this blkptr in the done callback. 1841b24ab676SJeff Bonwick * It is possible that the I/O will fail, in which case 1842b24ab676SJeff Bonwick * the error will be reported to the done callback and 1843b24ab676SJeff Bonwick * propagated to pio from zio_done(). 1844fa9e4066Sahrens */ 1845fa9e4066Sahrens int 1846b24ab676SJeff Bonwick dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd) 1847fa9e4066Sahrens { 1848b24ab676SJeff Bonwick dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db; 1849503ad85cSMatthew Ahrens objset_t *os = db->db_objset; 1850b24ab676SJeff Bonwick dsl_dataset_t *ds = os->os_dsl_dataset; 1851c717a561Smaybee dbuf_dirty_record_t *dr; 1852b24ab676SJeff Bonwick dmu_sync_arg_t *dsa; 18537802d7bfSMatthew Ahrens zbookmark_phys_t zb; 1854b24ab676SJeff Bonwick zio_prop_t zp; 1855744947dcSTom Erickson dnode_t *dn; 1856fa9e4066Sahrens 1857b24ab676SJeff Bonwick ASSERT(pio != NULL); 1858fa9e4066Sahrens ASSERT(txg != 0); 1859fa9e4066Sahrens 1860b24ab676SJeff Bonwick SET_BOOKMARK(&zb, ds->ds_object, 1861b24ab676SJeff Bonwick db->db.db_object, db->db_level, db->db_blkid); 1862b24ab676SJeff Bonwick 1863744947dcSTom Erickson DB_DNODE_ENTER(db); 1864744947dcSTom Erickson dn = DB_DNODE(db); 1865adaec86aSMatthew Ahrens dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp); 1866744947dcSTom Erickson DB_DNODE_EXIT(db); 1867fa9e4066Sahrens 1868fa9e4066Sahrens /* 1869b24ab676SJeff Bonwick * If we're frozen (running ziltest), we always need to generate a bp. 1870ea8dc4b6Seschrock */ 1871b24ab676SJeff Bonwick if (txg > spa_freeze_txg(os->os_spa)) 1872b24ab676SJeff Bonwick return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb)); 1873ea8dc4b6Seschrock 1874ea8dc4b6Seschrock /* 1875b24ab676SJeff Bonwick * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf() 1876b24ab676SJeff Bonwick * and us. If we determine that this txg is not yet syncing, 1877b24ab676SJeff Bonwick * but it begins to sync a moment later, that's OK because the 1878b24ab676SJeff Bonwick * sync thread will block in dbuf_sync_leaf() until we drop db_mtx. 1879fa9e4066Sahrens */ 1880b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 1881b24ab676SJeff Bonwick 1882b24ab676SJeff Bonwick if (txg <= spa_last_synced_txg(os->os_spa)) { 1883fa9e4066Sahrens /* 1884b24ab676SJeff Bonwick * This txg has already synced. There's nothing to do. 1885fa9e4066Sahrens */ 1886b24ab676SJeff Bonwick mutex_exit(&db->db_mtx); 1887be6fd75aSMatthew Ahrens return (SET_ERROR(EEXIST)); 1888fa9e4066Sahrens } 1889fa9e4066Sahrens 1890b24ab676SJeff Bonwick if (txg <= spa_syncing_txg(os->os_spa)) { 1891c5c6ffa0Smaybee /* 1892b24ab676SJeff Bonwick * This txg is currently syncing, so we can't mess with 1893b24ab676SJeff Bonwick * the dirty record anymore; just write a new log block. 1894c5c6ffa0Smaybee */ 189513506d1eSmaybee mutex_exit(&db->db_mtx); 1896b24ab676SJeff Bonwick return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb)); 1897c5c6ffa0Smaybee } 1898c5c6ffa0Smaybee 1899c717a561Smaybee dr = db->db_last_dirty; 1900b24ab676SJeff Bonwick while (dr && dr->dr_txg != txg) 1901c717a561Smaybee dr = dr->dr_next; 1902b24ab676SJeff Bonwick 1903b24ab676SJeff Bonwick if (dr == NULL) { 1904c5c6ffa0Smaybee /* 1905b24ab676SJeff Bonwick * There's no dr for this dbuf, so it must have been freed. 
1906fa9e4066Sahrens * There's no need to log writes to freed blocks, so we're done. 1907fa9e4066Sahrens */ 1908fa9e4066Sahrens mutex_exit(&db->db_mtx); 1909be6fd75aSMatthew Ahrens return (SET_ERROR(ENOENT)); 1910fa9e4066Sahrens } 1911fa9e4066Sahrens 191280901aeaSGeorge Wilson ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg); 191380901aeaSGeorge Wilson 1914b7edcb94SMatthew Ahrens if (db->db_blkptr != NULL) { 1915b7edcb94SMatthew Ahrens /* 1916b7edcb94SMatthew Ahrens * We need to fill in zgd_bp with the current blkptr so that 1917b7edcb94SMatthew Ahrens * the nopwrite code can check if we're writing the same 1918b7edcb94SMatthew Ahrens * data that's already on disk. We can only nopwrite if we 1919b7edcb94SMatthew Ahrens * are sure that after making the copy, db_blkptr will not 1920b7edcb94SMatthew Ahrens * change until our i/o completes. We ensure this by 1921b7edcb94SMatthew Ahrens * holding the db_mtx, and only allowing nopwrite if the 1922b7edcb94SMatthew Ahrens * block is not already dirty (see below). This is verified 1923b7edcb94SMatthew Ahrens * by dmu_sync_done(), which VERIFYs that the db_blkptr has 1924b7edcb94SMatthew Ahrens * not changed. 1925b7edcb94SMatthew Ahrens */ 1926b7edcb94SMatthew Ahrens *zgd->zgd_bp = *db->db_blkptr; 1927b7edcb94SMatthew Ahrens } 1928b7edcb94SMatthew Ahrens 192980901aeaSGeorge Wilson /* 193034e8acefSMatthew Ahrens * Assume the on-disk data is X, the current syncing data (in 193134e8acefSMatthew Ahrens * txg - 1) is Y, and the current in-memory data is Z (currently 193234e8acefSMatthew Ahrens * in dmu_sync). 193334e8acefSMatthew Ahrens * 193434e8acefSMatthew Ahrens * We usually want to perform a nopwrite if X and Z are the 193534e8acefSMatthew Ahrens * same. However, if Y is different (i.e. the BP is going to 193634e8acefSMatthew Ahrens * change before this write takes effect), then a nopwrite will 193734e8acefSMatthew Ahrens * be incorrect - we would override with X, which could have 193834e8acefSMatthew Ahrens * been freed when Y was written. 193934e8acefSMatthew Ahrens * 194034e8acefSMatthew Ahrens * (Note that this is not a concern when we are nop-writing from 194134e8acefSMatthew Ahrens * syncing context, because X and Y must be identical, because 194234e8acefSMatthew Ahrens * all previous txgs have been synced.) 194334e8acefSMatthew Ahrens * 194434e8acefSMatthew Ahrens * Therefore, we disable nopwrite if the current BP could change 194534e8acefSMatthew Ahrens * before this TXG. There are two ways it could change: by 194634e8acefSMatthew Ahrens * being dirty (dr_next is non-NULL), or by being freed 194734e8acefSMatthew Ahrens * (dnode_block_freed()). This behavior is verified by 194834e8acefSMatthew Ahrens * zio_done(), which VERIFYs that the override BP is identical 194934e8acefSMatthew Ahrens * to the on-disk BP. 195080901aeaSGeorge Wilson */ 195134e8acefSMatthew Ahrens DB_DNODE_ENTER(db); 195234e8acefSMatthew Ahrens dn = DB_DNODE(db); 195334e8acefSMatthew Ahrens if (dr->dr_next != NULL || dnode_block_freed(dn, db->db_blkid)) 195480901aeaSGeorge Wilson zp.zp_nopwrite = B_FALSE; 195534e8acefSMatthew Ahrens DB_DNODE_EXIT(db); 195680901aeaSGeorge Wilson 1957c717a561Smaybee ASSERT(dr->dr_txg == txg); 1958b24ab676SJeff Bonwick if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC || 1959b24ab676SJeff Bonwick dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 1960c5c6ffa0Smaybee /* 1961b24ab676SJeff Bonwick * We have already issued a sync write for this buffer, 1962b24ab676SJeff Bonwick * or this buffer has already been synced. 
It could not 1963c717a561Smaybee * have been dirtied since, or we would have cleared the state. 1964c717a561Smaybee */ 1965c717a561Smaybee mutex_exit(&db->db_mtx); 1966be6fd75aSMatthew Ahrens return (SET_ERROR(EALREADY)); 1967c717a561Smaybee } 1968c717a561Smaybee 1969b24ab676SJeff Bonwick ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 1970c717a561Smaybee dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC; 1971fa9e4066Sahrens mutex_exit(&db->db_mtx); 1972fa9e4066Sahrens 1973b24ab676SJeff Bonwick dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); 1974b24ab676SJeff Bonwick dsa->dsa_dr = dr; 1975b24ab676SJeff Bonwick dsa->dsa_done = done; 1976b24ab676SJeff Bonwick dsa->dsa_zgd = zgd; 1977b24ab676SJeff Bonwick dsa->dsa_tx = NULL; 1978e14bb325SJeff Bonwick 1979b24ab676SJeff Bonwick zio_nowait(arc_write(pio, os->os_spa, txg, 1980b7edcb94SMatthew Ahrens zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db), 1981dcbf3bd6SGeorge Wilson &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa, 19828df0bcf0SPaul Dagnelie ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb)); 1983e14bb325SJeff Bonwick 1984b24ab676SJeff Bonwick return (0); 1985fa9e4066Sahrens } 1986fa9e4066Sahrens 1987fa9e4066Sahrens int 1988fa9e4066Sahrens dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs, 1989fa9e4066Sahrens dmu_tx_t *tx) 1990fa9e4066Sahrens { 1991ea8dc4b6Seschrock dnode_t *dn; 1992ea8dc4b6Seschrock int err; 1993ea8dc4b6Seschrock 1994503ad85cSMatthew Ahrens err = dnode_hold(os, object, FTAG, &dn); 1995ea8dc4b6Seschrock if (err) 1996ea8dc4b6Seschrock return (err); 1997ea8dc4b6Seschrock err = dnode_set_blksz(dn, size, ibs, tx); 1998fa9e4066Sahrens dnode_rele(dn, FTAG); 1999fa9e4066Sahrens return (err); 2000fa9e4066Sahrens } 2001fa9e4066Sahrens 2002fa9e4066Sahrens void 2003fa9e4066Sahrens dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum, 2004fa9e4066Sahrens dmu_tx_t *tx) 2005fa9e4066Sahrens { 2006ea8dc4b6Seschrock dnode_t *dn; 2007ea8dc4b6Seschrock 20085d7b4d43SMatthew Ahrens /* 20095d7b4d43SMatthew Ahrens * Send streams include each object's checksum function. This 20105d7b4d43SMatthew Ahrens * check ensures that the receiving system can understand the 20115d7b4d43SMatthew Ahrens * checksum function transmitted. 20125d7b4d43SMatthew Ahrens */ 20135d7b4d43SMatthew Ahrens ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS); 20145d7b4d43SMatthew Ahrens 20155d7b4d43SMatthew Ahrens VERIFY0(dnode_hold(os, object, FTAG, &dn)); 20165d7b4d43SMatthew Ahrens ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS); 2017fa9e4066Sahrens dn->dn_checksum = checksum; 2018fa9e4066Sahrens dnode_setdirty(dn, tx); 2019fa9e4066Sahrens dnode_rele(dn, FTAG); 2020fa9e4066Sahrens } 2021fa9e4066Sahrens 2022fa9e4066Sahrens void 2023fa9e4066Sahrens dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress, 2024fa9e4066Sahrens dmu_tx_t *tx) 2025fa9e4066Sahrens { 2026ea8dc4b6Seschrock dnode_t *dn; 2027ea8dc4b6Seschrock 20285d7b4d43SMatthew Ahrens /* 20295d7b4d43SMatthew Ahrens * Send streams include each object's compression function. This 20305d7b4d43SMatthew Ahrens * check ensures that the receiving system can understand the 20315d7b4d43SMatthew Ahrens * compression function transmitted. 
20325d7b4d43SMatthew Ahrens */ 20335d7b4d43SMatthew Ahrens ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS); 20345d7b4d43SMatthew Ahrens 20355d7b4d43SMatthew Ahrens VERIFY0(dnode_hold(os, object, FTAG, &dn)); 2036fa9e4066Sahrens dn->dn_compress = compress; 2037fa9e4066Sahrens dnode_setdirty(dn, tx); 2038fa9e4066Sahrens dnode_rele(dn, FTAG); 2039fa9e4066Sahrens } 2040fa9e4066Sahrens 2041b24ab676SJeff Bonwick int zfs_mdcomp_disable = 0; 2042b24ab676SJeff Bonwick 2043edf345e6SMatthew Ahrens /* 2044edf345e6SMatthew Ahrens * When the "redundant_metadata" property is set to "most", only indirect 2045edf345e6SMatthew Ahrens * blocks of this level and higher will have an additional ditto block. 2046edf345e6SMatthew Ahrens */ 2047edf345e6SMatthew Ahrens int zfs_redundant_metadata_most_ditto_level = 2; 2048edf345e6SMatthew Ahrens 2049b24ab676SJeff Bonwick void 2050adaec86aSMatthew Ahrens dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp) 2051b24ab676SJeff Bonwick { 2052b24ab676SJeff Bonwick dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET; 2053ad135b5dSChristopher Siden boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) || 20541d8ccc7bSMark Shellenbaum (wp & WP_SPILL)); 2055b24ab676SJeff Bonwick enum zio_checksum checksum = os->os_checksum; 2056b24ab676SJeff Bonwick enum zio_compress compress = os->os_compress; 2057b24ab676SJeff Bonwick enum zio_checksum dedup_checksum = os->os_dedup_checksum; 20587540df39SGeorge Wilson boolean_t dedup = B_FALSE; 20597540df39SGeorge Wilson boolean_t nopwrite = B_FALSE; 2060b24ab676SJeff Bonwick boolean_t dedup_verify = os->os_dedup_verify; 2061b24ab676SJeff Bonwick int copies = os->os_copies; 2062b24ab676SJeff Bonwick 2063b24ab676SJeff Bonwick /* 206480901aeaSGeorge Wilson * We maintain different write policies for each of the following 206580901aeaSGeorge Wilson * types of data: 206680901aeaSGeorge Wilson * 1. metadata 206780901aeaSGeorge Wilson * 2. preallocated blocks (i.e. level-0 blocks of a dump device) 206880901aeaSGeorge Wilson * 3. all other level 0 blocks 2069b24ab676SJeff Bonwick */ 2070b24ab676SJeff Bonwick if (ismd) { 2071db1741f5SJustin T. Gibbs if (zfs_mdcomp_disable) { 2072db1741f5SJustin T. Gibbs compress = ZIO_COMPRESS_EMPTY; 2073db1741f5SJustin T. Gibbs } else { 2074b24ab676SJeff Bonwick /* 207580901aeaSGeorge Wilson * XXX -- we should design a compression algorithm 207680901aeaSGeorge Wilson * that specializes in arrays of bps. 207780901aeaSGeorge Wilson */ 2078db1741f5SJustin T. Gibbs compress = zio_compress_select(os->os_spa, 2079db1741f5SJustin T. Gibbs ZIO_COMPRESS_ON, ZIO_COMPRESS_ON); 2080b8289d24SDaniil Lunev } 208180901aeaSGeorge Wilson 208280901aeaSGeorge Wilson /* 2083b24ab676SJeff Bonwick * Metadata always gets checksummed. If the data 2084b24ab676SJeff Bonwick * checksum is multi-bit correctable, and it's not a 2085b24ab676SJeff Bonwick * ZBT-style checksum, then it's suitable for metadata 2086b24ab676SJeff Bonwick * as well. Otherwise, the metadata checksum defaults 2087b24ab676SJeff Bonwick * to fletcher4. 
2088b24ab676SJeff Bonwick */ 208945818ee1SMatthew Ahrens if (!(zio_checksum_table[checksum].ci_flags & 209045818ee1SMatthew Ahrens ZCHECKSUM_FLAG_METADATA) || 209145818ee1SMatthew Ahrens (zio_checksum_table[checksum].ci_flags & 209245818ee1SMatthew Ahrens ZCHECKSUM_FLAG_EMBEDDED)) 2093b24ab676SJeff Bonwick checksum = ZIO_CHECKSUM_FLETCHER_4; 2094edf345e6SMatthew Ahrens 2095edf345e6SMatthew Ahrens if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL || 2096edf345e6SMatthew Ahrens (os->os_redundant_metadata == 2097edf345e6SMatthew Ahrens ZFS_REDUNDANT_METADATA_MOST && 2098edf345e6SMatthew Ahrens (level >= zfs_redundant_metadata_most_ditto_level || 2099edf345e6SMatthew Ahrens DMU_OT_IS_METADATA(type) || (wp & WP_SPILL)))) 2100edf345e6SMatthew Ahrens copies++; 210180901aeaSGeorge Wilson } else if (wp & WP_NOFILL) { 210280901aeaSGeorge Wilson ASSERT(level == 0); 2103b24ab676SJeff Bonwick 2104b24ab676SJeff Bonwick /* 210580901aeaSGeorge Wilson * If we're writing preallocated blocks, we aren't actually 210680901aeaSGeorge Wilson * writing them so don't set any policy properties. These 210780901aeaSGeorge Wilson * blocks are currently only used by an external subsystem 210880901aeaSGeorge Wilson * outside of zfs (i.e. dump) and not written by the zio 210980901aeaSGeorge Wilson * pipeline. 2110b24ab676SJeff Bonwick */ 211180901aeaSGeorge Wilson compress = ZIO_COMPRESS_OFF; 2112810e43b2SBill Pijewski checksum = ZIO_CHECKSUM_NOPARITY; 2113b24ab676SJeff Bonwick } else { 2114db1741f5SJustin T. Gibbs compress = zio_compress_select(os->os_spa, dn->dn_compress, 2115db1741f5SJustin T. Gibbs compress); 211680901aeaSGeorge Wilson 211780901aeaSGeorge Wilson checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ? 211880901aeaSGeorge Wilson zio_checksum_select(dn->dn_checksum, checksum) : 211980901aeaSGeorge Wilson dedup_checksum; 212080901aeaSGeorge Wilson 212180901aeaSGeorge Wilson /* 212280901aeaSGeorge Wilson * Determine dedup setting. If we are in dmu_sync(), 212380901aeaSGeorge Wilson * we won't actually dedup now because that's all 212480901aeaSGeorge Wilson * done in syncing context; but we do want to use the 212580901aeaSGeorge Wilson * dedup checksum. If the checksum is not strong 212680901aeaSGeorge Wilson * enough to ensure unique signatures, force 212780901aeaSGeorge Wilson * dedup_verify. 212880901aeaSGeorge Wilson */ 212980901aeaSGeorge Wilson if (dedup_checksum != ZIO_CHECKSUM_OFF) { 213080901aeaSGeorge Wilson dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE; 213145818ee1SMatthew Ahrens if (!(zio_checksum_table[checksum].ci_flags & 213245818ee1SMatthew Ahrens ZCHECKSUM_FLAG_DEDUP)) 213380901aeaSGeorge Wilson dedup_verify = B_TRUE; 2134b24ab676SJeff Bonwick } 2135b24ab676SJeff Bonwick 2136b24ab676SJeff Bonwick /* 213745818ee1SMatthew Ahrens * Enable nopwrite if we have a secure enough checksum 213845818ee1SMatthew Ahrens * algorithm (see comment in zio_nop_write) and 213945818ee1SMatthew Ahrens * compression is enabled. We don't enable nopwrite if 214045818ee1SMatthew Ahrens * dedup is enabled as the two features are mutually 214145818ee1SMatthew Ahrens * exclusive. 
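 * (Editor's summary, illustrative addition: the resulting policy is
 * roughly -- metadata: compression on unless zfs_mdcomp_disable, a
 * metadata-safe checksum falling back to fletcher4, possibly one
 * extra ditto copy; WP_NOFILL: compression off and the NOPARITY
 * checksum; ordinary data: per-dnode/objset settings, with dedup and
 * nopwrite derived as above.)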
2142b24ab676SJeff Bonwick */ 214345818ee1SMatthew Ahrens nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags & 214445818ee1SMatthew Ahrens ZCHECKSUM_FLAG_NOPWRITE) && 214580901aeaSGeorge Wilson compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled); 2146b24ab676SJeff Bonwick } 2147b24ab676SJeff Bonwick 2148b24ab676SJeff Bonwick zp->zp_checksum = checksum; 2149adaec86aSMatthew Ahrens zp->zp_compress = compress; 21505602294fSDan Kimmel ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT); 21515602294fSDan Kimmel 21520a586ceaSMark Shellenbaum zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type; 2153b24ab676SJeff Bonwick zp->zp_level = level; 2154edf345e6SMatthew Ahrens zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa)); 2155b24ab676SJeff Bonwick zp->zp_dedup = dedup; 2156b24ab676SJeff Bonwick zp->zp_dedup_verify = dedup && dedup_verify; 215780901aeaSGeorge Wilson zp->zp_nopwrite = nopwrite; 2158b24ab676SJeff Bonwick } 2159b24ab676SJeff Bonwick 216044cd46caSbillm int 2161fa9e4066Sahrens dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off) 2162fa9e4066Sahrens { 2163fa9e4066Sahrens dnode_t *dn; 21642bcf0248SMax Grossman int err; 2165fa9e4066Sahrens 2166fa9e4066Sahrens /* 2167fa9e4066Sahrens * Sync any current changes before 2168fa9e4066Sahrens * we go trundling through the block pointers. 2169fa9e4066Sahrens */ 21702bcf0248SMax Grossman err = dmu_object_wait_synced(os, object); 21712bcf0248SMax Grossman if (err) { 21722bcf0248SMax Grossman return (err); 2173fa9e4066Sahrens } 21742bcf0248SMax Grossman 2175503ad85cSMatthew Ahrens err = dnode_hold(os, object, FTAG, &dn); 21762bcf0248SMax Grossman if (err) { 2177ea8dc4b6Seschrock return (err); 2178fa9e4066Sahrens } 2179fa9e4066Sahrens 2180cdb0ab79Smaybee err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0); 2181fa9e4066Sahrens dnode_rele(dn, FTAG); 2182fa9e4066Sahrens 2183fa9e4066Sahrens return (err); 2184fa9e4066Sahrens } 2185fa9e4066Sahrens 21862bcf0248SMax Grossman /* 21872bcf0248SMax Grossman * Given the ZFS object, if it contains any dirty nodes 21882bcf0248SMax Grossman * this function flushes all dirty blocks to disk. This 21892bcf0248SMax Grossman * ensures the DMU object info is updated. A more efficient 21902bcf0248SMax Grossman * future version might just find the TXG with the maximum 21912bcf0248SMax Grossman * ID and wait for that to be synced. 
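 * (Editor's note, illustrative addition: the implementation below
 * scans dn_dirty_link[] across all TXG_SIZE in-flight txgs and, if
 * any entry is active, calls txg_wait_synced(dmu_objset_pool(os), 0),
 * which waits until everything dirty at the time of the call has been
 * synced.)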
21922bcf0248SMax Grossman */ 21932bcf0248SMax Grossman int 21949a686fbcSPaul Dagnelie dmu_object_wait_synced(objset_t *os, uint64_t object) 21959a686fbcSPaul Dagnelie { 21962bcf0248SMax Grossman dnode_t *dn; 21972bcf0248SMax Grossman int error, i; 21982bcf0248SMax Grossman 21992bcf0248SMax Grossman error = dnode_hold(os, object, FTAG, &dn); 22002bcf0248SMax Grossman if (error) { 22012bcf0248SMax Grossman return (error); 22022bcf0248SMax Grossman } 22032bcf0248SMax Grossman 22042bcf0248SMax Grossman for (i = 0; i < TXG_SIZE; i++) { 22052bcf0248SMax Grossman if (list_link_active(&dn->dn_dirty_link[i])) { 22062bcf0248SMax Grossman break; 22072bcf0248SMax Grossman } 22082bcf0248SMax Grossman } 22092bcf0248SMax Grossman dnode_rele(dn, FTAG); 22102bcf0248SMax Grossman if (i != TXG_SIZE) { 22112bcf0248SMax Grossman txg_wait_synced(dmu_objset_pool(os), 0); 22122bcf0248SMax Grossman } 22132bcf0248SMax Grossman 22142bcf0248SMax Grossman return (0); 22152bcf0248SMax Grossman } 22162bcf0248SMax Grossman 2217fa9e4066Sahrens void 2218fa9e4066Sahrens dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi) 2219fa9e4066Sahrens { 2220b24ab676SJeff Bonwick dnode_phys_t *dnp; 2221b24ab676SJeff Bonwick 2222fa9e4066Sahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 2223fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 2224fa9e4066Sahrens 2225b24ab676SJeff Bonwick dnp = dn->dn_phys; 2226b24ab676SJeff Bonwick 2227fa9e4066Sahrens doi->doi_data_block_size = dn->dn_datablksz; 2228fa9e4066Sahrens doi->doi_metadata_block_size = dn->dn_indblkshift ? 2229fa9e4066Sahrens 1ULL << dn->dn_indblkshift : 0; 2230b24ab676SJeff Bonwick doi->doi_type = dn->dn_type; 2231b24ab676SJeff Bonwick doi->doi_bonus_type = dn->dn_bonustype; 2232b24ab676SJeff Bonwick doi->doi_bonus_size = dn->dn_bonuslen; 2233fa9e4066Sahrens doi->doi_indirection = dn->dn_nlevels; 2234fa9e4066Sahrens doi->doi_checksum = dn->dn_checksum; 2235fa9e4066Sahrens doi->doi_compress = dn->dn_compress; 2236e77d42eaSMatthew Ahrens doi->doi_nblkptr = dn->dn_nblkptr; 2237b24ab676SJeff Bonwick doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9; 2238d0475637SMatthew Ahrens doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz; 2239b24ab676SJeff Bonwick doi->doi_fill_count = 0; 2240b24ab676SJeff Bonwick for (int i = 0; i < dnp->dn_nblkptr; i++) 22415d7b4d43SMatthew Ahrens doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]); 2242fa9e4066Sahrens 2243fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 2244fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 2245fa9e4066Sahrens } 2246fa9e4066Sahrens 2247fa9e4066Sahrens /* 2248fa9e4066Sahrens * Get information on a DMU object. 2249fa9e4066Sahrens * If doi is NULL, just indicates whether the object exists. 2250fa9e4066Sahrens */ 2251fa9e4066Sahrens int 2252fa9e4066Sahrens dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi) 2253fa9e4066Sahrens { 2254ea8dc4b6Seschrock dnode_t *dn; 2255503ad85cSMatthew Ahrens int err = dnode_hold(os, object, FTAG, &dn); 2256fa9e4066Sahrens 2257ea8dc4b6Seschrock if (err) 2258ea8dc4b6Seschrock return (err); 2259fa9e4066Sahrens 2260fa9e4066Sahrens if (doi != NULL) 2261fa9e4066Sahrens dmu_object_info_from_dnode(dn, doi); 2262fa9e4066Sahrens 2263fa9e4066Sahrens dnode_rele(dn, FTAG); 2264fa9e4066Sahrens return (0); 2265fa9e4066Sahrens } 2266fa9e4066Sahrens 2267fa9e4066Sahrens /* 2268fa9e4066Sahrens * As above, but faster; can be used when you have a held dbuf in hand. 
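 *
 * Editor's sketch (illustrative addition, not upstream text): e.g.
 * with a held bonus buffer:
 *
 *	dmu_buf_t *db;
 *	dmu_object_info_t doi;
 *	VERIFY0(dmu_bonus_hold(os, object, FTAG, &db));
 *	dmu_object_info_from_db(db, &doi);
 *	dmu_buf_rele(db, FTAG);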
2269fa9e4066Sahrens */ 2270fa9e4066Sahrens void 2271744947dcSTom Erickson dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi) 2272fa9e4066Sahrens { 2273744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2274744947dcSTom Erickson 2275744947dcSTom Erickson DB_DNODE_ENTER(db); 2276744947dcSTom Erickson dmu_object_info_from_dnode(DB_DNODE(db), doi); 2277744947dcSTom Erickson DB_DNODE_EXIT(db); 2278fa9e4066Sahrens } 2279fa9e4066Sahrens 2280fa9e4066Sahrens /* 2281fa9e4066Sahrens * Faster still when you only care about the size. 2282fa9e4066Sahrens * This is specifically optimized for zfs_getattr(). 2283fa9e4066Sahrens */ 2284fa9e4066Sahrens void 2285744947dcSTom Erickson dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize, 2286744947dcSTom Erickson u_longlong_t *nblk512) 2287fa9e4066Sahrens { 2288744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2289744947dcSTom Erickson dnode_t *dn; 2290744947dcSTom Erickson 2291744947dcSTom Erickson DB_DNODE_ENTER(db); 2292744947dcSTom Erickson dn = DB_DNODE(db); 2293fa9e4066Sahrens 2294fa9e4066Sahrens *blksize = dn->dn_datablksz; 229599653d4eSeschrock /* add 1 for dnode space */ 229699653d4eSeschrock *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >> 229799653d4eSeschrock SPA_MINBLOCKSHIFT) + 1; 2298744947dcSTom Erickson DB_DNODE_EXIT(db); 2299fa9e4066Sahrens } 2300fa9e4066Sahrens 2301fa9e4066Sahrens void 2302fa9e4066Sahrens byteswap_uint64_array(void *vbuf, size_t size) 2303fa9e4066Sahrens { 2304fa9e4066Sahrens uint64_t *buf = vbuf; 2305fa9e4066Sahrens size_t count = size >> 3; 2306fa9e4066Sahrens int i; 2307fa9e4066Sahrens 2308fa9e4066Sahrens ASSERT((size & 7) == 0); 2309fa9e4066Sahrens 2310fa9e4066Sahrens for (i = 0; i < count; i++) 2311fa9e4066Sahrens buf[i] = BSWAP_64(buf[i]); 2312fa9e4066Sahrens } 2313fa9e4066Sahrens 2314fa9e4066Sahrens void 2315fa9e4066Sahrens byteswap_uint32_array(void *vbuf, size_t size) 2316fa9e4066Sahrens { 2317fa9e4066Sahrens uint32_t *buf = vbuf; 2318fa9e4066Sahrens size_t count = size >> 2; 2319fa9e4066Sahrens int i; 2320fa9e4066Sahrens 2321fa9e4066Sahrens ASSERT((size & 3) == 0); 2322fa9e4066Sahrens 2323fa9e4066Sahrens for (i = 0; i < count; i++) 2324fa9e4066Sahrens buf[i] = BSWAP_32(buf[i]); 2325fa9e4066Sahrens } 2326fa9e4066Sahrens 2327fa9e4066Sahrens void 2328fa9e4066Sahrens byteswap_uint16_array(void *vbuf, size_t size) 2329fa9e4066Sahrens { 2330fa9e4066Sahrens uint16_t *buf = vbuf; 2331fa9e4066Sahrens size_t count = size >> 1; 2332fa9e4066Sahrens int i; 2333fa9e4066Sahrens 2334fa9e4066Sahrens ASSERT((size & 1) == 0); 2335fa9e4066Sahrens 2336fa9e4066Sahrens for (i = 0; i < count; i++) 2337fa9e4066Sahrens buf[i] = BSWAP_16(buf[i]); 2338fa9e4066Sahrens } 2339fa9e4066Sahrens 2340fa9e4066Sahrens /* ARGSUSED */ 2341fa9e4066Sahrens void 2342fa9e4066Sahrens byteswap_uint8_array(void *vbuf, size_t size) 2343fa9e4066Sahrens { 2344fa9e4066Sahrens } 2345fa9e4066Sahrens 2346fa9e4066Sahrens void 2347fa9e4066Sahrens dmu_init(void) 2348fa9e4066Sahrens { 2349770499e1SDan Kimmel abd_init(); 23503f9d6ad7SLin Ling zfs_dbgmsg_init(); 2351744947dcSTom Erickson sa_cache_init(); 2352744947dcSTom Erickson xuio_stat_init(); 2353744947dcSTom Erickson dmu_objset_init(); 2354fa9e4066Sahrens dnode_init(); 23557cbf8b43SRich Morris zfetch_init(); 2356fa94a07fSbrendan l2arc_init(); 2357ce636f8bSMatthew Ahrens arc_init(); 2358dcbf3bd6SGeorge Wilson dbuf_init(); 2359fa9e4066Sahrens } 2360fa9e4066Sahrens 2361fa9e4066Sahrens void 2362fa9e4066Sahrens dmu_fini(void) 
2363fa9e4066Sahrens { 23643e30c24aSWill Andrews arc_fini(); /* arc depends on l2arc, so arc must go first */ 2365ce636f8bSMatthew Ahrens l2arc_fini(); 23667cbf8b43SRich Morris zfetch_fini(); 2367fa9e4066Sahrens dbuf_fini(); 2368744947dcSTom Erickson dnode_fini(); 2369744947dcSTom Erickson dmu_objset_fini(); 2370c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_fini(); 23710a586ceaSMark Shellenbaum sa_cache_fini(); 23723f9d6ad7SLin Ling zfs_dbgmsg_fini(); 2373770499e1SDan Kimmel abd_fini(); 2374fa9e4066Sahrens } 2375
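
/*
 * Editor's addendum -- an illustrative sketch only, not part of the
 * upstream source.  It shows how a ZIL get_data callback might act on
 * the dmu_sync() return values documented above; the names
 * example_get_done() and example_issue_dmu_sync() are hypothetical
 * (cf. zfs_get_data() for the real caller).
 */
#if 0
static void
example_get_done(zgd_t *zgd, int error)
{
	/* release zgd's dbuf hold and range lock, then log the itx */
}

static int
example_issue_dmu_sync(zio_t *zio, uint64_t txg, zgd_t *zgd)
{
	int error = dmu_sync(zio, txg, example_get_done, zgd);

	switch (error) {
	case 0:
		/* I/O issued; the bp is logged from example_get_done() */
		break;
	case EEXIST:
	case ENOENT:
		/* txg already synced, or block freed: nothing to log */
		break;
	case EALREADY:
		/* a sync write of this block is already in flight */
		break;
	default:
		/* e.g. EIO: fall back to txg_wait_synced() */
		break;
	}
	return (error);
}
#endif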