1fa9e4066Sahrens /* 2fa9e4066Sahrens * CDDL HEADER START 3fa9e4066Sahrens * 4fa9e4066Sahrens * The contents of this file are subject to the terms of the 5ea8dc4b6Seschrock * Common Development and Distribution License (the "License"). 6ea8dc4b6Seschrock * You may not use this file except in compliance with the License. 7fa9e4066Sahrens * 8fa9e4066Sahrens * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9fa9e4066Sahrens * or http://www.opensolaris.org/os/licensing. 10fa9e4066Sahrens * See the License for the specific language governing permissions 11fa9e4066Sahrens * and limitations under the License. 12fa9e4066Sahrens * 13fa9e4066Sahrens * When distributing Covered Code, include this CDDL HEADER in each 14fa9e4066Sahrens * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15fa9e4066Sahrens * If applicable, add the following below this CDDL HEADER, with the 16fa9e4066Sahrens * fields enclosed by brackets "[]" replaced with your own identifying 17fa9e4066Sahrens * information: Portions Copyright [yyyy] [name of copyright owner] 18fa9e4066Sahrens * 19fa9e4066Sahrens * CDDL HEADER END 20fa9e4066Sahrens */ 21fa9e4066Sahrens /* 2294d1a210STim Haley * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23fa9e4066Sahrens */ 24857c96d2SIgor Kozhukhov /* 25857c96d2SIgor Kozhukhov * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. 266d658717SJohn Levon * Copyright 2019 Joyent, Inc. 27857c96d2SIgor Kozhukhov * Copyright 2016 Nexenta Systems, Inc. All rights reserved. 28857c96d2SIgor Kozhukhov * Copyright (c) 2011, 2017 by Delphix. All rights reserved. 29857c96d2SIgor Kozhukhov * Copyright (c) 2018 DilOS 30857c96d2SIgor Kozhukhov */ 31aad02571SSaso Kiselkov 32fa9e4066Sahrens #include <sys/dmu.h> 33fa9e4066Sahrens #include <sys/dmu_impl.h> 34fa9e4066Sahrens #include <sys/dmu_tx.h> 35fa9e4066Sahrens #include <sys/dbuf.h> 36fa9e4066Sahrens #include <sys/dnode.h> 37fa9e4066Sahrens #include <sys/zfs_context.h> 38fa9e4066Sahrens #include <sys/dmu_objset.h> 39fa9e4066Sahrens #include <sys/dmu_traverse.h> 40fa9e4066Sahrens #include <sys/dsl_dataset.h> 41fa9e4066Sahrens #include <sys/dsl_dir.h> 42fa9e4066Sahrens #include <sys/dsl_pool.h> 431d452cf5Sahrens #include <sys/dsl_synctask.h> 44a2eea2e1Sahrens #include <sys/dsl_prop.h> 45fa9e4066Sahrens #include <sys/dmu_zfetch.h> 46fa9e4066Sahrens #include <sys/zfs_ioctl.h> 47fa9e4066Sahrens #include <sys/zap.h> 48ea8dc4b6Seschrock #include <sys/zio_checksum.h> 4980901aeaSGeorge Wilson #include <sys/zio_compress.h> 500a586ceaSMark Shellenbaum #include <sys/sa.h> 51b8289d24SDaniil Lunev #include <sys/zfeature.h> 52770499e1SDan Kimmel #include <sys/abd.h> 5344eda4d7Smaybee #ifdef _KERNEL 5444eda4d7Smaybee #include <sys/vmsystm.h> 550fab61baSJonathan W Adams #include <sys/zfs_znode.h> 5644eda4d7Smaybee #endif 57fa9e4066Sahrens 58857c96d2SIgor Kozhukhov static xuio_stats_t xuio_stats = { 59857c96d2SIgor Kozhukhov { "onloan_read_buf", KSTAT_DATA_UINT64 }, 60857c96d2SIgor Kozhukhov { "onloan_write_buf", KSTAT_DATA_UINT64 }, 61857c96d2SIgor Kozhukhov { "read_buf_copied", KSTAT_DATA_UINT64 }, 62857c96d2SIgor Kozhukhov { "read_buf_nocopy", KSTAT_DATA_UINT64 }, 63857c96d2SIgor Kozhukhov { "write_buf_copied", KSTAT_DATA_UINT64 }, 64857c96d2SIgor Kozhukhov { "write_buf_nocopy", KSTAT_DATA_UINT64 } 65857c96d2SIgor Kozhukhov }; 66857c96d2SIgor Kozhukhov 67857c96d2SIgor Kozhukhov #define XUIOSTAT_INCR(stat, val) \ 68857c96d2SIgor Kozhukhov atomic_add_64(&xuio_stats.stat.value.ui64, (val)) 69857c96d2SIgor Kozhukhov #define XUIOSTAT_BUMP(stat) XUIOSTAT_INCR(stat, 1) 70857c96d2SIgor Kozhukhov 7180901aeaSGeorge Wilson /* 7280901aeaSGeorge Wilson * Enable/disable nopwrite feature. 7380901aeaSGeorge Wilson */ 7480901aeaSGeorge Wilson int zfs_nopwrite_enabled = 1; 7580901aeaSGeorge Wilson 76ff5177eeSAlek Pinchuk /* 77ff5177eeSAlek Pinchuk * Tunable to control percentage of dirtied blocks from frees in one TXG. 78ff5177eeSAlek Pinchuk * After this threshold is crossed, additional dirty blocks from frees 79ff5177eeSAlek Pinchuk * wait until the next TXG. 80ff5177eeSAlek Pinchuk * A value of zero will disable this throttle. 81ff5177eeSAlek Pinchuk */ 82ff5177eeSAlek Pinchuk uint32_t zfs_per_txg_dirty_frees_percent = 30; 83ff5177eeSAlek Pinchuk 845cabbc6bSPrashanth Sreenivasa /* 855cabbc6bSPrashanth Sreenivasa * This can be used for testing, to ensure that certain actions happen 865cabbc6bSPrashanth Sreenivasa * while in the middle of a remap (which might otherwise complete too 875cabbc6bSPrashanth Sreenivasa * quickly). 885cabbc6bSPrashanth Sreenivasa */ 895cabbc6bSPrashanth Sreenivasa int zfs_object_remap_one_indirect_delay_ticks = 0; 905cabbc6bSPrashanth Sreenivasa 9152abb70eSMatthew Ahrens /* 9252abb70eSMatthew Ahrens * Limit the amount we can prefetch with one call to this amount. This 9352abb70eSMatthew Ahrens * helps to limit the amount of memory that can be used by prefetching. 9452abb70eSMatthew Ahrens * Larger objects should be prefetched a bit at a time. 9552abb70eSMatthew Ahrens */ 9652abb70eSMatthew Ahrens uint64_t dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE; 9752abb70eSMatthew Ahrens 98fa9e4066Sahrens const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = { 99eb633035STom Caputi { DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" }, 100eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" }, 101eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "object array" }, 102eb633035STom Caputi { DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "packed nvlist" }, 103eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "packed nvlist size" }, 104eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj" }, 105eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj header" }, 106eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map header" }, 107eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map" }, 108eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, FALSE, TRUE, "ZIL intent log" }, 109eb633035STom Caputi { DMU_BSWAP_DNODE, TRUE, FALSE, TRUE, "DMU dnode" }, 110eb633035STom Caputi { DMU_BSWAP_OBJSET, TRUE, TRUE, FALSE, "DMU objset" }, 111eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL directory" }, 112eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL directory child map" }, 113eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset snap map" }, 114eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL props" }, 115eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL dataset" }, 116eb633035STom Caputi { DMU_BSWAP_ZNODE, TRUE, FALSE, FALSE, "ZFS znode" }, 117eb633035STom Caputi { DMU_BSWAP_OLDACL, TRUE, FALSE, TRUE, "ZFS V0 ACL" }, 118eb633035STom Caputi { DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "ZFS plain file" }, 119eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS directory" }, 120eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "ZFS master node" }, 121eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS delete queue" }, 122eb633035STom Caputi { DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "zvol object" }, 123eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "zvol prop" }, 124eb633035STom Caputi { DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "other uint8[]" }, 125eb633035STom Caputi { DMU_BSWAP_UINT64, FALSE, FALSE, TRUE, "other uint64[]" }, 126eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "other ZAP" }, 127eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "persistent error log" }, 128eb633035STom Caputi { DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "SPA history" }, 129eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA history offsets" }, 130eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "Pool properties" }, 131eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL permissions" }, 132eb633035STom Caputi { DMU_BSWAP_ACL, TRUE, FALSE, TRUE, "ZFS ACL" }, 133eb633035STom Caputi { DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "ZFS SYSACL" }, 134eb633035STom Caputi { DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "FUID table" }, 135eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "FUID table size" }, 136eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset next clones" }, 137eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan work queue" }, 138f67950b2SNasf-Fan { DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project used"}, 139f67950b2SNasf-Fan { DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/proj quota"}, 140eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "snapshot refcount tags" }, 141eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT ZAP algorithm" }, 142eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT statistics" }, 143eb633035STom Caputi { DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "System attributes" }, 144eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA master node" }, 145eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr registration" }, 146eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr layouts" }, 147eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan translations" }, 148eb633035STom Caputi { DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "deduplicated block" }, 149eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL deadlist map" }, 150eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL deadlist map hdr" }, 151eb633035STom Caputi { DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dir clones" }, 152eb633035STom Caputi { DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj subobj" } 153ad135b5dSChristopher Siden }; 154ad135b5dSChristopher Siden 155ad135b5dSChristopher Siden const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = { 156ad135b5dSChristopher Siden { byteswap_uint8_array, "uint8" }, 157ad135b5dSChristopher Siden { byteswap_uint16_array, "uint16" }, 158ad135b5dSChristopher Siden { byteswap_uint32_array, "uint32" }, 159ad135b5dSChristopher Siden { byteswap_uint64_array, "uint64" }, 160ad135b5dSChristopher Siden { zap_byteswap, "zap" }, 161ad135b5dSChristopher Siden { dnode_buf_byteswap, "dnode" }, 162ad135b5dSChristopher Siden { dmu_objset_byteswap, "objset" }, 163ad135b5dSChristopher Siden { zfs_znode_byteswap, "znode" }, 164ad135b5dSChristopher Siden { zfs_oldacl_byteswap, "oldacl" }, 165ad135b5dSChristopher Siden { zfs_acl_byteswap, "acl" } 1663f9d6ad7SLin Ling }; 167fa9e4066Sahrens 168fa9e4066Sahrens int 16979d72832SMatthew Ahrens dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset, 17079d72832SMatthew Ahrens void *tag, dmu_buf_t **dbp) 17179d72832SMatthew Ahrens { 17279d72832SMatthew Ahrens uint64_t blkid; 17379d72832SMatthew Ahrens dmu_buf_impl_t *db; 17479d72832SMatthew Ahrens 17579d72832SMatthew Ahrens blkid = dbuf_whichblock(dn, 0, offset); 17679d72832SMatthew Ahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 17779d72832SMatthew Ahrens db = dbuf_hold(dn, blkid, tag); 17879d72832SMatthew Ahrens rw_exit(&dn->dn_struct_rwlock); 17979d72832SMatthew Ahrens 18079d72832SMatthew Ahrens if (db == NULL) { 18179d72832SMatthew Ahrens *dbp = NULL; 18279d72832SMatthew Ahrens return (SET_ERROR(EIO)); 18379d72832SMatthew Ahrens } 18479d72832SMatthew Ahrens 18579d72832SMatthew Ahrens *dbp = &db->db; 18679d72832SMatthew Ahrens return (0); 18779d72832SMatthew Ahrens } 18879d72832SMatthew Ahrens int 1895d7b4d43SMatthew Ahrens dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset, 1905d7b4d43SMatthew Ahrens void *tag, dmu_buf_t **dbp) 191fa9e4066Sahrens { 192fa9e4066Sahrens dnode_t *dn; 193fa9e4066Sahrens uint64_t blkid; 194fa9e4066Sahrens dmu_buf_impl_t *db; 195ea8dc4b6Seschrock int err; 196fa9e4066Sahrens 197503ad85cSMatthew Ahrens err = dnode_hold(os, object, FTAG, &dn); 198ea8dc4b6Seschrock if (err) 199ea8dc4b6Seschrock return (err); 200a2cdcdd2SPaul Dagnelie blkid = dbuf_whichblock(dn, 0, offset); 201fa9e4066Sahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 202ea8dc4b6Seschrock db = dbuf_hold(dn, blkid, tag); 203fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 2045d7b4d43SMatthew Ahrens dnode_rele(dn, FTAG); 2055d7b4d43SMatthew Ahrens 206ea8dc4b6Seschrock if (db == NULL) { 2075d7b4d43SMatthew Ahrens *dbp = NULL; 2085d7b4d43SMatthew Ahrens return (SET_ERROR(EIO)); 2095d7b4d43SMatthew Ahrens } 2105d7b4d43SMatthew Ahrens 2115d7b4d43SMatthew Ahrens *dbp = &db->db; 2125d7b4d43SMatthew Ahrens return (err); 2135d7b4d43SMatthew Ahrens } 2145d7b4d43SMatthew Ahrens 2155d7b4d43SMatthew Ahrens int 21679d72832SMatthew Ahrens dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset, 21779d72832SMatthew Ahrens void *tag, dmu_buf_t **dbp, int flags) 21879d72832SMatthew Ahrens { 21979d72832SMatthew Ahrens int err; 22079d72832SMatthew Ahrens int db_flags = DB_RF_CANFAIL; 22179d72832SMatthew Ahrens 22279d72832SMatthew Ahrens if (flags & DMU_READ_NO_PREFETCH) 22379d72832SMatthew Ahrens db_flags |= DB_RF_NOPREFETCH; 224eb633035STom Caputi if (flags & DMU_READ_NO_DECRYPT) 225eb633035STom Caputi db_flags |= DB_RF_NO_DECRYPT; 22679d72832SMatthew Ahrens 22779d72832SMatthew Ahrens err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp); 22879d72832SMatthew Ahrens if (err == 0) { 22979d72832SMatthew Ahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp); 23079d72832SMatthew Ahrens err = dbuf_read(db, NULL, db_flags); 23179d72832SMatthew Ahrens if (err != 0) { 23279d72832SMatthew Ahrens dbuf_rele(db, tag); 23379d72832SMatthew Ahrens *dbp = NULL; 23479d72832SMatthew Ahrens } 23579d72832SMatthew Ahrens } 23679d72832SMatthew Ahrens 23779d72832SMatthew Ahrens return (err); 23879d72832SMatthew Ahrens } 23979d72832SMatthew Ahrens 24079d72832SMatthew Ahrens int 2415d7b4d43SMatthew Ahrens dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset, 2425d7b4d43SMatthew Ahrens void *tag, dmu_buf_t **dbp, int flags) 2435d7b4d43SMatthew Ahrens { 2445d7b4d43SMatthew Ahrens int err; 2455d7b4d43SMatthew Ahrens int db_flags = DB_RF_CANFAIL; 2465d7b4d43SMatthew Ahrens 2475d7b4d43SMatthew Ahrens if (flags & DMU_READ_NO_PREFETCH) 2485d7b4d43SMatthew Ahrens db_flags |= DB_RF_NOPREFETCH; 249eb633035STom Caputi if (flags & DMU_READ_NO_DECRYPT) 250eb633035STom Caputi db_flags |= DB_RF_NO_DECRYPT; 2515d7b4d43SMatthew Ahrens 2525d7b4d43SMatthew Ahrens err = dmu_buf_hold_noread(os, object, offset, tag, dbp); 2535d7b4d43SMatthew Ahrens if (err == 0) { 2545d7b4d43SMatthew Ahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp); 25547cb52daSJeff Bonwick err = dbuf_read(db, NULL, db_flags); 2565d7b4d43SMatthew Ahrens if (err != 0) { 257ea8dc4b6Seschrock dbuf_rele(db, tag); 2585d7b4d43SMatthew Ahrens *dbp = NULL; 259ea8dc4b6Seschrock } 260fa9e4066Sahrens } 261fa9e4066Sahrens 262ea8dc4b6Seschrock return (err); 263fa9e4066Sahrens } 264fa9e4066Sahrens 265fa9e4066Sahrens int 266fa9e4066Sahrens dmu_bonus_max(void) 267fa9e4066Sahrens { 26854811da5SToomas Soome return (DN_OLD_MAX_BONUSLEN); 269fa9e4066Sahrens } 270fa9e4066Sahrens 2711934e92fSmaybee int 272744947dcSTom Erickson dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx) 2731934e92fSmaybee { 274744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 275744947dcSTom Erickson dnode_t *dn; 276744947dcSTom Erickson int error; 2771934e92fSmaybee 278744947dcSTom Erickson DB_DNODE_ENTER(db); 279744947dcSTom Erickson dn = DB_DNODE(db); 280744947dcSTom Erickson 281744947dcSTom Erickson if (dn->dn_bonus != db) { 282be6fd75aSMatthew Ahrens error = SET_ERROR(EINVAL); 283744947dcSTom Erickson } else if (newsize < 0 || newsize > db_fake->db_size) { 284be6fd75aSMatthew Ahrens error = SET_ERROR(EINVAL); 285744947dcSTom Erickson } else { 2861934e92fSmaybee dnode_setbonuslen(dn, newsize, tx); 287744947dcSTom Erickson error = 0; 288744947dcSTom Erickson } 289744947dcSTom Erickson 290744947dcSTom Erickson DB_DNODE_EXIT(db); 291744947dcSTom Erickson return (error); 2921934e92fSmaybee } 2931934e92fSmaybee 2940a586ceaSMark Shellenbaum int 295744947dcSTom Erickson dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx) 2960a586ceaSMark Shellenbaum { 297744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 298744947dcSTom Erickson dnode_t *dn; 299744947dcSTom Erickson int error; 3000a586ceaSMark Shellenbaum 301744947dcSTom Erickson DB_DNODE_ENTER(db); 302744947dcSTom Erickson dn = DB_DNODE(db); 3030a586ceaSMark Shellenbaum 304ad135b5dSChristopher Siden if (!DMU_OT_IS_VALID(type)) { 305be6fd75aSMatthew Ahrens error = SET_ERROR(EINVAL); 306744947dcSTom Erickson } else if (dn->dn_bonus != db) { 307be6fd75aSMatthew Ahrens error = SET_ERROR(EINVAL); 308744947dcSTom Erickson } else { 3090a586ceaSMark Shellenbaum dnode_setbonus_type(dn, type, tx); 310744947dcSTom Erickson error = 0; 311744947dcSTom Erickson } 312744947dcSTom Erickson 313744947dcSTom Erickson DB_DNODE_EXIT(db); 314744947dcSTom Erickson return (error); 315744947dcSTom Erickson } 316744947dcSTom Erickson 317744947dcSTom Erickson dmu_object_type_t 318744947dcSTom Erickson dmu_get_bonustype(dmu_buf_t *db_fake) 319744947dcSTom Erickson { 320744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 321744947dcSTom Erickson dnode_t *dn; 322744947dcSTom Erickson dmu_object_type_t type; 323744947dcSTom Erickson 324744947dcSTom Erickson DB_DNODE_ENTER(db); 325744947dcSTom Erickson dn = DB_DNODE(db); 326744947dcSTom Erickson type = dn->dn_bonustype; 327744947dcSTom Erickson DB_DNODE_EXIT(db); 328744947dcSTom Erickson 329744947dcSTom Erickson return (type); 3300a586ceaSMark Shellenbaum } 3310a586ceaSMark Shellenbaum 3320a586ceaSMark Shellenbaum int 3330a586ceaSMark Shellenbaum dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx) 3340a586ceaSMark Shellenbaum { 3350a586ceaSMark Shellenbaum dnode_t *dn; 3360a586ceaSMark Shellenbaum int error; 3370a586ceaSMark Shellenbaum 3380a586ceaSMark Shellenbaum error = dnode_hold(os, object, FTAG, &dn); 3390a586ceaSMark Shellenbaum dbuf_rm_spill(dn, tx); 34006e0070dSMark Shellenbaum rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 34106e0070dSMark Shellenbaum dnode_rm_spill(dn, tx); 34206e0070dSMark Shellenbaum rw_exit(&dn->dn_struct_rwlock); 3430a586ceaSMark Shellenbaum dnode_rele(dn, FTAG); 3440a586ceaSMark Shellenbaum return (error); 3450a586ceaSMark Shellenbaum } 3460a586ceaSMark Shellenbaum 347fa9e4066Sahrens /* 348eb633035STom Caputi * Lookup and hold the bonus buffer for the provided dnode. If the dnode 349eb633035STom Caputi * has not yet been allocated a new bonus dbuf a will be allocated. 350eb633035STom Caputi * Returns ENOENT, EIO, or 0. 351eb633035STom Caputi */ 352eb633035STom Caputi int dmu_bonus_hold_by_dnode(dnode_t *dn, void *tag, dmu_buf_t **dbp, 353eb633035STom Caputi uint32_t flags) 354eb633035STom Caputi { 355eb633035STom Caputi dmu_buf_impl_t *db; 356eb633035STom Caputi int error; 357eb633035STom Caputi uint32_t db_flags = DB_RF_MUST_SUCCEED; 358eb633035STom Caputi 359eb633035STom Caputi if (flags & DMU_READ_NO_PREFETCH) 360eb633035STom Caputi db_flags |= DB_RF_NOPREFETCH; 361eb633035STom Caputi if (flags & DMU_READ_NO_DECRYPT) 362eb633035STom Caputi db_flags |= DB_RF_NO_DECRYPT; 363eb633035STom Caputi 364eb633035STom Caputi rw_enter(&dn->dn_struct_rwlock, RW_READER); 365eb633035STom Caputi if (dn->dn_bonus == NULL) { 366eb633035STom Caputi rw_exit(&dn->dn_struct_rwlock); 367eb633035STom Caputi rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 368eb633035STom Caputi if (dn->dn_bonus == NULL) 369eb633035STom Caputi dbuf_create_bonus(dn); 370eb633035STom Caputi } 371eb633035STom Caputi db = dn->dn_bonus; 372eb633035STom Caputi 373eb633035STom Caputi /* as long as the bonus buf is held, the dnode will be held */ 374eb633035STom Caputi if (zfs_refcount_add(&db->db_holds, tag) == 1) { 375eb633035STom Caputi VERIFY(dnode_add_ref(dn, db)); 376eb633035STom Caputi atomic_inc_32(&dn->dn_dbufs_count); 377eb633035STom Caputi } 378eb633035STom Caputi 379eb633035STom Caputi /* 380eb633035STom Caputi * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's 381eb633035STom Caputi * hold and incrementing the dbuf count to ensure that dnode_move() sees 382eb633035STom Caputi * a dnode hold for every dbuf. 383eb633035STom Caputi */ 384eb633035STom Caputi rw_exit(&dn->dn_struct_rwlock); 385eb633035STom Caputi 386eb633035STom Caputi error = dbuf_read(db, NULL, db_flags); 387eb633035STom Caputi if (error) { 388eb633035STom Caputi dnode_evict_bonus(dn); 389eb633035STom Caputi dbuf_rele(db, tag); 390eb633035STom Caputi *dbp = NULL; 391eb633035STom Caputi return (error); 392eb633035STom Caputi } 393eb633035STom Caputi 394eb633035STom Caputi *dbp = &db->db; 395eb633035STom Caputi return (0); 396eb633035STom Caputi } 397eb633035STom Caputi 398eb633035STom Caputi /* 399ea8dc4b6Seschrock * returns ENOENT, EIO, or 0. 400fa9e4066Sahrens */ 401ea8dc4b6Seschrock int 402eb633035STom Caputi dmu_bonus_hold_impl(objset_t *os, uint64_t object, void *tag, uint32_t flags, 403eb633035STom Caputi dmu_buf_t **dbp) 404fa9e4066Sahrens { 405ea8dc4b6Seschrock dnode_t *dn; 406fa9e4066Sahrens dmu_buf_impl_t *db; 4071934e92fSmaybee int error; 408eb633035STom Caputi uint32_t db_flags = DB_RF_MUST_SUCCEED; 409eb633035STom Caputi 410eb633035STom Caputi if (flags & DMU_READ_NO_PREFETCH) 411eb633035STom Caputi db_flags |= DB_RF_NOPREFETCH; 412eb633035STom Caputi if (flags & DMU_READ_NO_DECRYPT) 413eb633035STom Caputi db_flags |= DB_RF_NO_DECRYPT; 414fa9e4066Sahrens 415503ad85cSMatthew Ahrens error = dnode_hold(os, object, FTAG, &dn); 4161934e92fSmaybee if (error) 4171934e92fSmaybee return (error); 418fa9e4066Sahrens 419fa9e4066Sahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 420ea8dc4b6Seschrock if (dn->dn_bonus == NULL) { 421fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 422ea8dc4b6Seschrock rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 423ea8dc4b6Seschrock if (dn->dn_bonus == NULL) 4241934e92fSmaybee dbuf_create_bonus(dn); 425fa9e4066Sahrens } 426ea8dc4b6Seschrock db = dn->dn_bonus; 4271934e92fSmaybee 4281934e92fSmaybee /* as long as the bonus buf is held, the dnode will be held */ 429e914ace2STim Schumacher if (zfs_refcount_add(&db->db_holds, tag) == 1) { 4301934e92fSmaybee VERIFY(dnode_add_ref(dn, db)); 431640c1670SJosef 'Jeff' Sipek atomic_inc_32(&dn->dn_dbufs_count); 432744947dcSTom Erickson } 433744947dcSTom Erickson 434744947dcSTom Erickson /* 435744947dcSTom Erickson * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's 436744947dcSTom Erickson * hold and incrementing the dbuf count to ensure that dnode_move() sees 437744947dcSTom Erickson * a dnode hold for every dbuf. 438744947dcSTom Erickson */ 439744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 4401934e92fSmaybee 441fa9e4066Sahrens dnode_rele(dn, FTAG); 442ea8dc4b6Seschrock 443eb633035STom Caputi error = dbuf_read(db, NULL, db_flags); 444eb633035STom Caputi if (error) { 445eb633035STom Caputi dnode_evict_bonus(dn); 446eb633035STom Caputi dbuf_rele(db, tag); 447eb633035STom Caputi *dbp = NULL; 448eb633035STom Caputi return (error); 449eb633035STom Caputi } 450ea8dc4b6Seschrock 451ea8dc4b6Seschrock *dbp = &db->db; 452ea8dc4b6Seschrock return (0); 453fa9e4066Sahrens } 454fa9e4066Sahrens 455eb633035STom Caputi int 456eb633035STom Caputi dmu_bonus_hold(objset_t *os, uint64_t obj, void *tag, dmu_buf_t **dbp) 457eb633035STom Caputi { 458eb633035STom Caputi return (dmu_bonus_hold_impl(os, obj, tag, DMU_READ_NO_PREFETCH, dbp)); 459eb633035STom Caputi } 460eb633035STom Caputi 46113506d1eSmaybee /* 4620a586ceaSMark Shellenbaum * returns ENOENT, EIO, or 0. 4630a586ceaSMark Shellenbaum * 4640a586ceaSMark Shellenbaum * This interface will allocate a blank spill dbuf when a spill blk 4650a586ceaSMark Shellenbaum * doesn't already exist on the dnode. 4660a586ceaSMark Shellenbaum * 4670a586ceaSMark Shellenbaum * if you only want to find an already existing spill db, then 4680a586ceaSMark Shellenbaum * dmu_spill_hold_existing() should be used. 4690a586ceaSMark Shellenbaum */ 4700a586ceaSMark Shellenbaum int 4710a586ceaSMark Shellenbaum dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp) 4720a586ceaSMark Shellenbaum { 4730a586ceaSMark Shellenbaum dmu_buf_impl_t *db = NULL; 4740a586ceaSMark Shellenbaum int err; 4750a586ceaSMark Shellenbaum 4760a586ceaSMark Shellenbaum if ((flags & DB_RF_HAVESTRUCT) == 0) 4770a586ceaSMark Shellenbaum rw_enter(&dn->dn_struct_rwlock, RW_READER); 4780a586ceaSMark Shellenbaum 4790a586ceaSMark Shellenbaum db = dbuf_hold(dn, DMU_SPILL_BLKID, tag); 4800a586ceaSMark Shellenbaum 4810a586ceaSMark Shellenbaum if ((flags & DB_RF_HAVESTRUCT) == 0) 4820a586ceaSMark Shellenbaum rw_exit(&dn->dn_struct_rwlock); 4830a586ceaSMark Shellenbaum 4840a586ceaSMark Shellenbaum ASSERT(db != NULL); 4851d8ccc7bSMark Shellenbaum err = dbuf_read(db, NULL, flags); 4861d8ccc7bSMark Shellenbaum if (err == 0) 4870a586ceaSMark Shellenbaum *dbp = &db->db; 4881d8ccc7bSMark Shellenbaum else 4891d8ccc7bSMark Shellenbaum dbuf_rele(db, tag); 4900a586ceaSMark Shellenbaum return (err); 4910a586ceaSMark Shellenbaum } 4920a586ceaSMark Shellenbaum 4930a586ceaSMark Shellenbaum int 4940a586ceaSMark Shellenbaum dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp) 4950a586ceaSMark Shellenbaum { 496744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus; 497744947dcSTom Erickson dnode_t *dn; 4980a586ceaSMark Shellenbaum int err; 4990a586ceaSMark Shellenbaum 500744947dcSTom Erickson DB_DNODE_ENTER(db); 501744947dcSTom Erickson dn = DB_DNODE(db); 502744947dcSTom Erickson 503744947dcSTom Erickson if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) { 504be6fd75aSMatthew Ahrens err = SET_ERROR(EINVAL); 505744947dcSTom Erickson } else { 5060a586ceaSMark Shellenbaum rw_enter(&dn->dn_struct_rwlock, RW_READER); 5070a586ceaSMark Shellenbaum 5080a586ceaSMark Shellenbaum if (!dn->dn_have_spill) { 509be6fd75aSMatthew Ahrens err = SET_ERROR(ENOENT); 510744947dcSTom Erickson } else { 5111d8ccc7bSMark Shellenbaum err = dmu_spill_hold_by_dnode(dn, 5121d8ccc7bSMark Shellenbaum DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp); 513744947dcSTom Erickson } 514744947dcSTom Erickson 5150a586ceaSMark Shellenbaum rw_exit(&dn->dn_struct_rwlock); 516744947dcSTom Erickson } 517744947dcSTom Erickson 518744947dcSTom Erickson DB_DNODE_EXIT(db); 5190a586ceaSMark Shellenbaum return (err); 5200a586ceaSMark Shellenbaum } 5210a586ceaSMark Shellenbaum 5220a586ceaSMark Shellenbaum int 523eb633035STom Caputi dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, void *tag, 524eb633035STom Caputi dmu_buf_t **dbp) 5250a586ceaSMark Shellenbaum { 526744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus; 527744947dcSTom Erickson dnode_t *dn; 528744947dcSTom Erickson int err; 529eb633035STom Caputi uint32_t db_flags = DB_RF_CANFAIL; 530eb633035STom Caputi 531eb633035STom Caputi if (flags & DMU_READ_NO_DECRYPT) 532eb633035STom Caputi db_flags |= DB_RF_NO_DECRYPT; 533744947dcSTom Erickson 534744947dcSTom Erickson DB_DNODE_ENTER(db); 535744947dcSTom Erickson dn = DB_DNODE(db); 536eb633035STom Caputi err = dmu_spill_hold_by_dnode(dn, db_flags, tag, dbp); 537744947dcSTom Erickson DB_DNODE_EXIT(db); 538744947dcSTom Erickson 539744947dcSTom Erickson return (err); 5400a586ceaSMark Shellenbaum } 5410a586ceaSMark Shellenbaum 5420a586ceaSMark Shellenbaum /* 54313506d1eSmaybee * Note: longer-term, we should modify all of the dmu_buf_*() interfaces 54413506d1eSmaybee * to take a held dnode rather than <os, object> -- the lookup is wasteful, 54513506d1eSmaybee * and can induce severe lock contention when writing to several files 54613506d1eSmaybee * whose dnodes are in the same block. 54713506d1eSmaybee */ 5488dfe5547SRichard Yao int 5497bfdf011SNeil Perrin dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length, 550cf6106c8SMatthew Ahrens boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags) 551fa9e4066Sahrens { 552fa9e4066Sahrens dmu_buf_t **dbp; 553fa9e4066Sahrens uint64_t blkid, nblks, i; 5547bfdf011SNeil Perrin uint32_t dbuf_flags; 555ea8dc4b6Seschrock int err; 556ea8dc4b6Seschrock zio_t *zio; 557ea8dc4b6Seschrock 558ea8dc4b6Seschrock ASSERT(length <= DMU_MAX_ACCESS); 559fa9e4066Sahrens 560cf6106c8SMatthew Ahrens /* 561cf6106c8SMatthew Ahrens * Note: We directly notify the prefetch code of this read, so that 562cf6106c8SMatthew Ahrens * we can tell it about the multi-block read. dbuf_read() only knows 563cf6106c8SMatthew Ahrens * about the one block it is accessing. 564cf6106c8SMatthew Ahrens */ 565cf6106c8SMatthew Ahrens dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT | 566cf6106c8SMatthew Ahrens DB_RF_NOPREFETCH; 567ea8dc4b6Seschrock 568fa9e4066Sahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 569fa9e4066Sahrens if (dn->dn_datablkshift) { 570fa9e4066Sahrens int blkshift = dn->dn_datablkshift; 571fa9e4066Sahrens nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) - 572fa9e4066Sahrens P2ALIGN(offset, 1ULL << blkshift)) >> blkshift; 573fa9e4066Sahrens } else { 5740125049cSahrens if (offset + length > dn->dn_datablksz) { 5750125049cSahrens zfs_panic_recover("zfs: accessing past end of object " 5760125049cSahrens "%llx/%llx (size=%u access=%llu+%llu)", 5770125049cSahrens (longlong_t)dn->dn_objset-> 5780125049cSahrens os_dsl_dataset->ds_object, 5790125049cSahrens (longlong_t)dn->dn_object, dn->dn_datablksz, 5800125049cSahrens (longlong_t)offset, (longlong_t)length); 581c87b8fc5SMark J Musante rw_exit(&dn->dn_struct_rwlock); 582be6fd75aSMatthew Ahrens return (SET_ERROR(EIO)); 5830125049cSahrens } 584fa9e4066Sahrens nblks = 1; 585fa9e4066Sahrens } 586ea8dc4b6Seschrock dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP); 587fa9e4066Sahrens 588e14bb325SJeff Bonwick zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL); 589a2cdcdd2SPaul Dagnelie blkid = dbuf_whichblock(dn, 0, offset); 590fa9e4066Sahrens for (i = 0; i < nblks; i++) { 591ea8dc4b6Seschrock dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag); 592ea8dc4b6Seschrock if (db == NULL) { 593ea8dc4b6Seschrock rw_exit(&dn->dn_struct_rwlock); 594ea8dc4b6Seschrock dmu_buf_rele_array(dbp, nblks, tag); 595ea8dc4b6Seschrock zio_nowait(zio); 596be6fd75aSMatthew Ahrens return (SET_ERROR(EIO)); 597ea8dc4b6Seschrock } 598cf6106c8SMatthew Ahrens 599ea8dc4b6Seschrock /* initiate async i/o */ 600cf6106c8SMatthew Ahrens if (read) 6017bfdf011SNeil Perrin (void) dbuf_read(db, zio, dbuf_flags); 602ea8dc4b6Seschrock dbp[i] = &db->db; 603fa9e4066Sahrens } 604cf6106c8SMatthew Ahrens 605cb92f413SAlexander Motin if ((flags & DMU_READ_NO_PREFETCH) == 0 && 606cb92f413SAlexander Motin DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) { 607cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, blkid, nblks, 608cb92f413SAlexander Motin read && DNODE_IS_CACHEABLE(dn)); 609cf6106c8SMatthew Ahrens } 610fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 611fa9e4066Sahrens 612ea8dc4b6Seschrock /* wait for async i/o */ 613ea8dc4b6Seschrock err = zio_wait(zio); 614ea8dc4b6Seschrock if (err) { 615ea8dc4b6Seschrock dmu_buf_rele_array(dbp, nblks, tag); 616ea8dc4b6Seschrock return (err); 617ea8dc4b6Seschrock } 618ea8dc4b6Seschrock 619ea8dc4b6Seschrock /* wait for other io to complete */ 620ea8dc4b6Seschrock if (read) { 621ea8dc4b6Seschrock for (i = 0; i < nblks; i++) { 622ea8dc4b6Seschrock dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i]; 623ea8dc4b6Seschrock mutex_enter(&db->db_mtx); 624ea8dc4b6Seschrock while (db->db_state == DB_READ || 625ea8dc4b6Seschrock db->db_state == DB_FILL) 626ea8dc4b6Seschrock cv_wait(&db->db_changed, &db->db_mtx); 627ea8dc4b6Seschrock if (db->db_state == DB_UNCACHED) 628be6fd75aSMatthew Ahrens err = SET_ERROR(EIO); 629ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 630ea8dc4b6Seschrock if (err) { 631ea8dc4b6Seschrock dmu_buf_rele_array(dbp, nblks, tag); 632ea8dc4b6Seschrock return (err); 633ea8dc4b6Seschrock } 634ea8dc4b6Seschrock } 635ea8dc4b6Seschrock } 636ea8dc4b6Seschrock 637ea8dc4b6Seschrock *numbufsp = nblks; 638ea8dc4b6Seschrock *dbpp = dbp; 639ea8dc4b6Seschrock return (0); 640fa9e4066Sahrens } 641fa9e4066Sahrens 642a2eea2e1Sahrens static int 64313506d1eSmaybee dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset, 64413506d1eSmaybee uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp) 64513506d1eSmaybee { 64613506d1eSmaybee dnode_t *dn; 64713506d1eSmaybee int err; 64813506d1eSmaybee 649503ad85cSMatthew Ahrens err = dnode_hold(os, object, FTAG, &dn); 65013506d1eSmaybee if (err) 65113506d1eSmaybee return (err); 65213506d1eSmaybee 65313506d1eSmaybee err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag, 6547bfdf011SNeil Perrin numbufsp, dbpp, DMU_READ_PREFETCH); 65513506d1eSmaybee 65613506d1eSmaybee dnode_rele(dn, FTAG); 65713506d1eSmaybee 65813506d1eSmaybee return (err); 65913506d1eSmaybee } 66013506d1eSmaybee 66113506d1eSmaybee int 662744947dcSTom Erickson dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset, 663cf6106c8SMatthew Ahrens uint64_t length, boolean_t read, void *tag, int *numbufsp, 664cf6106c8SMatthew Ahrens dmu_buf_t ***dbpp) 66513506d1eSmaybee { 666744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 667744947dcSTom Erickson dnode_t *dn; 66813506d1eSmaybee int err; 66913506d1eSmaybee 670744947dcSTom Erickson DB_DNODE_ENTER(db); 671744947dcSTom Erickson dn = DB_DNODE(db); 67213506d1eSmaybee err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag, 6737bfdf011SNeil Perrin numbufsp, dbpp, DMU_READ_PREFETCH); 674744947dcSTom Erickson DB_DNODE_EXIT(db); 67513506d1eSmaybee 67613506d1eSmaybee return (err); 67713506d1eSmaybee } 67813506d1eSmaybee 679fa9e4066Sahrens void 680ea8dc4b6Seschrock dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag) 681fa9e4066Sahrens { 682fa9e4066Sahrens int i; 683fa9e4066Sahrens dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake; 684fa9e4066Sahrens 685fa9e4066Sahrens if (numbufs == 0) 686fa9e4066Sahrens return; 687fa9e4066Sahrens 688ea8dc4b6Seschrock for (i = 0; i < numbufs; i++) { 689ea8dc4b6Seschrock if (dbp[i]) 690ea8dc4b6Seschrock dbuf_rele(dbp[i], tag); 691ea8dc4b6Seschrock } 692fa9e4066Sahrens 693fa9e4066Sahrens kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs); 694fa9e4066Sahrens } 695fa9e4066Sahrens 69669962b56SMatthew Ahrens /* 697a2cdcdd2SPaul Dagnelie * Issue prefetch i/os for the given blocks. If level is greater than 0, the 698a2cdcdd2SPaul Dagnelie * indirect blocks prefeteched will be those that point to the blocks containing 699a2cdcdd2SPaul Dagnelie * the data starting at offset, and continuing to offset + len. 70069962b56SMatthew Ahrens * 701eb633035STom Caputi * Note that if the indirect blocks above the blocks being prefetched are not 702eb633035STom Caputi * in cache, they will be asychronously read in. 70369962b56SMatthew Ahrens */ 704fa9e4066Sahrens void 705a2cdcdd2SPaul Dagnelie dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset, 706a2cdcdd2SPaul Dagnelie uint64_t len, zio_priority_t pri) 707fa9e4066Sahrens { 708fa9e4066Sahrens dnode_t *dn; 709fa9e4066Sahrens uint64_t blkid; 71069962b56SMatthew Ahrens int nblks, err; 711fa9e4066Sahrens 712fa9e4066Sahrens if (len == 0) { /* they're interested in the bonus buffer */ 713744947dcSTom Erickson dn = DMU_META_DNODE(os); 714fa9e4066Sahrens 715fa9e4066Sahrens if (object == 0 || object >= DN_MAX_OBJECT) 716fa9e4066Sahrens return; 717fa9e4066Sahrens 718fa9e4066Sahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 719a2cdcdd2SPaul Dagnelie blkid = dbuf_whichblock(dn, level, 720a2cdcdd2SPaul Dagnelie object * sizeof (dnode_phys_t)); 721a2cdcdd2SPaul Dagnelie dbuf_prefetch(dn, level, blkid, pri, 0); 722fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 723fa9e4066Sahrens return; 724fa9e4066Sahrens } 725fa9e4066Sahrens 726fa9e4066Sahrens /* 72752abb70eSMatthew Ahrens * See comment before the definition of dmu_prefetch_max. 72852abb70eSMatthew Ahrens */ 72952abb70eSMatthew Ahrens len = MIN(len, dmu_prefetch_max); 73052abb70eSMatthew Ahrens 73152abb70eSMatthew Ahrens /* 732fa9e4066Sahrens * XXX - Note, if the dnode for the requested object is not 733fa9e4066Sahrens * already cached, we will do a *synchronous* read in the 734fa9e4066Sahrens * dnode_hold() call. The same is true for any indirects. 735fa9e4066Sahrens */ 736503ad85cSMatthew Ahrens err = dnode_hold(os, object, FTAG, &dn); 737ea8dc4b6Seschrock if (err != 0) 738fa9e4066Sahrens return; 739fa9e4066Sahrens 740fa9e4066Sahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 741a2cdcdd2SPaul Dagnelie /* 742a2cdcdd2SPaul Dagnelie * offset + len - 1 is the last byte we want to prefetch for, and offset 743a2cdcdd2SPaul Dagnelie * is the first. Then dbuf_whichblk(dn, level, off + len - 1) is the 744a2cdcdd2SPaul Dagnelie * last block we want to prefetch, and dbuf_whichblock(dn, level, 745a2cdcdd2SPaul Dagnelie * offset) is the first. Then the number we need to prefetch is the 746a2cdcdd2SPaul Dagnelie * last - first + 1. 747a2cdcdd2SPaul Dagnelie */ 748a2cdcdd2SPaul Dagnelie if (level > 0 || dn->dn_datablkshift != 0) { 749a2cdcdd2SPaul Dagnelie nblks = dbuf_whichblock(dn, level, offset + len - 1) - 750a2cdcdd2SPaul Dagnelie dbuf_whichblock(dn, level, offset) + 1; 751fa9e4066Sahrens } else { 752fa9e4066Sahrens nblks = (offset < dn->dn_datablksz); 753fa9e4066Sahrens } 754fa9e4066Sahrens 755fa9e4066Sahrens if (nblks != 0) { 756a2cdcdd2SPaul Dagnelie blkid = dbuf_whichblock(dn, level, offset); 75769962b56SMatthew Ahrens for (int i = 0; i < nblks; i++) 758a2cdcdd2SPaul Dagnelie dbuf_prefetch(dn, level, blkid + i, pri, 0); 759fa9e4066Sahrens } 760fa9e4066Sahrens 761fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 762fa9e4066Sahrens 763fa9e4066Sahrens dnode_rele(dn, FTAG); 764fa9e4066Sahrens } 765fa9e4066Sahrens 76676256205SMark Maybee /* 76776256205SMark Maybee * Get the next "chunk" of file data to free. We traverse the file from 76876256205SMark Maybee * the end so that the file gets shorter over time (if we crashes in the 76976256205SMark Maybee * middle, this will leave us in a better state). We find allocated file 77076256205SMark Maybee * data by simply searching the allocated level 1 indirects. 771713d6c20SMatthew Ahrens * 772713d6c20SMatthew Ahrens * On input, *start should be the first offset that does not need to be 773713d6c20SMatthew Ahrens * freed (e.g. "offset + length"). On return, *start will be the first 774713d6c20SMatthew Ahrens * offset that should be freed. 77576256205SMark Maybee */ 776cdb0ab79Smaybee static int 777713d6c20SMatthew Ahrens get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum) 778cdb0ab79Smaybee { 779713d6c20SMatthew Ahrens uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1); 780713d6c20SMatthew Ahrens /* bytes of data covered by a level-1 indirect block */ 78176256205SMark Maybee uint64_t iblkrange = 7821c8564a7SMark Maybee dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT); 783cdb0ab79Smaybee 784713d6c20SMatthew Ahrens ASSERT3U(minimum, <=, *start); 785cdb0ab79Smaybee 786713d6c20SMatthew Ahrens if (*start - minimum <= iblkrange * maxblks) { 787713d6c20SMatthew Ahrens *start = minimum; 788cdb0ab79Smaybee return (0); 789cdb0ab79Smaybee } 79076256205SMark Maybee ASSERT(ISP2(iblkrange)); 791cdb0ab79Smaybee 792713d6c20SMatthew Ahrens for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) { 7931c8564a7SMark Maybee int err; 794cdb0ab79Smaybee 795713d6c20SMatthew Ahrens /* 796713d6c20SMatthew Ahrens * dnode_next_offset(BACKWARDS) will find an allocated L1 797713d6c20SMatthew Ahrens * indirect block at or before the input offset. We must 798713d6c20SMatthew Ahrens * decrement *start so that it is at the end of the region 799713d6c20SMatthew Ahrens * to search. 800713d6c20SMatthew Ahrens */ 801713d6c20SMatthew Ahrens (*start)--; 802cdb0ab79Smaybee err = dnode_next_offset(dn, 80376256205SMark Maybee DNODE_FIND_BACKWARDS, start, 2, 1, 0); 804cdb0ab79Smaybee 805713d6c20SMatthew Ahrens /* if there are no indirect blocks before start, we are done */ 80676256205SMark Maybee if (err == ESRCH) { 807713d6c20SMatthew Ahrens *start = minimum; 808713d6c20SMatthew Ahrens break; 809713d6c20SMatthew Ahrens } else if (err != 0) { 810cdb0ab79Smaybee return (err); 81176256205SMark Maybee } 812cdb0ab79Smaybee 813713d6c20SMatthew Ahrens /* set start to the beginning of this L1 indirect */ 81476256205SMark Maybee *start = P2ALIGN(*start, iblkrange); 815cdb0ab79Smaybee } 816713d6c20SMatthew Ahrens if (*start < minimum) 817713d6c20SMatthew Ahrens *start = minimum; 818cdb0ab79Smaybee return (0); 819cdb0ab79Smaybee } 820cdb0ab79Smaybee 821eb721827SAlek Pinchuk /* 822eb721827SAlek Pinchuk * If this objset is of type OST_ZFS return true if vfs's unmounted flag is set, 823eb721827SAlek Pinchuk * otherwise return false. 824eb721827SAlek Pinchuk * Used below in dmu_free_long_range_impl() to enable abort when unmounting 825eb721827SAlek Pinchuk */ 826eb721827SAlek Pinchuk /*ARGSUSED*/ 827eb721827SAlek Pinchuk static boolean_t 828eb721827SAlek Pinchuk dmu_objset_zfs_unmounting(objset_t *os) 829eb721827SAlek Pinchuk { 830eb721827SAlek Pinchuk #ifdef _KERNEL 831eb721827SAlek Pinchuk if (dmu_objset_type(os) == DMU_OST_ZFS) 832eb721827SAlek Pinchuk return (zfs_get_vfs_flag_unmounted(os)); 833eb721827SAlek Pinchuk #endif 834eb721827SAlek Pinchuk return (B_FALSE); 835eb721827SAlek Pinchuk } 836eb721827SAlek Pinchuk 837cdb0ab79Smaybee static int 838cdb0ab79Smaybee dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset, 839713d6c20SMatthew Ahrens uint64_t length) 840cdb0ab79Smaybee { 841713d6c20SMatthew Ahrens uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz; 842713d6c20SMatthew Ahrens int err; 843ff5177eeSAlek Pinchuk uint64_t dirty_frees_threshold; 844ff5177eeSAlek Pinchuk dsl_pool_t *dp = dmu_objset_pool(os); 845cdb0ab79Smaybee 846713d6c20SMatthew Ahrens if (offset >= object_size) 847cdb0ab79Smaybee return (0); 848cdb0ab79Smaybee 849ff5177eeSAlek Pinchuk if (zfs_per_txg_dirty_frees_percent <= 100) 850ff5177eeSAlek Pinchuk dirty_frees_threshold = 851ff5177eeSAlek Pinchuk zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100; 852ff5177eeSAlek Pinchuk else 853ff5177eeSAlek Pinchuk dirty_frees_threshold = zfs_dirty_data_max / 4; 854ff5177eeSAlek Pinchuk 855713d6c20SMatthew Ahrens if (length == DMU_OBJECT_END || offset + length > object_size) 856713d6c20SMatthew Ahrens length = object_size - offset; 857713d6c20SMatthew Ahrens 858713d6c20SMatthew Ahrens while (length != 0) { 859ff5177eeSAlek Pinchuk uint64_t chunk_end, chunk_begin, chunk_len; 860ff5177eeSAlek Pinchuk uint64_t long_free_dirty_all_txgs = 0; 861ff5177eeSAlek Pinchuk dmu_tx_t *tx; 862713d6c20SMatthew Ahrens 863eb721827SAlek Pinchuk if (dmu_objset_zfs_unmounting(dn->dn_objset)) 864eb721827SAlek Pinchuk return (SET_ERROR(EINTR)); 865eb721827SAlek Pinchuk 866713d6c20SMatthew Ahrens chunk_end = chunk_begin = offset + length; 867713d6c20SMatthew Ahrens 868713d6c20SMatthew Ahrens /* move chunk_begin backwards to the beginning of this chunk */ 869713d6c20SMatthew Ahrens err = get_next_chunk(dn, &chunk_begin, offset); 870cdb0ab79Smaybee if (err) 871cdb0ab79Smaybee return (err); 872713d6c20SMatthew Ahrens ASSERT3U(chunk_begin, >=, offset); 873713d6c20SMatthew Ahrens ASSERT3U(chunk_begin, <=, chunk_end); 874cdb0ab79Smaybee 875ff5177eeSAlek Pinchuk chunk_len = chunk_end - chunk_begin; 876ff5177eeSAlek Pinchuk 877ff5177eeSAlek Pinchuk mutex_enter(&dp->dp_lock); 878ff5177eeSAlek Pinchuk for (int t = 0; t < TXG_SIZE; t++) { 879ff5177eeSAlek Pinchuk long_free_dirty_all_txgs += 880ff5177eeSAlek Pinchuk dp->dp_long_free_dirty_pertxg[t]; 881ff5177eeSAlek Pinchuk } 882ff5177eeSAlek Pinchuk mutex_exit(&dp->dp_lock); 883ff5177eeSAlek Pinchuk 884ff5177eeSAlek Pinchuk /* 885ff5177eeSAlek Pinchuk * To avoid filling up a TXG with just frees wait for 886ff5177eeSAlek Pinchuk * the next TXG to open before freeing more chunks if 887ff5177eeSAlek Pinchuk * we have reached the threshold of frees 888ff5177eeSAlek Pinchuk */ 889ff5177eeSAlek Pinchuk if (dirty_frees_threshold != 0 && 890ff5177eeSAlek Pinchuk long_free_dirty_all_txgs >= dirty_frees_threshold) { 891*084fd14fSBrian Behlendorf txg_wait_open(dp, 0, B_TRUE); 892ff5177eeSAlek Pinchuk continue; 893ff5177eeSAlek Pinchuk } 894ff5177eeSAlek Pinchuk 895ff5177eeSAlek Pinchuk tx = dmu_tx_create(os); 896ff5177eeSAlek Pinchuk dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len); 8974bb73804SMatthew Ahrens 8984bb73804SMatthew Ahrens /* 8994bb73804SMatthew Ahrens * Mark this transaction as typically resulting in a net 9004bb73804SMatthew Ahrens * reduction in space used. 9014bb73804SMatthew Ahrens */ 9024bb73804SMatthew Ahrens dmu_tx_mark_netfree(tx); 903cdb0ab79Smaybee err = dmu_tx_assign(tx, TXG_WAIT); 904cdb0ab79Smaybee if (err) { 905cdb0ab79Smaybee dmu_tx_abort(tx); 906cdb0ab79Smaybee return (err); 907cdb0ab79Smaybee } 908ff5177eeSAlek Pinchuk 909ff5177eeSAlek Pinchuk mutex_enter(&dp->dp_lock); 910ff5177eeSAlek Pinchuk dp->dp_long_free_dirty_pertxg[dmu_tx_get_txg(tx) & TXG_MASK] += 911ff5177eeSAlek Pinchuk chunk_len; 912ff5177eeSAlek Pinchuk mutex_exit(&dp->dp_lock); 913ff5177eeSAlek Pinchuk DTRACE_PROBE3(free__long__range, 914ff5177eeSAlek Pinchuk uint64_t, long_free_dirty_all_txgs, uint64_t, chunk_len, 915ff5177eeSAlek Pinchuk uint64_t, dmu_tx_get_txg(tx)); 916ff5177eeSAlek Pinchuk dnode_free_range(dn, chunk_begin, chunk_len, tx); 917eb633035STom Caputi 918cdb0ab79Smaybee dmu_tx_commit(tx); 919713d6c20SMatthew Ahrens 920ff5177eeSAlek Pinchuk length -= chunk_len; 921cdb0ab79Smaybee } 922cdb0ab79Smaybee return (0); 923cdb0ab79Smaybee } 924cdb0ab79Smaybee 925cdb0ab79Smaybee int 926cdb0ab79Smaybee dmu_free_long_range(objset_t *os, uint64_t object, 927cdb0ab79Smaybee uint64_t offset, uint64_t length) 928cdb0ab79Smaybee { 929cdb0ab79Smaybee dnode_t *dn; 930cdb0ab79Smaybee int err; 931cdb0ab79Smaybee 932503ad85cSMatthew Ahrens err = dnode_hold(os, object, FTAG, &dn); 933cdb0ab79Smaybee if (err != 0) 934cdb0ab79Smaybee return (err); 935713d6c20SMatthew Ahrens err = dmu_free_long_range_impl(os, dn, offset, length); 9365253393bSMatthew Ahrens 9375253393bSMatthew Ahrens /* 9385253393bSMatthew Ahrens * It is important to zero out the maxblkid when freeing the entire 9395253393bSMatthew Ahrens * file, so that (a) subsequent calls to dmu_free_long_range_impl() 9405253393bSMatthew Ahrens * will take the fast path, and (b) dnode_reallocate() can verify 9415253393bSMatthew Ahrens * that the entire file has been freed. 9425253393bSMatthew Ahrens */ 94343466aaeSMax Grossman if (err == 0 && offset == 0 && length == DMU_OBJECT_END) 9445253393bSMatthew Ahrens dn->dn_maxblkid = 0; 9455253393bSMatthew Ahrens 946cdb0ab79Smaybee dnode_rele(dn, FTAG); 947cdb0ab79Smaybee return (err); 948cdb0ab79Smaybee } 949cdb0ab79Smaybee 950cdb0ab79Smaybee int 951713d6c20SMatthew Ahrens dmu_free_long_object(objset_t *os, uint64_t object) 952cdb0ab79Smaybee { 953cdb0ab79Smaybee dmu_tx_t *tx; 954cdb0ab79Smaybee int err; 955cdb0ab79Smaybee 956713d6c20SMatthew Ahrens err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END); 957cdb0ab79Smaybee if (err != 0) 958cdb0ab79Smaybee return (err); 959713d6c20SMatthew Ahrens 960cdb0ab79Smaybee tx = dmu_tx_create(os); 961cdb0ab79Smaybee dmu_tx_hold_bonus(tx, object); 962713d6c20SMatthew Ahrens dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END); 9634bb73804SMatthew Ahrens dmu_tx_mark_netfree(tx); 964cdb0ab79Smaybee err = dmu_tx_assign(tx, TXG_WAIT); 965cdb0ab79Smaybee if (err == 0) { 966eb633035STom Caputi if (err == 0) 967713d6c20SMatthew Ahrens err = dmu_object_free(os, object, tx); 968eb633035STom Caputi 969cdb0ab79Smaybee dmu_tx_commit(tx); 970cdb0ab79Smaybee } else { 971cdb0ab79Smaybee dmu_tx_abort(tx); 972cdb0ab79Smaybee } 973713d6c20SMatthew Ahrens 974cdb0ab79Smaybee return (err); 975cdb0ab79Smaybee } 976cdb0ab79Smaybee 977ea8dc4b6Seschrock int 978fa9e4066Sahrens dmu_free_range(objset_t *os, uint64_t object, uint64_t offset, 979fa9e4066Sahrens uint64_t size, dmu_tx_t *tx) 980fa9e4066Sahrens { 981ea8dc4b6Seschrock dnode_t *dn; 982503ad85cSMatthew Ahrens int err = dnode_hold(os, object, FTAG, &dn); 983ea8dc4b6Seschrock if (err) 984ea8dc4b6Seschrock return (err); 985fa9e4066Sahrens ASSERT(offset < UINT64_MAX); 986eb633035STom Caputi ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset); 987fa9e4066Sahrens dnode_free_range(dn, offset, size, tx); 988fa9e4066Sahrens dnode_rele(dn, FTAG); 989ea8dc4b6Seschrock return (0); 990fa9e4066Sahrens } 991fa9e4066Sahrens 992b0c42cd4Sbzzz77 static int 993b0c42cd4Sbzzz77 dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size, 9947bfdf011SNeil Perrin void *buf, uint32_t flags) 995fa9e4066Sahrens { 996fa9e4066Sahrens dmu_buf_t **dbp; 997b0c42cd4Sbzzz77 int numbufs, err = 0; 998feb08c6bSbillm 999feb08c6bSbillm /* 1000feb08c6bSbillm * Deal with odd block sizes, where there can't be data past the first 1001feb08c6bSbillm * block. If we ever do the tail block optimization, we will need to 1002feb08c6bSbillm * handle that here as well. 1003feb08c6bSbillm */ 1004c87b8fc5SMark J Musante if (dn->dn_maxblkid == 0) { 1005fa9e4066Sahrens int newsz = offset > dn->dn_datablksz ? 0 : 1006fa9e4066Sahrens MIN(size, dn->dn_datablksz - offset); 1007fa9e4066Sahrens bzero((char *)buf + newsz, size - newsz); 1008fa9e4066Sahrens size = newsz; 1009fa9e4066Sahrens } 1010fa9e4066Sahrens 1011fa9e4066Sahrens while (size > 0) { 1012fa9e4066Sahrens uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2); 1013c87b8fc5SMark J Musante int i; 1014fa9e4066Sahrens 1015fa9e4066Sahrens /* 1016fa9e4066Sahrens * NB: we could do this block-at-a-time, but it's nice 1017fa9e4066Sahrens * to be reading in parallel. 1018fa9e4066Sahrens */ 1019a2eea2e1Sahrens err = dmu_buf_hold_array_by_dnode(dn, offset, mylen, 10207bfdf011SNeil Perrin TRUE, FTAG, &numbufs, &dbp, flags); 1021ea8dc4b6Seschrock if (err) 10221934e92fSmaybee break; 1023fa9e4066Sahrens 1024fa9e4066Sahrens for (i = 0; i < numbufs; i++) { 1025fa9e4066Sahrens int tocpy; 1026fa9e4066Sahrens int bufoff; 1027fa9e4066Sahrens dmu_buf_t *db = dbp[i]; 1028fa9e4066Sahrens 1029fa9e4066Sahrens ASSERT(size > 0); 1030fa9e4066Sahrens 1031fa9e4066Sahrens bufoff = offset - db->db_offset; 1032fa9e4066Sahrens tocpy = (int)MIN(db->db_size - bufoff, size); 1033fa9e4066Sahrens 1034fa9e4066Sahrens bcopy((char *)db->db_data + bufoff, buf, tocpy); 1035fa9e4066Sahrens 1036fa9e4066Sahrens offset += tocpy; 1037fa9e4066Sahrens size -= tocpy; 1038fa9e4066Sahrens buf = (char *)buf + tocpy; 1039fa9e4066Sahrens } 1040ea8dc4b6Seschrock dmu_buf_rele_array(dbp, numbufs, FTAG); 1041fa9e4066Sahrens } 1042b0c42cd4Sbzzz77 return (err); 1043b0c42cd4Sbzzz77 } 1044b0c42cd4Sbzzz77 1045b0c42cd4Sbzzz77 int 1046b0c42cd4Sbzzz77 dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, 1047b0c42cd4Sbzzz77 void *buf, uint32_t flags) 1048b0c42cd4Sbzzz77 { 1049b0c42cd4Sbzzz77 dnode_t *dn; 1050b0c42cd4Sbzzz77 int err; 1051b0c42cd4Sbzzz77 1052b0c42cd4Sbzzz77 err = dnode_hold(os, object, FTAG, &dn); 1053b0c42cd4Sbzzz77 if (err != 0) 1054b0c42cd4Sbzzz77 return (err); 1055b0c42cd4Sbzzz77 1056b0c42cd4Sbzzz77 err = dmu_read_impl(dn, offset, size, buf, flags); 1057a2eea2e1Sahrens dnode_rele(dn, FTAG); 10581934e92fSmaybee return (err); 1059fa9e4066Sahrens } 1060fa9e4066Sahrens 1061b0c42cd4Sbzzz77 int 1062b0c42cd4Sbzzz77 dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf, 1063b0c42cd4Sbzzz77 uint32_t flags) 1064b0c42cd4Sbzzz77 { 1065b0c42cd4Sbzzz77 return (dmu_read_impl(dn, offset, size, buf, flags)); 1066b0c42cd4Sbzzz77 } 1067b0c42cd4Sbzzz77 1068b0c42cd4Sbzzz77 static void 1069b0c42cd4Sbzzz77 dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size, 1070fa9e4066Sahrens const void *buf, dmu_tx_t *tx) 1071fa9e4066Sahrens { 1072b0c42cd4Sbzzz77 int i; 1073fa9e4066Sahrens 1074fa9e4066Sahrens for (i = 0; i < numbufs; i++) { 1075fa9e4066Sahrens int tocpy; 1076fa9e4066Sahrens int bufoff; 1077fa9e4066Sahrens dmu_buf_t *db = dbp[i]; 1078fa9e4066Sahrens 1079fa9e4066Sahrens ASSERT(size > 0); 1080fa9e4066Sahrens 1081fa9e4066Sahrens bufoff = offset - db->db_offset; 1082fa9e4066Sahrens tocpy = (int)MIN(db->db_size - bufoff, size); 1083fa9e4066Sahrens 1084fa9e4066Sahrens ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); 1085fa9e4066Sahrens 1086fa9e4066Sahrens if (tocpy == db->db_size) 1087fa9e4066Sahrens dmu_buf_will_fill(db, tx); 1088fa9e4066Sahrens else 1089fa9e4066Sahrens dmu_buf_will_dirty(db, tx); 1090fa9e4066Sahrens 1091fa9e4066Sahrens bcopy(buf, (char *)db->db_data + bufoff, tocpy); 1092fa9e4066Sahrens 1093fa9e4066Sahrens if (tocpy == db->db_size) 1094fa9e4066Sahrens dmu_buf_fill_done(db, tx); 1095fa9e4066Sahrens 1096fa9e4066Sahrens offset += tocpy; 1097fa9e4066Sahrens size -= tocpy; 1098fa9e4066Sahrens buf = (char *)buf + tocpy; 1099fa9e4066Sahrens } 1100b0c42cd4Sbzzz77 } 1101b0c42cd4Sbzzz77 1102b0c42cd4Sbzzz77 void 1103b0c42cd4Sbzzz77 dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, 1104b0c42cd4Sbzzz77 const void *buf, dmu_tx_t *tx) 1105b0c42cd4Sbzzz77 { 1106b0c42cd4Sbzzz77 dmu_buf_t **dbp; 1107b0c42cd4Sbzzz77 int numbufs; 1108b0c42cd4Sbzzz77 1109b0c42cd4Sbzzz77 if (size == 0) 1110b0c42cd4Sbzzz77 return; 1111b0c42cd4Sbzzz77 1112b0c42cd4Sbzzz77 VERIFY0(dmu_buf_hold_array(os, object, offset, size, 1113b0c42cd4Sbzzz77 FALSE, FTAG, &numbufs, &dbp)); 1114b0c42cd4Sbzzz77 dmu_write_impl(dbp, numbufs, offset, size, buf, tx); 1115b0c42cd4Sbzzz77 dmu_buf_rele_array(dbp, numbufs, FTAG); 1116b0c42cd4Sbzzz77 } 1117b0c42cd4Sbzzz77 1118b0c42cd4Sbzzz77 void 1119b0c42cd4Sbzzz77 dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, 1120b0c42cd4Sbzzz77 const void *buf, dmu_tx_t *tx) 1121b0c42cd4Sbzzz77 { 1122b0c42cd4Sbzzz77 dmu_buf_t **dbp; 1123b0c42cd4Sbzzz77 int numbufs; 1124b0c42cd4Sbzzz77 1125b0c42cd4Sbzzz77 if (size == 0) 1126b0c42cd4Sbzzz77 return; 1127b0c42cd4Sbzzz77 1128b0c42cd4Sbzzz77 VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size, 1129b0c42cd4Sbzzz77 FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH)); 1130b0c42cd4Sbzzz77 dmu_write_impl(dbp, numbufs, offset, size, buf, tx); 1131ea8dc4b6Seschrock dmu_buf_rele_array(dbp, numbufs, FTAG); 1132fa9e4066Sahrens } 1133fa9e4066Sahrens 11345cabbc6bSPrashanth Sreenivasa static int 11355cabbc6bSPrashanth Sreenivasa dmu_object_remap_one_indirect(objset_t *os, dnode_t *dn, 11365cabbc6bSPrashanth Sreenivasa uint64_t last_removal_txg, uint64_t offset) 11375cabbc6bSPrashanth Sreenivasa { 11385cabbc6bSPrashanth Sreenivasa uint64_t l1blkid = dbuf_whichblock(dn, 1, offset); 11395cabbc6bSPrashanth Sreenivasa int err = 0; 11405cabbc6bSPrashanth Sreenivasa 11415cabbc6bSPrashanth Sreenivasa rw_enter(&dn->dn_struct_rwlock, RW_READER); 11425cabbc6bSPrashanth Sreenivasa dmu_buf_impl_t *dbuf = dbuf_hold_level(dn, 1, l1blkid, FTAG); 11435cabbc6bSPrashanth Sreenivasa ASSERT3P(dbuf, !=, NULL); 11445cabbc6bSPrashanth Sreenivasa 11455cabbc6bSPrashanth Sreenivasa /* 11465cabbc6bSPrashanth Sreenivasa * If the block hasn't been written yet, this default will ensure 11475cabbc6bSPrashanth Sreenivasa * we don't try to remap it. 11485cabbc6bSPrashanth Sreenivasa */ 11495cabbc6bSPrashanth Sreenivasa uint64_t birth = UINT64_MAX; 11505cabbc6bSPrashanth Sreenivasa ASSERT3U(last_removal_txg, !=, UINT64_MAX); 11515cabbc6bSPrashanth Sreenivasa if (dbuf->db_blkptr != NULL) 11525cabbc6bSPrashanth Sreenivasa birth = dbuf->db_blkptr->blk_birth; 11535cabbc6bSPrashanth Sreenivasa rw_exit(&dn->dn_struct_rwlock); 11545cabbc6bSPrashanth Sreenivasa 11555cabbc6bSPrashanth Sreenivasa /* 11565cabbc6bSPrashanth Sreenivasa * If this L1 was already written after the last removal, then we've 11575cabbc6bSPrashanth Sreenivasa * already tried to remap it. 11585cabbc6bSPrashanth Sreenivasa */ 11595cabbc6bSPrashanth Sreenivasa if (birth <= last_removal_txg && 11605cabbc6bSPrashanth Sreenivasa dbuf_read(dbuf, NULL, DB_RF_MUST_SUCCEED) == 0 && 11615cabbc6bSPrashanth Sreenivasa dbuf_can_remap(dbuf)) { 11625cabbc6bSPrashanth Sreenivasa dmu_tx_t *tx = dmu_tx_create(os); 11635cabbc6bSPrashanth Sreenivasa dmu_tx_hold_remap_l1indirect(tx, dn->dn_object); 11645cabbc6bSPrashanth Sreenivasa err = dmu_tx_assign(tx, TXG_WAIT); 11655cabbc6bSPrashanth Sreenivasa if (err == 0) { 11665cabbc6bSPrashanth Sreenivasa (void) dbuf_dirty(dbuf, tx); 11675cabbc6bSPrashanth Sreenivasa dmu_tx_commit(tx); 11685cabbc6bSPrashanth Sreenivasa } else { 11695cabbc6bSPrashanth Sreenivasa dmu_tx_abort(tx); 11705cabbc6bSPrashanth Sreenivasa } 11715cabbc6bSPrashanth Sreenivasa } 11725cabbc6bSPrashanth Sreenivasa 11735cabbc6bSPrashanth Sreenivasa dbuf_rele(dbuf, FTAG); 11745cabbc6bSPrashanth Sreenivasa 11755cabbc6bSPrashanth Sreenivasa delay(zfs_object_remap_one_indirect_delay_ticks); 11765cabbc6bSPrashanth Sreenivasa 11775cabbc6bSPrashanth Sreenivasa return (err); 11785cabbc6bSPrashanth Sreenivasa } 11795cabbc6bSPrashanth Sreenivasa 11805cabbc6bSPrashanth Sreenivasa /* 11815cabbc6bSPrashanth Sreenivasa * Remap all blockpointers in the object, if possible, so that they reference 11825cabbc6bSPrashanth Sreenivasa * only concrete vdevs. 11835cabbc6bSPrashanth Sreenivasa * 11845cabbc6bSPrashanth Sreenivasa * To do this, iterate over the L0 blockpointers and remap any that reference 11855cabbc6bSPrashanth Sreenivasa * an indirect vdev. Note that we only examine L0 blockpointers; since we 11865cabbc6bSPrashanth Sreenivasa * cannot guarantee that we can remap all blockpointer anyways (due to split 11875cabbc6bSPrashanth Sreenivasa * blocks), we do not want to make the code unnecessarily complicated to 11885cabbc6bSPrashanth Sreenivasa * catch the unlikely case that there is an L1 block on an indirect vdev that 11895cabbc6bSPrashanth Sreenivasa * contains no indirect blockpointers. 11905cabbc6bSPrashanth Sreenivasa */ 11915cabbc6bSPrashanth Sreenivasa int 11925cabbc6bSPrashanth Sreenivasa dmu_object_remap_indirects(objset_t *os, uint64_t object, 11935cabbc6bSPrashanth Sreenivasa uint64_t last_removal_txg) 11945cabbc6bSPrashanth Sreenivasa { 11955cabbc6bSPrashanth Sreenivasa uint64_t offset, l1span; 11965cabbc6bSPrashanth Sreenivasa int err; 11975cabbc6bSPrashanth Sreenivasa dnode_t *dn; 11985cabbc6bSPrashanth Sreenivasa 11995cabbc6bSPrashanth Sreenivasa err = dnode_hold(os, object, FTAG, &dn); 12005cabbc6bSPrashanth Sreenivasa if (err != 0) { 12015cabbc6bSPrashanth Sreenivasa return (err); 12025cabbc6bSPrashanth Sreenivasa } 12035cabbc6bSPrashanth Sreenivasa 12045cabbc6bSPrashanth Sreenivasa if (dn->dn_nlevels <= 1) { 12055cabbc6bSPrashanth Sreenivasa if (issig(JUSTLOOKING) && issig(FORREAL)) { 12065cabbc6bSPrashanth Sreenivasa err = SET_ERROR(EINTR); 12075cabbc6bSPrashanth Sreenivasa } 12085cabbc6bSPrashanth Sreenivasa 12095cabbc6bSPrashanth Sreenivasa /* 12105cabbc6bSPrashanth Sreenivasa * If the dnode has no indirect blocks, we cannot dirty them. 12115cabbc6bSPrashanth Sreenivasa * We still want to remap the blkptr(s) in the dnode if 12125cabbc6bSPrashanth Sreenivasa * appropriate, so mark it as dirty. 12135cabbc6bSPrashanth Sreenivasa */ 12145cabbc6bSPrashanth Sreenivasa if (err == 0 && dnode_needs_remap(dn)) { 12155cabbc6bSPrashanth Sreenivasa dmu_tx_t *tx = dmu_tx_create(os); 12165cabbc6bSPrashanth Sreenivasa dmu_tx_hold_bonus(tx, dn->dn_object); 12175cabbc6bSPrashanth Sreenivasa if ((err = dmu_tx_assign(tx, TXG_WAIT)) == 0) { 12185cabbc6bSPrashanth Sreenivasa dnode_setdirty(dn, tx); 12195cabbc6bSPrashanth Sreenivasa dmu_tx_commit(tx); 12205cabbc6bSPrashanth Sreenivasa } else { 12215cabbc6bSPrashanth Sreenivasa dmu_tx_abort(tx); 12225cabbc6bSPrashanth Sreenivasa } 12235cabbc6bSPrashanth Sreenivasa } 12245cabbc6bSPrashanth Sreenivasa 12255cabbc6bSPrashanth Sreenivasa dnode_rele(dn, FTAG); 12265cabbc6bSPrashanth Sreenivasa return (err); 12275cabbc6bSPrashanth Sreenivasa } 12285cabbc6bSPrashanth Sreenivasa 12295cabbc6bSPrashanth Sreenivasa offset = 0; 12305cabbc6bSPrashanth Sreenivasa l1span = 1ULL << (dn->dn_indblkshift - SPA_BLKPTRSHIFT + 12315cabbc6bSPrashanth Sreenivasa dn->dn_datablkshift); 12325cabbc6bSPrashanth Sreenivasa /* 12335cabbc6bSPrashanth Sreenivasa * Find the next L1 indirect that is not a hole. 12345cabbc6bSPrashanth Sreenivasa */ 12355cabbc6bSPrashanth Sreenivasa while (dnode_next_offset(dn, 0, &offset, 2, 1, 0) == 0) { 12365cabbc6bSPrashanth Sreenivasa if (issig(JUSTLOOKING) && issig(FORREAL)) { 12375cabbc6bSPrashanth Sreenivasa err = SET_ERROR(EINTR); 12385cabbc6bSPrashanth Sreenivasa break; 12395cabbc6bSPrashanth Sreenivasa } 12405cabbc6bSPrashanth Sreenivasa if ((err = dmu_object_remap_one_indirect(os, dn, 12415cabbc6bSPrashanth Sreenivasa last_removal_txg, offset)) != 0) { 12425cabbc6bSPrashanth Sreenivasa break; 12435cabbc6bSPrashanth Sreenivasa } 12445cabbc6bSPrashanth Sreenivasa offset += l1span; 12455cabbc6bSPrashanth Sreenivasa } 12465cabbc6bSPrashanth Sreenivasa 12475cabbc6bSPrashanth Sreenivasa dnode_rele(dn, FTAG); 12485cabbc6bSPrashanth Sreenivasa return (err); 12495cabbc6bSPrashanth Sreenivasa } 12505cabbc6bSPrashanth Sreenivasa 125182c9918fSTim Haley void 125282c9918fSTim Haley dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, 125382c9918fSTim Haley dmu_tx_t *tx) 125482c9918fSTim Haley { 125582c9918fSTim Haley dmu_buf_t **dbp; 125682c9918fSTim Haley int numbufs, i; 125782c9918fSTim Haley 125882c9918fSTim Haley if (size == 0) 125982c9918fSTim Haley return; 126082c9918fSTim Haley 126182c9918fSTim Haley VERIFY(0 == dmu_buf_hold_array(os, object, offset, size, 126282c9918fSTim Haley FALSE, FTAG, &numbufs, &dbp)); 126382c9918fSTim Haley 126482c9918fSTim Haley for (i = 0; i < numbufs; i++) { 126582c9918fSTim Haley dmu_buf_t *db = dbp[i]; 126682c9918fSTim Haley 126782c9918fSTim Haley dmu_buf_will_not_fill(db, tx); 126882c9918fSTim Haley } 126982c9918fSTim Haley dmu_buf_rele_array(dbp, numbufs, FTAG); 127082c9918fSTim Haley } 127182c9918fSTim Haley 12725d7b4d43SMatthew Ahrens void 12735d7b4d43SMatthew Ahrens dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset, 12745d7b4d43SMatthew Ahrens void *data, uint8_t etype, uint8_t comp, int uncompressed_size, 12755d7b4d43SMatthew Ahrens int compressed_size, int byteorder, dmu_tx_t *tx) 12765d7b4d43SMatthew Ahrens { 12775d7b4d43SMatthew Ahrens dmu_buf_t *db; 12785d7b4d43SMatthew Ahrens 12795d7b4d43SMatthew Ahrens ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES); 12805d7b4d43SMatthew Ahrens ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS); 12815d7b4d43SMatthew Ahrens VERIFY0(dmu_buf_hold_noread(os, object, offset, 12825d7b4d43SMatthew Ahrens FTAG, &db)); 12835d7b4d43SMatthew Ahrens 12845d7b4d43SMatthew Ahrens dmu_buf_write_embedded(db, 12855d7b4d43SMatthew Ahrens data, (bp_embedded_type_t)etype, (enum zio_compress)comp, 12865d7b4d43SMatthew Ahrens uncompressed_size, compressed_size, byteorder, tx); 12875d7b4d43SMatthew Ahrens 12885d7b4d43SMatthew Ahrens dmu_buf_rele(db, FTAG); 12895d7b4d43SMatthew Ahrens } 12905d7b4d43SMatthew Ahrens 1291c242f9a0Schunli zhang - Sun Microsystems - Irvine United States /* 1292c242f9a0Schunli zhang - Sun Microsystems - Irvine United States * DMU support for xuio 1293c242f9a0Schunli zhang - Sun Microsystems - Irvine United States */ 1294c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kstat_t *xuio_ksp = NULL; 1295c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1296c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int 1297c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_init(xuio_t *xuio, int nblk) 1298c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1299c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv; 1300c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio_t *uio = &xuio->xu_uio; 1301c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1302c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio->uio_iovcnt = nblk; 1303c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP); 1304c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1305c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP); 1306c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv->cnt = nblk; 1307c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP); 1308c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv->iovp = uio->uio_iov; 1309c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIO_XUZC_PRIV(xuio) = priv; 1310c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1311c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (XUIO_XUZC_RW(xuio) == UIO_READ) 1312c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_INCR(xuiostat_onloan_rbuf, nblk); 1313c242f9a0Schunli zhang - Sun Microsystems - Irvine United States else 1314c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_INCR(xuiostat_onloan_wbuf, nblk); 1315c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1316c242f9a0Schunli zhang - Sun Microsystems - Irvine United States return (0); 1317c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1318c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1319c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void 1320c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_fini(xuio_t *xuio) 1321c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1322c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1323c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int nblk = priv->cnt; 1324c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1325c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kmem_free(priv->iovp, nblk * sizeof (iovec_t)); 1326c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kmem_free(priv->bufs, nblk * sizeof (arc_buf_t *)); 1327c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kmem_free(priv, sizeof (dmu_xuio_t)); 1328c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1329c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (XUIO_XUZC_RW(xuio) == UIO_READ) 1330c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk); 1331c242f9a0Schunli zhang - Sun Microsystems - Irvine United States else 1332c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk); 1333c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1334c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1335c242f9a0Schunli zhang - Sun Microsystems - Irvine United States /* 1336c242f9a0Schunli zhang - Sun Microsystems - Irvine United States * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf } 1337c242f9a0Schunli zhang - Sun Microsystems - Irvine United States * and increase priv->next by 1. 1338c242f9a0Schunli zhang - Sun Microsystems - Irvine United States */ 1339c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int 1340c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n) 1341c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1342c242f9a0Schunli zhang - Sun Microsystems - Irvine United States struct iovec *iov; 1343c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio_t *uio = &xuio->xu_uio; 1344c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1345c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int i = priv->next++; 1346c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1347c242f9a0Schunli zhang - Sun Microsystems - Irvine United States ASSERT(i < priv->cnt); 13485602294fSDan Kimmel ASSERT(off + n <= arc_buf_lsize(abuf)); 1349c242f9a0Schunli zhang - Sun Microsystems - Irvine United States iov = uio->uio_iov + i; 1350c242f9a0Schunli zhang - Sun Microsystems - Irvine United States iov->iov_base = (char *)abuf->b_data + off; 1351c242f9a0Schunli zhang - Sun Microsystems - Irvine United States iov->iov_len = n; 1352c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv->bufs[i] = abuf; 1353c242f9a0Schunli zhang - Sun Microsystems - Irvine United States return (0); 1354c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1355c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1356c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int 1357c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_cnt(xuio_t *xuio) 1358c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1359c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1360c242f9a0Schunli zhang - Sun Microsystems - Irvine United States return (priv->cnt); 1361c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1362c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1363c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t * 1364c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_arcbuf(xuio_t *xuio, int i) 1365c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1366c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1367c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1368c242f9a0Schunli zhang - Sun Microsystems - Irvine United States ASSERT(i < priv->cnt); 1369c242f9a0Schunli zhang - Sun Microsystems - Irvine United States return (priv->bufs[i]); 1370c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1371c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1372c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void 1373c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_clear(xuio_t *xuio, int i) 1374c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1375c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1376c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1377c242f9a0Schunli zhang - Sun Microsystems - Irvine United States ASSERT(i < priv->cnt); 1378c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv->bufs[i] = NULL; 1379c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1380c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1381c242f9a0Schunli zhang - Sun Microsystems - Irvine United States static void 1382c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_init(void) 1383c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1384c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc", 1385c242f9a0Schunli zhang - Sun Microsystems - Irvine United States KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t), 1386c242f9a0Schunli zhang - Sun Microsystems - Irvine United States KSTAT_FLAG_VIRTUAL); 1387c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (xuio_ksp != NULL) { 1388c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_ksp->ks_data = &xuio_stats; 1389c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kstat_install(xuio_ksp); 1390c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1391c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1392c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1393c242f9a0Schunli zhang - Sun Microsystems - Irvine United States static void 1394c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_fini(void) 1395c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1396c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (xuio_ksp != NULL) { 1397c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kstat_delete(xuio_ksp); 1398c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_ksp = NULL; 1399c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1400c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1401c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1402c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void 140399aa8b55SPrashanth Sreenivasa xuio_stat_wbuf_copied(void) 1404c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1405c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_wbuf_copied); 1406c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1407c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1408c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void 140999aa8b55SPrashanth Sreenivasa xuio_stat_wbuf_nocopy(void) 1410c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1411c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_wbuf_nocopy); 1412c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1413c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1414fa9e4066Sahrens #ifdef _KERNEL 14158dfe5547SRichard Yao int 1416f8554bb9SMatthew Ahrens dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) 1417feb08c6bSbillm { 1418feb08c6bSbillm dmu_buf_t **dbp; 1419feb08c6bSbillm int numbufs, i, err; 1420c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_t *xuio = NULL; 1421feb08c6bSbillm 1422feb08c6bSbillm /* 1423feb08c6bSbillm * NB: we could do this block-at-a-time, but it's nice 1424feb08c6bSbillm * to be reading in parallel. 1425feb08c6bSbillm */ 1426f8554bb9SMatthew Ahrens err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size, 1427f8554bb9SMatthew Ahrens TRUE, FTAG, &numbufs, &dbp, 0); 1428feb08c6bSbillm if (err) 1429feb08c6bSbillm return (err); 1430feb08c6bSbillm 1431c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (uio->uio_extflg == UIO_XUIO) 1432c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio = (xuio_t *)uio; 1433c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1434feb08c6bSbillm for (i = 0; i < numbufs; i++) { 1435feb08c6bSbillm int tocpy; 1436feb08c6bSbillm int bufoff; 1437feb08c6bSbillm dmu_buf_t *db = dbp[i]; 1438feb08c6bSbillm 1439feb08c6bSbillm ASSERT(size > 0); 1440feb08c6bSbillm 1441feb08c6bSbillm bufoff = uio->uio_loffset - db->db_offset; 1442feb08c6bSbillm tocpy = (int)MIN(db->db_size - bufoff, size); 1443feb08c6bSbillm 1444c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (xuio) { 1445c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 1446c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t *dbuf_abuf = dbi->db_buf; 1447c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t *abuf = dbuf_loan_arcbuf(dbi); 1448c242f9a0Schunli zhang - Sun Microsystems - Irvine United States err = dmu_xuio_add(xuio, abuf, bufoff, tocpy); 1449c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (!err) { 1450c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio->uio_resid -= tocpy; 1451c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio->uio_loffset += tocpy; 1452c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1453c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1454c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (abuf == dbuf_abuf) 1455c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_rbuf_nocopy); 1456c242f9a0Schunli zhang - Sun Microsystems - Irvine United States else 1457c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_rbuf_copied); 1458c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } else { 1459feb08c6bSbillm err = uiomove((char *)db->db_data + bufoff, tocpy, 1460feb08c6bSbillm UIO_READ, uio); 1461c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1462feb08c6bSbillm if (err) 1463feb08c6bSbillm break; 1464feb08c6bSbillm 1465feb08c6bSbillm size -= tocpy; 1466feb08c6bSbillm } 1467feb08c6bSbillm dmu_buf_rele_array(dbp, numbufs, FTAG); 1468feb08c6bSbillm 1469feb08c6bSbillm return (err); 1470feb08c6bSbillm } 1471feb08c6bSbillm 1472f8554bb9SMatthew Ahrens /* 1473f8554bb9SMatthew Ahrens * Read 'size' bytes into the uio buffer. 1474f8554bb9SMatthew Ahrens * From object zdb->db_object. 1475f8554bb9SMatthew Ahrens * Starting at offset uio->uio_loffset. 1476f8554bb9SMatthew Ahrens * 1477f8554bb9SMatthew Ahrens * If the caller already has a dbuf in the target object 1478f8554bb9SMatthew Ahrens * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(), 1479f8554bb9SMatthew Ahrens * because we don't have to find the dnode_t for the object. 1480f8554bb9SMatthew Ahrens */ 1481f8554bb9SMatthew Ahrens int 1482f8554bb9SMatthew Ahrens dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size) 1483f8554bb9SMatthew Ahrens { 1484f8554bb9SMatthew Ahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; 1485f8554bb9SMatthew Ahrens dnode_t *dn; 1486f8554bb9SMatthew Ahrens int err; 1487f8554bb9SMatthew Ahrens 1488f8554bb9SMatthew Ahrens if (size == 0) 1489f8554bb9SMatthew Ahrens return (0); 1490f8554bb9SMatthew Ahrens 1491f8554bb9SMatthew Ahrens DB_DNODE_ENTER(db); 1492f8554bb9SMatthew Ahrens dn = DB_DNODE(db); 1493f8554bb9SMatthew Ahrens err = dmu_read_uio_dnode(dn, uio, size); 1494f8554bb9SMatthew Ahrens DB_DNODE_EXIT(db); 1495f8554bb9SMatthew Ahrens 1496f8554bb9SMatthew Ahrens return (err); 1497f8554bb9SMatthew Ahrens } 1498f8554bb9SMatthew Ahrens 1499f8554bb9SMatthew Ahrens /* 1500f8554bb9SMatthew Ahrens * Read 'size' bytes into the uio buffer. 1501f8554bb9SMatthew Ahrens * From the specified object 1502f8554bb9SMatthew Ahrens * Starting at offset uio->uio_loffset. 1503f8554bb9SMatthew Ahrens */ 1504f8554bb9SMatthew Ahrens int 1505f8554bb9SMatthew Ahrens dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size) 1506f8554bb9SMatthew Ahrens { 1507f8554bb9SMatthew Ahrens dnode_t *dn; 1508f8554bb9SMatthew Ahrens int err; 1509f8554bb9SMatthew Ahrens 1510f8554bb9SMatthew Ahrens if (size == 0) 1511f8554bb9SMatthew Ahrens return (0); 1512f8554bb9SMatthew Ahrens 1513f8554bb9SMatthew Ahrens err = dnode_hold(os, object, FTAG, &dn); 1514f8554bb9SMatthew Ahrens if (err) 1515f8554bb9SMatthew Ahrens return (err); 1516f8554bb9SMatthew Ahrens 1517f8554bb9SMatthew Ahrens err = dmu_read_uio_dnode(dn, uio, size); 1518f8554bb9SMatthew Ahrens 1519f8554bb9SMatthew Ahrens dnode_rele(dn, FTAG); 1520f8554bb9SMatthew Ahrens 1521f8554bb9SMatthew Ahrens return (err); 1522f8554bb9SMatthew Ahrens } 1523f8554bb9SMatthew Ahrens 15248dfe5547SRichard Yao int 152594d1a210STim Haley dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx) 1526fa9e4066Sahrens { 1527fa9e4066Sahrens dmu_buf_t **dbp; 152894d1a210STim Haley int numbufs; 1529fa9e4066Sahrens int err = 0; 153094d1a210STim Haley int i; 1531fa9e4066Sahrens 153294d1a210STim Haley err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size, 153394d1a210STim Haley FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH); 1534ea8dc4b6Seschrock if (err) 1535ea8dc4b6Seschrock return (err); 1536fa9e4066Sahrens 1537fa9e4066Sahrens for (i = 0; i < numbufs; i++) { 1538fa9e4066Sahrens int tocpy; 1539fa9e4066Sahrens int bufoff; 1540fa9e4066Sahrens dmu_buf_t *db = dbp[i]; 1541fa9e4066Sahrens 1542fa9e4066Sahrens ASSERT(size > 0); 1543fa9e4066Sahrens 1544feb08c6bSbillm bufoff = uio->uio_loffset - db->db_offset; 1545fa9e4066Sahrens tocpy = (int)MIN(db->db_size - bufoff, size); 1546fa9e4066Sahrens 1547fa9e4066Sahrens ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); 1548fa9e4066Sahrens 1549fa9e4066Sahrens if (tocpy == db->db_size) 1550fa9e4066Sahrens dmu_buf_will_fill(db, tx); 1551fa9e4066Sahrens else 1552fa9e4066Sahrens dmu_buf_will_dirty(db, tx); 1553fa9e4066Sahrens 1554fa9e4066Sahrens /* 1555fa9e4066Sahrens * XXX uiomove could block forever (eg. nfs-backed 1556fa9e4066Sahrens * pages). There needs to be a uiolockdown() function 1557fa9e4066Sahrens * to lock the pages in memory, so that uiomove won't 1558fa9e4066Sahrens * block. 1559fa9e4066Sahrens */ 1560fa9e4066Sahrens err = uiomove((char *)db->db_data + bufoff, tocpy, 1561fa9e4066Sahrens UIO_WRITE, uio); 1562fa9e4066Sahrens 1563fa9e4066Sahrens if (tocpy == db->db_size) 1564fa9e4066Sahrens dmu_buf_fill_done(db, tx); 1565fa9e4066Sahrens 1566fa9e4066Sahrens if (err) 1567fa9e4066Sahrens break; 1568fa9e4066Sahrens 1569fa9e4066Sahrens size -= tocpy; 1570fa9e4066Sahrens } 157194d1a210STim Haley 1572ea8dc4b6Seschrock dmu_buf_rele_array(dbp, numbufs, FTAG); 1573fa9e4066Sahrens return (err); 1574fa9e4066Sahrens } 157544eda4d7Smaybee 1576f8554bb9SMatthew Ahrens /* 1577f8554bb9SMatthew Ahrens * Write 'size' bytes from the uio buffer. 1578f8554bb9SMatthew Ahrens * To object zdb->db_object. 1579f8554bb9SMatthew Ahrens * Starting at offset uio->uio_loffset. 1580f8554bb9SMatthew Ahrens * 1581f8554bb9SMatthew Ahrens * If the caller already has a dbuf in the target object 1582f8554bb9SMatthew Ahrens * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(), 1583f8554bb9SMatthew Ahrens * because we don't have to find the dnode_t for the object. 1584f8554bb9SMatthew Ahrens */ 158544eda4d7Smaybee int 158694d1a210STim Haley dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size, 158794d1a210STim Haley dmu_tx_t *tx) 158894d1a210STim Haley { 1589744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; 1590744947dcSTom Erickson dnode_t *dn; 1591744947dcSTom Erickson int err; 1592744947dcSTom Erickson 159394d1a210STim Haley if (size == 0) 159494d1a210STim Haley return (0); 159594d1a210STim Haley 1596744947dcSTom Erickson DB_DNODE_ENTER(db); 1597744947dcSTom Erickson dn = DB_DNODE(db); 1598744947dcSTom Erickson err = dmu_write_uio_dnode(dn, uio, size, tx); 1599744947dcSTom Erickson DB_DNODE_EXIT(db); 1600744947dcSTom Erickson 1601744947dcSTom Erickson return (err); 160294d1a210STim Haley } 160394d1a210STim Haley 1604f8554bb9SMatthew Ahrens /* 1605f8554bb9SMatthew Ahrens * Write 'size' bytes from the uio buffer. 1606f8554bb9SMatthew Ahrens * To the specified object. 1607f8554bb9SMatthew Ahrens * Starting at offset uio->uio_loffset. 1608f8554bb9SMatthew Ahrens */ 160994d1a210STim Haley int 161094d1a210STim Haley dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size, 161194d1a210STim Haley dmu_tx_t *tx) 161294d1a210STim Haley { 161394d1a210STim Haley dnode_t *dn; 161494d1a210STim Haley int err; 161594d1a210STim Haley 161694d1a210STim Haley if (size == 0) 161794d1a210STim Haley return (0); 161894d1a210STim Haley 161994d1a210STim Haley err = dnode_hold(os, object, FTAG, &dn); 162094d1a210STim Haley if (err) 162194d1a210STim Haley return (err); 162294d1a210STim Haley 162394d1a210STim Haley err = dmu_write_uio_dnode(dn, uio, size, tx); 162494d1a210STim Haley 162594d1a210STim Haley dnode_rele(dn, FTAG); 162694d1a210STim Haley 162794d1a210STim Haley return (err); 162894d1a210STim Haley } 162994d1a210STim Haley 163094d1a210STim Haley int 163144eda4d7Smaybee dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, 163244eda4d7Smaybee page_t *pp, dmu_tx_t *tx) 163344eda4d7Smaybee { 163444eda4d7Smaybee dmu_buf_t **dbp; 163544eda4d7Smaybee int numbufs, i; 163644eda4d7Smaybee int err; 163744eda4d7Smaybee 163844eda4d7Smaybee if (size == 0) 163944eda4d7Smaybee return (0); 164044eda4d7Smaybee 164144eda4d7Smaybee err = dmu_buf_hold_array(os, object, offset, size, 164244eda4d7Smaybee FALSE, FTAG, &numbufs, &dbp); 164344eda4d7Smaybee if (err) 164444eda4d7Smaybee return (err); 164544eda4d7Smaybee 164644eda4d7Smaybee for (i = 0; i < numbufs; i++) { 164744eda4d7Smaybee int tocpy, copied, thiscpy; 164844eda4d7Smaybee int bufoff; 164944eda4d7Smaybee dmu_buf_t *db = dbp[i]; 165044eda4d7Smaybee caddr_t va; 165144eda4d7Smaybee 165244eda4d7Smaybee ASSERT(size > 0); 165344eda4d7Smaybee ASSERT3U(db->db_size, >=, PAGESIZE); 165444eda4d7Smaybee 165544eda4d7Smaybee bufoff = offset - db->db_offset; 165644eda4d7Smaybee tocpy = (int)MIN(db->db_size - bufoff, size); 165744eda4d7Smaybee 165844eda4d7Smaybee ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); 165944eda4d7Smaybee 166044eda4d7Smaybee if (tocpy == db->db_size) 166144eda4d7Smaybee dmu_buf_will_fill(db, tx); 166244eda4d7Smaybee else 166344eda4d7Smaybee dmu_buf_will_dirty(db, tx); 166444eda4d7Smaybee 166544eda4d7Smaybee for (copied = 0; copied < tocpy; copied += PAGESIZE) { 166644eda4d7Smaybee ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff); 166744eda4d7Smaybee thiscpy = MIN(PAGESIZE, tocpy - copied); 16680fab61baSJonathan W Adams va = zfs_map_page(pp, S_READ); 166944eda4d7Smaybee bcopy(va, (char *)db->db_data + bufoff, thiscpy); 16700fab61baSJonathan W Adams zfs_unmap_page(pp, va); 167144eda4d7Smaybee pp = pp->p_next; 167244eda4d7Smaybee bufoff += PAGESIZE; 167344eda4d7Smaybee } 167444eda4d7Smaybee 167544eda4d7Smaybee if (tocpy == db->db_size) 167644eda4d7Smaybee dmu_buf_fill_done(db, tx); 167744eda4d7Smaybee 167844eda4d7Smaybee offset += tocpy; 167944eda4d7Smaybee size -= tocpy; 168044eda4d7Smaybee } 168144eda4d7Smaybee dmu_buf_rele_array(dbp, numbufs, FTAG); 168244eda4d7Smaybee return (err); 168344eda4d7Smaybee } 1684fa9e4066Sahrens #endif 1685fa9e4066Sahrens 16862fdbea25SAleksandr Guzovskiy /* 16872fdbea25SAleksandr Guzovskiy * Allocate a loaned anonymous arc buffer. 16882fdbea25SAleksandr Guzovskiy */ 16892fdbea25SAleksandr Guzovskiy arc_buf_t * 16902fdbea25SAleksandr Guzovskiy dmu_request_arcbuf(dmu_buf_t *handle, int size) 16912fdbea25SAleksandr Guzovskiy { 1692744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle; 16932fdbea25SAleksandr Guzovskiy 16945602294fSDan Kimmel return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size)); 16952fdbea25SAleksandr Guzovskiy } 16962fdbea25SAleksandr Guzovskiy 16972fdbea25SAleksandr Guzovskiy /* 16982fdbea25SAleksandr Guzovskiy * Free a loaned arc buffer. 16992fdbea25SAleksandr Guzovskiy */ 17002fdbea25SAleksandr Guzovskiy void 17012fdbea25SAleksandr Guzovskiy dmu_return_arcbuf(arc_buf_t *buf) 17022fdbea25SAleksandr Guzovskiy { 17032fdbea25SAleksandr Guzovskiy arc_return_buf(buf, FTAG); 1704dcbf3bd6SGeorge Wilson arc_buf_destroy(buf, FTAG); 17052fdbea25SAleksandr Guzovskiy } 17062fdbea25SAleksandr Guzovskiy 1707eb633035STom Caputi void 1708eb633035STom Caputi dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset, 1709eb633035STom Caputi dmu_buf_t *handle, dmu_tx_t *tx) 1710eb633035STom Caputi { 1711eb633035STom Caputi dmu_buf_t *dst_handle; 1712eb633035STom Caputi dmu_buf_impl_t *dstdb; 1713eb633035STom Caputi dmu_buf_impl_t *srcdb = (dmu_buf_impl_t *)handle; 1714a60ca23dSTom Caputi dmu_object_type_t type; 1715eb633035STom Caputi arc_buf_t *abuf; 1716eb633035STom Caputi uint64_t datalen; 1717eb633035STom Caputi boolean_t byteorder; 1718eb633035STom Caputi uint8_t salt[ZIO_DATA_SALT_LEN]; 1719eb633035STom Caputi uint8_t iv[ZIO_DATA_IV_LEN]; 1720eb633035STom Caputi uint8_t mac[ZIO_DATA_MAC_LEN]; 1721eb633035STom Caputi 1722eb633035STom Caputi ASSERT3P(srcdb->db_buf, !=, NULL); 1723eb633035STom Caputi 1724eb633035STom Caputi /* hold the db that we want to write to */ 1725eb633035STom Caputi VERIFY0(dmu_buf_hold(os, object, offset, FTAG, &dst_handle, 1726eb633035STom Caputi DMU_READ_NO_DECRYPT)); 1727eb633035STom Caputi dstdb = (dmu_buf_impl_t *)dst_handle; 1728eb633035STom Caputi datalen = arc_buf_size(srcdb->db_buf); 1729eb633035STom Caputi 1730a60ca23dSTom Caputi DB_DNODE_ENTER(dstdb); 1731a60ca23dSTom Caputi type = DB_DNODE(dstdb)->dn_type; 1732a60ca23dSTom Caputi DB_DNODE_EXIT(dstdb); 1733a60ca23dSTom Caputi 1734eb633035STom Caputi /* allocated an arc buffer that matches the type of srcdb->db_buf */ 1735eb633035STom Caputi if (arc_is_encrypted(srcdb->db_buf)) { 1736eb633035STom Caputi arc_get_raw_params(srcdb->db_buf, &byteorder, salt, iv, mac); 1737eb633035STom Caputi abuf = arc_loan_raw_buf(os->os_spa, dmu_objset_id(os), 1738a60ca23dSTom Caputi byteorder, salt, iv, mac, type, 1739eb633035STom Caputi datalen, arc_buf_lsize(srcdb->db_buf), 1740eb633035STom Caputi arc_get_compression(srcdb->db_buf)); 1741eb633035STom Caputi } else { 1742eb633035STom Caputi /* we won't get a compressed db back from dmu_buf_hold() */ 1743eb633035STom Caputi ASSERT3U(arc_get_compression(srcdb->db_buf), 1744eb633035STom Caputi ==, ZIO_COMPRESS_OFF); 1745eb633035STom Caputi abuf = arc_loan_buf(os->os_spa, 1746a60ca23dSTom Caputi DMU_OT_IS_METADATA(type), datalen); 1747eb633035STom Caputi } 1748eb633035STom Caputi 1749eb633035STom Caputi ASSERT3U(datalen, ==, arc_buf_size(abuf)); 1750eb633035STom Caputi 1751eb633035STom Caputi /* copy the data to the new buffer and assign it to the dstdb */ 1752eb633035STom Caputi bcopy(srcdb->db_buf->b_data, abuf->b_data, datalen); 1753eb633035STom Caputi dbuf_assign_arcbuf(dstdb, abuf, tx); 1754eb633035STom Caputi dmu_buf_rele(dst_handle, FTAG); 1755eb633035STom Caputi } 1756eb633035STom Caputi 17572fdbea25SAleksandr Guzovskiy /* 17582fdbea25SAleksandr Guzovskiy * When possible directly assign passed loaned arc buffer to a dbuf. 17592fdbea25SAleksandr Guzovskiy * If this is not possible copy the contents of passed arc buf via 17602fdbea25SAleksandr Guzovskiy * dmu_write(). 17612fdbea25SAleksandr Guzovskiy */ 1762eb633035STom Caputi int 1763eb633035STom Caputi dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf, 17642fdbea25SAleksandr Guzovskiy dmu_tx_t *tx) 17652fdbea25SAleksandr Guzovskiy { 17662fdbea25SAleksandr Guzovskiy dmu_buf_impl_t *db; 1767eb633035STom Caputi objset_t *os = dn->dn_objset; 1768eb633035STom Caputi uint64_t object = dn->dn_object; 17695602294fSDan Kimmel uint32_t blksz = (uint32_t)arc_buf_lsize(buf); 17702fdbea25SAleksandr Guzovskiy uint64_t blkid; 17712fdbea25SAleksandr Guzovskiy 17722fdbea25SAleksandr Guzovskiy rw_enter(&dn->dn_struct_rwlock, RW_READER); 1773a2cdcdd2SPaul Dagnelie blkid = dbuf_whichblock(dn, 0, offset); 1774eb633035STom Caputi db = dbuf_hold(dn, blkid, FTAG); 1775eb633035STom Caputi if (db == NULL) 1776eb633035STom Caputi return (SET_ERROR(EIO)); 17772fdbea25SAleksandr Guzovskiy rw_exit(&dn->dn_struct_rwlock); 17782fdbea25SAleksandr Guzovskiy 17798a904709SMatthew Ahrens /* 17808a904709SMatthew Ahrens * We can only assign if the offset is aligned, the arc buf is the 17815602294fSDan Kimmel * same size as the dbuf, and the dbuf is not metadata. 17828a904709SMatthew Ahrens */ 17835602294fSDan Kimmel if (offset == db->db.db_offset && blksz == db->db.db_size) { 17842fdbea25SAleksandr Guzovskiy dbuf_assign_arcbuf(db, buf, tx); 17852fdbea25SAleksandr Guzovskiy dbuf_rele(db, FTAG); 17862fdbea25SAleksandr Guzovskiy } else { 17875602294fSDan Kimmel /* compressed bufs must always be assignable to their dbuf */ 17885602294fSDan Kimmel ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF); 17895602294fSDan Kimmel ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED)); 17905602294fSDan Kimmel 1791744947dcSTom Erickson os = dn->dn_objset; 1792744947dcSTom Erickson object = dn->dn_object; 17932fdbea25SAleksandr Guzovskiy dbuf_rele(db, FTAG); 1794744947dcSTom Erickson dmu_write(os, object, offset, blksz, buf->b_data, tx); 17952fdbea25SAleksandr Guzovskiy dmu_return_arcbuf(buf); 1796c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_wbuf_copied); 17972fdbea25SAleksandr Guzovskiy } 1798eb633035STom Caputi 1799eb633035STom Caputi return (0); 18002fdbea25SAleksandr Guzovskiy } 18012fdbea25SAleksandr Guzovskiy 1802eb633035STom Caputi int 1803eb633035STom Caputi dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf, 18048dfe5547SRichard Yao dmu_tx_t *tx) 18058dfe5547SRichard Yao { 1806eb633035STom Caputi int err; 18078dfe5547SRichard Yao dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle; 18088dfe5547SRichard Yao 18098dfe5547SRichard Yao DB_DNODE_ENTER(dbuf); 1810eb633035STom Caputi err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx); 18118dfe5547SRichard Yao DB_DNODE_EXIT(dbuf); 1812eb633035STom Caputi 1813eb633035STom Caputi return (err); 18148dfe5547SRichard Yao } 18158dfe5547SRichard Yao 1816c5c6ffa0Smaybee typedef struct { 1817b24ab676SJeff Bonwick dbuf_dirty_record_t *dsa_dr; 1818b24ab676SJeff Bonwick dmu_sync_cb_t *dsa_done; 1819b24ab676SJeff Bonwick zgd_t *dsa_zgd; 1820b24ab676SJeff Bonwick dmu_tx_t *dsa_tx; 1821c717a561Smaybee } dmu_sync_arg_t; 1822c5c6ffa0Smaybee 1823c5c6ffa0Smaybee /* ARGSUSED */ 1824c5c6ffa0Smaybee static void 1825e14bb325SJeff Bonwick dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg) 1826e14bb325SJeff Bonwick { 1827b24ab676SJeff Bonwick dmu_sync_arg_t *dsa = varg; 1828b24ab676SJeff Bonwick dmu_buf_t *db = dsa->dsa_zgd->zgd_db; 1829e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 1830975c32a0SNeil Perrin 1831b24ab676SJeff Bonwick if (zio->io_error == 0) { 1832b24ab676SJeff Bonwick if (BP_IS_HOLE(bp)) { 1833b24ab676SJeff Bonwick /* 1834b24ab676SJeff Bonwick * A block of zeros may compress to a hole, but the 1835b24ab676SJeff Bonwick * block size still needs to be known for replay. 1836b24ab676SJeff Bonwick */ 1837b24ab676SJeff Bonwick BP_SET_LSIZE(bp, db->db_size); 18385d7b4d43SMatthew Ahrens } else if (!BP_IS_EMBEDDED(bp)) { 1839e14bb325SJeff Bonwick ASSERT(BP_GET_LEVEL(bp) == 0); 1840eb633035STom Caputi BP_SET_FILL(bp, 1); 1841e14bb325SJeff Bonwick } 1842e14bb325SJeff Bonwick } 1843b24ab676SJeff Bonwick } 1844b24ab676SJeff Bonwick 1845b24ab676SJeff Bonwick static void 1846b24ab676SJeff Bonwick dmu_sync_late_arrival_ready(zio_t *zio) 1847b24ab676SJeff Bonwick { 1848b24ab676SJeff Bonwick dmu_sync_ready(zio, NULL, zio->io_private); 1849b24ab676SJeff Bonwick } 1850e14bb325SJeff Bonwick 1851e14bb325SJeff Bonwick /* ARGSUSED */ 1852e14bb325SJeff Bonwick static void 1853c5c6ffa0Smaybee dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg) 1854c5c6ffa0Smaybee { 1855b24ab676SJeff Bonwick dmu_sync_arg_t *dsa = varg; 1856b24ab676SJeff Bonwick dbuf_dirty_record_t *dr = dsa->dsa_dr; 1857c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 1858cab3a55eSPrakash Surya zgd_t *zgd = dsa->dsa_zgd; 1859cab3a55eSPrakash Surya 1860cab3a55eSPrakash Surya /* 1861cab3a55eSPrakash Surya * Record the vdev(s) backing this blkptr so they can be flushed after 1862cab3a55eSPrakash Surya * the writes for the lwb have completed. 1863cab3a55eSPrakash Surya */ 1864cab3a55eSPrakash Surya if (zio->io_error == 0) { 1865cab3a55eSPrakash Surya zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp); 1866cab3a55eSPrakash Surya } 1867c5c6ffa0Smaybee 1868b50a0fe0SNeil Perrin mutex_enter(&db->db_mtx); 1869b50a0fe0SNeil Perrin ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC); 1870b24ab676SJeff Bonwick if (zio->io_error == 0) { 187180901aeaSGeorge Wilson dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE); 187280901aeaSGeorge Wilson if (dr->dt.dl.dr_nopwrite) { 187380901aeaSGeorge Wilson blkptr_t *bp = zio->io_bp; 187480901aeaSGeorge Wilson blkptr_t *bp_orig = &zio->io_bp_orig; 187580901aeaSGeorge Wilson uint8_t chksum = BP_GET_CHECKSUM(bp_orig); 187680901aeaSGeorge Wilson 187780901aeaSGeorge Wilson ASSERT(BP_EQUAL(bp, bp_orig)); 1878b7edcb94SMatthew Ahrens VERIFY(BP_EQUAL(bp, db->db_blkptr)); 187980901aeaSGeorge Wilson ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF); 188045818ee1SMatthew Ahrens ASSERT(zio_checksum_table[chksum].ci_flags & 188145818ee1SMatthew Ahrens ZCHECKSUM_FLAG_NOPWRITE); 188280901aeaSGeorge Wilson } 1883b24ab676SJeff Bonwick dr->dt.dl.dr_overridden_by = *zio->io_bp; 1884b24ab676SJeff Bonwick dr->dt.dl.dr_override_state = DR_OVERRIDDEN; 1885b24ab676SJeff Bonwick dr->dt.dl.dr_copies = zio->io_prop.zp_copies; 188670163ac5SPrakash Surya 188770163ac5SPrakash Surya /* 188870163ac5SPrakash Surya * Old style holes are filled with all zeros, whereas 188970163ac5SPrakash Surya * new-style holes maintain their lsize, type, level, 189070163ac5SPrakash Surya * and birth time (see zio_write_compress). While we 189170163ac5SPrakash Surya * need to reset the BP_SET_LSIZE() call that happened 189270163ac5SPrakash Surya * in dmu_sync_ready for old style holes, we do *not* 189370163ac5SPrakash Surya * want to wipe out the information contained in new 189470163ac5SPrakash Surya * style holes. Thus, only zero out the block pointer if 189570163ac5SPrakash Surya * it's an old style hole. 189670163ac5SPrakash Surya */ 189770163ac5SPrakash Surya if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) && 189870163ac5SPrakash Surya dr->dt.dl.dr_overridden_by.blk_birth == 0) 1899b50a0fe0SNeil Perrin BP_ZERO(&dr->dt.dl.dr_overridden_by); 1900b24ab676SJeff Bonwick } else { 1901b24ab676SJeff Bonwick dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 1902b24ab676SJeff Bonwick } 1903c5c6ffa0Smaybee cv_broadcast(&db->db_changed); 1904b50a0fe0SNeil Perrin mutex_exit(&db->db_mtx); 1905b50a0fe0SNeil Perrin 1906b24ab676SJeff Bonwick dsa->dsa_done(dsa->dsa_zgd, zio->io_error); 1907c717a561Smaybee 1908b24ab676SJeff Bonwick kmem_free(dsa, sizeof (*dsa)); 1909b24ab676SJeff Bonwick } 1910b24ab676SJeff Bonwick 1911b24ab676SJeff Bonwick static void 1912b24ab676SJeff Bonwick dmu_sync_late_arrival_done(zio_t *zio) 1913b24ab676SJeff Bonwick { 1914b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 1915b24ab676SJeff Bonwick dmu_sync_arg_t *dsa = zio->io_private; 191680901aeaSGeorge Wilson blkptr_t *bp_orig = &zio->io_bp_orig; 1917cab3a55eSPrakash Surya zgd_t *zgd = dsa->dsa_zgd; 1918b24ab676SJeff Bonwick 1919cab3a55eSPrakash Surya if (zio->io_error == 0) { 1920cab3a55eSPrakash Surya /* 1921cab3a55eSPrakash Surya * Record the vdev(s) backing this blkptr so they can be 1922cab3a55eSPrakash Surya * flushed after the writes for the lwb have completed. 1923cab3a55eSPrakash Surya */ 1924cab3a55eSPrakash Surya zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp); 1925cab3a55eSPrakash Surya 1926cab3a55eSPrakash Surya if (!BP_IS_HOLE(bp)) { 1927b7edcb94SMatthew Ahrens ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE)); 192880901aeaSGeorge Wilson ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig)); 1929b24ab676SJeff Bonwick ASSERT(zio->io_bp->blk_birth == zio->io_txg); 1930b24ab676SJeff Bonwick ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa)); 1931b24ab676SJeff Bonwick zio_free(zio->io_spa, zio->io_txg, zio->io_bp); 1932b24ab676SJeff Bonwick } 1933cab3a55eSPrakash Surya } 1934b24ab676SJeff Bonwick 1935b24ab676SJeff Bonwick dmu_tx_commit(dsa->dsa_tx); 1936b24ab676SJeff Bonwick 1937b24ab676SJeff Bonwick dsa->dsa_done(dsa->dsa_zgd, zio->io_error); 1938b24ab676SJeff Bonwick 1939770499e1SDan Kimmel abd_put(zio->io_abd); 1940b24ab676SJeff Bonwick kmem_free(dsa, sizeof (*dsa)); 1941b24ab676SJeff Bonwick } 1942b24ab676SJeff Bonwick 1943b24ab676SJeff Bonwick static int 1944b24ab676SJeff Bonwick dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd, 19457802d7bfSMatthew Ahrens zio_prop_t *zp, zbookmark_phys_t *zb) 1946b24ab676SJeff Bonwick { 1947b24ab676SJeff Bonwick dmu_sync_arg_t *dsa; 1948b24ab676SJeff Bonwick dmu_tx_t *tx; 1949b24ab676SJeff Bonwick 1950b24ab676SJeff Bonwick tx = dmu_tx_create(os); 1951b24ab676SJeff Bonwick dmu_tx_hold_space(tx, zgd->zgd_db->db_size); 19526e1f5caaSNeil Perrin if (dmu_tx_assign(tx, TXG_WAIT) != 0) { 1953b24ab676SJeff Bonwick dmu_tx_abort(tx); 1954be6fd75aSMatthew Ahrens /* Make zl_get_data do txg_waited_synced() */ 1955be6fd75aSMatthew Ahrens return (SET_ERROR(EIO)); 1956b24ab676SJeff Bonwick } 1957b24ab676SJeff Bonwick 19581271e4b1SPrakash Surya /* 19591271e4b1SPrakash Surya * In order to prevent the zgd's lwb from being free'd prior to 19601271e4b1SPrakash Surya * dmu_sync_late_arrival_done() being called, we have to ensure 19611271e4b1SPrakash Surya * the lwb's "max txg" takes this tx's txg into account. 19621271e4b1SPrakash Surya */ 19631271e4b1SPrakash Surya zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx)); 19641271e4b1SPrakash Surya 1965b24ab676SJeff Bonwick dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); 1966b24ab676SJeff Bonwick dsa->dsa_dr = NULL; 1967b24ab676SJeff Bonwick dsa->dsa_done = done; 1968b24ab676SJeff Bonwick dsa->dsa_zgd = zgd; 1969b24ab676SJeff Bonwick dsa->dsa_tx = tx; 1970b24ab676SJeff Bonwick 1971b7edcb94SMatthew Ahrens /* 1972b7edcb94SMatthew Ahrens * Since we are currently syncing this txg, it's nontrivial to 1973b7edcb94SMatthew Ahrens * determine what BP to nopwrite against, so we disable nopwrite. 1974b7edcb94SMatthew Ahrens * 1975b7edcb94SMatthew Ahrens * When syncing, the db_blkptr is initially the BP of the previous 1976b7edcb94SMatthew Ahrens * txg. We can not nopwrite against it because it will be changed 1977b7edcb94SMatthew Ahrens * (this is similar to the non-late-arrival case where the dbuf is 1978b7edcb94SMatthew Ahrens * dirty in a future txg). 1979b7edcb94SMatthew Ahrens * 1980b7edcb94SMatthew Ahrens * Then dbuf_write_ready() sets bp_blkptr to the location we will write. 1981b7edcb94SMatthew Ahrens * We can not nopwrite against it because although the BP will not 1982b7edcb94SMatthew Ahrens * (typically) be changed, the data has not yet been persisted to this 1983b7edcb94SMatthew Ahrens * location. 1984b7edcb94SMatthew Ahrens * 1985b7edcb94SMatthew Ahrens * Finally, when dbuf_write_done() is called, it is theoretically 1986b7edcb94SMatthew Ahrens * possible to always nopwrite, because the data that was written in 1987b7edcb94SMatthew Ahrens * this txg is the same data that we are trying to write. However we 1988b7edcb94SMatthew Ahrens * would need to check that this dbuf is not dirty in any future 1989b7edcb94SMatthew Ahrens * txg's (as we do in the normal dmu_sync() path). For simplicity, we 1990b7edcb94SMatthew Ahrens * don't nopwrite in this case. 1991b7edcb94SMatthew Ahrens */ 1992b7edcb94SMatthew Ahrens zp->zp_nopwrite = B_FALSE; 1993b7edcb94SMatthew Ahrens 19945602294fSDan Kimmel zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp, 1995770499e1SDan Kimmel abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size), 1996770499e1SDan Kimmel zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp, 1997770499e1SDan Kimmel dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done, 1998770499e1SDan Kimmel dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb)); 1999b24ab676SJeff Bonwick 2000b24ab676SJeff Bonwick return (0); 2001c5c6ffa0Smaybee } 2002c5c6ffa0Smaybee 2003fa9e4066Sahrens /* 2004c5c6ffa0Smaybee * Intent log support: sync the block associated with db to disk. 2005c5c6ffa0Smaybee * N.B. and XXX: the caller is responsible for making sure that the 2006c5c6ffa0Smaybee * data isn't changing while dmu_sync() is writing it. 2007fa9e4066Sahrens * 2008fa9e4066Sahrens * Return values: 2009fa9e4066Sahrens * 201080901aeaSGeorge Wilson * EEXIST: this txg has already been synced, so there's nothing to do. 2011fa9e4066Sahrens * The caller should not log the write. 2012fa9e4066Sahrens * 2013fa9e4066Sahrens * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do. 2014fa9e4066Sahrens * The caller should not log the write. 2015fa9e4066Sahrens * 2016c5c6ffa0Smaybee * EALREADY: this block is already in the process of being synced. 2017c5c6ffa0Smaybee * The caller should track its progress (somehow). 2018fa9e4066Sahrens * 2019b24ab676SJeff Bonwick * EIO: could not do the I/O. 2020b24ab676SJeff Bonwick * The caller should do a txg_wait_synced(). 2021fa9e4066Sahrens * 2022b24ab676SJeff Bonwick * 0: the I/O has been initiated. 2023b24ab676SJeff Bonwick * The caller should log this blkptr in the done callback. 2024b24ab676SJeff Bonwick * It is possible that the I/O will fail, in which case 2025b24ab676SJeff Bonwick * the error will be reported to the done callback and 2026b24ab676SJeff Bonwick * propagated to pio from zio_done(). 2027fa9e4066Sahrens */ 2028fa9e4066Sahrens int 2029b24ab676SJeff Bonwick dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd) 2030fa9e4066Sahrens { 2031b24ab676SJeff Bonwick dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db; 2032503ad85cSMatthew Ahrens objset_t *os = db->db_objset; 2033b24ab676SJeff Bonwick dsl_dataset_t *ds = os->os_dsl_dataset; 2034c717a561Smaybee dbuf_dirty_record_t *dr; 2035b24ab676SJeff Bonwick dmu_sync_arg_t *dsa; 20367802d7bfSMatthew Ahrens zbookmark_phys_t zb; 2037b24ab676SJeff Bonwick zio_prop_t zp; 2038744947dcSTom Erickson dnode_t *dn; 2039fa9e4066Sahrens 2040b24ab676SJeff Bonwick ASSERT(pio != NULL); 2041fa9e4066Sahrens ASSERT(txg != 0); 2042fa9e4066Sahrens 2043b24ab676SJeff Bonwick SET_BOOKMARK(&zb, ds->ds_object, 2044b24ab676SJeff Bonwick db->db.db_object, db->db_level, db->db_blkid); 2045b24ab676SJeff Bonwick 2046744947dcSTom Erickson DB_DNODE_ENTER(db); 2047744947dcSTom Erickson dn = DB_DNODE(db); 2048adaec86aSMatthew Ahrens dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp); 2049744947dcSTom Erickson DB_DNODE_EXIT(db); 2050fa9e4066Sahrens 2051fa9e4066Sahrens /* 2052b24ab676SJeff Bonwick * If we're frozen (running ziltest), we always need to generate a bp. 2053ea8dc4b6Seschrock */ 2054b24ab676SJeff Bonwick if (txg > spa_freeze_txg(os->os_spa)) 2055b24ab676SJeff Bonwick return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb)); 2056ea8dc4b6Seschrock 2057ea8dc4b6Seschrock /* 2058b24ab676SJeff Bonwick * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf() 2059b24ab676SJeff Bonwick * and us. If we determine that this txg is not yet syncing, 2060b24ab676SJeff Bonwick * but it begins to sync a moment later, that's OK because the 2061b24ab676SJeff Bonwick * sync thread will block in dbuf_sync_leaf() until we drop db_mtx. 2062fa9e4066Sahrens */ 2063b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 2064b24ab676SJeff Bonwick 2065b24ab676SJeff Bonwick if (txg <= spa_last_synced_txg(os->os_spa)) { 2066fa9e4066Sahrens /* 2067b24ab676SJeff Bonwick * This txg has already synced. There's nothing to do. 2068fa9e4066Sahrens */ 2069b24ab676SJeff Bonwick mutex_exit(&db->db_mtx); 2070be6fd75aSMatthew Ahrens return (SET_ERROR(EEXIST)); 2071fa9e4066Sahrens } 2072fa9e4066Sahrens 2073b24ab676SJeff Bonwick if (txg <= spa_syncing_txg(os->os_spa)) { 2074c5c6ffa0Smaybee /* 2075b24ab676SJeff Bonwick * This txg is currently syncing, so we can't mess with 2076b24ab676SJeff Bonwick * the dirty record anymore; just write a new log block. 2077c5c6ffa0Smaybee */ 207813506d1eSmaybee mutex_exit(&db->db_mtx); 2079b24ab676SJeff Bonwick return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb)); 2080c5c6ffa0Smaybee } 2081c5c6ffa0Smaybee 2082c717a561Smaybee dr = db->db_last_dirty; 2083b24ab676SJeff Bonwick while (dr && dr->dr_txg != txg) 2084c717a561Smaybee dr = dr->dr_next; 2085b24ab676SJeff Bonwick 2086b24ab676SJeff Bonwick if (dr == NULL) { 2087c5c6ffa0Smaybee /* 2088b24ab676SJeff Bonwick * There's no dr for this dbuf, so it must have been freed. 2089fa9e4066Sahrens * There's no need to log writes to freed blocks, so we're done. 2090fa9e4066Sahrens */ 2091fa9e4066Sahrens mutex_exit(&db->db_mtx); 2092be6fd75aSMatthew Ahrens return (SET_ERROR(ENOENT)); 2093fa9e4066Sahrens } 2094fa9e4066Sahrens 209580901aeaSGeorge Wilson ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg); 209680901aeaSGeorge Wilson 2097b7edcb94SMatthew Ahrens if (db->db_blkptr != NULL) { 2098b7edcb94SMatthew Ahrens /* 2099b7edcb94SMatthew Ahrens * We need to fill in zgd_bp with the current blkptr so that 2100b7edcb94SMatthew Ahrens * the nopwrite code can check if we're writing the same 2101b7edcb94SMatthew Ahrens * data that's already on disk. We can only nopwrite if we 2102b7edcb94SMatthew Ahrens * are sure that after making the copy, db_blkptr will not 2103b7edcb94SMatthew Ahrens * change until our i/o completes. We ensure this by 2104b7edcb94SMatthew Ahrens * holding the db_mtx, and only allowing nopwrite if the 2105b7edcb94SMatthew Ahrens * block is not already dirty (see below). This is verified 2106b7edcb94SMatthew Ahrens * by dmu_sync_done(), which VERIFYs that the db_blkptr has 2107b7edcb94SMatthew Ahrens * not changed. 2108b7edcb94SMatthew Ahrens */ 2109b7edcb94SMatthew Ahrens *zgd->zgd_bp = *db->db_blkptr; 2110b7edcb94SMatthew Ahrens } 2111b7edcb94SMatthew Ahrens 211280901aeaSGeorge Wilson /* 211334e8acefSMatthew Ahrens * Assume the on-disk data is X, the current syncing data (in 211434e8acefSMatthew Ahrens * txg - 1) is Y, and the current in-memory data is Z (currently 211534e8acefSMatthew Ahrens * in dmu_sync). 211634e8acefSMatthew Ahrens * 211734e8acefSMatthew Ahrens * We usually want to perform a nopwrite if X and Z are the 211834e8acefSMatthew Ahrens * same. However, if Y is different (i.e. the BP is going to 211934e8acefSMatthew Ahrens * change before this write takes effect), then a nopwrite will 212034e8acefSMatthew Ahrens * be incorrect - we would override with X, which could have 212134e8acefSMatthew Ahrens * been freed when Y was written. 212234e8acefSMatthew Ahrens * 212334e8acefSMatthew Ahrens * (Note that this is not a concern when we are nop-writing from 212434e8acefSMatthew Ahrens * syncing context, because X and Y must be identical, because 212534e8acefSMatthew Ahrens * all previous txgs have been synced.) 212634e8acefSMatthew Ahrens * 212734e8acefSMatthew Ahrens * Therefore, we disable nopwrite if the current BP could change 212834e8acefSMatthew Ahrens * before this TXG. There are two ways it could change: by 212934e8acefSMatthew Ahrens * being dirty (dr_next is non-NULL), or by being freed 213034e8acefSMatthew Ahrens * (dnode_block_freed()). This behavior is verified by 213134e8acefSMatthew Ahrens * zio_done(), which VERIFYs that the override BP is identical 213234e8acefSMatthew Ahrens * to the on-disk BP. 213380901aeaSGeorge Wilson */ 213434e8acefSMatthew Ahrens DB_DNODE_ENTER(db); 213534e8acefSMatthew Ahrens dn = DB_DNODE(db); 213634e8acefSMatthew Ahrens if (dr->dr_next != NULL || dnode_block_freed(dn, db->db_blkid)) 213780901aeaSGeorge Wilson zp.zp_nopwrite = B_FALSE; 213834e8acefSMatthew Ahrens DB_DNODE_EXIT(db); 213980901aeaSGeorge Wilson 2140c717a561Smaybee ASSERT(dr->dr_txg == txg); 2141b24ab676SJeff Bonwick if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC || 2142b24ab676SJeff Bonwick dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 2143c5c6ffa0Smaybee /* 2144b24ab676SJeff Bonwick * We have already issued a sync write for this buffer, 2145b24ab676SJeff Bonwick * or this buffer has already been synced. It could not 2146c717a561Smaybee * have been dirtied since, or we would have cleared the state. 2147c717a561Smaybee */ 2148c717a561Smaybee mutex_exit(&db->db_mtx); 2149be6fd75aSMatthew Ahrens return (SET_ERROR(EALREADY)); 2150c717a561Smaybee } 2151c717a561Smaybee 2152b24ab676SJeff Bonwick ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 2153c717a561Smaybee dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC; 2154fa9e4066Sahrens mutex_exit(&db->db_mtx); 2155fa9e4066Sahrens 2156b24ab676SJeff Bonwick dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); 2157b24ab676SJeff Bonwick dsa->dsa_dr = dr; 2158b24ab676SJeff Bonwick dsa->dsa_done = done; 2159b24ab676SJeff Bonwick dsa->dsa_zgd = zgd; 2160b24ab676SJeff Bonwick dsa->dsa_tx = NULL; 2161e14bb325SJeff Bonwick 2162b24ab676SJeff Bonwick zio_nowait(arc_write(pio, os->os_spa, txg, 2163b7edcb94SMatthew Ahrens zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db), 2164dcbf3bd6SGeorge Wilson &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa, 21658df0bcf0SPaul Dagnelie ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb)); 2166e14bb325SJeff Bonwick 2167b24ab676SJeff Bonwick return (0); 2168fa9e4066Sahrens } 2169fa9e4066Sahrens 2170fa9e4066Sahrens int 2171eb633035STom Caputi dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx) 2172eb633035STom Caputi { 2173eb633035STom Caputi dnode_t *dn; 2174eb633035STom Caputi int err; 2175eb633035STom Caputi 2176eb633035STom Caputi err = dnode_hold(os, object, FTAG, &dn); 2177eb633035STom Caputi if (err) 2178eb633035STom Caputi return (err); 2179eb633035STom Caputi err = dnode_set_nlevels(dn, nlevels, tx); 2180eb633035STom Caputi dnode_rele(dn, FTAG); 2181eb633035STom Caputi return (err); 2182eb633035STom Caputi } 2183eb633035STom Caputi 2184eb633035STom Caputi int 2185fa9e4066Sahrens dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs, 2186fa9e4066Sahrens dmu_tx_t *tx) 2187fa9e4066Sahrens { 2188ea8dc4b6Seschrock dnode_t *dn; 2189ea8dc4b6Seschrock int err; 2190ea8dc4b6Seschrock 2191503ad85cSMatthew Ahrens err = dnode_hold(os, object, FTAG, &dn); 2192ea8dc4b6Seschrock if (err) 2193ea8dc4b6Seschrock return (err); 2194ea8dc4b6Seschrock err = dnode_set_blksz(dn, size, ibs, tx); 2195fa9e4066Sahrens dnode_rele(dn, FTAG); 2196fa9e4066Sahrens return (err); 2197fa9e4066Sahrens } 2198fa9e4066Sahrens 2199eb633035STom Caputi int 2200eb633035STom Caputi dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid, 2201eb633035STom Caputi dmu_tx_t *tx) 2202eb633035STom Caputi { 2203eb633035STom Caputi dnode_t *dn; 2204eb633035STom Caputi int err; 2205eb633035STom Caputi 2206eb633035STom Caputi err = dnode_hold(os, object, FTAG, &dn); 2207eb633035STom Caputi if (err) 2208eb633035STom Caputi return (err); 2209eb633035STom Caputi rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 2210eb633035STom Caputi dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE); 2211eb633035STom Caputi rw_exit(&dn->dn_struct_rwlock); 2212eb633035STom Caputi dnode_rele(dn, FTAG); 2213eb633035STom Caputi return (0); 2214eb633035STom Caputi } 2215eb633035STom Caputi 2216fa9e4066Sahrens void 2217fa9e4066Sahrens dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum, 2218fa9e4066Sahrens dmu_tx_t *tx) 2219fa9e4066Sahrens { 2220ea8dc4b6Seschrock dnode_t *dn; 2221ea8dc4b6Seschrock 22225d7b4d43SMatthew Ahrens /* 22235d7b4d43SMatthew Ahrens * Send streams include each object's checksum function. This 22245d7b4d43SMatthew Ahrens * check ensures that the receiving system can understand the 22255d7b4d43SMatthew Ahrens * checksum function transmitted. 22265d7b4d43SMatthew Ahrens */ 22275d7b4d43SMatthew Ahrens ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS); 22285d7b4d43SMatthew Ahrens 22295d7b4d43SMatthew Ahrens VERIFY0(dnode_hold(os, object, FTAG, &dn)); 22305d7b4d43SMatthew Ahrens ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS); 2231fa9e4066Sahrens dn->dn_checksum = checksum; 2232fa9e4066Sahrens dnode_setdirty(dn, tx); 2233fa9e4066Sahrens dnode_rele(dn, FTAG); 2234fa9e4066Sahrens } 2235fa9e4066Sahrens 2236fa9e4066Sahrens void 2237fa9e4066Sahrens dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress, 2238fa9e4066Sahrens dmu_tx_t *tx) 2239fa9e4066Sahrens { 2240ea8dc4b6Seschrock dnode_t *dn; 2241ea8dc4b6Seschrock 22425d7b4d43SMatthew Ahrens /* 22435d7b4d43SMatthew Ahrens * Send streams include each object's compression function. This 22445d7b4d43SMatthew Ahrens * check ensures that the receiving system can understand the 22455d7b4d43SMatthew Ahrens * compression function transmitted. 22465d7b4d43SMatthew Ahrens */ 22475d7b4d43SMatthew Ahrens ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS); 22485d7b4d43SMatthew Ahrens 22495d7b4d43SMatthew Ahrens VERIFY0(dnode_hold(os, object, FTAG, &dn)); 2250fa9e4066Sahrens dn->dn_compress = compress; 2251fa9e4066Sahrens dnode_setdirty(dn, tx); 2252fa9e4066Sahrens dnode_rele(dn, FTAG); 2253fa9e4066Sahrens } 2254fa9e4066Sahrens 2255edf345e6SMatthew Ahrens /* 2256edf345e6SMatthew Ahrens * When the "redundant_metadata" property is set to "most", only indirect 2257edf345e6SMatthew Ahrens * blocks of this level and higher will have an additional ditto block. 2258edf345e6SMatthew Ahrens */ 2259edf345e6SMatthew Ahrens int zfs_redundant_metadata_most_ditto_level = 2; 2260edf345e6SMatthew Ahrens 2261b24ab676SJeff Bonwick void 2262adaec86aSMatthew Ahrens dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp) 2263b24ab676SJeff Bonwick { 2264b24ab676SJeff Bonwick dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET; 2265ad135b5dSChristopher Siden boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) || 22661d8ccc7bSMark Shellenbaum (wp & WP_SPILL)); 2267b24ab676SJeff Bonwick enum zio_checksum checksum = os->os_checksum; 2268b24ab676SJeff Bonwick enum zio_compress compress = os->os_compress; 2269b24ab676SJeff Bonwick enum zio_checksum dedup_checksum = os->os_dedup_checksum; 22707540df39SGeorge Wilson boolean_t dedup = B_FALSE; 22717540df39SGeorge Wilson boolean_t nopwrite = B_FALSE; 2272b24ab676SJeff Bonwick boolean_t dedup_verify = os->os_dedup_verify; 2273eb633035STom Caputi boolean_t encrypt = B_FALSE; 2274b24ab676SJeff Bonwick int copies = os->os_copies; 2275b24ab676SJeff Bonwick 2276b24ab676SJeff Bonwick /* 227780901aeaSGeorge Wilson * We maintain different write policies for each of the following 227880901aeaSGeorge Wilson * types of data: 227980901aeaSGeorge Wilson * 1. metadata 228080901aeaSGeorge Wilson * 2. preallocated blocks (i.e. level-0 blocks of a dump device) 228180901aeaSGeorge Wilson * 3. all other level 0 blocks 2282b24ab676SJeff Bonwick */ 2283b24ab676SJeff Bonwick if (ismd) { 2284b24ab676SJeff Bonwick /* 228580901aeaSGeorge Wilson * XXX -- we should design a compression algorithm 228680901aeaSGeorge Wilson * that specializes in arrays of bps. 228780901aeaSGeorge Wilson */ 2288db1741f5SJustin T. Gibbs compress = zio_compress_select(os->os_spa, 2289db1741f5SJustin T. Gibbs ZIO_COMPRESS_ON, ZIO_COMPRESS_ON); 229080901aeaSGeorge Wilson 229180901aeaSGeorge Wilson /* 2292b24ab676SJeff Bonwick * Metadata always gets checksummed. If the data 2293b24ab676SJeff Bonwick * checksum is multi-bit correctable, and it's not a 2294b24ab676SJeff Bonwick * ZBT-style checksum, then it's suitable for metadata 2295b24ab676SJeff Bonwick * as well. Otherwise, the metadata checksum defaults 2296b24ab676SJeff Bonwick * to fletcher4. 2297b24ab676SJeff Bonwick */ 229845818ee1SMatthew Ahrens if (!(zio_checksum_table[checksum].ci_flags & 229945818ee1SMatthew Ahrens ZCHECKSUM_FLAG_METADATA) || 230045818ee1SMatthew Ahrens (zio_checksum_table[checksum].ci_flags & 230145818ee1SMatthew Ahrens ZCHECKSUM_FLAG_EMBEDDED)) 2302b24ab676SJeff Bonwick checksum = ZIO_CHECKSUM_FLETCHER_4; 2303edf345e6SMatthew Ahrens 2304edf345e6SMatthew Ahrens if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL || 2305edf345e6SMatthew Ahrens (os->os_redundant_metadata == 2306edf345e6SMatthew Ahrens ZFS_REDUNDANT_METADATA_MOST && 2307edf345e6SMatthew Ahrens (level >= zfs_redundant_metadata_most_ditto_level || 2308edf345e6SMatthew Ahrens DMU_OT_IS_METADATA(type) || (wp & WP_SPILL)))) 2309edf345e6SMatthew Ahrens copies++; 231080901aeaSGeorge Wilson } else if (wp & WP_NOFILL) { 231180901aeaSGeorge Wilson ASSERT(level == 0); 2312b24ab676SJeff Bonwick 2313b24ab676SJeff Bonwick /* 231480901aeaSGeorge Wilson * If we're writing preallocated blocks, we aren't actually 231580901aeaSGeorge Wilson * writing them so don't set any policy properties. These 231680901aeaSGeorge Wilson * blocks are currently only used by an external subsystem 231780901aeaSGeorge Wilson * outside of zfs (i.e. dump) and not written by the zio 231880901aeaSGeorge Wilson * pipeline. 2319b24ab676SJeff Bonwick */ 232080901aeaSGeorge Wilson compress = ZIO_COMPRESS_OFF; 2321810e43b2SBill Pijewski checksum = ZIO_CHECKSUM_NOPARITY; 2322b24ab676SJeff Bonwick } else { 2323db1741f5SJustin T. Gibbs compress = zio_compress_select(os->os_spa, dn->dn_compress, 2324db1741f5SJustin T. Gibbs compress); 232580901aeaSGeorge Wilson 232680901aeaSGeorge Wilson checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ? 232780901aeaSGeorge Wilson zio_checksum_select(dn->dn_checksum, checksum) : 232880901aeaSGeorge Wilson dedup_checksum; 232980901aeaSGeorge Wilson 233080901aeaSGeorge Wilson /* 233180901aeaSGeorge Wilson * Determine dedup setting. If we are in dmu_sync(), 233280901aeaSGeorge Wilson * we won't actually dedup now because that's all 233380901aeaSGeorge Wilson * done in syncing context; but we do want to use the 233480901aeaSGeorge Wilson * dedup checkum. If the checksum is not strong 233580901aeaSGeorge Wilson * enough to ensure unique signatures, force 233680901aeaSGeorge Wilson * dedup_verify. 233780901aeaSGeorge Wilson */ 233880901aeaSGeorge Wilson if (dedup_checksum != ZIO_CHECKSUM_OFF) { 233980901aeaSGeorge Wilson dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE; 234045818ee1SMatthew Ahrens if (!(zio_checksum_table[checksum].ci_flags & 234145818ee1SMatthew Ahrens ZCHECKSUM_FLAG_DEDUP)) 234280901aeaSGeorge Wilson dedup_verify = B_TRUE; 2343b24ab676SJeff Bonwick } 2344b24ab676SJeff Bonwick 2345b24ab676SJeff Bonwick /* 234645818ee1SMatthew Ahrens * Enable nopwrite if we have secure enough checksum 234745818ee1SMatthew Ahrens * algorithm (see comment in zio_nop_write) and 234845818ee1SMatthew Ahrens * compression is enabled. We don't enable nopwrite if 234945818ee1SMatthew Ahrens * dedup is enabled as the two features are mutually 235045818ee1SMatthew Ahrens * exclusive. 2351b24ab676SJeff Bonwick */ 235245818ee1SMatthew Ahrens nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags & 235345818ee1SMatthew Ahrens ZCHECKSUM_FLAG_NOPWRITE) && 235480901aeaSGeorge Wilson compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled); 2355b24ab676SJeff Bonwick } 2356b24ab676SJeff Bonwick 2357eb633035STom Caputi /* 2358eb633035STom Caputi * All objects in an encrypted objset are protected from modification 2359eb633035STom Caputi * via a MAC. Encrypted objects store their IV and salt in the last DVA 2360eb633035STom Caputi * in the bp, so we cannot use all copies. Encrypted objects are also 2361eb633035STom Caputi * not subject to nopwrite since writing the same data will still 2362eb633035STom Caputi * result in a new ciphertext. Only encrypted blocks can be dedup'd 2363eb633035STom Caputi * to avoid ambiguity in the dedup code since the DDT does not store 2364eb633035STom Caputi * object types. 2365eb633035STom Caputi */ 2366eb633035STom Caputi if (os->os_encrypted && (wp & WP_NOFILL) == 0) { 2367eb633035STom Caputi encrypt = B_TRUE; 23685602294fSDan Kimmel 2369eb633035STom Caputi if (DMU_OT_IS_ENCRYPTED(type)) { 2370eb633035STom Caputi copies = MIN(copies, SPA_DVAS_PER_BP - 1); 2371eb633035STom Caputi nopwrite = B_FALSE; 2372eb633035STom Caputi } else { 2373eb633035STom Caputi dedup = B_FALSE; 2374eb633035STom Caputi } 2375eb633035STom Caputi 2376eb633035STom Caputi if (level <= 0 && 2377eb633035STom Caputi (type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) { 2378eb633035STom Caputi compress = ZIO_COMPRESS_EMPTY; 2379eb633035STom Caputi } 2380eb633035STom Caputi } 2381eb633035STom Caputi 2382eb633035STom Caputi zp->zp_compress = compress; 2383eb633035STom Caputi zp->zp_checksum = checksum; 23840a586ceaSMark Shellenbaum zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type; 2385b24ab676SJeff Bonwick zp->zp_level = level; 2386edf345e6SMatthew Ahrens zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa)); 2387b24ab676SJeff Bonwick zp->zp_dedup = dedup; 2388b24ab676SJeff Bonwick zp->zp_dedup_verify = dedup && dedup_verify; 238980901aeaSGeorge Wilson zp->zp_nopwrite = nopwrite; 2390663207adSDon Brady zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ? 2391663207adSDon Brady os->os_zpl_special_smallblock : 0; 2392eb633035STom Caputi zp->zp_encrypt = encrypt; 2393eb633035STom Caputi zp->zp_byteorder = ZFS_HOST_BYTEORDER; 2394eb633035STom Caputi bzero(zp->zp_salt, ZIO_DATA_SALT_LEN); 2395eb633035STom Caputi bzero(zp->zp_iv, ZIO_DATA_IV_LEN); 2396eb633035STom Caputi bzero(zp->zp_mac, ZIO_DATA_MAC_LEN); 2397b24ab676SJeff Bonwick } 2398b24ab676SJeff Bonwick 239944cd46caSbillm int 2400fa9e4066Sahrens dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off) 2401fa9e4066Sahrens { 2402fa9e4066Sahrens dnode_t *dn; 24032bcf0248SMax Grossman int err; 2404fa9e4066Sahrens 2405fa9e4066Sahrens /* 2406fa9e4066Sahrens * Sync any current changes before 2407fa9e4066Sahrens * we go trundling through the block pointers. 2408fa9e4066Sahrens */ 24092bcf0248SMax Grossman err = dmu_object_wait_synced(os, object); 24102bcf0248SMax Grossman if (err) { 24112bcf0248SMax Grossman return (err); 2412fa9e4066Sahrens } 24132bcf0248SMax Grossman 2414503ad85cSMatthew Ahrens err = dnode_hold(os, object, FTAG, &dn); 24152bcf0248SMax Grossman if (err) { 2416ea8dc4b6Seschrock return (err); 2417fa9e4066Sahrens } 2418fa9e4066Sahrens 2419cdb0ab79Smaybee err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0); 2420fa9e4066Sahrens dnode_rele(dn, FTAG); 2421fa9e4066Sahrens 2422fa9e4066Sahrens return (err); 2423fa9e4066Sahrens } 2424fa9e4066Sahrens 24252bcf0248SMax Grossman /* 24262bcf0248SMax Grossman * Given the ZFS object, if it contains any dirty nodes 24272bcf0248SMax Grossman * this function flushes all dirty blocks to disk. This 24282bcf0248SMax Grossman * ensures the DMU object info is updated. A more efficient 24292bcf0248SMax Grossman * future version might just find the TXG with the maximum 24302bcf0248SMax Grossman * ID and wait for that to be synced. 24312bcf0248SMax Grossman */ 24322bcf0248SMax Grossman int 24339a686fbcSPaul Dagnelie dmu_object_wait_synced(objset_t *os, uint64_t object) 24349a686fbcSPaul Dagnelie { 24352bcf0248SMax Grossman dnode_t *dn; 24362bcf0248SMax Grossman int error, i; 24372bcf0248SMax Grossman 24382bcf0248SMax Grossman error = dnode_hold(os, object, FTAG, &dn); 24392bcf0248SMax Grossman if (error) { 24402bcf0248SMax Grossman return (error); 24412bcf0248SMax Grossman } 24422bcf0248SMax Grossman 24432bcf0248SMax Grossman for (i = 0; i < TXG_SIZE; i++) { 24442bcf0248SMax Grossman if (list_link_active(&dn->dn_dirty_link[i])) { 24452bcf0248SMax Grossman break; 24462bcf0248SMax Grossman } 24472bcf0248SMax Grossman } 24482bcf0248SMax Grossman dnode_rele(dn, FTAG); 24492bcf0248SMax Grossman if (i != TXG_SIZE) { 24502bcf0248SMax Grossman txg_wait_synced(dmu_objset_pool(os), 0); 24512bcf0248SMax Grossman } 24522bcf0248SMax Grossman 24532bcf0248SMax Grossman return (0); 24542bcf0248SMax Grossman } 24552bcf0248SMax Grossman 2456fa9e4066Sahrens void 2457fa9e4066Sahrens dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi) 2458fa9e4066Sahrens { 2459b24ab676SJeff Bonwick dnode_phys_t *dnp; 2460b24ab676SJeff Bonwick 2461fa9e4066Sahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 2462fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 2463fa9e4066Sahrens 2464b24ab676SJeff Bonwick dnp = dn->dn_phys; 2465b24ab676SJeff Bonwick 2466fa9e4066Sahrens doi->doi_data_block_size = dn->dn_datablksz; 2467fa9e4066Sahrens doi->doi_metadata_block_size = dn->dn_indblkshift ? 2468fa9e4066Sahrens 1ULL << dn->dn_indblkshift : 0; 2469b24ab676SJeff Bonwick doi->doi_type = dn->dn_type; 2470b24ab676SJeff Bonwick doi->doi_bonus_type = dn->dn_bonustype; 2471b24ab676SJeff Bonwick doi->doi_bonus_size = dn->dn_bonuslen; 247254811da5SToomas Soome doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT; 2473fa9e4066Sahrens doi->doi_indirection = dn->dn_nlevels; 2474fa9e4066Sahrens doi->doi_checksum = dn->dn_checksum; 2475fa9e4066Sahrens doi->doi_compress = dn->dn_compress; 2476e77d42eaSMatthew Ahrens doi->doi_nblkptr = dn->dn_nblkptr; 2477b24ab676SJeff Bonwick doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9; 2478d0475637SMatthew Ahrens doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz; 2479b24ab676SJeff Bonwick doi->doi_fill_count = 0; 2480b24ab676SJeff Bonwick for (int i = 0; i < dnp->dn_nblkptr; i++) 24815d7b4d43SMatthew Ahrens doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]); 2482fa9e4066Sahrens 2483fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 2484fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 2485fa9e4066Sahrens } 2486fa9e4066Sahrens 2487fa9e4066Sahrens /* 2488fa9e4066Sahrens * Get information on a DMU object. 2489fa9e4066Sahrens * If doi is NULL, just indicates whether the object exists. 2490fa9e4066Sahrens */ 2491fa9e4066Sahrens int 2492fa9e4066Sahrens dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi) 2493fa9e4066Sahrens { 2494ea8dc4b6Seschrock dnode_t *dn; 2495503ad85cSMatthew Ahrens int err = dnode_hold(os, object, FTAG, &dn); 2496fa9e4066Sahrens 2497ea8dc4b6Seschrock if (err) 2498ea8dc4b6Seschrock return (err); 2499fa9e4066Sahrens 2500fa9e4066Sahrens if (doi != NULL) 2501fa9e4066Sahrens dmu_object_info_from_dnode(dn, doi); 2502fa9e4066Sahrens 2503fa9e4066Sahrens dnode_rele(dn, FTAG); 2504fa9e4066Sahrens return (0); 2505fa9e4066Sahrens } 2506fa9e4066Sahrens 2507fa9e4066Sahrens /* 2508fa9e4066Sahrens * As above, but faster; can be used when you have a held dbuf in hand. 2509fa9e4066Sahrens */ 2510fa9e4066Sahrens void 2511744947dcSTom Erickson dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi) 2512fa9e4066Sahrens { 2513744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2514744947dcSTom Erickson 2515744947dcSTom Erickson DB_DNODE_ENTER(db); 2516744947dcSTom Erickson dmu_object_info_from_dnode(DB_DNODE(db), doi); 2517744947dcSTom Erickson DB_DNODE_EXIT(db); 2518fa9e4066Sahrens } 2519fa9e4066Sahrens 2520fa9e4066Sahrens /* 2521fa9e4066Sahrens * Faster still when you only care about the size. 2522fa9e4066Sahrens * This is specifically optimized for zfs_getattr(). 2523fa9e4066Sahrens */ 2524fa9e4066Sahrens void 2525744947dcSTom Erickson dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize, 2526744947dcSTom Erickson u_longlong_t *nblk512) 2527fa9e4066Sahrens { 2528744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2529744947dcSTom Erickson dnode_t *dn; 2530744947dcSTom Erickson 2531744947dcSTom Erickson DB_DNODE_ENTER(db); 2532744947dcSTom Erickson dn = DB_DNODE(db); 2533fa9e4066Sahrens 2534fa9e4066Sahrens *blksize = dn->dn_datablksz; 253554811da5SToomas Soome /* add in number of slots used for the dnode itself */ 253699653d4eSeschrock *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >> 253754811da5SToomas Soome SPA_MINBLOCKSHIFT) + dn->dn_num_slots; 253854811da5SToomas Soome DB_DNODE_EXIT(db); 253954811da5SToomas Soome } 254054811da5SToomas Soome 254154811da5SToomas Soome void 254254811da5SToomas Soome dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize) 254354811da5SToomas Soome { 254454811da5SToomas Soome dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 254554811da5SToomas Soome dnode_t *dn; 254654811da5SToomas Soome 254754811da5SToomas Soome DB_DNODE_ENTER(db); 254854811da5SToomas Soome dn = DB_DNODE(db); 254954811da5SToomas Soome *dnsize = dn->dn_num_slots << DNODE_SHIFT; 2550744947dcSTom Erickson DB_DNODE_EXIT(db); 2551fa9e4066Sahrens } 2552fa9e4066Sahrens 2553fa9e4066Sahrens void 2554fa9e4066Sahrens byteswap_uint64_array(void *vbuf, size_t size) 2555fa9e4066Sahrens { 2556fa9e4066Sahrens uint64_t *buf = vbuf; 2557fa9e4066Sahrens size_t count = size >> 3; 2558fa9e4066Sahrens int i; 2559fa9e4066Sahrens 2560fa9e4066Sahrens ASSERT((size & 7) == 0); 2561fa9e4066Sahrens 2562fa9e4066Sahrens for (i = 0; i < count; i++) 2563fa9e4066Sahrens buf[i] = BSWAP_64(buf[i]); 2564fa9e4066Sahrens } 2565fa9e4066Sahrens 2566fa9e4066Sahrens void 2567fa9e4066Sahrens byteswap_uint32_array(void *vbuf, size_t size) 2568fa9e4066Sahrens { 2569fa9e4066Sahrens uint32_t *buf = vbuf; 2570fa9e4066Sahrens size_t count = size >> 2; 2571fa9e4066Sahrens int i; 2572fa9e4066Sahrens 2573fa9e4066Sahrens ASSERT((size & 3) == 0); 2574fa9e4066Sahrens 2575fa9e4066Sahrens for (i = 0; i < count; i++) 2576fa9e4066Sahrens buf[i] = BSWAP_32(buf[i]); 2577fa9e4066Sahrens } 2578fa9e4066Sahrens 2579fa9e4066Sahrens void 2580fa9e4066Sahrens byteswap_uint16_array(void *vbuf, size_t size) 2581fa9e4066Sahrens { 2582fa9e4066Sahrens uint16_t *buf = vbuf; 2583fa9e4066Sahrens size_t count = size >> 1; 2584fa9e4066Sahrens int i; 2585fa9e4066Sahrens 2586fa9e4066Sahrens ASSERT((size & 1) == 0); 2587fa9e4066Sahrens 2588fa9e4066Sahrens for (i = 0; i < count; i++) 2589fa9e4066Sahrens buf[i] = BSWAP_16(buf[i]); 2590fa9e4066Sahrens } 2591fa9e4066Sahrens 2592fa9e4066Sahrens /* ARGSUSED */ 2593fa9e4066Sahrens void 2594fa9e4066Sahrens byteswap_uint8_array(void *vbuf, size_t size) 2595fa9e4066Sahrens { 2596fa9e4066Sahrens } 2597fa9e4066Sahrens 2598fa9e4066Sahrens void 2599fa9e4066Sahrens dmu_init(void) 2600fa9e4066Sahrens { 2601770499e1SDan Kimmel abd_init(); 26023f9d6ad7SLin Ling zfs_dbgmsg_init(); 2603744947dcSTom Erickson sa_cache_init(); 2604744947dcSTom Erickson xuio_stat_init(); 2605744947dcSTom Erickson dmu_objset_init(); 2606fa9e4066Sahrens dnode_init(); 26077cbf8b43SRich Morris zfetch_init(); 2608fa94a07fSbrendan l2arc_init(); 2609ce636f8bSMatthew Ahrens arc_init(); 2610dcbf3bd6SGeorge Wilson dbuf_init(); 2611fa9e4066Sahrens } 2612fa9e4066Sahrens 2613fa9e4066Sahrens void 2614fa9e4066Sahrens dmu_fini(void) 2615fa9e4066Sahrens { 26163e30c24aSWill Andrews arc_fini(); /* arc depends on l2arc, so arc must go first */ 2617ce636f8bSMatthew Ahrens l2arc_fini(); 26187cbf8b43SRich Morris zfetch_fini(); 2619fa9e4066Sahrens dbuf_fini(); 2620744947dcSTom Erickson dnode_fini(); 2621744947dcSTom Erickson dmu_objset_fini(); 2622c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_fini(); 26230a586ceaSMark Shellenbaum sa_cache_fini(); 26243f9d6ad7SLin Ling zfs_dbgmsg_fini(); 2625770499e1SDan Kimmel abd_fini(); 2626fa9e4066Sahrens } 2627