xref: /freebsd/sys/contrib/openzfs/module/zfs/zfs_log.c (revision d5b0e70f7e04d971691517ce1304d86a1e367e2e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2015, 2018 by Delphix. All rights reserved.
24  */
25 
26 
27 #include <sys/types.h>
28 #include <sys/param.h>
29 #include <sys/sysmacros.h>
30 #include <sys/cmn_err.h>
31 #include <sys/kmem.h>
32 #include <sys/thread.h>
33 #include <sys/file.h>
34 #include <sys/vfs.h>
35 #include <sys/zfs_znode.h>
36 #include <sys/zfs_dir.h>
37 #include <sys/zil.h>
38 #include <sys/zil_impl.h>
39 #include <sys/byteorder.h>
40 #include <sys/policy.h>
41 #include <sys/stat.h>
42 #include <sys/acl.h>
43 #include <sys/dmu.h>
44 #include <sys/dbuf.h>
45 #include <sys/spa.h>
46 #include <sys/zfs_fuid.h>
47 #include <sys/dsl_dataset.h>
48 
49 /*
50  * These zfs_log_* functions must be called within a dmu tx, in one
51  * of 2 contexts depending on zilog->z_replay:
52  *
53  * Non replay mode
54  * ---------------
55  * We need to record the transaction so that if it is committed to
56  * the Intent Log then it can be replayed.  An intent log transaction
57  * structure (itx_t) is allocated and all the information necessary to
58  * possibly replay the transaction is saved in it. The itx is then assigned
59  * a sequence number and inserted in the in-memory list anchored in the zilog.
60  *
61  * Replay mode
62  * -----------
63  * We need to mark the intent log record as replayed in the log header.
64  * This is done in the same transaction as the replay so that they
65  * commit atomically.
66  */
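/*
 * A rough sketch of the non-replay calling pattern, simplified from the
 * zfs_vnops callers (holds, error handling and cleanup omitted):
 *
 *	tx = dmu_tx_create(zfsvfs->z_os);
 *	...hold the objects this operation will dirty...
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	...apply the change (e.g. zfs_mknode(), zfs_link_create())...
 *	zfs_log_create(zilog, tx, txtype, dzp, zp, name, vsecp, fuidp, vap);
 *	dmu_tx_commit(tx);
 *	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
 *		zil_commit(zilog, 0);
 */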
67 
68 int
69 zfs_log_create_txtype(zil_create_t type, vsecattr_t *vsecp, vattr_t *vap)
70 {
71 	int isxvattr = (vap->va_mask & ATTR_XVATTR);
72 	switch (type) {
73 	case Z_FILE:
74 		if (vsecp == NULL && !isxvattr)
75 			return (TX_CREATE);
76 		if (vsecp && isxvattr)
77 			return (TX_CREATE_ACL_ATTR);
78 		if (vsecp)
79 			return (TX_CREATE_ACL);
80 		else
81 			return (TX_CREATE_ATTR);
82 	case Z_DIR:
83 		if (vsecp == NULL && !isxvattr)
84 			return (TX_MKDIR);
85 		if (vsecp && isxvattr)
86 			return (TX_MKDIR_ACL_ATTR);
87 		if (vsecp)
88 			return (TX_MKDIR_ACL);
89 		else
90 			return (TX_MKDIR_ATTR);
91 	case Z_XATTRDIR:
92 		return (TX_MKXATTR);
93 	}
94 	ASSERT(0);
95 	return (TX_MAX_TYPE);
96 }
97 
98 /*
99  * Build up the log data necessary for logging an xvattr_t.
100  * First the lr_attr_t is initialized with the mapsize and the
101  * attribute bitmap copied from the xvattr_t.  Following the bitmap
102  * is a single 64-bit word holding the attribute bits to set on
103  * replay, then two 64-bit words reserved for the create time (which
104  * may be set), and finally the anti-virus scanstamp, which shares
105  * its space with the project id.
106  */
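/*
 * Rough layout of the region filled in below (space is reserved via
 * ZIL_XVAT_SIZE(); the lr_attr_t and lr_attr_end_t definitions in
 * sys/zil.h are authoritative):
 *
 *	uint32_t lr_attr_masksize;         number of bitmap words
 *	uint32_t bitmap[masksize];         copy of xva_reqattrmap[]
 *	uint64_t lr_attr_attrs;            XAT0_* bits to apply on replay
 *	uint64_t lr_attr_crtime[2];        create time, if requested
 *	uint8_t  lr_attr_scanstamp[32];    AV scanstamp or project id
 */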
107 static void
108 zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
109 {
110 	xoptattr_t *xoap;
111 
112 	xoap = xva_getxoptattr(xvap);
113 	ASSERT(xoap);
114 
115 	lrattr->lr_attr_masksize = xvap->xva_mapsize;
116 	uint32_t *bitmap = &lrattr->lr_attr_bitmap;
117 	for (int i = 0; i != xvap->xva_mapsize; i++, bitmap++)
118 		*bitmap = xvap->xva_reqattrmap[i];
119 
120 	lr_attr_end_t *end = (lr_attr_end_t *)bitmap;
121 	end->lr_attr_attrs = 0;
122 	end->lr_attr_crtime[0] = 0;
123 	end->lr_attr_crtime[1] = 0;
124 	memset(end->lr_attr_scanstamp, 0, AV_SCANSTAMP_SZ);
125 
126 	if (XVA_ISSET_REQ(xvap, XAT_READONLY))
127 		end->lr_attr_attrs |= (xoap->xoa_readonly == 0) ? 0 :
128 		    XAT0_READONLY;
129 	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN))
130 		end->lr_attr_attrs |= (xoap->xoa_hidden == 0) ? 0 :
131 		    XAT0_HIDDEN;
132 	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM))
133 		end->lr_attr_attrs |= (xoap->xoa_system == 0) ? 0 :
134 		    XAT0_SYSTEM;
135 	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE))
136 		end->lr_attr_attrs |= (xoap->xoa_archive == 0) ? 0 :
137 		    XAT0_ARCHIVE;
138 	if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE))
139 		end->lr_attr_attrs |= (xoap->xoa_immutable == 0) ? 0 :
140 		    XAT0_IMMUTABLE;
141 	if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK))
142 		end->lr_attr_attrs |= (xoap->xoa_nounlink == 0) ? 0 :
143 		    XAT0_NOUNLINK;
144 	if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY))
145 		end->lr_attr_attrs |= (xoap->xoa_appendonly == 0) ? 0 :
146 		    XAT0_APPENDONLY;
147 	if (XVA_ISSET_REQ(xvap, XAT_OPAQUE))
148 		end->lr_attr_attrs |= (xoap->xoa_opaque == 0) ? 0 :
149 		    XAT0_OPAQUE;
150 	if (XVA_ISSET_REQ(xvap, XAT_NODUMP))
151 		end->lr_attr_attrs |= (xoap->xoa_nodump == 0) ? 0 :
152 		    XAT0_NODUMP;
153 	if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED))
154 		end->lr_attr_attrs |= (xoap->xoa_av_quarantined == 0) ? 0 :
155 		    XAT0_AV_QUARANTINED;
156 	if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED))
157 		end->lr_attr_attrs |= (xoap->xoa_av_modified == 0) ? 0 :
158 		    XAT0_AV_MODIFIED;
159 	if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
160 		ZFS_TIME_ENCODE(&xoap->xoa_createtime, end->lr_attr_crtime);
161 	if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
162 		ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));
163 
164 		memcpy(end->lr_attr_scanstamp, xoap->xoa_av_scanstamp,
165 		    AV_SCANSTAMP_SZ);
166 	} else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
167 		/*
168 		 * XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
169 		 * at the same time, so we can share the same space.
170 		 */
171 		memcpy(end->lr_attr_scanstamp, &xoap->xoa_projid,
172 		    sizeof (uint64_t));
173 	}
174 	if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
175 		end->lr_attr_attrs |= (xoap->xoa_reparse == 0) ? 0 :
176 		    XAT0_REPARSE;
177 	if (XVA_ISSET_REQ(xvap, XAT_OFFLINE))
178 		end->lr_attr_attrs |= (xoap->xoa_offline == 0) ? 0 :
179 		    XAT0_OFFLINE;
180 	if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
181 		end->lr_attr_attrs |= (xoap->xoa_sparse == 0) ? 0 :
182 		    XAT0_SPARSE;
183 	if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT))
184 		end->lr_attr_attrs |= (xoap->xoa_projinherit == 0) ? 0 :
185 		    XAT0_PROJINHERIT;
186 }
187 
188 static void *
189 zfs_log_fuid_ids(zfs_fuid_info_t *fuidp, void *start)
190 {
191 	zfs_fuid_t *zfuid;
192 	uint64_t *fuidloc = start;
193 
194 	/* First copy in the ACE FUIDs */
195 	for (zfuid = list_head(&fuidp->z_fuids); zfuid;
196 	    zfuid = list_next(&fuidp->z_fuids, zfuid)) {
197 		*fuidloc++ = zfuid->z_logfuid;
198 	}
199 	return (fuidloc);
200 }
201 
202 
203 static void *
204 zfs_log_fuid_domains(zfs_fuid_info_t *fuidp, void *start)
205 {
206 	zfs_fuid_domain_t *zdomain;
207 
208 	/* now copy in the domain info, if any */
209 	if (fuidp->z_domain_str_sz != 0) {
210 		for (zdomain = list_head(&fuidp->z_domains); zdomain;
211 		    zdomain = list_next(&fuidp->z_domains, zdomain)) {
212 			memcpy(start, zdomain->z_domain,
213 			    strlen(zdomain->z_domain) + 1);
214 			start = (caddr_t)start +
215 			    strlen(zdomain->z_domain) + 1;
216 		}
217 	}
218 	return (start);
219 }
220 
221 /*
222  * If zp is an xattr node, check whether the xattr owner is unlinked.
223  * We don't want to log anything if the owner is unlinked.
224  */
225 static int
226 zfs_xattr_owner_unlinked(znode_t *zp)
227 {
228 	int unlinked = 0;
229 	znode_t *dzp;
230 #ifdef __FreeBSD__
231 	znode_t *tzp = zp;
232 
233 	/*
234 	 * zrele drops the vnode lock which violates the VOP locking contract
235 	 * on FreeBSD. See comment at the top of zfs_replay.c for more detail.
236 	 */
237 	/*
238 	 * if zp is an XATTR node, keep walking up via z_xattr_parent until
239 	 * we reach the owner
240 	 */
241 	while (tzp->z_pflags & ZFS_XATTR) {
242 		ASSERT3U(tzp->z_xattr_parent, !=, 0);
243 		if (zfs_zget(ZTOZSB(tzp), tzp->z_xattr_parent, &dzp) != 0) {
244 			unlinked = 1;
245 			break;
246 		}
247 
248 		if (tzp != zp)
249 			zrele(tzp);
250 		tzp = dzp;
251 		unlinked = tzp->z_unlinked;
252 	}
253 	if (tzp != zp)
254 		zrele(tzp);
255 #else
256 	zhold(zp);
257 	/*
258 	 * if zp is an XATTR node, keep walking up via z_xattr_parent until
259 	 * we reach the owner
260 	 */
261 	while (zp->z_pflags & ZFS_XATTR) {
262 		ASSERT3U(zp->z_xattr_parent, !=, 0);
263 		if (zfs_zget(ZTOZSB(zp), zp->z_xattr_parent, &dzp) != 0) {
264 			unlinked = 1;
265 			break;
266 		}
267 
268 		zrele(zp);
269 		zp = dzp;
270 		unlinked = zp->z_unlinked;
271 	}
272 	zrele(zp);
273 #endif
274 	return (unlinked);
275 }
276 
277 /*
278  * Handles TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, TX_MKDIR_ATTR and
279  * TX_MKXATTR transactions.
280  *
281  * TX_CREATE and TX_MKDIR are standard creates, but they may have FUID
282  * domain information appended prior to the name.  In this case the
283  * uid/gid in the log record will be a log centric FUID.
284  *
285  * TX_CREATE_ACL_ATTR and TX_MKDIR_ACL_ATTR handle special creates that
286  * may contain attributes, an ACL, and optional FUID information.
287  *
288  * TX_CREATE_ACL and TX_MKDIR_ACL handle special creates that specify
289  * an ACL and normal users/groups in the ACEs.
290  *
291  * There may also be optional xvattr attribute information, similar
292  * to that logged by zfs_log_setattr.
293  *
294  * Also, FUID "domain" strings, if any, are appended before the file name.
295  */
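/*
 * Sketch of the resulting itx payload, in the order assembled below
 * (the lr_create_t/lr_acl_create_t definitions are authoritative):
 *
 *	lr_create_t or lr_acl_create_t header
 *	[ lr_attr_t + xvattr data, if ATTR_XVATTR is set ]
 *	[ ACEs, padded to ZIL_ACE_LENGTH(), for the *_ACL* record types ]
 *	[ FUID ids (one uint64_t each), if fuidp is supplied ]
 *	[ FUID domain strings, if fuidp is supplied ]
 *	file name (NUL terminated)
 */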
296 void
297 zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
298     znode_t *dzp, znode_t *zp, const char *name, vsecattr_t *vsecp,
299     zfs_fuid_info_t *fuidp, vattr_t *vap)
300 {
301 	itx_t *itx;
302 	lr_create_t *lr;
303 	lr_acl_create_t *lracl;
304 	size_t aclsize = (vsecp != NULL) ? vsecp->vsa_aclentsz : 0;
305 	size_t xvatsize = 0;
306 	size_t txsize;
307 	xvattr_t *xvap = (xvattr_t *)vap;
308 	void *end;
309 	size_t lrsize;
310 	size_t namesize = strlen(name) + 1;
311 	size_t fuidsz = 0;
312 
313 	if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
314 		return;
315 
316 	/*
317 	 * If FUIDs are present then add in space for the
318 	 * domains and ACE FUIDs, if any.
319 	 */
320 	if (fuidp) {
321 		fuidsz += fuidp->z_domain_str_sz;
322 		fuidsz += fuidp->z_fuid_cnt * sizeof (uint64_t);
323 	}
324 
325 	if (vap->va_mask & ATTR_XVATTR)
326 		xvatsize = ZIL_XVAT_SIZE(xvap->xva_mapsize);
327 
328 	if ((int)txtype == TX_CREATE_ATTR || (int)txtype == TX_MKDIR_ATTR ||
329 	    (int)txtype == TX_CREATE || (int)txtype == TX_MKDIR ||
330 	    (int)txtype == TX_MKXATTR) {
331 		txsize = sizeof (*lr) + namesize + fuidsz + xvatsize;
332 		lrsize = sizeof (*lr);
333 	} else {
334 		txsize =
335 		    sizeof (lr_acl_create_t) + namesize + fuidsz +
336 		    ZIL_ACE_LENGTH(aclsize) + xvatsize;
337 		lrsize = sizeof (lr_acl_create_t);
338 	}
339 
340 	itx = zil_itx_create(txtype, txsize);
341 
342 	lr = (lr_create_t *)&itx->itx_lr;
343 	lr->lr_doid = dzp->z_id;
344 	lr->lr_foid = zp->z_id;
345 	/* Store dnode slot count in 8 bits above object id. */
346 	LR_FOID_SET_SLOTS(lr->lr_foid, zp->z_dnodesize >> DNODE_SHIFT);
347 	lr->lr_mode = zp->z_mode;
348 	if (!IS_EPHEMERAL(KUID_TO_SUID(ZTOUID(zp)))) {
349 		lr->lr_uid = (uint64_t)KUID_TO_SUID(ZTOUID(zp));
350 	} else {
351 		lr->lr_uid = fuidp->z_fuid_owner;
352 	}
353 	if (!IS_EPHEMERAL(KGID_TO_SGID(ZTOGID(zp)))) {
354 		lr->lr_gid = (uint64_t)KGID_TO_SGID(ZTOGID(zp));
355 	} else {
356 		lr->lr_gid = fuidp->z_fuid_group;
357 	}
358 	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
359 	    sizeof (uint64_t));
360 	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
361 	    lr->lr_crtime, sizeof (uint64_t) * 2);
362 
363 	if (sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(ZTOZSB(zp)), &lr->lr_rdev,
364 	    sizeof (lr->lr_rdev)) != 0)
365 		lr->lr_rdev = 0;
366 
367 	/*
368 	 * Fill in xvattr info if any
369 	 */
370 	if (vap->va_mask & ATTR_XVATTR) {
371 		zfs_log_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), xvap);
372 		end = (caddr_t)lr + lrsize + xvatsize;
373 	} else {
374 		end = (caddr_t)lr + lrsize;
375 	}
376 
377 	/* Now fill in any ACL info */
378 
379 	if (vsecp) {
380 		lracl = (lr_acl_create_t *)&itx->itx_lr;
381 		lracl->lr_aclcnt = vsecp->vsa_aclcnt;
382 		lracl->lr_acl_bytes = aclsize;
383 		lracl->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
384 		lracl->lr_fuidcnt  = fuidp ? fuidp->z_fuid_cnt : 0;
385 		if (vsecp->vsa_aclflags & VSA_ACE_ACLFLAGS)
386 			lracl->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
387 		else
388 			lracl->lr_acl_flags = 0;
389 
390 		memcpy(end, vsecp->vsa_aclentp, aclsize);
391 		end = (caddr_t)end + ZIL_ACE_LENGTH(aclsize);
392 	}
393 
394 	/* drop in FUID info */
395 	if (fuidp) {
396 		end = zfs_log_fuid_ids(fuidp, end);
397 		end = zfs_log_fuid_domains(fuidp, end);
398 	}
399 	/*
400 	 * Now place the file name in the log record.
401 	 */
402 	memcpy(end, name, namesize);
403 
404 	zil_itx_assign(zilog, itx, tx);
405 }
406 
407 /*
408  * Handles both TX_REMOVE and TX_RMDIR transactions.
409  */
410 void
411 zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
412     znode_t *dzp, const char *name, uint64_t foid, boolean_t unlinked)
413 {
414 	itx_t *itx;
415 	lr_remove_t *lr;
416 	size_t namesize = strlen(name) + 1;
417 
418 	if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
419 		return;
420 
421 	itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
422 	lr = (lr_remove_t *)&itx->itx_lr;
423 	lr->lr_doid = dzp->z_id;
424 	memcpy(lr + 1, name, namesize);
425 
426 	itx->itx_oid = foid;
427 
428 	/*
429 	 * Object ids can be re-instantiated in the next txg so
430 	 * remove any async transactions to avoid future leaks.
431 	 * This can happen if a fsync occurs on the re-instantiated
432 	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
433 	 * the new file data and flushes a write record for the old object.
434 	 */
435 	if (unlinked) {
436 		ASSERT((txtype & ~TX_CI) == TX_REMOVE);
437 		zil_remove_async(zilog, foid);
438 	}
439 	zil_itx_assign(zilog, itx, tx);
440 }
441 
442 /*
443  * Handles TX_LINK transactions.
444  */
445 void
446 zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
447     znode_t *dzp, znode_t *zp, const char *name)
448 {
449 	itx_t *itx;
450 	lr_link_t *lr;
451 	size_t namesize = strlen(name) + 1;
452 
453 	if (zil_replaying(zilog, tx))
454 		return;
455 
456 	itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
457 	lr = (lr_link_t *)&itx->itx_lr;
458 	lr->lr_doid = dzp->z_id;
459 	lr->lr_link_obj = zp->z_id;
460 	memcpy(lr + 1, name, namesize);
461 
462 	zil_itx_assign(zilog, itx, tx);
463 }
464 
465 /*
466  * Handles TX_SYMLINK transactions.
467  */
468 void
469 zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
470     znode_t *dzp, znode_t *zp, const char *name, const char *link)
471 {
472 	itx_t *itx;
473 	lr_create_t *lr;
474 	size_t namesize = strlen(name) + 1;
475 	size_t linksize = strlen(link) + 1;
476 
477 	if (zil_replaying(zilog, tx))
478 		return;
479 
480 	itx = zil_itx_create(txtype, sizeof (*lr) + namesize + linksize);
481 	lr = (lr_create_t *)&itx->itx_lr;
482 	lr->lr_doid = dzp->z_id;
483 	lr->lr_foid = zp->z_id;
484 	lr->lr_uid = KUID_TO_SUID(ZTOUID(zp));
485 	lr->lr_gid = KGID_TO_SGID(ZTOGID(zp));
486 	lr->lr_mode = zp->z_mode;
487 	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
488 	    sizeof (uint64_t));
489 	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
490 	    lr->lr_crtime, sizeof (uint64_t) * 2);
491 	memcpy((char *)(lr + 1), name, namesize);
492 	memcpy((char *)(lr + 1) + namesize, link, linksize);
493 
494 	zil_itx_assign(zilog, itx, tx);
495 }
496 
497 /*
498  * Handles TX_RENAME transactions.
499  */
500 void
501 zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, znode_t *sdzp,
502     const char *sname, znode_t *tdzp, const char *dname, znode_t *szp)
503 {
504 	itx_t *itx;
505 	lr_rename_t *lr;
506 	size_t snamesize = strlen(sname) + 1;
507 	size_t dnamesize = strlen(dname) + 1;
508 
509 	if (zil_replaying(zilog, tx))
510 		return;
511 
512 	itx = zil_itx_create(txtype, sizeof (*lr) + snamesize + dnamesize);
513 	lr = (lr_rename_t *)&itx->itx_lr;
514 	lr->lr_sdoid = sdzp->z_id;
515 	lr->lr_tdoid = tdzp->z_id;
516 	memcpy((char *)(lr + 1), sname, snamesize);
517 	memcpy((char *)(lr + 1) + snamesize, dname, dnamesize);
518 	itx->itx_oid = szp->z_id;
519 
520 	zil_itx_assign(zilog, itx, tx);
521 }
522 
523 /*
524  * zfs_log_write() handles TX_WRITE transactions. The specified callback is
525  * called as soon as the write is on stable storage (be it via a DMU sync or a
526  * ZIL commit).
527  */
528 static long zfs_immediate_write_sz = 32768;
529 
530 void
531 zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
532     znode_t *zp, offset_t off, ssize_t resid, int ioflag,
533     zil_callback_t callback, void *callback_data)
534 {
535 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
536 	uint32_t blocksize = zp->z_blksz;
537 	itx_wr_state_t write_state;
538 	uintptr_t fsync_cnt;
539 	uint64_t gen = 0;
540 	ssize_t size = resid;
541 
542 	if (zil_replaying(zilog, tx) || zp->z_unlinked ||
543 	    zfs_xattr_owner_unlinked(zp)) {
544 		if (callback != NULL)
545 			callback(callback_data);
546 		return;
547 	}
548 
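	/*
	 * Pick how the data will reach stable storage.  In short:
	 *
	 *   WR_INDIRECT  - only a block pointer is logged; the data is
	 *                  written in place by the DMU and referenced
	 *                  from the log block (logbias=throughput, or
	 *                  large writes when no separate log device).
	 *   WR_COPIED    - the data is copied into the itx right here,
	 *                  so a later zil_commit() needs no extra read.
	 *   WR_NEED_COPY - only the offset and length are logged now;
	 *                  the data is fetched only if zil_commit()
	 *                  actually needs to write the record.
	 */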
549 	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
550 		write_state = WR_INDIRECT;
551 	else if (!spa_has_slogs(zilog->zl_spa) &&
552 	    resid >= zfs_immediate_write_sz)
553 		write_state = WR_INDIRECT;
554 	else if (ioflag & (O_SYNC | O_DSYNC))
555 		write_state = WR_COPIED;
556 	else
557 		write_state = WR_NEED_COPY;
558 
559 	if ((fsync_cnt = (uintptr_t)tsd_get(zfs_fsyncer_key)) != 0) {
560 		(void) tsd_set(zfs_fsyncer_key, (void *)(fsync_cnt - 1));
561 	}
562 
563 	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &gen,
564 	    sizeof (gen));
565 
566 	while (resid) {
567 		itx_t *itx;
568 		lr_write_t *lr;
569 		itx_wr_state_t wr_state = write_state;
570 		ssize_t len = resid;
571 
572 		/*
573 		 * A WR_COPIED record must fit entirely in one log block.
574 		 * Large writes can use WR_NEED_COPY, which the ZIL will
575 		 * split into multiple records across several log blocks
576 		 * if necessary.
577 		 */
578 		if (wr_state == WR_COPIED &&
579 		    resid > zil_max_copied_data(zilog))
580 			wr_state = WR_NEED_COPY;
581 		else if (wr_state == WR_INDIRECT)
582 			len = MIN(blocksize - P2PHASE(off, blocksize), resid);
583 
584 		itx = zil_itx_create(txtype, sizeof (*lr) +
585 		    (wr_state == WR_COPIED ? len : 0));
586 		lr = (lr_write_t *)&itx->itx_lr;
587 
588 		/*
589 		 * For WR_COPIED records, copy the data into the lr_write_t.
590 		 */
591 		if (wr_state == WR_COPIED) {
592 			int err;
593 			DB_DNODE_ENTER(db);
594 			err = dmu_read_by_dnode(DB_DNODE(db), off, len, lr + 1,
595 			    DMU_READ_NO_PREFETCH);
596 			if (err != 0) {
597 				zil_itx_destroy(itx);
598 				itx = zil_itx_create(txtype, sizeof (*lr));
599 				lr = (lr_write_t *)&itx->itx_lr;
600 				wr_state = WR_NEED_COPY;
601 			}
602 			DB_DNODE_EXIT(db);
603 		}
604 
605 		itx->itx_wr_state = wr_state;
606 		lr->lr_foid = zp->z_id;
607 		lr->lr_offset = off;
608 		lr->lr_length = len;
609 		lr->lr_blkoff = 0;
610 		BP_ZERO(&lr->lr_blkptr);
611 
612 		itx->itx_private = ZTOZSB(zp);
613 		itx->itx_gen = gen;
614 
615 		if (!(ioflag & (O_SYNC | O_DSYNC)) && (zp->z_sync_cnt == 0) &&
616 		    (fsync_cnt == 0))
617 			itx->itx_sync = B_FALSE;
618 
619 		itx->itx_callback = callback;
620 		itx->itx_callback_data = callback_data;
621 		zil_itx_assign(zilog, itx, tx);
622 
623 		off += len;
624 		resid -= len;
625 	}
626 
627 	if (write_state == WR_COPIED || write_state == WR_NEED_COPY) {
628 		dsl_pool_wrlog_count(zilog->zl_dmu_pool, size, tx->tx_txg);
629 	}
630 }
631 
632 /*
633  * Handles TX_TRUNCATE transactions.
634  */
635 void
636 zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
637     znode_t *zp, uint64_t off, uint64_t len)
638 {
639 	itx_t *itx;
640 	lr_truncate_t *lr;
641 
642 	if (zil_replaying(zilog, tx) || zp->z_unlinked ||
643 	    zfs_xattr_owner_unlinked(zp))
644 		return;
645 
646 	itx = zil_itx_create(txtype, sizeof (*lr));
647 	lr = (lr_truncate_t *)&itx->itx_lr;
648 	lr->lr_foid = zp->z_id;
649 	lr->lr_offset = off;
650 	lr->lr_length = len;
651 
652 	itx->itx_sync = (zp->z_sync_cnt != 0);
653 	zil_itx_assign(zilog, itx, tx);
654 }
655 
656 /*
657  * Handles TX_SETATTR transactions.
658  */
659 void
660 zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
661     znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp)
662 {
663 	itx_t		*itx;
664 	lr_setattr_t	*lr;
665 	xvattr_t	*xvap = (xvattr_t *)vap;
666 	size_t		recsize = sizeof (lr_setattr_t);
667 	void		*start;
668 
669 	if (zil_replaying(zilog, tx) || zp->z_unlinked)
670 		return;
671 
672 	/*
673 	 * If XVATTR is set, then the log record size needs to allow for an
674 	 * lr_attr_t + the xvattr mask, mapsize and create time, plus the
675 	 * actual attribute values.
676 	 */
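	/*
	 * Payload order, as assembled below: lr_setattr_t, then the
	 * optional lr_attr_t/xvattr block, then any FUID domain strings
	 * appended by zfs_log_fuid_domains().
	 */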
677 	if (vap->va_mask & ATTR_XVATTR)
678 		recsize = sizeof (*lr) + ZIL_XVAT_SIZE(xvap->xva_mapsize);
679 
680 	if (fuidp)
681 		recsize += fuidp->z_domain_str_sz;
682 
683 	itx = zil_itx_create(txtype, recsize);
684 	lr = (lr_setattr_t *)&itx->itx_lr;
685 	lr->lr_foid = zp->z_id;
686 	lr->lr_mask = (uint64_t)mask_applied;
687 	lr->lr_mode = (uint64_t)vap->va_mode;
688 	if ((mask_applied & ATTR_UID) && IS_EPHEMERAL(vap->va_uid))
689 		lr->lr_uid = fuidp->z_fuid_owner;
690 	else
691 		lr->lr_uid = (uint64_t)vap->va_uid;
692 
693 	if ((mask_applied & ATTR_GID) && IS_EPHEMERAL(vap->va_gid))
694 		lr->lr_gid = fuidp->z_fuid_group;
695 	else
696 		lr->lr_gid = (uint64_t)vap->va_gid;
697 
698 	lr->lr_size = (uint64_t)vap->va_size;
699 	ZFS_TIME_ENCODE(&vap->va_atime, lr->lr_atime);
700 	ZFS_TIME_ENCODE(&vap->va_mtime, lr->lr_mtime);
701 	start = (lr_setattr_t *)(lr + 1);
702 	if (vap->va_mask & ATTR_XVATTR) {
703 		zfs_log_xvattr((lr_attr_t *)start, xvap);
704 		start = (caddr_t)start + ZIL_XVAT_SIZE(xvap->xva_mapsize);
705 	}
706 
707 	/*
708 	 * Now append domain information, if any, to the end.
709 	 */
710 
711 	if (fuidp)
712 		(void) zfs_log_fuid_domains(fuidp, start);
713 
714 	itx->itx_sync = (zp->z_sync_cnt != 0);
715 	zil_itx_assign(zilog, itx, tx);
716 }
717 
718 /*
719  * Handles TX_SETSAXATTR transactions.
720  */
721 void
722 zfs_log_setsaxattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
723     znode_t *zp, const char *name, const void *value, size_t size)
724 {
725 	itx_t		*itx;
726 	lr_setsaxattr_t	*lr;
727 	size_t		recsize = sizeof (lr_setsaxattr_t);
728 	void		*xattrstart;
729 	int		namelen;
730 
731 	if (zil_replaying(zilog, tx) || zp->z_unlinked)
732 		return;
733 
734 	namelen = strlen(name) + 1;
735 	recsize += (namelen + size);
736 	itx = zil_itx_create(txtype, recsize);
737 	lr = (lr_setsaxattr_t *)&itx->itx_lr;
738 	lr->lr_foid = zp->z_id;
739 	xattrstart = (char *)(lr + 1);
740 	memcpy(xattrstart, name, namelen);
741 	if (value != NULL) {
742 		memcpy((char *)xattrstart + namelen, value, size);
743 		lr->lr_size = size;
744 	} else {
745 		lr->lr_size = 0;
746 	}
747 
748 	itx->itx_sync = (zp->z_sync_cnt != 0);
749 	zil_itx_assign(zilog, itx, tx);
750 }
751 
752 /*
753  * Handles TX_ACL transactions.
754  */
755 void
756 zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
757     vsecattr_t *vsecp, zfs_fuid_info_t *fuidp)
758 {
759 	itx_t *itx;
760 	lr_acl_v0_t *lrv0;
761 	lr_acl_t *lr;
762 	int txtype;
763 	int lrsize;
764 	size_t txsize;
765 	size_t aclbytes = vsecp->vsa_aclentsz;
766 
767 	if (zil_replaying(zilog, tx) || zp->z_unlinked)
768 		return;
769 
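	/*
	 * File systems below ZPL_VERSION_FUID get the smaller TX_ACL_V0
	 * record, with the ACEs immediately following the header; newer
	 * file systems get TX_ACL, which pads the ACEs to ZIL_ACE_LENGTH()
	 * and may append FUID ids and domain strings.
	 */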
770 	txtype = (ZTOZSB(zp)->z_version < ZPL_VERSION_FUID) ?
771 	    TX_ACL_V0 : TX_ACL;
772 
773 	if (txtype == TX_ACL)
774 		lrsize = sizeof (*lr);
775 	else
776 		lrsize = sizeof (*lrv0);
777 
778 	txsize = lrsize +
779 	    ((txtype == TX_ACL) ? ZIL_ACE_LENGTH(aclbytes) : aclbytes) +
780 	    (fuidp ? fuidp->z_domain_str_sz : 0) +
781 	    sizeof (uint64_t) * (fuidp ? fuidp->z_fuid_cnt : 0);
782 
783 	itx = zil_itx_create(txtype, txsize);
784 
785 	lr = (lr_acl_t *)&itx->itx_lr;
786 	lr->lr_foid = zp->z_id;
787 	if (txtype == TX_ACL) {
788 		lr->lr_acl_bytes = aclbytes;
789 		lr->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
790 		lr->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
791 		if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS)
792 			lr->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
793 		else
794 			lr->lr_acl_flags = 0;
795 	}
796 	lr->lr_aclcnt = (uint64_t)vsecp->vsa_aclcnt;
797 
798 	if (txtype == TX_ACL_V0) {
799 		lrv0 = (lr_acl_v0_t *)lr;
800 		memcpy(lrv0 + 1, vsecp->vsa_aclentp, aclbytes);
801 	} else {
802 		void *start = (ace_t *)(lr + 1);
803 
804 		memcpy(start, vsecp->vsa_aclentp, aclbytes);
805 
806 		start = (caddr_t)start + ZIL_ACE_LENGTH(aclbytes);
807 
808 		if (fuidp) {
809 			start = zfs_log_fuid_ids(fuidp, start);
810 			(void) zfs_log_fuid_domains(fuidp, start);
811 		}
812 	}
813 
814 	itx->itx_sync = (zp->z_sync_cnt != 0);
815 	zil_itx_assign(zilog, itx, tx);
816 }
817 
818 ZFS_MODULE_PARAM(zfs, zfs_, immediate_write_sz, LONG, ZMOD_RW,
819 	"Largest data block to write to zil");
820