xref: /titanic_44/usr/src/cmd/fm/fmd/common/fmd_log.c (revision fe0e7ec4d916b05b52d8c7cc8a3e6a1b28e77b6f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * FMD Log File Subsystem
31  *
32  * Events are written to one of two log files as they are received or created;
33  * the error log tracks all ereport.* events received on the inbound event
34  * transport, and the fault log tracks all list.* events generated by fmd or
35  * its client modules.  In addition, we use the same log file format to cache
36  * state and events associated with ASRUs that are named in a diagnosis.
37  *
38  * The log files use the exacct format manipulated by libexacct(3LIB) and
39  * originally defined in PSARC 1999/119.  However, the exacct library was
40  * designed primarily for read-only clients and without the synchronous i/o
41  * considerations and seeking required for fmd, so we use libexacct here only
42  * to read and write the file headers and to pack data from memory into a file
43  * bytestream.  All of the i/o and file offset manipulations are performed by
44  * the fmd code below.  Our exacct file management uses the following grammar:
45  *
46  * file := hdr toc event*
47  * hdr := EXD_FMA_LABEL EXD_FMA_VERSION EXD_FMA_OSREL EXD_FMA_OSVER
48  *        EXD_FMA_PLAT EXD_FMA_UUID
49  * toc := EXD_FMA_OFFSET
50  * event := EXD_FMA_TODSEC EXD_FMA_TODNSEC EXD_FMA_NVLIST evref* or legacy evref
51  * evref := EXD_FMA_UUID EXD_FMA_OFFSET
52  * legacy evref := EXD_FMA_MAJOR EXD_FMA_MINOR EXD_FMA_INODE EXD_FMA_OFFSET
53  *
54  * Any event can be uniquely identified by the tuple (file, offset) where file
55  * is encoded as (uuid) when we are cross-linking files.  For legacy file
56  * formats we still support encoding the reference as (major, minor, inode).
57  * Note that we break out the file's dev_t into its two 32-bit components to
58  * permit development of either 32-bit or 64-bit log readers and writers; the
59  * LFS APIs do not yet export a 64-bit dev_t to fstat64(), so there is no way
60  * for a 32-bit application to retrieve and store a 64-bit dev_t.
61  *
62  * In order to replay events in the event of an fmd crash, events are initially
63  * written to the error log using the group catalog tag EXD_GROUP_RFMA by the
64  * fmd_log_append() function.  Later, once an event transitions from the
65  * received state to one of its other states (see fmd_event.c for details),
66  * fmd_log_commit() is used to overwrite the tag with EXD_GROUP_FMA, indicating
67  * that the event is fully processed and no longer needs to be replayed.
68  */
69 
70 #include <sys/types.h>
71 #include <sys/mkdev.h>
72 #include <sys/statvfs.h>
73 #include <sys/fm/protocol.h>
74 #include <sys/exacct_impl.h>
75 #include <uuid/uuid.h>
76 
77 #include <unistd.h>
78 #include <limits.h>
79 #include <fcntl.h>
80 #include <ctype.h>
81 
82 #include <fmd_alloc.h>
83 #include <fmd_error.h>
84 #include <fmd_string.h>
85 #include <fmd_event.h>
86 #include <fmd_conf.h>
87 #include <fmd_subr.h>
88 #include <fmd_case.h>
89 #include <fmd_log.h>
90 
91 #include <fmd.h>
92 
93 #define	CAT_FMA_RGROUP	(EXT_GROUP | EXC_DEFAULT | EXD_GROUP_RFMA)
94 #define	CAT_FMA_GROUP	(EXT_GROUP | EXC_DEFAULT | EXD_GROUP_FMA)
95 
96 #define	CAT_FMA_LABEL	(EXT_STRING | EXC_DEFAULT | EXD_FMA_LABEL)
97 #define	CAT_FMA_VERSION	(EXT_STRING | EXC_DEFAULT | EXD_FMA_VERSION)
98 #define	CAT_FMA_OSREL	(EXT_STRING | EXC_DEFAULT | EXD_FMA_OSREL)
99 #define	CAT_FMA_OSVER	(EXT_STRING | EXC_DEFAULT | EXD_FMA_OSVER)
100 #define	CAT_FMA_PLAT	(EXT_STRING | EXC_DEFAULT | EXD_FMA_PLAT)
101 #define	CAT_FMA_UUID	(EXT_STRING | EXC_DEFAULT | EXD_FMA_UUID)
102 #define	CAT_FMA_TODSEC	(EXT_UINT64 | EXC_DEFAULT | EXD_FMA_TODSEC)
103 #define	CAT_FMA_TODNSEC	(EXT_UINT64 | EXC_DEFAULT | EXD_FMA_TODNSEC)
104 #define	CAT_FMA_NVLIST	(EXT_RAW | EXC_DEFAULT | EXD_FMA_NVLIST)
105 #define	CAT_FMA_MAJOR	(EXT_UINT32 | EXC_DEFAULT | EXD_FMA_MAJOR)
106 #define	CAT_FMA_MINOR	(EXT_UINT32 | EXC_DEFAULT | EXD_FMA_MINOR)
107 #define	CAT_FMA_INODE	(EXT_UINT64 | EXC_DEFAULT | EXD_FMA_INODE)
108 #define	CAT_FMA_OFFSET	(EXT_UINT64 | EXC_DEFAULT | EXD_FMA_OFFSET)
109 
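/*
 * Write 'n' bytes from 'buf' to the log file, looping as needed to handle
 * short writes.  Returns the number of bytes written, or -1 if no data at
 * all could be written.  The caller must hold the log's log_lock.
 */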
110 static ssize_t
111 fmd_log_write(fmd_log_t *lp, const void *buf, size_t n)
112 {
113 	ssize_t resid = n;
114 	ssize_t len;
115 
116 	ASSERT(MUTEX_HELD(&lp->log_lock));
117 
118 	while (resid != 0) {
119 		if ((len = write(lp->log_fd, buf, resid)) <= 0)
120 			break;
121 
122 		resid -= len;
123 		buf = (char *)buf + len;
124 	}
125 
126 	if (resid == n && n != 0)
127 		return (-1);
128 
129 	return (n - resid);
130 }
131 
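/*
 * Write the exacct header and table-of-contents record groups to a newly
 * created log file.  The header records the log's label (tag), the daemon
 * version, the OS release, version, and platform, and a freshly generated
 * log uuid; the TOC holds the replay skip offset, initially zero.  The
 * log's log_toc, log_beg, and log_off offsets are then set to point at the
 * TOC and at the position where the first event record will be appended.
 */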
132 static int
133 fmd_log_write_hdr(fmd_log_t *lp, const char *tag)
134 {
135 	ea_object_t hdr, toc, i0, i1, i2, i3, i4, i5, i6;
136 	const char *osrel, *osver, *plat;
137 	off64_t off = 0;
138 	int err = 0;
139 	uuid_t uuid;
140 
141 	(void) fmd_conf_getprop(fmd.d_conf, "osrelease", &osrel);
142 	(void) fmd_conf_getprop(fmd.d_conf, "osversion", &osver);
143 	(void) fmd_conf_getprop(fmd.d_conf, "platform", &plat);
144 	(void) fmd_conf_getprop(fmd.d_conf, "uuidlen", &lp->log_uuidlen);
145 
146 	lp->log_uuid = fmd_zalloc(lp->log_uuidlen + 1, FMD_SLEEP);
147 	uuid_generate(uuid);
148 	uuid_unparse(uuid, lp->log_uuid);
149 
150 	err |= ea_set_group(&hdr, CAT_FMA_GROUP);
151 	err |= ea_set_group(&toc, CAT_FMA_GROUP);
152 
153 	err |= ea_set_item(&i0, CAT_FMA_LABEL, tag, 0);
154 	err |= ea_set_item(&i1, CAT_FMA_VERSION, fmd.d_version, 0);
155 	err |= ea_set_item(&i2, CAT_FMA_OSREL, osrel, 0);
156 	err |= ea_set_item(&i3, CAT_FMA_OSVER, osver, 0);
157 	err |= ea_set_item(&i4, CAT_FMA_PLAT, plat, 0);
158 	err |= ea_set_item(&i5, CAT_FMA_UUID, lp->log_uuid, 0);
159 	err |= ea_set_item(&i6, CAT_FMA_OFFSET, &off, 0);
160 
161 	(void) ea_attach_to_group(&hdr, &i0);
162 	(void) ea_attach_to_group(&hdr, &i1);
163 	(void) ea_attach_to_group(&hdr, &i2);
164 	(void) ea_attach_to_group(&hdr, &i3);
165 	(void) ea_attach_to_group(&hdr, &i4);
166 	(void) ea_attach_to_group(&hdr, &i5);
167 	(void) ea_attach_to_group(&toc, &i6);
168 
169 	if (err == 0) {
170 		size_t hdr_size = ea_pack_object(&hdr, NULL, 0);
171 		size_t toc_size = ea_pack_object(&toc, NULL, 0);
172 
173 		size_t size = hdr_size + toc_size;
174 		void *buf = fmd_alloc(size, FMD_SLEEP);
175 
176 		(void) ea_pack_object(&hdr, buf, hdr_size);
177 		(void) ea_pack_object(&toc, (char *)buf + hdr_size, toc_size);
178 
179 		if ((lp->log_off = lseek64(lp->log_fd, 0, SEEK_END)) == -1L)
180 			fmd_panic("failed to seek log %s", lp->log_name);
181 
182 		if (fmd_log_write(lp, buf, size) != size)
183 			err = errno; /* save errno for fmd_set_errno() below */
184 
185 		fmd_free(buf, size);
186 
187 		lp->log_toc = lp->log_off + hdr_size;
188 		lp->log_beg = lp->log_off + hdr_size + toc_size;
189 		lp->log_off = lp->log_off + hdr_size + toc_size;
190 
191 		if (lp->log_off != lseek64(lp->log_fd, 0, SEEK_END))
192 			fmd_panic("eof off != log_off 0x%llx\n", lp->log_off);
193 	} else
194 		err = EFMD_LOG_EXACCT;
195 
196 	(void) ea_free_item(&i0, EUP_ALLOC);
197 	(void) ea_free_item(&i1, EUP_ALLOC);
198 	(void) ea_free_item(&i2, EUP_ALLOC);
199 	(void) ea_free_item(&i3, EUP_ALLOC);
200 	(void) ea_free_item(&i4, EUP_ALLOC);
201 	(void) ea_free_item(&i5, EUP_ALLOC);
202 	(void) ea_free_item(&i6, EUP_ALLOC);
203 
204 	return (err ? fmd_set_errno(err) : 0);
205 }
206 
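/*
 * Error helper for fmd_log_check_hdr(): report the most recent libexacct
 * error (or a catalog tag mismatch) along with the supplied message, and
 * return 'err' as the fmd errno.
 */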
207 static int
208 fmd_log_check_err(fmd_log_t *lp, int err, const char *msg)
209 {
210 	int eaerr = ea_error();
211 	char buf[BUFSIZ];
212 
213 	(void) snprintf(buf, sizeof (buf), "%s: %s: %s\n",
214 	    lp->log_name, msg, eaerr != EXR_OK ?
215 	    fmd_ea_strerror(eaerr) : "catalog tag mismatch");
216 
217 	fmd_error(err, buf);
218 	return (fmd_set_errno(err));
219 }
220 
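/*
 * Validate the header of a pre-existing log file: verify that the header
 * group's label matches the expected tag and that its leading version
 * number does not exceed the daemon's, and save the log uuid if one is
 * present.  Then read the TOC group to recover the replay skip offset.
 * On success, log_toc, log_beg, log_off, and log_skip are initialized
 * from the file.
 */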
221 static int
222 fmd_log_check_hdr(fmd_log_t *lp, const char *tag)
223 {
224 	int got_version = 0, got_label = 0;
225 	ea_object_t *grp, *obj;
226 	off64_t hdr_off, hdr_size;
227 	int dvers, fvers;
228 	const char *p;
229 
230 	ea_clear(&lp->log_ea); /* resync exacct file */
231 
232 	if ((hdr_off = lseek64(lp->log_fd, 0, SEEK_CUR)) == -1L)
233 		fmd_panic("failed to seek log %s", lp->log_name);
234 
235 	/*
236 	 * Read the first group of log meta-data: the write-once read-only
237 	 * file header.  We read all records in this group, ignoring all but
238 	 * the VERSION and LABEL, which are required and must be verified.
239 	 */
240 	if ((grp = ea_get_object_tree(&lp->log_ea, 1)) == NULL ||
241 	    grp->eo_catalog != CAT_FMA_GROUP) {
242 		ea_free_object(grp, EUP_ALLOC);
243 		return (fmd_log_check_err(lp, EFMD_LOG_INVAL,
244 		    "invalid fma hdr record group"));
245 	}
246 
247 	for (obj = grp->eo_group.eg_objs; obj != NULL; obj = obj->eo_next) {
248 		switch (obj->eo_catalog) {
249 		case CAT_FMA_VERSION:
250 			for (dvers = 0, p = fmd.d_version;
251 			    *p != '\0'; p++) {
252 				if (isdigit(*p))
253 					dvers = dvers * 10 + (*p - '0');
254 				else
255 					break;
256 			}
257 
258 			for (fvers = 0, p = obj->eo_item.ei_string;
259 			    *p != '\0'; p++) {
260 				if (isdigit(*p))
261 					fvers = fvers * 10 + (*p - '0');
262 				else
263 					break;
264 			}
265 
266 			if (fvers > dvers) {
267 				fmd_error(EFMD_LOG_INVAL, "%s: log version "
268 				    "%s is not supported by this daemon\n",
269 				    lp->log_name, obj->eo_item.ei_string);
270 				ea_free_object(grp, EUP_ALLOC);
271 				return (fmd_set_errno(EFMD_LOG_VERSION));
272 			}
273 
274 			got_version++;
275 			break;
276 
277 		case CAT_FMA_LABEL:
278 			if (strcmp(obj->eo_item.ei_string, tag) != 0) {
279 				fmd_error(EFMD_LOG_INVAL, "%s: log tag '%s' "
280 				    "does not match expected tag '%s'\n",
281 				    lp->log_name, obj->eo_item.ei_string, tag);
282 				ea_free_object(grp, EUP_ALLOC);
283 				return (fmd_set_errno(EFMD_LOG_INVAL));
284 			}
285 			got_label++;
286 			break;
287 		case CAT_FMA_UUID:
288 			lp->log_uuid = fmd_strdup(obj->eo_item.ei_string,
289 			    FMD_SLEEP);
290 			lp->log_uuidlen = strlen(lp->log_uuid);
291 			break;
292 		}
293 	}
294 
295 	hdr_size = ea_pack_object(grp, NULL, 0);
296 	ea_free_object(grp, EUP_ALLOC);
297 
298 	if (!got_version || !got_label) {
299 		fmd_error(EFMD_LOG_INVAL, "%s: fmd hdr record group did not "
300 		    "include mandatory version and/or label\n", lp->log_name);
301 		return (fmd_set_errno(EFMD_LOG_INVAL));
302 	}
303 
304 	/*
305 	 * Read the second group of log meta-data: the table of contents.  We
306 	 * expect this group to contain an OFFSET object indicating the current
307 	 * value of log_skip.  We save this in our fmd_log_t and then return.
308 	 */
309 	if ((grp = ea_get_object_tree(&lp->log_ea, 1)) == NULL ||
310 	    grp->eo_catalog != CAT_FMA_GROUP || grp->eo_group.eg_nobjs < 1 ||
311 	    grp->eo_group.eg_objs->eo_catalog != CAT_FMA_OFFSET) {
312 		ea_free_object(grp, EUP_ALLOC);
313 		return (fmd_log_check_err(lp, EFMD_LOG_INVAL,
314 		    "invalid fma toc record group"));
315 	}
316 
317 	lp->log_toc = hdr_off + hdr_size;
318 	lp->log_beg = hdr_off + hdr_size + ea_pack_object(grp, NULL, 0);
319 	lp->log_off = lseek64(lp->log_fd, 0, SEEK_END);
320 	lp->log_skip = grp->eo_group.eg_objs->eo_item.ei_uint64;
321 
322 	if (lp->log_skip > lp->log_off) {
323 		fmd_error(EFMD_LOG_INVAL, "%s: skip %llx exceeds file size; "
324 		    "resetting to zero\n", lp->log_name, lp->log_skip);
325 		lp->log_skip = 0;
326 	}
327 
328 	ea_free_object(grp, EUP_ALLOC);
329 	return (0);
330 }
331 
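/*
 * Open a libexacct handle for the log.  We hand ea_fdopen() a dup() of
 * log_fd so that ea_close() tears down its own descriptor and leaves
 * log_fd available for the raw i/o performed elsewhere in this file.
 */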
332 static int
333 fmd_log_open_exacct(fmd_log_t *lp, int aflags, int oflags)
334 {
335 	int fd = dup(lp->log_fd);
336 	const char *creator;
337 
338 	(void) fmd_conf_getprop(fmd.d_conf, "log.creator", &creator);
339 
340 	if (ea_fdopen(&lp->log_ea, fd, creator, aflags, oflags) != 0) {
341 		fmd_error(EFMD_LOG_EXACCT, "%s: failed to open log file: %s\n",
342 		    lp->log_name, fmd_ea_strerror(ea_error()));
343 		(void) close(fd);
344 		return (fmd_set_errno(EFMD_LOG_EXACCT));
345 	}
346 
347 	lp->log_flags |= FMD_LF_EAOPEN;
348 	return (0);
349 }
350 
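/*
 * Common code to open or create a log file.  If the log file is newly
 * created (empty), a new header is written; otherwise the existing header
 * is validated.  A pre-existing log that fails validation is renamed aside
 * with a "-" suffix and, if O_CREAT was specified, we retry with a fresh
 * file.
 */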
351 static fmd_log_t *
352 fmd_log_xopen(const char *root, const char *name, const char *tag, int oflags)
353 {
354 	fmd_log_t *lp = fmd_zalloc(sizeof (fmd_log_t), FMD_SLEEP);
355 
356 	char buf[PATH_MAX];
357 	size_t len;
358 	int err;
359 
360 	(void) pthread_mutex_init(&lp->log_lock, NULL);
361 	(void) pthread_cond_init(&lp->log_cv, NULL);
362 	(void) pthread_mutex_lock(&lp->log_lock);
363 
364 	len = strlen(root) + strlen(name) + 2; /* for "/" and "\0" */
365 	lp->log_name = fmd_alloc(len, FMD_SLEEP);
366 	(void) snprintf(lp->log_name, len, "%s/%s", root, name);
367 	lp->log_tag = fmd_strdup(tag, FMD_SLEEP);
368 	(void) fmd_conf_getprop(fmd.d_conf, "log.minfree", &lp->log_minfree);
369 
370 	if (strcmp(lp->log_tag, FMD_LOG_ERROR) == 0)
371 		lp->log_flags |= FMD_LF_REPLAY;
372 
373 top:
374 	if ((lp->log_fd = open64(lp->log_name, oflags, 0644)) == -1 ||
375 	    fstat64(lp->log_fd, &lp->log_stat) == -1) {
376 		fmd_error(EFMD_LOG_OPEN, "failed to open log %s", lp->log_name);
377 		fmd_log_close(lp);
378 		return (NULL);
379 	}
380 
381 	/*
382 	 * If our open() created the log file, use libexacct to write a header
383 	 * and position the file just after the header (EO_TAIL).  If the log
384 	 * file already existed, use libexacct to validate the header and again
385 	 * position the file just after the header (EO_HEAD).  Note that we lie
386 	 * to libexacct about 'oflags' in order to achieve the desired result.
387 	 */
388 	if (lp->log_stat.st_size == 0) {
389 		err = fmd_log_open_exacct(lp, EO_VALID_HDR | EO_TAIL,
390 		    O_CREAT | O_WRONLY) || fmd_log_write_hdr(lp, tag);
391 	} else {
392 		err = fmd_log_open_exacct(lp, EO_VALID_HDR | EO_HEAD,
393 		    O_RDONLY) || fmd_log_check_hdr(lp, tag);
394 	}
395 
396 	/*
397 	 * If ea_fdopen() failed and the log was pre-existing, attempt to move
398 	 * it aside and start a new one.  If we created the log but failed to
399 	 * initialize it, then we have no choice but to give up (e.g. EROFS).
400 	 */
401 	if (err) {
402 		fmd_error(EFMD_LOG_OPEN,
403 		    "failed to initialize log %s", lp->log_name);
404 
405 		if (lp->log_flags & FMD_LF_EAOPEN) {
406 			lp->log_flags &= ~FMD_LF_EAOPEN;
407 			(void) ea_close(&lp->log_ea);
408 		}
409 
410 		(void) close(lp->log_fd);
411 		lp->log_fd = -1;
412 
413 		if (lp->log_stat.st_size != 0 && snprintf(buf,
414 		    sizeof (buf), "%s-", lp->log_name) < PATH_MAX &&
415 		    rename(lp->log_name, buf) == 0) {
416 			TRACE((FMD_DBG_LOG, "mv %s to %s", lp->log_name, buf));
417 			if (oflags & O_CREAT)
418 				goto top;
419 		}
420 
421 		fmd_log_close(lp);
422 		return (NULL);
423 	}
424 
425 	lp->log_refs++;
426 	(void) pthread_mutex_unlock(&lp->log_lock);
427 
428 	return (lp);
429 }
430 
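/*
 * fmd_log_tryopen() opens an existing log for synchronous i/o but will not
 * create it; fmd_log_open() additionally specifies O_CREAT so that a
 * missing log file is created.
 */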
431 fmd_log_t *
432 fmd_log_tryopen(const char *root, const char *name, const char *tag)
433 {
434 	return (fmd_log_xopen(root, name, tag, O_RDWR | O_SYNC));
435 }
436 
437 fmd_log_t *
438 fmd_log_open(const char *root, const char *name, const char *tag)
439 {
440 	return (fmd_log_xopen(root, name, tag, O_RDWR | O_CREAT | O_SYNC));
441 }
442 
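/*
 * Tear down a log: close the libexacct handle and the underlying file
 * descriptor and free all associated memory.  The caller must hold
 * log_lock and the reference count must already be zero.
 */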
443 void
444 fmd_log_close(fmd_log_t *lp)
445 {
446 	ASSERT(MUTEX_HELD(&lp->log_lock));
447 	ASSERT(lp->log_refs == 0);
448 
449 	if ((lp->log_flags & FMD_LF_EAOPEN) && ea_close(&lp->log_ea) != 0) {
450 		fmd_error(EFMD_LOG_CLOSE, "failed to close log %s: %s\n",
451 		    lp->log_name, fmd_ea_strerror(ea_error()));
452 	}
453 
454 	if (lp->log_fd >= 0 && close(lp->log_fd) != 0) {
455 		fmd_error(EFMD_LOG_CLOSE,
456 		    "failed to close log %s", lp->log_name);
457 	}
458 
459 	fmd_strfree(lp->log_name);
460 	fmd_strfree(lp->log_tag);
461 	if (lp->log_uuid != NULL)
462 		fmd_free(lp->log_uuid, lp->log_uuidlen + 1);
463 
464 	fmd_free(lp, sizeof (fmd_log_t));
465 }
466 
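/*
 * Take a hold on the log and, if the log requires replay tagging, also
 * increment the pending event count; the matching decrement is performed
 * by fmd_log_commit() or fmd_log_decommit().
 */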
467 void
468 fmd_log_hold_pending(fmd_log_t *lp)
469 {
470 	(void) pthread_mutex_lock(&lp->log_lock);
471 
472 	lp->log_refs++;
473 	ASSERT(lp->log_refs != 0);
474 
475 	if (lp->log_flags & FMD_LF_REPLAY) {
476 		lp->log_pending++;
477 		ASSERT(lp->log_pending != 0);
478 	}
479 
480 	(void) pthread_mutex_unlock(&lp->log_lock);
481 }
482 
483 void
484 fmd_log_hold(fmd_log_t *lp)
485 {
486 	(void) pthread_mutex_lock(&lp->log_lock);
487 	lp->log_refs++;
488 	ASSERT(lp->log_refs != 0);
489 	(void) pthread_mutex_unlock(&lp->log_lock);
490 }
491 
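/*
 * Release a hold on the log.  The final release closes and frees the log
 * via fmd_log_close(), which expects log_lock to be held on entry.
 */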
492 void
493 fmd_log_rele(fmd_log_t *lp)
494 {
495 	(void) pthread_mutex_lock(&lp->log_lock);
496 	ASSERT(lp->log_refs != 0);
497 
498 	if (--lp->log_refs == 0)
499 		fmd_log_close(lp);
500 	else
501 		(void) pthread_mutex_unlock(&lp->log_lock);
502 }
503 
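/*
 * Append an event to the specified log.  The event's timestamp and packed
 * nvlist are written as a single exacct record group; if the event has an
 * associated case, a subgroup of cross-references to the log records of
 * the case's items is attached as well.  On success the event is marked
 * non-volatile and records its log, offset, and length, and for replay-
 * tagged logs the pending count is incremented.  If the write would drop
 * the filesystem's free space below the minfree threshold, the append
 * fails without being attempted; failure of any kind leaves the event
 * volatile.
 */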
504 void
505 fmd_log_append(fmd_log_t *lp, fmd_event_t *e, fmd_case_t *cp)
506 {
507 	fmd_event_impl_t *ep = (fmd_event_impl_t *)e;
508 	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
509 	int err = 0;
510 
511 	ea_object_t grp0, grp1, i0, i1, i2, *items;
512 	ea_object_t **fe = NULL;
513 	size_t nvsize, easize, itsize, frsize;
514 	char *nvbuf, *eabuf;
515 	statvfs64_t stv;
516 
517 	(void) pthread_mutex_lock(&ep->ev_lock);
518 
519 	ASSERT(ep->ev_flags & FMD_EVF_VOLATILE);
520 	ASSERT(ep->ev_log == NULL);
521 
522 	(void) nvlist_size(ep->ev_nvl, &nvsize, NV_ENCODE_XDR);
523 	nvbuf = fmd_alloc(nvsize, FMD_SLEEP);
524 	(void) nvlist_pack(ep->ev_nvl, &nvbuf, &nvsize, NV_ENCODE_XDR, 0);
525 
526 	if (lp->log_flags & FMD_LF_REPLAY)
527 		err |= ea_set_group(&grp0, CAT_FMA_RGROUP);
528 	else
529 		err |= ea_set_group(&grp0, CAT_FMA_GROUP);
530 
531 	err |= ea_set_item(&i0, CAT_FMA_TODSEC, &ep->ev_time.ftv_sec, 0);
532 	err |= ea_set_item(&i1, CAT_FMA_TODNSEC, &ep->ev_time.ftv_nsec, 0);
533 	err |= ea_set_item(&i2, CAT_FMA_NVLIST, nvbuf, nvsize);
534 
535 	if (err != 0) {
536 		(void) pthread_mutex_unlock(&ep->ev_lock);
537 		err = EFMD_LOG_EXACCT;
538 		goto exerr;
539 	}
540 
541 	(void) ea_attach_to_group(&grp0, &i0);
542 	(void) ea_attach_to_group(&grp0, &i1);
543 	(void) ea_attach_to_group(&grp0, &i2);
544 
545 	/*
546 	 * If this event has a case associated with it (i.e. it is a list),
547 	 * then allocate a block of ea_object_t's and fill in a group for
548 	 * each event saved in the case's item list.  For each such group,
549 	 * we attach it to grp1, which in turn will be attached to grp0.
550 	 */
551 	if (cp != NULL) {
552 		ea_object_t *egrp, *ip, **fp;
553 		fmd_event_impl_t *eip;
554 		fmd_case_item_t *cit;
555 
556 		(void) ea_set_group(&grp1, CAT_FMA_GROUP);
557 		frsize = sizeof (ea_object_t *) * cip->ci_nitems;
558 		itsize = sizeof (ea_object_t) * cip->ci_nitems * 5;
559 		items = ip = fmd_alloc(itsize, FMD_SLEEP);
560 
561 		for (cit = cip->ci_items; cit != NULL; cit = cit->cit_next) {
562 			major_t maj;
563 			minor_t min;
564 
565 			eip = (fmd_event_impl_t *)cit->cit_event;
566 
567 			if (eip->ev_log == NULL)
568 				continue; /* event was never logged */
569 
570 			maj = major(eip->ev_log->log_stat.st_dev);
571 			min = minor(eip->ev_log->log_stat.st_dev);
572 
573 			(void) ea_set_group(ip, CAT_FMA_GROUP);
574 			egrp = ip++; /* first obj is group */
575 
576 			/*
577 			 * If the event's log file is in the legacy format
578 			 * (no uuid), write the cross-reference using the
579 			 * legacy maj/min/inode fields; otherwise write it
580 			 * using the file's uuid.
581 			 */
582 			if (eip->ev_log->log_uuid == NULL) {
583 				(void) ea_set_item(ip, CAT_FMA_MAJOR, &maj, 0);
584 				(void) ea_attach_to_group(egrp, ip++);
585 				(void) ea_set_item(ip, CAT_FMA_MINOR, &min, 0);
586 				(void) ea_attach_to_group(egrp, ip++);
587 				(void) ea_set_item(ip, CAT_FMA_INODE,
588 				    &eip->ev_log->log_stat.st_ino, 0);
589 				(void) ea_attach_to_group(egrp, ip++);
590 			} else {
591 				if (ea_set_item(ip, CAT_FMA_UUID,
592 				    eip->ev_log->log_uuid, 0) == -1) {
593 					err = EFMD_LOG_EXACCT;
594 					goto exerrcp;
595 				}
596 				if (fe == NULL)
597 					fe = fp = fmd_zalloc(frsize, FMD_SLEEP);
598 				*fp++ = ip;
599 				(void) ea_attach_to_group(egrp, ip++);
600 			}
601 			(void) ea_set_item(ip, CAT_FMA_OFFSET, &eip->ev_off, 0);
602 			(void) ea_attach_to_group(egrp, ip++);
603 			(void) ea_attach_to_group(&grp1, egrp);
604 		}
605 		(void) ea_attach_to_group(&grp0, &grp1);
606 	}
607 
608 	easize = ea_pack_object(&grp0, NULL, 0);
609 	eabuf = fmd_alloc(easize, FMD_SLEEP);
610 	(void) ea_pack_object(&grp0, eabuf, easize);
611 
612 	/*
613 	 * Before writing the record, check to see if this would cause the free
614 	 * space in the filesystem to drop below our minfree threshold.  If so,
615 	 * don't bother attempting the write and instead pretend it failed.  As
616 	 * fmd(1M) runs as root, it will be able to access the space "reserved"
617  * for root, and therefore can run the system out of disk space in a
618 	 * heavy error load situation, violating the basic design principle of
619 	 * fmd(1M) that we don't want to make a bad situation even worse.
620 	 */
621 	(void) pthread_mutex_lock(&lp->log_lock);
622 
623 	if (lp->log_minfree != 0 && fstatvfs64(lp->log_fd, &stv) == 0 &&
624 	    stv.f_bavail * stv.f_frsize < lp->log_minfree + easize) {
625 
626 		TRACE((FMD_DBG_LOG, "append %s crosses minfree", lp->log_tag));
627 		err = EFMD_LOG_MINFREE;
628 
629 	} else if (fmd_log_write(lp, eabuf, easize) == easize) {
630 		TRACE((FMD_DBG_LOG, "append %s %p off=0x%llx",
631 		    lp->log_tag, (void *)ep, (u_longlong_t)lp->log_off));
632 
633 		ep->ev_flags &= ~FMD_EVF_VOLATILE;
634 		ep->ev_log = lp;
635 		ep->ev_off = lp->log_off;
636 		ep->ev_len = easize;
637 
638 		if (lp->log_flags & FMD_LF_REPLAY) {
639 			lp->log_pending++;
640 			ASSERT(lp->log_pending != 0);
641 		}
642 
643 		lp->log_refs++;
644 		ASSERT(lp->log_refs != 0);
645 		lp->log_off += easize;
646 	} else {
647 		err = errno; /* save errno for fmd_error() call below */
648 
649 		/*
650 		 * If we can't append the record, seek the file back to
651 		 * the original location and truncate it there in order to make
652 		 * sure the file is always in a sane state w.r.t. libexacct.
653 		 */
654 		(void) lseek64(lp->log_fd, lp->log_off, SEEK_SET);
655 		(void) ftruncate64(lp->log_fd, lp->log_off);
656 	}
657 
658 	(void) pthread_mutex_unlock(&lp->log_lock);
659 	(void) pthread_mutex_unlock(&ep->ev_lock);
660 
661 	fmd_free(eabuf, easize);
662 
663 exerrcp:
664 	if (cp != NULL) {
665 		if (fe != NULL) {
666 			ea_object_t **fp = fe;
667 			int i = 0;
668 
669 			for (; *fp != NULL && i < cip->ci_nitems; i++)
670 				(void) ea_free_item(*fp++, EUP_ALLOC);
671 			fmd_free(fe, frsize);
672 		}
673 
674 		fmd_free(items, itsize);
675 	}
676 
677 exerr:
678 	fmd_free(nvbuf, nvsize);
679 
680 	(void) ea_free_item(&i0, EUP_ALLOC);
681 	(void) ea_free_item(&i1, EUP_ALLOC);
682 	(void) ea_free_item(&i2, EUP_ALLOC);
683 
684 	/*
685 	 * Keep track of out-of-space errors using global statistics.  As we're
686  * out of disk space, it's unlikely the EFMD_LOG_APPEND error will be logged.
687 	 */
688 	if (err == ENOSPC || err == EFMD_LOG_MINFREE) {
689 		fmd_stat_t *sp;
690 
691 		if (lp == fmd.d_errlog)
692 			sp = &fmd.d_stats->ds_err_enospc;
693 		else if (lp == fmd.d_fltlog)
694 			sp = &fmd.d_stats->ds_flt_enospc;
695 		else
696 			sp = &fmd.d_stats->ds_oth_enospc;
697 
698 		(void) pthread_mutex_lock(&fmd.d_stats_lock);
699 		sp->fmds_value.ui64++;
700 		(void) pthread_mutex_unlock(&fmd.d_stats_lock);
701 	}
702 
703 	if (err != 0) {
704 		fmd_error(EFMD_LOG_APPEND, "failed to log_append %s %p: %s\n",
705 		    lp->log_tag, (void *)ep, fmd_strerror(err));
706 	}
707 }
708 
709 /*
710  * Commit an event to the log permanently, indicating that it should not be
711  * replayed on restart.  This is done by overwriting the event group's catalog
712  * code with EXD_GROUP_FMA (from EXD_GROUP_RFMA used in fmd_log_append()).  We
713  * use pwrite64() to update the existing word directly, using somewhat guilty
714  * knowledge that exacct stores the 32-bit catalog word first for each object.
715  * Since we are overwriting an existing log location using pwrite64() and hold
716  * the event lock, we do not need to hold the log_lock during the i/o.
717  */
718 void
719 fmd_log_commit(fmd_log_t *lp, fmd_event_t *e)
720 {
721 	fmd_event_impl_t *ep = (fmd_event_impl_t *)e;
722 	ea_catalog_t c;
724 
725 	if (!(lp->log_flags & FMD_LF_REPLAY))
726 		return; /* log does not require replay tagging */
727 
728 	ASSERT(MUTEX_HELD(&ep->ev_lock));
729 	ASSERT(ep->ev_log == lp && ep->ev_off != 0);
730 
731 	c = CAT_FMA_GROUP;
732 	exacct_order32(&c);
733 
734 	if (pwrite64(lp->log_fd, &c, sizeof (c), ep->ev_off) == sizeof (c)) {
735 		TRACE((FMD_DBG_LOG, "commit %s %p", lp->log_tag, (void *)ep));
736 		ep->ev_flags &= ~FMD_EVF_REPLAY;
737 
738 		/*
739 		 * If we have committed the event, check to see if the TOC skip
740 		 * offset needs to be updated, and decrement the pending count.
741 		 */
742 		(void) pthread_mutex_lock(&lp->log_lock);
743 
744 		if (lp->log_skip == ep->ev_off) {
745 			lp->log_flags |= FMD_LF_DIRTY;
746 			lp->log_skip += ep->ev_len;
747 		}
748 
749 		ASSERT(lp->log_pending != 0);
750 		lp->log_pending--;
751 
752 		(void) pthread_mutex_unlock(&lp->log_lock);
753 		(void) pthread_cond_broadcast(&lp->log_cv);
754 
755 	} else {
756 		fmd_error(EFMD_LOG_COMMIT, "failed to log_commit %s %p: %s\n",
757 		    lp->log_tag, (void *)ep, fmd_strerror(errno));
758 	}
759 }
760 
761 /*
762  * If we need to destroy an event and it wasn't able to be committed, we permit
763  * the owner to decommit from ever trying again.  This operation decrements the
764  * pending count on the log and broadcasts to anyone waiting on log_cv.
765  */
766 void
767 fmd_log_decommit(fmd_log_t *lp, fmd_event_t *e)
768 {
769 	fmd_event_impl_t *ep = (fmd_event_impl_t *)e;
770 
771 	if (!(lp->log_flags & FMD_LF_REPLAY))
772 		return; /* log does not require replay tagging */
773 
774 	ASSERT(MUTEX_HELD(&ep->ev_lock));
775 	ASSERT(ep->ev_log == lp);
776 
777 	(void) pthread_mutex_lock(&lp->log_lock);
778 
779 	TRACE((FMD_DBG_LOG, "decommit %s %p", lp->log_tag, (void *)ep));
780 	ep->ev_flags &= ~FMD_EVF_REPLAY;
781 
782 	ASSERT(lp->log_pending != 0);
783 	lp->log_pending--;
784 
785 	(void) pthread_mutex_unlock(&lp->log_lock);
786 	(void) pthread_cond_broadcast(&lp->log_cv);
787 }
788 
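/*
 * Reconstruct an event from an exacct record group previously written by
 * fmd_log_append(): unpack the nvlist, recover the timestamp, look up the
 * mandatory class string, and recreate the event bound to (lp, off).
 */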
789 static fmd_event_t *
790 fmd_log_unpack(fmd_log_t *lp, ea_object_t *grp, off64_t off)
791 {
792 	fmd_timeval_t ftv = { -1ULL, -1ULL };
793 	nvlist_t *nvl = NULL;
794 
795 	ea_object_t *obj;
796 	char *class;
797 	int err;
798 
799 	for (obj = grp->eo_group.eg_objs; obj != NULL; obj = obj->eo_next) {
800 		switch (obj->eo_catalog) {
801 		case CAT_FMA_NVLIST:
802 			if ((err = nvlist_xunpack(obj->eo_item.ei_raw,
803 			    obj->eo_item.ei_size, &nvl, &fmd.d_nva)) != 0) {
804 				fmd_error(EFMD_LOG_UNPACK, "failed to unpack "
805 				    "log nvpair: %s\n", fmd_strerror(err));
806 				return (NULL);
807 			}
808 			break;
809 
810 		case CAT_FMA_TODSEC:
811 			ftv.ftv_sec = obj->eo_item.ei_uint64;
812 			break;
813 
814 		case CAT_FMA_TODNSEC:
815 			ftv.ftv_nsec = obj->eo_item.ei_uint64;
816 			break;
817 		}
818 	}
819 
820 	if (nvl == NULL || ftv.ftv_sec == -1ULL || ftv.ftv_nsec == -1ULL) {
821 		fmd_error(EFMD_LOG_UNPACK, "failed to unpack log event: "
822 		    "required object(s) missing from record group\n");
823 		nvlist_free(nvl);
824 		return (NULL);
825 	}
826 
827 	if (nvlist_lookup_string(nvl, FM_CLASS, &class) != 0) {
828 		fmd_error(EFMD_LOG_UNPACK, "failed to unpack log event: "
829 		    "record is missing required '%s' nvpair\n", FM_CLASS);
830 		nvlist_free(nvl);
831 		return (NULL);
832 	}
833 
834 	return (fmd_event_recreate(FMD_EVT_PROTOCOL,
835 	    &ftv, nvl, class, lp, off, ea_pack_object(grp, NULL, 0)));
836 }
837 
838 /*
839  * Replay event(s) from the specified log by invoking the specified callback
840  * function 'func' for each event.  If the log has the FMD_LF_REPLAY flag set,
841  * we replay all events after log_skip that have the FMA_RGROUP group tag.
842  * This mode is used for the error telemetry log.  If the log does not have
843  * this flag set (used for ASRU logs), only the most recent event is replayed.
844  */
845 void
846 fmd_log_replay(fmd_log_t *lp, fmd_log_f *func, void *data)
847 {
848 	ea_object_t obj, *grp;
849 	ea_object_type_t type;
850 	ea_catalog_t c;
851 	fmd_event_t *ep;
852 	off64_t off, skp;
853 	uint_t n = 0;
854 
855 	(void) pthread_mutex_lock(&lp->log_lock);
856 
857 	if (lp->log_stat.st_size == 0 && (lp->log_flags & FMD_LF_REPLAY)) {
858 		(void) pthread_mutex_unlock(&lp->log_lock);
859 		return; /* we just created this log: never replay events */
860 	}
861 
862 	while (lp->log_flags & FMD_LF_BUSY)
863 		(void) pthread_cond_wait(&lp->log_cv, &lp->log_lock);
864 
865 	if (lp->log_off == lp->log_beg) {
866 		(void) pthread_mutex_unlock(&lp->log_lock);
867 		return; /* no records appended yet */
868 	}
869 
870 	lp->log_flags |= FMD_LF_BUSY;
871 	skp = lp->log_skip;
872 	ea_clear(&lp->log_ea); /* resync exacct file */
873 
874 	/*
875 	 * If FMD_LF_REPLAY is set, begin our replay at either log_skip (if it
876 	 * is non-zero) or at log_beg.  Otherwise replay from the end (log_off).
877 	 */
878 	if (lp->log_flags & FMD_LF_REPLAY) {
879 		off = MAX(lp->log_beg, lp->log_skip);
880 		c = CAT_FMA_RGROUP;
881 	} else {
882 		off = lp->log_off;
883 		c = CAT_FMA_GROUP;
884 	}
885 
886 	if (lseek64(lp->log_fd, off, SEEK_SET) != off) {
887 		fmd_panic("failed to seek %s to 0x%llx\n",
888 		    lp->log_name, (u_longlong_t)off);
889 	}
890 
891 	/*
892 	 * If FMD_LF_REPLAY is not set, back up to the start of the previous
893 	 * object and make sure this object is an EO_GROUP; otherwise return.
894 	 */
895 	if (!(lp->log_flags & FMD_LF_REPLAY) &&
896 	    (type = ea_previous_object(&lp->log_ea, &obj)) != EO_GROUP) {
897 		fmd_error(EFMD_LOG_REPLAY, "last log object is of unexpected "
898 		    "type %d (log may be truncated or corrupt)\n", type);
899 		goto out;
900 	}
901 
902 	while ((grp = ea_get_object_tree(&lp->log_ea, 1)) != NULL) {
903 		if (!(lp->log_flags & FMD_LF_REPLAY))
904 			off -= ea_pack_object(grp, NULL, 0);
905 		else if (n == 0 && grp->eo_catalog == CAT_FMA_GROUP)
906 			skp = off; /* update skip */
907 
908 		/*
909 		 * We temporarily drop log_lock around the call to unpack the
910 		 * event, hold it, and perform the callback, because these
911 		 * operations may try to acquire log_lock to bump log_refs.
912 		 * We cannot lose control because the FMD_LF_BUSY flag is set.
913 		 */
914 		(void) pthread_mutex_unlock(&lp->log_lock);
915 
916 		if (grp->eo_catalog == c &&
917 		    (ep = fmd_log_unpack(lp, grp, off)) != NULL) {
918 
919 			TRACE((FMD_DBG_LOG, "replay %s %p off %llx",
920 			    lp->log_tag, (void *)ep, (u_longlong_t)off));
921 
922 			fmd_event_hold(ep);
923 			func(lp, ep, data);
924 			fmd_event_rele(ep);
925 			n++;
926 		}
927 
928 		(void) pthread_mutex_lock(&lp->log_lock);
929 		off += ea_pack_object(grp, NULL, 0);
930 		ea_free_object(grp, EUP_ALLOC);
931 	}
932 
933 	if (ea_error() != EXR_EOF) {
934 		fmd_error(EFMD_LOG_REPLAY, "failed to replay %s event at "
935 		    "offset 0x%llx: %s\n", lp->log_name, (u_longlong_t)off,
936 		    fmd_ea_strerror(ea_error()));
937 	}
938 
939 	if (n == 0)
940 		skp = off; /* if no replays, move skip to where we ended up */
941 
942 out:
943 	if (lseek64(lp->log_fd, lp->log_off, SEEK_SET) != lp->log_off) {
944 		fmd_panic("failed to seek %s to 0x%llx\n",
945 		    lp->log_name, (u_longlong_t)lp->log_off);
946 	}
947 
948 	if (skp != lp->log_skip) {
949 		lp->log_flags |= FMD_LF_DIRTY;
950 		lp->log_skip = skp;
951 	}
952 
953 	lp->log_flags &= ~FMD_LF_BUSY;
954 	(void) pthread_mutex_unlock(&lp->log_lock);
955 	(void) pthread_cond_broadcast(&lp->log_cv);
956 }
957 
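/*
 * If the TOC skip offset has been marked dirty, rewrite the TOC record
 * group in place with the current log_skip value so that a subsequent
 * replay begins at the correct offset.
 */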
958 void
959 fmd_log_update(fmd_log_t *lp)
960 {
961 	ea_object_t toc, item;
962 	off64_t skip = 0;
963 	size_t size;
964 	void *buf;
965 
966 	(void) pthread_mutex_lock(&lp->log_lock);
967 
968 	if (lp->log_flags & FMD_LF_DIRTY) {
969 		lp->log_flags &= ~FMD_LF_DIRTY;
970 		skip = lp->log_skip;
971 	}
972 
973 	(void) pthread_mutex_unlock(&lp->log_lock);
974 
975 	/*
976 	 * If the skip needs to be updated, construct a TOC record group
977 	 * containing the skip offset and overwrite the TOC in-place.
978 	 */
979 	if (skip != 0 && ea_set_group(&toc, CAT_FMA_GROUP) == 0 &&
980 	    ea_set_item(&item, CAT_FMA_OFFSET, &skip, 0) == 0) {
981 
982 		(void) ea_attach_to_group(&toc, &item);
983 		size = ea_pack_object(&toc, NULL, 0);
984 		buf = fmd_alloc(size, FMD_SLEEP);
985 
986 		(void) ea_pack_object(&toc, buf, size);
987 		ASSERT(lp->log_toc + size == lp->log_beg);
988 
989 		if (pwrite64(lp->log_fd, buf, size, lp->log_toc) == size) {
990 			TRACE((FMD_DBG_LOG, "updated skip to %llx", skip));
991 		} else {
992 			fmd_error(EFMD_LOG_UPDATE,
993 			    "failed to log_update %s", lp->log_tag);
994 		}
995 
996 		fmd_free(buf, size);
997 		(void) ea_free_item(&item, EUP_ALLOC);
998 	}
999 }
1000 
1001 /*
1002  * Rotate the specified log by renaming its underlying file to a staging file
1003  * that can be handed off to logadm(1M) or an administrator script.  If the
1004  * rename succeeds, open a new log file using the old path and return it.
1005  * Note that we are relying on our caller to use some higher-level mechanism to
1006  * ensure that fmd_log_rotate() cannot be called while other threads are
1007  * attempting fmd_log_append() using the same log (fmd's d_log_lock is used
1008  * for the global errlog and fltlog).
1009  */
1010 fmd_log_t *
1011 fmd_log_rotate(fmd_log_t *lp)
1012 {
1013 	char npath[PATH_MAX];
1014 	fmd_log_t *nlp;
1015 
1016 	(void) snprintf(npath, sizeof (npath), "%s.0-", lp->log_name);
1017 	(void) pthread_mutex_lock(&lp->log_lock);
1018 
1019 	/*
1020 	 * Check for any pending commits to drain before proceeding.  We can't
1021 	 * rotate the log out if commits are pending because if we die after
1022 	 * the log is moved aside, we won't be able to replay them on restart.
1023 	 */
1024 	if (lp->log_pending != 0) {
1025 		(void) pthread_mutex_unlock(&lp->log_lock);
1026 		(void) fmd_set_errno(EFMD_LOG_ROTBUSY);
1027 		return (NULL);
1028 	}
1029 
1030 	if (rename(lp->log_name, npath) != 0) {
1031 		(void) pthread_mutex_unlock(&lp->log_lock);
1032 		fmd_error(EFMD_LOG_ROTATE, "failed to rename %s", lp->log_name);
1033 		(void) fmd_set_errno(EFMD_LOG_ROTATE);
1034 		return (NULL);
1035 	}
1036 
1037 	if ((nlp = fmd_log_open("", lp->log_name, lp->log_tag)) == NULL) {
1038 		(void) rename(npath, lp->log_name);
1039 		(void) pthread_mutex_unlock(&lp->log_lock);
1040 		fmd_error(EFMD_LOG_ROTATE, "failed to reopen %s", lp->log_name);
1041 		(void) fmd_set_errno(EFMD_LOG_ROTATE);
1042 		return (NULL);
1043 	}
1044 
1045 	/*
1046 	 * If we've rotated the log, no pending events exist so we don't have
1047 	 * any more commits coming, and our caller should have arranged for
1048 	 * no more calls to append.  As such, we can close log_fd for good.
1049 	 */
1050 	if (lp->log_flags & FMD_LF_EAOPEN) {
1051 		(void) ea_close(&lp->log_ea);
1052 		lp->log_flags &= ~FMD_LF_EAOPEN;
1053 	}
1054 
1055 	(void) close(lp->log_fd);
1056 	lp->log_fd = -1;
1057 
1058 	(void) pthread_mutex_unlock(&lp->log_lock);
1059 	return (nlp);
1060 }
1061