xref: /illumos-gate/usr/src/uts/common/c2/audit_io.c (revision 0153d828c132fdb1a17c11b99386a3d1b87994cf)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2020 The University of Queensland
24  */
25 
26 /*
27  * Routines for writing audit records.
28  */
29 
30 #include <sys/door.h>
31 #include <sys/param.h>
32 #include <sys/time.h>
33 #include <sys/types.h>
34 #include <sys/statvfs.h>	/* for statfs */
35 #include <sys/vnode.h>
36 #include <sys/file.h>
37 #include <sys/vfs.h>
38 #include <sys/user.h>
39 #include <sys/uio.h>
40 #include <sys/reboot.h>
41 #include <sys/kmem.h>		/* for KM_SLEEP */
42 #include <sys/resource.h>	/* for RLIM_INFINITY */
43 #include <sys/cmn_err.h>	/* panic */
44 #include <sys/systm.h>
45 #include <sys/debug.h>
46 #include <sys/sysmacros.h>
47 #include <sys/syscall.h>
48 #include <sys/zone.h>
49 
50 #include <c2/audit.h>
51 #include <c2/audit_kernel.h>
52 #include <c2/audit_record.h>
53 #include <c2/audit_kevents.h>
54 #include <c2/audit_door_infc.h>
55 
56 static void	au_dequeue(au_kcontext_t *, au_buff_t *);
57 static void	audit_async_finish_backend(void *);
58 static int	audit_sync_block(au_kcontext_t *);
59 /*
60  * Each of these two tables is indexed by the values AU_DBUF_COMPLETE
61  * through AU_DBUF_LAST; the content is the next state value.  The
62  * first table determines the next state for a buffer which is not the
63  * end of a record and the second table determines the state for a
64  * buffer which is the end of a record.  The initial state is
65  * AU_DBUF_COMPLETE.
66  */
67 static int state_if_part[] = {
68     AU_DBUF_FIRST, AU_DBUF_MIDDLE, AU_DBUF_MIDDLE, AU_DBUF_FIRST};
69 static int state_if_not_part[] = {
70     AU_DBUF_COMPLETE, AU_DBUF_LAST, AU_DBUF_LAST, AU_DBUF_COMPLETE};
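/*
 * For example (assuming the usual COMPLETE/FIRST/MIDDLE/LAST ordering of the
 * AU_DBUF_* values): a record that fits in a single output buffer is sent as
 * AU_DBUF_COMPLETE, while a record split across three buffers is sent as
 * AU_DBUF_FIRST, AU_DBUF_MIDDLE, AU_DBUF_LAST.
 */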
71 /*
72  * Write to an audit descriptor.
73  * Add the au_membuf to the descriptor chain and free the chain passed in.
74  */
75 void
76 au_uwrite(token_t *m)
77 {
78 	au_write(&(u_ad), m);
79 }
80 
81 void
82 au_write(caddr_t *d, token_t *m)
83 {
84 	if (d == NULL) {
85 		au_toss_token(m);
86 		return;
87 	}
88 	if (m == (token_t *)0) {
89 		printf("au_write: null token\n");
90 		return;
91 	}
92 
93 	if (*d == NULL)
94 		*d = (caddr_t)m;
95 	else
96 		(void) au_append_rec((au_buff_t *)*d, m, AU_PACK);
97 }
98 
99 /*
100  * Close an audit descriptor.
101  * Use the flag parameter to indicate whether it should be written or not.
102  */
103 void
104 au_close(au_kcontext_t *kctx, caddr_t *d, int flag, au_event_t e_type,
105     au_emod_t e_mod, timestruc_t *e_time)
106 {
107 	token_t *dchain;	/* au_membuf chain which is the tokens */
108 	t_audit_data_t *tad = U2A(u);
109 
110 	ASSERT(tad != NULL);
111 	ASSERT(d != NULL);
112 	ASSERT(kctx != NULL);
113 
114 	if ((dchain = (token_t *)*d) == (token_t *)NULL)
115 		return;
116 
117 	*d = NULL;
118 
119 	/*
120 	 * If async then defer; or if requested, defer the closing/queueing to
121 	 * syscall end, unless no syscall is active or the syscall is _exit.
122 	 */
123 	if ((flag & AU_DONTBLOCK) || ((flag & AU_DEFER) &&
124 	    (tad->tad_scid != 0) && (tad->tad_scid != SYS_exit))) {
125 		au_close_defer(dchain, flag, e_type, e_mod, e_time);
126 		return;
127 	}
128 	au_close_time(kctx, dchain, flag, e_type, e_mod, e_time);
129 }
130 
131 /*
132  * Defer closing/queueing of an audit descriptor. For async events, queue
133  * via softcall. Otherwise, defer by queueing the record onto the tad; at
134  * syscall end time it will be pulled off.
135  */
136 void
137 au_close_defer(token_t *dchain, int flag, au_event_t e_type, au_emod_t e_mod,
138     timestruc_t *e_time)
139 {
140 	au_defer_info_t	*attr;
141 	t_audit_data_t *tad = U2A(u);
142 
143 	ASSERT(tad != NULL);
144 
145 	/* If not to be written, toss the record. */
146 	if ((flag & AU_OK) == 0) {
147 		au_toss_token(dchain);
148 		return;
149 	}
150 
151 	attr = kmem_alloc(sizeof (au_defer_info_t), KM_NOSLEEP);
152 	/* If no mem available, failing silently is the best recourse */
153 	if (attr == NULL) {
154 		au_toss_token(dchain);
155 		return;
156 	}
157 
158 	attr->audi_next = NULL;
159 	attr->audi_ad = dchain;
160 	attr->audi_e_type = e_type;
161 	attr->audi_e_mod = e_mod;
162 	attr->audi_flag = flag;
163 	if (e_time != NULL)
164 		attr->audi_atime = *e_time;
165 	else
166 		gethrestime(&attr->audi_atime);
167 
168 	/*
169 	 * All async events must be queued via softcall to avoid possible
170 	 * sleeping in high interrupt context. softcall will ensure it's
171 	 * done on a dedicated software-level interrupt thread.
172 	 */
173 	if (flag & AU_DONTBLOCK) {
174 		softcall(audit_async_finish_backend, attr);
175 		audit_async_done(NULL, 0);
176 		return;
177 	}
178 
179 	/*
180 	 * If not an async event, defer by queuing onto the tad until
181 	 * syscall end. No locking is needed because the tad is per-thread.
182 	 */
183 	if (tad->tad_defer_head)
184 		tad->tad_defer_tail->audi_next = attr;
185 	else
186 		tad->tad_defer_head = attr;
187 	tad->tad_defer_tail = attr;
188 }
189 
190 
191 /*
192  * Save the time in the event header. If time is not specified (i.e., pointer
193  * is NULL), use the current time.  This code is fairly ugly since it needs
194  * to support both 32- and 64-bit environments and can be called indirectly
195  * from both au_close() (for kernel audit) and audit() (userland audit).
196  */
197 /*ARGSUSED*/
198 static void
199 au_save_time(adr_t *hadrp, timestruc_t *time, int size)
200 {
201 	struct {
202 		uint32_t sec;
203 		uint32_t usec;
204 	} tv;
205 	timestruc_t	now;
206 
207 	if (time == NULL) {
208 		gethrestime(&now);
209 		time = &now;
210 	}
211 
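	/*
	 * A non-zero "size" selects the 64-bit token encoding (two 64-bit
	 * values) on _LP64 kernels; on 32-bit kernels the parameter is
	 * ignored and the 32-bit encoding is always used.
	 */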
212 #ifdef _LP64
213 	if (size)
214 		adr_int64(hadrp, (int64_t *)time, 2);
215 	else
216 #endif
217 	{
218 		tv.sec = (uint32_t)time->tv_sec;
219 		tv.usec = (uint32_t)time->tv_nsec;
220 		adr_int32(hadrp, (int32_t *)&tv, 2);
221 	}
222 }
223 
224 
225 /*
226  * Close an audit descriptor.
227  * If time of event is specified, use it in the record, otherwise use the
228  * current time.
229  */
230 void
231 au_close_time(au_kcontext_t *kctx, token_t *dchain, int flag, au_event_t e_type,
232     au_emod_t e_mod, timestruc_t *etime)
233 {
234 	token_t		*record;	/* au_membuf chain == the record */
235 	int		byte_count;
236 	token_t		*m;		/* for potential sequence token */
237 	adr_t		hadr;		/* handle for header token */
238 	adr_t		sadr;		/* handle for sequence token */
239 	size_t		zone_length;	/* length of zonename token */
240 	uint32_t	auditing;
241 
242 	ASSERT(dchain != NULL);
243 
244 	/* If not to be written, toss the record */
245 	if ((flag & AU_OK) == 0) {
246 		au_toss_token(dchain);
247 		return;
248 	}
249 	/* if auditing not enabled, then don't generate an audit record */
250 	ASSERT(U2A(u) != NULL);
251 	ASSERT(kctx != NULL);
252 
253 	auditing = (U2A(u)->tad_audit == AUC_UNSET)
254 	    ? kctx->auk_auditstate
255 	    : U2A(u)->tad_audit;
256 
257 	if (auditing & ~(AUC_AUDITING | AUC_INIT_AUDIT)) {
258 		/*
259 		 * At system boot, neither is set, yet we still want to generate
260 		 * an audit record.
261 		 */
262 		if (e_type != AUE_SYSTEMBOOT) {
263 			au_toss_token(dchain);
264 			return;
265 		}
266 	}
267 
268 	/* Count up the bytes used in the record. */
269 	byte_count = au_token_size(dchain);
270 
271 	/*
272 	 * add in size of header token (always present).
273 	 */
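	/*
	 * (Header token fields: token id (char), record byte count (int32_t),
	 * version number (char), event type and event modifier (two shorts),
	 * followed by the timestamp.)
	 */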
274 	byte_count += sizeof (char) + sizeof (int32_t) +
275 	    sizeof (char) + 2 * sizeof (short) + sizeof (timestruc_t);
276 
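	/*
	 * The extended header also carries a 4-byte address type/length field
	 * plus the terminal IP address itself; at_type is the address length
	 * in bytes (4 for IPv4, 16 for IPv6).
	 */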
277 	if (kctx->auk_hostaddr_valid)
278 		byte_count += sizeof (int32_t) +
279 		    kctx->auk_info.ai_termid.at_type;
280 
281 	/*
282 	 * add in size of zonename token (zero if !AUDIT_ZONENAME)
283 	 */
284 	if (kctx->auk_policy & AUDIT_ZONENAME) {
285 		zone_length = au_zonename_length(NULL);
286 		byte_count += zone_length;
287 	} else {
288 		zone_length = 0;
289 	}
290 	/* add in size of (optional) trailer token (id + magic + byte count) */
291 	if (kctx->auk_policy & AUDIT_TRAIL)
292 		byte_count += 7;
293 
294 	/* add in size of (optional) sequence token (id + 4-byte count) */
295 	if (kctx->auk_policy & AUDIT_SEQ)
296 		byte_count += 5;
297 
298 	/* build the header */
299 	if (kctx->auk_hostaddr_valid)
300 		record = au_to_header_ex(byte_count, e_type, e_mod);
301 	else
302 		record = au_to_header(byte_count, e_type, e_mod);
303 
304 	/*
305 	 * Advance a handle to the time/date field of the header.  If a
306 	 * timestamp was specified, save it now and clear the handle so the
307 	 * time is not overwritten later; otherwise au_enqueue() fills it in.
308 	 */
309 	adr_start(&hadr, memtod(record, char *));
310 	hadr.adr_now += sizeof (char) + sizeof (int32_t) +
311 	    sizeof (char) + 2 * sizeof (short);
312 	if (kctx->auk_hostaddr_valid)
313 		hadr.adr_now += sizeof (int32_t) +
314 		    kctx->auk_info.ai_termid.at_type;
315 	if (etime != NULL) {
316 		au_save_time(&hadr, etime, 1);
317 		hadr.adr_now = (char *)NULL;
318 	}
319 
320 	/* append body of audit record */
321 	(void) au_append_rec(record, dchain, AU_PACK);
322 
323 	/* add (optional) zonename token */
324 	if (zone_length > 0) {
325 		m = au_to_zonename(zone_length, NULL);
326 		(void) au_append_rec(record, m, AU_PACK);
327 	}
328 
329 	/* Add an (optional) sequence token. NULL offset if none */
330 	if (kctx->auk_policy & AUDIT_SEQ) {
331 		/* get the sequence token */
332 		m = au_to_seq();
333 
334 		/* link to audit record (i.e. don't pack the data) */
335 		(void) au_append_rec(record, m, AU_LINK);
336 
337 		/*
338 		 * advance to count field of sequence token by skipping
339 		 * the token type byte.
340 		 */
341 		adr_start(&sadr, memtod(m, char *));
342 		sadr.adr_now += 1;
343 	} else {
344 		sadr.adr_now = NULL;
345 	}
346 	/* add (optional) trailer token */
347 	if (kctx->auk_policy & AUDIT_TRAIL) {
348 		(void) au_append_rec(record, au_to_trailer(byte_count),
349 		    AU_PACK);
350 	}
351 
352 	/*
353 	 * 1 - use 64 bit version of audit tokens for 64 bit kernels.
354 	 * 0 - use 32 bit version of audit tokens for 32 bit kernels.
355 	 */
356 #ifdef _LP64
357 	au_enqueue(kctx, record, &hadr, &sadr, 1, flag & AU_DONTBLOCK);
358 #else
359 	au_enqueue(kctx, record, &hadr, &sadr, 0, flag & AU_DONTBLOCK);
360 #endif
361 	AS_INC(as_totalsize, byte_count, kctx);
362 }
363 
364 /*ARGSUSED*/
365 void
366 au_enqueue(au_kcontext_t *kctx, au_buff_t *m, adr_t *hadrp, adr_t *sadrp,
367     int size, int dontblock)
368 {
369 	if (kctx == NULL)
370 		return;
371 
372 	mutex_enter(&(kctx->auk_queue.lock));
373 
374 	if (!dontblock && (kctx->auk_queue.cnt >= kctx->auk_queue.hiwater) &&
375 	    audit_sync_block(kctx)) {
376 		mutex_exit(&(kctx->auk_queue.lock));
377 		au_free_rec(m);
378 		return;
379 	}
380 
381 	/* Fill in date and time if needed */
382 	if (hadrp->adr_now) {
383 		au_save_time(hadrp, NULL, size);
384 	}
385 
386 	/* address will be non-zero only if AUDIT_SEQ set */
387 	if (sadrp->adr_now) {
388 		kctx->auk_sequence++;
389 		adr_int32(sadrp, (int32_t *)&(kctx->auk_sequence), 1);
390 	}
391 
392 	if (kctx->auk_queue.head)
393 		kctx->auk_queue.tail->next_rec = m;
394 	else
395 		kctx->auk_queue.head = m;
396 
397 	kctx->auk_queue.tail = m;
398 
399 	if (++(kctx->auk_queue.cnt) >
400 	    kctx->auk_queue.lowater && kctx->auk_queue.rd_block)
401 		cv_broadcast(&(kctx->auk_queue.read_cv));
402 
403 	mutex_exit(&(kctx->auk_queue.lock));
404 
405 	/* count # audit records put onto kernel audit queue */
406 	AS_INC(as_enqueue, 1, kctx);
407 }
408 
409 /*
410  * Dequeue and free buffers up to and including "freeto".
411  * Holds the queue lock for a while, but acquires it only once when doing
412  * bulk dequeueing.
413  */
414 static void
415 au_dequeue(au_kcontext_t *kctx, au_buff_t *freeto)
416 {
417 	au_buff_t *m, *l, *lastl;
418 	int n = 0;
419 
420 	ASSERT(kctx != NULL);
421 
422 	mutex_enter(&(kctx->auk_queue.lock));
423 
424 	ASSERT(kctx->auk_queue.head != NULL);
425 	ASSERT(freeto != NULL);
426 
427 	l = m = kctx->auk_queue.head;
428 
429 	do {
430 		n++;
431 		lastl = l;
432 		l = l->next_rec;
433 	} while (l != NULL && freeto != lastl);
434 
435 	kctx->auk_queue.cnt -= n;
436 	lastl->next_rec = NULL;
437 	kctx->auk_queue.head = l;
438 
439 	/* Freeto must exist in the list */
440 	ASSERT(freeto == lastl);
441 
442 	if (kctx->auk_queue.cnt <= kctx->auk_queue.lowater &&
443 	    kctx->auk_queue.wt_block)
444 		cv_broadcast(&(kctx->auk_queue.write_cv));
445 
446 	mutex_exit(&(kctx->auk_queue.lock));
447 
448 	while (m) {
449 		l = m->next_rec;
450 		au_free_rec(m);
451 		m = l;
452 	}
453 	AS_INC(as_written, n, kctx);
454 }
455 
456 /*
457  * audit_sync_block()
458  * If we've reached the high water mark, we look at the policy to see
459  * whether we should sleep or drop the audit record.
460  * This function is called with the auk_queue.lock held and the check
461  * performed one time already as an optimization.  Caller should unlock.
462  * Returns 1 if the caller needs to free the record.
463  */
464 static int
465 audit_sync_block(au_kcontext_t *kctx)
466 {
467 	ASSERT(MUTEX_HELD(&(kctx->auk_queue.lock)));
468 	/*
469 	 * Loop while we are at the high watermark.
470 	 */
471 	do {
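		/*
		 * Drop (and just count) the record rather than block if this
		 * context is not actively auditing, or if the AUDIT_CNT
		 * policy (count dropped records instead of suspending
		 * processes) is in effect.
		 */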
472 		if (((U2A(u)->tad_audit != AUC_UNSET)
473 		    ? (U2A(u)->tad_audit != AUC_AUDITING)
474 		    : (kctx->auk_auditstate != AUC_AUDITING)) ||
475 		    (kctx->auk_policy & AUDIT_CNT)) {
476 
477 			/* just count # of dropped audit records */
478 			AS_INC(as_dropped, 1, kctx);
479 
480 			return (1);
481 		}
482 
483 		/* kick reader awake if it's asleep */
484 		if (kctx->auk_queue.rd_block &&
485 		    kctx->auk_queue.cnt > kctx->auk_queue.lowater)
486 			cv_broadcast(&(kctx->auk_queue.read_cv));
487 
488 		/* keep count of # times blocked */
489 		AS_INC(as_wblocked, 1, kctx);
490 
491 		/* sleep now, until woken by reader */
492 		kctx->auk_queue.wt_block++;
493 		cv_wait(&(kctx->auk_queue.write_cv), &(kctx->auk_queue.lock));
494 		kctx->auk_queue.wt_block--;
495 	} while (kctx->auk_queue.cnt >= kctx->auk_queue.hiwater);
496 
497 	return (0);
498 }
499 
500 /*
501  * audit_async_block()
502  * If we've reached the high water mark, audit_async_drop() applies the
503  * AUDIT_AHLT policy to decide whether to halt the system or drop the
504  * record.  Returns 1 if the record was dropped.
505  */
506 static int
507 audit_async_block(au_kcontext_t *kctx, caddr_t *rpp)
508 {
509 	ASSERT(kctx != NULL);
510 
511 	mutex_enter(&(kctx->auk_queue.lock));
512 	/* see if we've reached high water mark */
513 	if (kctx->auk_queue.cnt >= kctx->auk_queue.hiwater) {
514 		mutex_exit(&(kctx->auk_queue.lock));
515 
516 		audit_async_drop(rpp, AU_BACKEND);
517 		return (1);
518 	}
519 	mutex_exit(&(kctx->auk_queue.lock));
520 	return (0);
521 }
522 
523 /*
524  * au_door_upcall.  auditdoor() may change vp without notice, so
525  * some locking seems in order.
526  *
527  */
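/* Clock ticks to wait before retrying the door upcall after EAGAIN. */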
528 #define	AGAIN_TICKS	10
529 
530 static int
531 au_door_upcall(au_kcontext_t *kctx, au_dbuf_t *aubuf)
532 {
533 	int		rc;
534 	door_arg_t	darg;
535 	int		retry = 1;
536 	int		ticks_to_wait;
537 
538 	darg.data_ptr = (char *)aubuf;
539 	darg.data_size = AU_DBUF_HEADER + aubuf->aub_size;
540 
541 	darg.desc_ptr = NULL;
542 	darg.desc_num = 0;
543 
544 	while (retry == 1) {
545 		/* non-zero means return results expected */
546 		darg.rbuf = (char *)aubuf;
547 		darg.rsize = darg.data_size;
548 
549 		retry = 0;
550 		mutex_enter(&(kctx->auk_svc_lock));
551 		/*
552 		 * Only holding auk_svc_lock prevents this from changing, so
553 		 * we need to double-check that the vp isn't NULL before we
554 		 * call door_upcall (which will blindly deref it).
555 		 */
556 		if (kctx->auk_current_vp == NULL) {
557 			mutex_exit(&(kctx->auk_svc_lock));
558 			return (-1);
559 		}
560 		rc = door_upcall(kctx->auk_current_vp, &darg, NULL,
561 		    SIZE_MAX, 0);
562 		if (rc != 0) {
563 			mutex_exit(&(kctx->auk_svc_lock));
564 			if (rc == EAGAIN)
565 				ticks_to_wait = AGAIN_TICKS;
566 			else
567 				return (rc);
568 
569 			mutex_enter(&(kctx->auk_eagain_mutex));
570 			(void) cv_reltimedwait(&(kctx->auk_eagain_cv),
571 			    &(kctx->auk_eagain_mutex), ticks_to_wait,
572 			    TR_CLOCK_TICK);
573 			mutex_exit(&(kctx->auk_eagain_mutex));
574 
575 			retry = 1;
576 		} else
577 			mutex_exit(&(kctx->auk_svc_lock));	/* no retry */
578 	}	/* end while (retry == 1) */
579 	if (darg.rbuf == NULL)
580 		return (-1);
581 
582 	/* return code from door server */
583 	return (*(int *)darg.rbuf);
584 }
585 
586 /*
587  * Write an audit control message to the door handle.  The message
588  * structure depends on message_code and at present the only control
589  * message defined is for a policy change.  These are infrequent,
590  * so no memory is held for control messages.
591  */
592 int
593 au_doormsg(au_kcontext_t *kctx, uint32_t message_code, void *message)
594 {
595 	int		rc;
596 	au_dbuf_t	*buf;
597 	size_t		alloc_size;
598 
599 	switch (message_code) {
600 	case AU_DBUF_POLICY:
601 		alloc_size = AU_DBUF_HEADER + sizeof (uint32_t);
602 		buf = kmem_alloc(alloc_size, KM_SLEEP);
603 		buf->aub_size = sizeof (uint32_t);
604 		*(uint32_t *)buf->aub_buf = *(uint32_t *)message;
605 		break;
606 	case AU_DBUF_SHUTDOWN:
607 		alloc_size = AU_DBUF_HEADER;
608 		buf = kmem_alloc(alloc_size, KM_SLEEP);
609 		buf->aub_size = 0;
610 		break;
611 	default:
612 		return (1);
613 	}
614 
615 	buf->aub_type = AU_DBUF_NOTIFY | message_code;
616 	rc = au_door_upcall(kctx, buf);
617 	kmem_free(buf, alloc_size);
618 
619 	return (rc);
620 }
621 
622 /*
623  * Write audit information to the door handle.  au_doorio is called with
624  * one or more complete audit records on the queue and outputs those
625  * records in buffers of up to auk_queue.buflen in size.
626  */
627 int
628 au_doorio(au_kcontext_t *kctx)
629 {
630 	off_t		off;	/* space used in buffer */
631 	ssize_t		used;	/* space used in au_membuf */
632 	token_t		*cAR;	/* current AR being processed */
633 	token_t		*cMB;	/* current au_membuf being processed */
634 	token_t		*sp;	/* last AR processed */
635 	char		*bp;	/* start of free space in staging buffer */
636 	unsigned char	*cp;	/* ptr to data to be moved */
637 	int		error = 0;  /* return from door upcall */
638 
639 	/*
640 	 * amount of data left in the current au_membuf
641 	 */
642 	ssize_t		sz;
643 	ssize_t		len;	/* len of data to move, size of AR */
644 	ssize_t		curr_sz = 0;	/* amount of data written so far */
645 	/*
646 	 * partial_state is AU_DBUF_COMPLETE...LAST; see audit_door_infc.h
647 	 */
648 	int		part    = 0;	/* partial audit record written */
649 	int		partial_state = AU_DBUF_COMPLETE;
650 	/*
651 	 * Has the write buffer changed length due to an auditctl(2)?
652 	 * Initial allocation is from audit_start.c/audit_init()
653 	 */
654 	if (kctx->auk_queue.bufsz != kctx->auk_queue.buflen) {
655 		size_t new_sz = kctx->auk_queue.bufsz;
656 
657 		kmem_free(kctx->auk_dbuffer, AU_DBUF_HEADER +
658 		    kctx->auk_queue.buflen);
659 
660 		kctx->auk_dbuffer = kmem_alloc(AU_DBUF_HEADER + new_sz,
661 		    KM_SLEEP);
662 
663 		/* auk_queue.buflen excludes the 64-bit AU_DBUF_HEADER */
664 		kctx->auk_queue.buflen = new_sz;
665 	}
666 	if (!kctx->auk_queue.head)
667 		goto nodata;
668 
669 	sp   = NULL;	/* no record copied */
670 	off  = 0;	/* no space used in buffer */
671 	used = 0;	/* no data processed in au_membuf */
672 	cAR  = kctx->auk_queue.head;	/* start at head of queue */
673 	cMB  = cAR;	/* start with first au_membuf of record */
674 
675 	/* start at beginning of buffer */
676 	bp   = &(kctx->auk_dbuffer->aub_buf[0]);
677 
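	/*
	 * Copy au_membuf data into the staging buffer; whenever the buffer
	 * fills or a record boundary is reached, push the buffer to the door
	 * server and dequeue any records that have been written in full.
	 */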
678 	while (cMB) {
679 		part = 1;	/* indicate audit record being processed */
680 
681 		cp  = memtod(cMB, unsigned char *); /* buffer ptr */
682 
683 		sz  = (ssize_t)cMB->len - used;	/* data left in au_membuf */
684 		/* len to move */
685 		len = (ssize_t)MIN(sz, kctx->auk_queue.buflen - off);
686 
687 		/* move the data */
688 		bcopy(cp + used, bp + off, len);
689 		used += len; /* update used au_membuf */
690 		off  += len; /* update offset into buffer */
691 
692 		if (used >= (ssize_t)cMB->len) {
693 			/* advance to next au_membuf */
694 			used = 0;
695 			cMB  = cMB->next_buf;
696 		}
697 		if (cMB == NULL) {
698 			/* advance to next audit record */
699 			sp   = cAR;
700 			cAR  = cAR->next_rec;
701 			cMB  = cAR;
702 			part = 0;	/* have a complete record */
703 		}
704 		error = 0;
705 		if ((kctx->auk_queue.buflen == off) || (part == 0)) {
706 			if (part)
707 				partial_state = state_if_part[partial_state];
708 			else
709 				partial_state =
710 				    state_if_not_part[partial_state];
711 
712 			kctx->auk_dbuffer->aub_type = partial_state;
713 			kctx->auk_dbuffer->aub_size = off;
714 			error = au_door_upcall(kctx, kctx->auk_dbuffer);
715 			if (error != 0)
716 				goto nodata;
717 			/*
718 			 * if we've successfully written an audit record,
719 			 * free records up to last full record copied
720 			 */
721 			if (sp)
722 				au_dequeue(kctx, sp);
723 
724 			/* update the amount written during this call */
725 			curr_sz += off;
726 
727 			/* reset staging buffer state for the next chunk */
728 			sp = NULL;
729 			off  = 0;
730 		}
731 	}	/* while(cMB) */
732 nodata:
733 	return (error);
734 }
735 
736 /*
737  * Clean up the thread audit state used for asynchronous audit record
738  * generation error recovery.  Note that this is done on a
739  * per-thread basis and thus does not need any locking.
740  */
741 void
742 audit_async_done(caddr_t *rpp, int flags)
743 {
744 	t_audit_data_t *tad = U2A(u);
745 
746 	/* clean up the tad unless called from softcall backend */
747 	if (!(flags & AU_BACKEND)) {
748 		ASSERT(tad != NULL);
749 		ASSERT(tad->tad_ctrl & TAD_ERRJMP);
750 
751 		tad->tad_ctrl &= ~TAD_ERRJMP;
752 		tad->tad_errjmp = NULL;
753 	}
754 
755 	/* clean out partial audit record */
756 	if ((rpp != NULL) && (*rpp != NULL)) {
757 		au_toss_token((au_buff_t *)*rpp);
758 		*rpp = NULL;
759 	}
760 }
761 
762 /*
763  * Implement the audit policy for asynchronous events generated within
764  * the kernel.
765  * XXX might need locks around audit_policy check.
766  */
767 void
768 audit_async_drop(caddr_t *rpp, int flags)
769 {
770 	au_kcontext_t	*kctx;
771 
772 	/* could not generate audit record, clean up */
773 	audit_async_done((caddr_t *)rpp, flags);
774 
775 	kctx = GET_KCTX_GZ;
776 
777 	/* just drop the record and return */
778 	if (((audit_policy & AUDIT_AHLT) == 0) ||
779 	    (kctx->auk_auditstate == AUC_INIT_AUDIT)) {
780 		/* just count # of dropped audit records */
781 		AS_INC(as_dropped, 1, kctx);
782 		return;
783 	}
784 
785 	/*
786 	 * There can be a lot of data in the audit queue. We
787 	 * will first sync the file systems and then attempt to
788 	 * shut down the kernel so that a memory dump is
789 	 * performed.
790 	 */
791 	sync();
792 	sync();
793 
794 	/*
795 	 * now shut down. What a cruel world it has been
796 	 */
797 	panic("non-attributable halt. should dump core");
798 	/* No return */
799 }
800 
801 int
802 audit_async_start(label_t *jb, au_event_t event, int sorf)
803 {
804 	t_audit_data_t *tad = U2A(u);
805 	au_state_t estate;
806 	int success = 0, failure = 0;
807 	au_kcontext_t	*kctx = GET_KCTX_GZ;
808 
809 	/* if audit state off, then no audit record generation */
810 	if ((kctx->auk_auditstate != AUC_AUDITING) &&
811 	    (kctx->auk_auditstate != AUC_INIT_AUDIT))
812 		return (1);
813 
814 	/*
815 	 * preselect asynchronous event
816 	 * XXX should we check for out-of-range???
817 	 */
818 	estate = kctx->auk_ets[event];
819 
820 	if (sorf & AUM_SUCC)
821 		success = kctx->auk_info.ai_namask.as_success & estate;
822 	if (sorf & AUM_FAIL)
823 		failure = kctx->auk_info.ai_namask.as_failure & estate;
824 
825 	if ((success | failure) == 0)
826 		return (1);
827 
828 	ASSERT(tad->tad_errjmp == NULL);
829 	tad->tad_errjmp = (void *)jb;
830 	tad->tad_ctrl |= TAD_ERRJMP;
831 
832 	return (0);
833 }
834 
835 /*
836  * Complete auditing of an async event. The AU_DONTBLOCK flag to au_close will
837  * result in the backend routine being invoked from softcall, so all the real
838  * work can be done in a safe context.
839  */
840 void
841 audit_async_finish(caddr_t *ad, au_event_t aid, au_emod_t amod,
842     timestruc_t *e_time)
843 {
844 	au_kcontext_t	*kctx;
845 
846 	kctx  = GET_KCTX_GZ;
847 
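	/*
	 * AU_DONTBLOCK defers the queueing to the softcall backend (we may be
	 * in high interrupt context here); PAD_NONATTR marks the record as a
	 * non-attributable event.
	 */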
848 	au_close(kctx, ad, AU_DONTBLOCK | AU_OK, aid, PAD_NONATTR|amod, e_time);
849 }
850 
851 /*
852  * Backend routine to complete an async audit. Invoked from softcall.
853  * (Note: the blocking and the queuing below both involve locking which can't
854  * be done safely in high interrupt context due to the chance of sleeping on
855  * the corresponding adaptive mutex. Hence the softcall.)
856  */
857 static void
858 audit_async_finish_backend(void *addr)
859 {
860 	au_kcontext_t	*kctx;
861 	au_defer_info_t	*attr = (au_defer_info_t *)addr;
862 
863 	if (attr == NULL)
864 		return;		/* won't happen unless softcall is broken */
865 
866 	kctx  = GET_KCTX_GZ;
867 
868 	if (audit_async_block(kctx, (caddr_t *)&attr->audi_ad)) {
869 		kmem_free(attr, sizeof (au_defer_info_t));
870 		return;
871 	}
872 
873 	/*
874 	 * Call au_close_time to complete the audit with the saved values.
875 	 *
876 	 * For the exit-prom event, use the current time instead of the
877 	 * saved time as a better approximation. (Because the time saved via
878 	 * gethrestime during prom-exit handling would not yet be caught up
879 	 * after the system was idled in the debugger for a period of time.)
880 	 */
881 	if (attr->audi_e_type == AUE_EXITPROM) {
882 		au_close_time(kctx, (token_t *)attr->audi_ad, attr->audi_flag,
883 		    attr->audi_e_type, attr->audi_e_mod, NULL);
884 	} else {
885 		au_close_time(kctx, (token_t *)attr->audi_ad, attr->audi_flag,
886 		    attr->audi_e_type, attr->audi_e_mod, &attr->audi_atime);
887 	}
888 
889 	AS_INC(as_generated, 1, kctx);
890 	AS_INC(as_nonattrib, 1, kctx);
891 
892 	kmem_free(attr, sizeof (au_defer_info_t));
893 }
894