xref: /illumos-gate/usr/src/uts/common/c2/audit_io.c (revision 66582b606a8194f7f3ba5b3a3a6dca5b0d346361)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /*
26  * Routines for writing audit records.
27  */
28 
29 #include <sys/door.h>
30 #include <sys/param.h>
31 #include <sys/time.h>
32 #include <sys/types.h>
33 #include <sys/statvfs.h>	/* for statfs */
34 #include <sys/vnode.h>
35 #include <sys/file.h>
36 #include <sys/vfs.h>
37 #include <sys/user.h>
38 #include <sys/uio.h>
39 #include <sys/reboot.h>
40 #include <sys/kmem.h>		/* for KM_SLEEP */
41 #include <sys/resource.h>	/* for RLIM_INFINITY */
42 #include <sys/cmn_err.h>	/* panic */
43 #include <sys/systm.h>
44 #include <sys/debug.h>
45 #include <sys/sysmacros.h>
46 #include <sys/syscall.h>
47 #include <sys/zone.h>
48 
49 #include <c2/audit.h>
50 #include <c2/audit_kernel.h>
51 #include <c2/audit_record.h>
52 #include <c2/audit_kevents.h>
53 #include <c2/audit_door_infc.h>
54 
55 static void	au_dequeue(au_kcontext_t *, au_buff_t *);
56 static void	audit_async_finish_backend(void *);
57 static int	audit_sync_block(au_kcontext_t *);
/*
 * Each of these two tables is indexed by the current state value,
 * AU_DBUF_COMPLETE through AU_DBUF_LAST; the content is the next
 * state value.  The first table determines the next state for a
 * buffer which is not the end of a record and the second table
 * determines the state for a buffer which is the end of a record.
 * The initial state is AU_DBUF_COMPLETE.
 */
/* next state when the buffer just sent was only part of a record */
static int state_if_part[] = {
    AU_DBUF_FIRST, AU_DBUF_MIDDLE, AU_DBUF_MIDDLE, AU_DBUF_FIRST};
/* next state when the buffer just sent completed a record */
static int state_if_not_part[] = {
    AU_DBUF_COMPLETE, AU_DBUF_LAST, AU_DBUF_LAST, AU_DBUF_COMPLETE};
/*
 * Write to an audit descriptor.
 * Add the au_membuf to the descriptor chain and free the chain passed in.
 * au_uwrite() operates on the current thread's record under construction
 * (u_ad, which lives in the per-thread audit data).
 */
void
au_uwrite(token_t *m)
{
	au_write(&(u_ad), m);
}
79 
80 void
81 au_write(caddr_t *d, token_t *m)
82 {
83 	if (d == NULL) {
84 		au_toss_token(m);
85 		return;
86 	}
87 	if (m == (token_t *)0) {
88 		printf("au_write: null token\n");
89 		return;
90 	}
91 
92 	if (*d == NULL)
93 		*d = (caddr_t)m;
94 	else
95 		(void) au_append_rec((au_buff_t *)*d, m, AU_PACK);
96 }
97 
/*
 * Close an audit descriptor: detach the token chain from *d and either
 * finish the record now or defer it.
 * Use the second parameter to indicate if it should be written or not.
 *
 *   kctx   - audit context for the zone generating the record
 *   d      - audit descriptor; *d is consumed and reset to NULL
 *   flag   - disposition bits (AU_OK, AU_DEFER, AU_DONTBLOCK)
 *   e_type - audit event number for the record header
 *   e_mod  - event modifier for the record header
 *   e_time - event time, or NULL to use the current time
 */
void
au_close(au_kcontext_t *kctx, caddr_t *d, int flag, au_event_t e_type,
    au_emod_t e_mod, timestruc_t *e_time)
{
	token_t *dchain;	/* au_membuf chain which is the tokens */
	t_audit_data_t *tad = U2A(u);

	ASSERT(tad != NULL);
	ASSERT(d != NULL);
	ASSERT(kctx != NULL);

	/* empty descriptor: nothing to close */
	if ((dchain = (token_t *)*d) == (token_t *)NULL)
		return;

	*d = NULL;

	/*
	 * If async then defer; or if requested, defer the closing/queueing to
	 * syscall end, unless no syscall is active or the syscall is _exit.
	 */
	if ((flag & AU_DONTBLOCK) || ((flag & AU_DEFER) &&
	    (tad->tad_scid != 0) && (tad->tad_scid != SYS_exit))) {
		au_close_defer(dchain, flag, e_type, e_mod, e_time);
		return;
	}
	au_close_time(kctx, dchain, flag, e_type, e_mod, e_time);
}
129 
/*
 * Defer closing/queueing of an audit descriptor. For async events, queue
 * via softcall. Otherwise, defer by queueing the record onto the tad; at
 * syscall end time it will be pulled off.
 *
 *   dchain - token chain making up the record body; consumed here
 *   flag   - disposition bits; AU_OK means "write it", AU_DONTBLOCK
 *            marks an asynchronous event
 *   e_type/e_mod/e_time - event id, modifier and optional timestamp,
 *            saved in the deferral structure for later use
 */
void
au_close_defer(token_t *dchain, int flag, au_event_t e_type, au_emod_t e_mod,
    timestruc_t *e_time)
{
	au_defer_info_t	*attr;
	t_audit_data_t *tad = U2A(u);

	ASSERT(tad != NULL);

	/* If not to be written, toss the record. */
	if ((flag & AU_OK) == 0) {
		au_toss_token(dchain);
		return;
	}

	/* KM_NOSLEEP: may be called where sleeping is not allowed */
	attr = kmem_alloc(sizeof (au_defer_info_t), KM_NOSLEEP);
	/* If no mem available, failing silently is the best recourse */
	if (attr == NULL) {
		au_toss_token(dchain);
		return;
	}

	attr->audi_next = NULL;
	attr->audi_ad = dchain;
	attr->audi_e_type = e_type;
	attr->audi_e_mod = e_mod;
	attr->audi_flag = flag;
	/* no timestamp supplied: stamp the record with the current time */
	if (e_time != NULL)
		attr->audi_atime = *e_time;
	else
		gethrestime(&attr->audi_atime);

	/*
	 * All async events must be queued via softcall to avoid possible
	 * sleeping in high interrupt context. softcall will ensure it's
	 * done on a dedicated software-level interrupt thread.
	 */
	if (flag & AU_DONTBLOCK) {
		softcall(audit_async_finish_backend, attr);
		audit_async_done(NULL, 0);
		return;
	}

	/*
	 * If not an async event, defer by queuing onto the tad until
	 * syscall end. No locking is needed because the tad is per-thread.
	 */
	if (tad->tad_defer_head)
		tad->tad_defer_tail->audi_next = attr;
	else
		tad->tad_defer_head = attr;
	tad->tad_defer_tail = attr;
}
188 
189 
/*
 * Save the time in the event header. If time is not specified (i.e., pointer
 * is NULL), use the current time.  This code is fairly ugly since it needs
 * to support both 32- and 64-bit environments and can be called indirectly
 * from both au_close() (for kernel audit) and from audit() (userland audit).
 *
 *   hadrp - adr handle already positioned at the header's time field
 *   time  - timestamp to record, or NULL for "now"
 *   size  - non-zero selects the 64-bit token layout (_LP64 only)
 */
/*ARGSUSED*/
static void
au_save_time(adr_t *hadrp, timestruc_t *time, int size)
{
	struct {
		uint32_t sec;
		uint32_t usec;
	} tv;
	timestruc_t	now;

	if (time == NULL) {
		gethrestime(&now);
		time = &now;
	}

#ifdef _LP64
	/* 64-bit layout: emit sec/nsec as two consecutive 64-bit values */
	if (size)
		adr_int64(hadrp, (int64_t *)time, 2);
	else
#endif
	{
		/* 32-bit layout: each field truncated to 32 bits */
		tv.sec = (uint32_t)time->tv_sec;
		tv.usec = (uint32_t)time->tv_nsec;
		adr_int32(hadrp, (int32_t *)&tv, 2);
	}
}
222 
223 
/*
 * Close an audit descriptor.
 * If time of event is specified, use it in the record, otherwise use the
 * current time.
 *
 * Builds the finished record (header + body tokens + optional zonename,
 * sequence and trailer tokens) and places it on the kernel audit queue
 * via au_enqueue().
 */
void
au_close_time(au_kcontext_t *kctx, token_t *dchain, int flag, au_event_t e_type,
    au_emod_t e_mod, timestruc_t *etime)
{
	token_t		*record;	/* au_membuf chain == the record */
	int		byte_count;
	token_t		*m;		/* for potential sequence token */
	adr_t		hadr;		/* handle for header token */
	adr_t		sadr;		/* handle for sequence token */
	size_t		zone_length;	/* length of zonename token */
	uint32_t	auditing;

	ASSERT(dchain != NULL);

	/* If not to be written, toss the record */
	if ((flag & AU_OK) == 0) {
		au_toss_token(dchain);
		return;
	}
	/* if auditing not enabled, then don't generate an audit record */
	ASSERT(U2A(u) != NULL);
	ASSERT(kctx != NULL);

	/* per-process audit state, when set, overrides the zone state */
	auditing = (U2A(u)->tad_audit == AUC_UNSET)
	    ? kctx->auk_auditstate
	    : U2A(u)->tad_audit;

	if (auditing & ~(AUC_AUDITING | AUC_INIT_AUDIT)) {
		/*
		 * at system boot, neither is set yet we want to generate
		 * an audit record.
		 */
		if (e_type != AUE_SYSTEMBOOT) {
			au_toss_token(dchain);
			return;
		}
	}

	/* Count up the bytes used in the record. */
	byte_count = au_token_size(dchain);

	/*
	 * add in size of header token (always present).
	 */
	byte_count += sizeof (char) + sizeof (int32_t) +
	    sizeof (char) + 2 * sizeof (short) + sizeof (timestruc_t);

	/* the extended header also carries the host's terminal address */
	if (kctx->auk_hostaddr_valid)
		byte_count += sizeof (int32_t) +
		    kctx->auk_info.ai_termid.at_type;

	/*
	 * add in size of zonename token (zero if !AUDIT_ZONENAME)
	 */
	if (kctx->auk_policy & AUDIT_ZONENAME) {
		zone_length = au_zonename_length(NULL);
		byte_count += zone_length;
	} else {
		zone_length = 0;
	}
	/* add in size of (optional) trailer token */
	if (kctx->auk_policy & AUDIT_TRAIL)
		byte_count += 7;

	/* add in size of (optional) sequence token */
	if (kctx->auk_policy & AUDIT_SEQ)
		byte_count += 5;

	/* build the header */
	if (kctx->auk_hostaddr_valid)
		record = au_to_header_ex(byte_count, e_type, e_mod);
	else
		record = au_to_header(byte_count, e_type, e_mod);

	/*
	 * If timestamp was specified, save it in header now. Otherwise,
	 * save reference to header so we can update time/data later
	 * and artificially adjust pointer to the time/date field of header.
	 */
	adr_start(&hadr, memtod(record, char *));
	hadr.adr_now += sizeof (char) + sizeof (int32_t) +
	    sizeof (char) + 2 * sizeof (short);
	if (kctx->auk_hostaddr_valid)
		hadr.adr_now += sizeof (int32_t) +
		    kctx->auk_info.ai_termid.at_type;
	if (etime != NULL) {
		au_save_time(&hadr, etime, 1);
		/* NULL adr_now tells au_enqueue() the time is already set */
		hadr.adr_now = (char *)NULL;
	}

	/* append body of audit record */
	(void) au_append_rec(record, dchain, AU_PACK);

	/* add (optional) zonename token */
	if (zone_length > 0) {
		m = au_to_zonename(zone_length, NULL);
		(void) au_append_rec(record, m, AU_PACK);
	}

	/* Add an (optional) sequence token. NULL offset if none */
	if (kctx->auk_policy & AUDIT_SEQ) {
		/* get the sequence token */
		m = au_to_seq();

		/* link to audit record (i.e. don't pack the data) */
		(void) au_append_rec(record, m, AU_LINK);

		/*
		 * advance to count field of sequence token by skipping
		 * the token type byte.
		 */
		adr_start(&sadr, memtod(m, char *));
		sadr.adr_now += 1;
	} else {
		sadr.adr_now = NULL;
	}
	/* add (optional) trailer token */
	if (kctx->auk_policy & AUDIT_TRAIL) {
		(void) au_append_rec(record, au_to_trailer(byte_count),
		    AU_PACK);
	}

	/*
	 * 1 - use 64 bit version of audit tokens for 64 bit kernels.
	 * 0 - use 32 bit version of audit tokens for 32 bit kernels.
	 */
#ifdef _LP64
	au_enqueue(kctx, record, &hadr, &sadr, 1, flag & AU_DONTBLOCK);
#else
	au_enqueue(kctx, record, &hadr, &sadr, 0, flag & AU_DONTBLOCK);
#endif
	AS_INC(as_totalsize, byte_count, kctx);
}
362 
/*
 * Place a completed audit record on the kernel audit queue, filling in
 * the deferred timestamp and sequence number under the queue lock.
 *
 *   m         - the record (au_membuf chain)
 *   hadrp     - header time position; adr_now == NULL means the time
 *               was already written by au_close_time()
 *   sadrp     - sequence-token count position; adr_now is non-NULL only
 *               when the AUDIT_SEQ policy is active
 *   size      - non-zero selects 64-bit token layout (see au_save_time())
 *   dontblock - non-zero for async records, which must never sleep
 */
/*ARGSUSED*/
void
au_enqueue(au_kcontext_t *kctx, au_buff_t *m, adr_t *hadrp, adr_t *sadrp,
    int size, int dontblock)
{
	if (kctx == NULL)
		return;

	mutex_enter(&(kctx->auk_queue.lock));

	/* at the high water mark: block, or drop per audit_sync_block() */
	if (!dontblock && (kctx->auk_queue.cnt >= kctx->auk_queue.hiwater) &&
	    audit_sync_block(kctx)) {
		mutex_exit(&(kctx->auk_queue.lock));
		au_free_rec(m);
		return;
	}

	/* Fill in date and time if needed */
	if (hadrp->adr_now) {
		au_save_time(hadrp, NULL, size);
	}

	/* address will be non-zero only if AUDIT_SEQ set */
	if (sadrp->adr_now) {
		kctx->auk_sequence++;
		adr_int32(sadrp, (int32_t *)&(kctx->auk_sequence), 1);
	}

	/* append to the tail of the singly-linked record queue */
	if (kctx->auk_queue.head)
		kctx->auk_queue.tail->next_rec = m;
	else
		kctx->auk_queue.head = m;

	kctx->auk_queue.tail = m;

	/* wake a blocked reader once enough data has accumulated */
	if (++(kctx->auk_queue.cnt) >
	    kctx->auk_queue.lowater && kctx->auk_queue.rd_block)
		cv_broadcast(&(kctx->auk_queue.read_cv));

	mutex_exit(&(kctx->auk_queue.lock));

	/* count # audit records put onto kernel audit queue */
	AS_INC(as_enqueue, 1, kctx);
}
407 
/*
 * Dequeue and free buffers upto and including "freeto"
 * Keeps the queue lock long but acquires it only once when doing
 * bulk dequeueing.
 */
static void
au_dequeue(au_kcontext_t *kctx, au_buff_t *freeto)
{
	au_buff_t *m, *l, *lastl;
	int n = 0;

	ASSERT(kctx != NULL);

	mutex_enter(&(kctx->auk_queue.lock));

	ASSERT(kctx->auk_queue.head != NULL);
	ASSERT(freeto != NULL);

	l = m = kctx->auk_queue.head;

	/* walk from the head, counting records until freeto is passed */
	do {
		n++;
		lastl = l;
		l = l->next_rec;
	} while (l != NULL && freeto != lastl);

	kctx->auk_queue.cnt -= n;
	/* detach the freed run from the queue; "l" becomes the new head */
	lastl->next_rec = NULL;
	kctx->auk_queue.head = l;

	/* Freeto must exist in the list */
	ASSERT(freeto == lastl);

	/* wake writers blocked on a full queue once it has drained */
	if (kctx->auk_queue.cnt <= kctx->auk_queue.lowater &&
	    kctx->auk_queue.wt_block)
		cv_broadcast(&(kctx->auk_queue.write_cv));

	mutex_exit(&(kctx->auk_queue.lock));

	/* free the detached records outside the queue lock */
	while (m) {
		l = m->next_rec;
		au_free_rec(m);
		m = l;
	}
	AS_INC(as_written, n, kctx);
}
454 
/*
 * audit_sync_block()
 * If we've reached the high water mark, we look at the policy to see
 * if we sleep or we should drop the audit record.
 * This function is called with the auk_queue.lock held and the check
 * performed one time already as an optimization.  Caller should unlock.
 * Returns 1 if the caller needs to free the record.
 */
static int
audit_sync_block(au_kcontext_t *kctx)
{
	ASSERT(MUTEX_HELD(&(kctx->auk_queue.lock)));
	/*
	 * Loop while we are at the high watermark.
	 */
	do {
		/*
		 * Drop (rather than sleep) when auditing is not active for
		 * this thread/zone, or when the AUDIT_CNT policy says to
		 * count drops instead of blocking.
		 */
		if (((U2A(u)->tad_audit != AUC_UNSET)
		    ? (U2A(u)->tad_audit != AUC_AUDITING)
		    : (kctx->auk_auditstate != AUC_AUDITING)) ||
		    (kctx->auk_policy & AUDIT_CNT)) {

			/* just count # of dropped audit records */
			AS_INC(as_dropped, 1, kctx);

			return (1);
		}

		/* kick reader awake if its asleep */
		if (kctx->auk_queue.rd_block &&
		    kctx->auk_queue.cnt > kctx->auk_queue.lowater)
			cv_broadcast(&(kctx->auk_queue.read_cv));

		/* keep count of # times blocked */
		AS_INC(as_wblocked, 1, kctx);

		/* sleep now, until woken by reader */
		kctx->auk_queue.wt_block++;
		cv_wait(&(kctx->auk_queue.write_cv), &(kctx->auk_queue.lock));
		kctx->auk_queue.wt_block--;
	} while (kctx->auk_queue.cnt >= kctx->auk_queue.hiwater);

	return (0);
}
498 
499 /*
500  * audit_async_block()
501  * if we've reached the high water mark, we look at the ahlt policy to see
502  * if we reboot we should drop the audit record.
503  * Returns 1 if blocked.
504  */
505 static int
506 audit_async_block(au_kcontext_t *kctx, caddr_t *rpp)
507 {
508 	ASSERT(kctx != NULL);
509 
510 	mutex_enter(&(kctx->auk_queue.lock));
511 	/* see if we've reached high water mark */
512 	if (kctx->auk_queue.cnt >= kctx->auk_queue.hiwater) {
513 		mutex_exit(&(kctx->auk_queue.lock));
514 
515 		audit_async_drop(rpp, AU_BACKEND);
516 		return (1);
517 	}
518 	mutex_exit(&(kctx->auk_queue.lock));
519 	return (0);
520 }
521 
/*
 * au_door_upcall.  auditdoor() may change vp without notice, so
 * some locking seems in order.
 *
 *   kctx  - audit context holding the door vnode and its lock
 *   aubuf - buffer handed to the door server; also receives the reply
 *
 * Returns the door server's status, the error from door_upcall(), or
 * -1 when no reply buffer came back.
 */
#define	AGAIN_TICKS	10

static int
au_door_upcall(au_kcontext_t *kctx, au_dbuf_t *aubuf)
{
	int		rc;
	door_arg_t	darg;
	int		retry = 1;
	int		ticks_to_wait;

	darg.data_ptr = (char *)aubuf;
	darg.data_size = AU_DBUF_HEADER + aubuf->aub_size;

	darg.desc_ptr = NULL;
	darg.desc_num = 0;

	while (retry == 1) {
		/* non-zero means return results expected */
		darg.rbuf = (char *)aubuf;
		darg.rsize = darg.data_size;

		retry = 0;
		mutex_enter(&(kctx->auk_svc_lock));
		rc = door_upcall(kctx->auk_current_vp, &darg, NULL,
		    SIZE_MAX, 0);
		if (rc != 0) {
			mutex_exit(&(kctx->auk_svc_lock));
			/* EAGAIN: pause briefly, then retry the upcall */
			if (rc == EAGAIN)
				ticks_to_wait = AGAIN_TICKS;
			else
				return (rc);

			mutex_enter(&(kctx->auk_eagain_mutex));
			(void) cv_reltimedwait(&(kctx->auk_eagain_cv),
			    &(kctx->auk_eagain_mutex), ticks_to_wait,
			    TR_CLOCK_TICK);
			mutex_exit(&(kctx->auk_eagain_mutex));

			retry = 1;
		} else
			mutex_exit(&(kctx->auk_svc_lock));	/* no retry */
	}	/* end while (retry == 1) */
	if (darg.rbuf == NULL)
		return (-1);

	/* return code from door server */
	return (*(int *)darg.rbuf);
}
575 
576 /*
577  * Write an audit control message to the door handle.  The message
578  * structure depends on message_code and at present the only control
579  * message defined is for a policy change.  These are infrequent,
580  * so no memory is held for control messages.
581  */
582 int
583 au_doormsg(au_kcontext_t *kctx, uint32_t message_code, void *message)
584 {
585 	int		rc;
586 	au_dbuf_t	*buf;
587 	size_t		alloc_size;
588 
589 	switch (message_code) {
590 	case AU_DBUF_POLICY:
591 		alloc_size = AU_DBUF_HEADER + sizeof (uint32_t);
592 		buf = kmem_alloc(alloc_size, KM_SLEEP);
593 		buf->aub_size = sizeof (uint32_t);
594 		*(uint32_t *)buf->aub_buf = *(uint32_t *)message;
595 		break;
596 	case AU_DBUF_SHUTDOWN:
597 		alloc_size = AU_DBUF_HEADER;
598 		buf = kmem_alloc(alloc_size, KM_SLEEP);
599 		buf->aub_size = 0;
600 		break;
601 	default:
602 		return (1);
603 	}
604 
605 	buf->aub_type = AU_DBUF_NOTIFY | message_code;
606 	rc = au_door_upcall(kctx, buf);
607 	kmem_free(buf, alloc_size);
608 
609 	return (rc);
610 }
611 
/*
 * Write audit information to the door handle.  au_doorio is called with
 * one or more complete audit records on the queue and outputs those
 * records in buffers of up to auk_queue.buflen in size.
 * Returns 0 on success, or the error from au_door_upcall().
 */
int
au_doorio(au_kcontext_t *kctx)
{
	off_t		off;	/* space used in buffer */
	ssize_t		used;	/* space used in au_membuf */
	token_t		*cAR;	/* current AR being processed */
	token_t		*cMB;	/* current au_membuf being processed */
	token_t		*sp;	/* last AR processed */
	char		*bp;	/* start of free space in staging buffer */
	unsigned char	*cp;	/* ptr to data to be moved */
	int		error = 0;  /* return from door upcall */

	/*
	 * size (data left in au_membuf - space in buffer)
	 */
	ssize_t		sz;
	ssize_t		len;	/* len of data to move, size of AR */
	ssize_t		curr_sz = 0;	/* amount of data written during now */
	/*
	 * partial_state is AU_DBUF_COMPLETE...LAST; see audit_door_infc.h
	 */
	int		part    = 0;	/* partial audit record written */
	int		partial_state = AU_DBUF_COMPLETE;
	/*
	 * Has the write buffer changed length due to a auditctl(2)?
	 * Initial allocation is from audit_start.c/audit_init()
	 */
	if (kctx->auk_queue.bufsz != kctx->auk_queue.buflen) {
		size_t new_sz = kctx->auk_queue.bufsz;

		kmem_free(kctx->auk_dbuffer, AU_DBUF_HEADER +
		    kctx->auk_queue.buflen);

		kctx->auk_dbuffer = kmem_alloc(AU_DBUF_HEADER + new_sz,
		    KM_SLEEP);

		/* omit the 64 bit header */
		kctx->auk_queue.buflen = new_sz;
	}
	if (!kctx->auk_queue.head)
		goto nodata;

	sp   = NULL;	/* no record copied */
	off  = 0;	/* no space used in buffer */
	used = 0;	/* no data processed in au_membuf */
	cAR  = kctx->auk_queue.head;	/* start at head of queue */
	cMB  = cAR;	/* start with first au_membuf of record */

	/* start at beginning of buffer */
	bp   = &(kctx->auk_dbuffer->aub_buf[0]);

	while (cMB) {
		part = 1;	/* indicate audit record being processed */

		cp  = memtod(cMB, unsigned char *); /* buffer ptr */

		sz  = (ssize_t)cMB->len - used;	/* data left in au_membuf */
		/* len to move */
		len = (ssize_t)MIN(sz, kctx->auk_queue.buflen - off);

		/* move the data */
		bcopy(cp + used, bp + off, len);
		used += len; /* update used au_membuf */
		off  += len; /* update offset into buffer */

		if (used >= (ssize_t)cMB->len) {
			/* advance to next au_membuf */
			used = 0;
			cMB  = cMB->next_buf;
		}
		if (cMB == NULL) {
			/* advance to next audit record */
			sp   = cAR;
			cAR  = cAR->next_rec;
			cMB  = cAR;
			part = 0;	/* have a complete record */
		}
		error = 0;
		/* flush when the staging buffer fills or a record completes */
		if ((kctx->auk_queue.buflen == off) || (part == 0)) {
			if (part)
				partial_state = state_if_part[partial_state];
			else
				partial_state =
				    state_if_not_part[partial_state];

			kctx->auk_dbuffer->aub_type = partial_state;
			kctx->auk_dbuffer->aub_size = off;
			error = au_door_upcall(kctx, kctx->auk_dbuffer);
			if (error != 0)
				goto nodata;
			/*
			 * if we've successfully written an audit record,
			 * free records up to last full record copied
			 */
			if (sp)
				au_dequeue(kctx, sp);

				/* Update size */
			curr_sz += off;

				/* reset auk_dbuffer pointers */
			sp = NULL;
			off  = 0;
		}
	}	/* while(cMB) */
nodata:
	return (error);
}
725 
726 /*
727  * Clean up thread audit state to clear out asynchronous audit record
728  * generation error recovery processing. Note that this is done on a
729  * per-thread basis and thus does not need any locking.
730  */
731 void
732 audit_async_done(caddr_t *rpp, int flags)
733 {
734 	t_audit_data_t *tad = U2A(u);
735 
736 	/* clean up the tad unless called from softcall backend */
737 	if (!(flags & AU_BACKEND)) {
738 		ASSERT(tad != NULL);
739 		ASSERT(tad->tad_ctrl & TAD_ERRJMP);
740 
741 		tad->tad_ctrl &= ~TAD_ERRJMP;
742 		tad->tad_errjmp = NULL;
743 	}
744 
745 	/* clean out partial audit record */
746 	if ((rpp != NULL) && (*rpp != NULL)) {
747 		au_toss_token((au_buff_t *)*rpp);
748 		*rpp = NULL;
749 	}
750 }
751 
/*
 * implement the audit policy for asynchronous events generated within
 * the kernel.
 * XXX might need locks around audit_policy check.
 *
 *   rpp   - partially built record to discard (may be NULL)
 *   flags - AU_BACKEND when invoked from the softcall backend
 */
void
audit_async_drop(caddr_t *rpp, int flags)
{
	au_kcontext_t	*kctx;

	/* could not generate audit record, clean up */
	audit_async_done((caddr_t *)rpp, flags);

	kctx = GET_KCTX_GZ;

	/* just drop the record and return */
	if (((audit_policy & AUDIT_AHLT) == 0) ||
	    (kctx->auk_auditstate == AUC_INIT_AUDIT)) {
		/* just count # of dropped audit records */
		AS_INC(as_dropped, 1, kctx);
		return;
	}

	/*
	 * There can be a lot of data in the audit queue. We
	 * will first sync the file systems then attempt to
	 * shutdown the kernel so that a memory dump is
	 * performed.
	 */
	sync();
	sync();

	/*
	 * now shut down. What a cruel world it has been
	 */
	panic("non-attributable halt. should dump core");
	/* No return */
}
790 
/*
 * Begin auditing of an asynchronous (non-attributable) kernel event.
 * Preselects the event against the non-attributable mask and, when
 * selected, saves the caller's error-recovery jump buffer in the tad.
 * Returns 0 if the event is to be audited, 1 if it is not.
 */
int
audit_async_start(label_t *jb, au_event_t event, int sorf)
{
	t_audit_data_t *tad = U2A(u);
	au_state_t estate;
	int success = 0, failure = 0;
	au_kcontext_t	*kctx = GET_KCTX_GZ;

	/* if audit state off, then no audit record generation */
	if ((kctx->auk_auditstate != AUC_AUDITING) &&
	    (kctx->auk_auditstate != AUC_INIT_AUDIT))
		return (1);

	/*
	 * preselect asynchronous event
	 * XXX should we check for out-of-range???
	 */
	estate = kctx->auk_ets[event];

	if (sorf & AUM_SUCC)
		success = kctx->auk_info.ai_namask.as_success & estate;
	if (sorf & AUM_FAIL)
		failure = kctx->auk_info.ai_namask.as_failure & estate;

	/* event not selected for either success or failure: skip it */
	if ((success | failure) == 0)
		return (1);

	ASSERT(tad->tad_errjmp == NULL);
	tad->tad_errjmp = (void *)jb;
	tad->tad_ctrl |= TAD_ERRJMP;

	return (0);
}
824 
/*
 * Complete auditing of an async event. The AU_DONTBLOCK flag to au_close will
 * result in the backend routine being invoked from softcall, so all the real
 * work can be done in a safe context.
 *
 *   ad     - audit descriptor holding the token chain; consumed
 *   aid    - audit event id
 *   amod   - event modifier (PAD_NONATTR is OR'd in here)
 *   e_time - event timestamp, or NULL for the current time
 */
void
audit_async_finish(caddr_t *ad, au_event_t aid, au_emod_t amod,
    timestruc_t *e_time)
{
	au_kcontext_t	*kctx;

	kctx  = GET_KCTX_GZ;

	au_close(kctx, ad, AU_DONTBLOCK | AU_OK, aid, PAD_NONATTR|amod, e_time);
}
840 
/*
 * Backend routine to complete an async audit. Invoked from softcall.
 * (Note: the blocking and the queuing below both involve locking which can't
 * be done safely in high interrupt context due to the chance of sleeping on
 * the corresponding adaptive mutex. Hence the softcall.)
 *
 *   addr - au_defer_info_t built by au_close_defer(); freed here
 */
static void
audit_async_finish_backend(void *addr)
{
	au_kcontext_t	*kctx;
	au_defer_info_t	*attr = (au_defer_info_t *)addr;

	if (attr == NULL)
		return;		/* won't happen unless softcall is broken */

	kctx  = GET_KCTX_GZ;

	/* queue full: record was dropped, only the attr needs freeing */
	if (audit_async_block(kctx, (caddr_t *)&attr->audi_ad)) {
		kmem_free(attr, sizeof (au_defer_info_t));
		return;
	}

	/*
	 * Call au_close_time to complete the audit with the saved values.
	 *
	 * For the exit-prom event, use the current time instead of the
	 * saved time as a better approximation. (Because the time saved via
	 * gethrestime during prom-exit handling would not yet be caught up
	 * after the system was idled in the debugger for a period of time.)
	 */
	if (attr->audi_e_type == AUE_EXITPROM) {
		au_close_time(kctx, (token_t *)attr->audi_ad, attr->audi_flag,
		    attr->audi_e_type, attr->audi_e_mod, NULL);
	} else {
		au_close_time(kctx, (token_t *)attr->audi_ad, attr->audi_flag,
		    attr->audi_e_type, attr->audi_e_mod, &attr->audi_atime);
	}

	AS_INC(as_generated, 1, kctx);
	AS_INC(as_nonattrib, 1, kctx);

	kmem_free(attr, sizeof (au_defer_info_t));
}
884