/*
 * Copyright (c) 2004 Apple Computer, Inc.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_mask.c#11 $
 */

#include <sys/types.h>
#include <sys/queue.h>

#include <bsm/libbsm.h>

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

/* MT-Safe */
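/*
 * 'mutex' protects the event cache; 'firsttime' triggers its one-time
 * construction on the first call to au_preselect().
 */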
static pthread_mutex_t	mutex = PTHREAD_MUTEX_INITIALIZER;
static int		firsttime = 1;

/*
 * XXX ev_cache, once created, sticks around until the calling program exits.
 * This may or may not be a problem as far as absolute memory usage goes, but
 * at least there don't appear to be any leaks in using the cache.
 *
 * XXXRW: Note that despite (mutex), load_event_table() could race with
 * other consumers of the getauevents() API.
 */
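/*
 * A cache entry: provides the string storage referenced by the embedded
 * au_event_ent structure.
 */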
struct audit_event_map {
	char				 ev_name[AU_EVENT_NAME_MAX];
	char				 ev_desc[AU_EVENT_DESC_MAX];
	struct au_event_ent		 ev;
	LIST_ENTRY(audit_event_map)	 ev_list;
};
static LIST_HEAD(, audit_event_map)	ev_cache;

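/*
 * Allocate a zeroed cache entry and point its au_event_ent name and
 * description fields at the entry's internal buffers.
 */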
static struct audit_event_map *
audit_event_map_alloc(void)
{
	struct audit_event_map *aemp;

	aemp = malloc(sizeof(*aemp));
	if (aemp == NULL)
		return (aemp);
	bzero(aemp, sizeof(*aemp));
	aemp->ev.ae_name = aemp->ev_name;
	aemp->ev.ae_desc = aemp->ev_desc;
	return (aemp);
}

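/*
 * Free a cache entry allocated by audit_event_map_alloc().
 */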
static void
audit_event_map_free(struct audit_event_map *aemp)
{

	free(aemp);
}

/*
 * When reading into the cache fails, we need to flush the entire cache to
 * prevent it from containing some but not all records.
 */
static void
flush_cache(void)
{
	struct audit_event_map *aemp;

	/* XXX: Would assert 'mutex'. */

	while ((aemp = LIST_FIRST(&ev_cache)) != NULL) {
		LIST_REMOVE(aemp, ev_list);
		audit_event_map_free(aemp);
	}
}

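/*
 * Load the audit event database into ev_cache using the getauevent_r()
 * iterator.  Returns 1 on success; on allocation failure, flushes any
 * partially built cache and returns -1.
 */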
static int
load_event_table(void)
{
	struct audit_event_map *aemp;
	struct au_event_ent *ep;

	/*
	 * XXX: Would assert 'mutex'.
	 * Loading of the cache happens only once; don't check whether the
	 * cache is already loaded.
	 */
	LIST_INIT(&ev_cache);
	setauevent();	/* Rewind to beginning of entries. */
	do {
		aemp = audit_event_map_alloc();
		if (aemp == NULL) {
			flush_cache();
			return (-1);
		}
		ep = getauevent_r(&aemp->ev);
		if (ep != NULL)
			LIST_INSERT_HEAD(&ev_cache, aemp, ev_list);
		else
			audit_event_map_free(aemp);
	} while (ep != NULL);
	return (1);
}

/*
 * Read the event with the matching event number from the cache.
 */
static struct au_event_ent *
read_from_cache(au_event_t event)
{
	struct audit_event_map *elem;

	/* XXX: Would assert 'mutex'. */

	LIST_FOREACH(elem, &ev_cache, ev_list) {
		if (elem->ev.ae_number == event)
			return (&elem->ev);
	}

	return (NULL);
}

/*
 * Check whether the audit event is preselected against the preselection
 * mask.  'sorf' selects the success and/or failure masks to test
 * (AU_PRS_SUCCESS, AU_PRS_FAILURE, or AU_PRS_BOTH); 'flag' is either
 * AU_PRS_REREAD, to reload the event table, or AU_PRS_USECACHE, to use the
 * cached copy.  Returns 1 if the event is preselected, 0 if not, and -1 on
 * error.
 */
int
au_preselect(au_event_t event, au_mask_t *mask_p, int sorf, int flag)
{
	struct au_event_ent *ev;
	au_class_t effmask = 0;

	if (mask_p == NULL)
		return (-1);

	pthread_mutex_lock(&mutex);
	if (firsttime) {
		firsttime = 0;
		if (load_event_table() == -1) {
			pthread_mutex_unlock(&mutex);
			return (-1);
		}
	}
	switch (flag) {
	case AU_PRS_REREAD:
		flush_cache();
		if (load_event_table() == -1) {
			pthread_mutex_unlock(&mutex);
			return (-1);
		}
		ev = read_from_cache(event);
		break;
	case AU_PRS_USECACHE:
		ev = read_from_cache(event);
		break;
	default:
		ev = NULL;
	}
	if (ev == NULL) {
		pthread_mutex_unlock(&mutex);
		return (-1);
	}
	if (sorf & AU_PRS_SUCCESS)
		effmask |= (mask_p->am_success & ev->ae_class);
	if (sorf & AU_PRS_FAILURE)
		effmask |= (mask_p->am_failure & ev->ae_class);
	pthread_mutex_unlock(&mutex);
	if (effmask != 0)
		return (1);
	return (0);
}
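
/*
 * Example usage (an illustrative sketch, not part of the library): a
 * hypothetical caller deciding whether to write an audit record for a
 * completed operation, given the process's preselection mask.  The helper
 * name 'should_audit' and the choice of AU_PRS_USECACHE are assumptions
 * made for the example.
 *
 *	static int
 *	should_audit(au_event_t event, au_mask_t *mask, int error)
 *	{
 *		int sorf;
 *
 *		sorf = (error == 0) ? AU_PRS_SUCCESS : AU_PRS_FAILURE;
 *		return (au_preselect(event, mask, sorf, AU_PRS_USECACHE));
 *	}
 */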