/*
 * Copyright (c) 2004 Apple Computer, Inc.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_mask.c#13 $
 */

#include <sys/types.h>

#include <config/config.h>
#ifdef HAVE_FULL_QUEUE_H
#include <sys/queue.h>
#else /* !HAVE_FULL_QUEUE_H */
#include <compat/queue.h>
#endif /* !HAVE_FULL_QUEUE_H */

#include <bsm/libbsm.h>

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

/* MT-Safe */
static pthread_mutex_t	mutex = PTHREAD_MUTEX_INITIALIZER;
static int		firsttime = 1;

/*
 * XXX ev_cache, once created, sticks around until the calling program exits.
 * This may or may not be a problem as far as absolute memory usage goes, but
 * at least there don't appear to be any leaks in using the cache.
 *
 * XXXRW: Note that despite (mutex), load_event_table() could race with
 * other consumers of the getauevent() API.
 */
/*
 * Each cache entry embeds the string storage that the contained
 * au_event_ent's ae_name and ae_desc pointers refer to, so an entry is a
 * single allocation.
 */
struct audit_event_map {
	char				 ev_name[AU_EVENT_NAME_MAX];
	char				 ev_desc[AU_EVENT_DESC_MAX];
	struct au_event_ent		 ev;
	LIST_ENTRY(audit_event_map)	 ev_list;
};
static LIST_HEAD(, audit_event_map)	ev_cache;

static struct audit_event_map *
audit_event_map_alloc(void)
{
	struct audit_event_map *aemp;

	aemp = malloc(sizeof(*aemp));
	if (aemp == NULL)
		return (aemp);
	memset(aemp, 0, sizeof(*aemp));
	aemp->ev.ae_name = aemp->ev_name;
	aemp->ev.ae_desc = aemp->ev_desc;
	return (aemp);
}

static void
audit_event_map_free(struct audit_event_map *aemp)
{

	free(aemp);
}

/*
 * When reading into the cache fails, we need to flush the entire cache to
 * prevent it from containing some but not all records.
 */
static void
flush_cache(void)
{
	struct audit_event_map *aemp;

	/* XXX: Would assert 'mutex'. */

	while ((aemp = LIST_FIRST(&ev_cache)) != NULL) {
		LIST_REMOVE(aemp, ev_list);
		audit_event_map_free(aemp);
	}
}

static int
load_event_table(void)
{
	struct audit_event_map *aemp;
	struct au_event_ent *ep;

	/*
	 * XXX: Would assert 'mutex'.
	 * Loading of the cache happens only once; don't check whether the
	 * cache is already loaded.
	 */
	LIST_INIT(&ev_cache);
	setauevent();	/* Rewind to beginning of entries. */
	do {
		aemp = audit_event_map_alloc();
		if (aemp == NULL) {
			flush_cache();
			return (-1);
		}
		ep = getauevent_r(&aemp->ev);
		if (ep != NULL)
			LIST_INSERT_HEAD(&ev_cache, aemp, ev_list);
		else
			audit_event_map_free(aemp);
	} while (ep != NULL);
	return (1);
}

/*
 * Read the event with the matching event number from the cache.
 */
static struct au_event_ent *
read_from_cache(au_event_t event)
{
	struct audit_event_map *elem;

	/* XXX: Would assert 'mutex'. */

	LIST_FOREACH(elem, &ev_cache, ev_list) {
		if (elem->ev.ae_number == event)
			return (&elem->ev);
	}

	return (NULL);
}

/*
 * Check if the audit event is preselected against the preselection mask.
 */
int
au_preselect(au_event_t event, au_mask_t *mask_p, int sorf, int flag)
{
	struct au_event_ent *ev;
	au_class_t effmask = 0;

	if (mask_p == NULL)
		return (-1);

	pthread_mutex_lock(&mutex);
	if (firsttime) {
		firsttime = 0;
		if (load_event_table() == -1) {
			pthread_mutex_unlock(&mutex);
			return (-1);
		}
	}
	switch (flag) {
	case AU_PRS_REREAD:
		flush_cache();
		if (load_event_table() == -1) {
			pthread_mutex_unlock(&mutex);
			return (-1);
		}
		ev = read_from_cache(event);
		break;
	case AU_PRS_USECACHE:
		ev = read_from_cache(event);
		break;
	default:
		ev = NULL;
	}
	if (ev == NULL) {
		pthread_mutex_unlock(&mutex);
		return (-1);
	}
	if (sorf & AU_PRS_SUCCESS)
		effmask |= (mask_p->am_success & ev->ae_class);
	if (sorf & AU_PRS_FAILURE)
		effmask |= (mask_p->am_failure & ev->ae_class);
	pthread_mutex_unlock(&mutex);
	if (effmask != 0)
		return (1);
	return (0);
}
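
/*
 * Illustrative usage sketch, not part of the library and compiled only when
 * LIBBSM_MASK_EXAMPLE is defined: a caller fills an au_mask_t with class
 * masks (e.g. from audit_control(5) or getaudit(2)) and asks au_preselect()
 * whether a given event is selected.  The event number (72, AUE_OPEN_R) and
 * the 0x00000001 "fr" class bit below are assumptions based on the stock
 * audit_event and audit_class files, used here for demonstration only.
 */
#ifdef LIBBSM_MASK_EXAMPLE
#include <stdio.h>

int
main(void)
{
	au_mask_t mask;
	au_event_t event = 72;		/* Assumed: AUE_OPEN_R. */

	mask.am_success = 0x00000001;	/* Assumed: "fr" (file read) class. */
	mask.am_failure = 0x00000001;

	/* Check both success and failure masks, using the cached table. */
	switch (au_preselect(event, &mask, AU_PRS_BOTH, AU_PRS_USECACHE)) {
	case 1:
		printf("event %u is preselected\n", (unsigned)event);
		break;
	case 0:
		printf("event %u is not preselected\n", (unsigned)event);
		break;
	default:
		fprintf(stderr, "au_preselect: lookup failed\n");
		return (1);
	}
	return (0);
}
#endif /* LIBBSM_MASK_EXAMPLE */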