/*-
 * Copyright (c) 2004 Apple Inc.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_mask.c#15 $
 */

#include <sys/types.h>

#include <config/config.h>
#ifdef HAVE_FULL_QUEUE_H
#include <sys/queue.h>
#else /* !HAVE_FULL_QUEUE_H */
#include <compat/queue.h>
#endif /* !HAVE_FULL_QUEUE_H */

#include <bsm/libbsm.h>

#ifdef HAVE_PTHREAD_MUTEX_LOCK
#include <pthread.h>
#endif
#include <stdlib.h>
#include <string.h>

/* MT-Safe */
#ifdef HAVE_PTHREAD_MUTEX_LOCK
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
static int firsttime = 1;

/*
 * XXX ev_cache, once created, sticks around until the calling program exits.
 * This may or may not be a problem as far as absolute memory usage goes, but
 * at least there don't appear to be any leaks in using the cache.
 *
 * XXXRW: Note that despite (mutex), load_event_table() could race with
 * other consumers of the getauevents() API.
 */
struct audit_event_map {
	char				ev_name[AU_EVENT_NAME_MAX];
	char				ev_desc[AU_EVENT_DESC_MAX];
	struct au_event_ent		ev;
	LIST_ENTRY(audit_event_map)	ev_list;
};
static LIST_HEAD(, audit_event_map)	ev_cache;

static struct audit_event_map *
audit_event_map_alloc(void)
{
	struct audit_event_map *aemp;

	aemp = malloc(sizeof(*aemp));
	if (aemp == NULL)
		return (aemp);
	bzero(aemp, sizeof(*aemp));
	aemp->ev.ae_name = aemp->ev_name;
	aemp->ev.ae_desc = aemp->ev_desc;
	return (aemp);
}

static void
audit_event_map_free(struct audit_event_map *aemp)
{

	free(aemp);
}

/*
 * When reading into the cache fails, we need to flush the entire cache to
 * prevent it from containing some but not all records.
 */
static void
flush_cache(void)
{
	struct audit_event_map *aemp;

	/* XXX: Would assert 'mutex'. */

	while ((aemp = LIST_FIRST(&ev_cache)) != NULL) {
		LIST_REMOVE(aemp, ev_list);
		audit_event_map_free(aemp);
	}
}

static int
load_event_table(void)
{
	struct audit_event_map *aemp;
	struct au_event_ent *ep;

	/*
	 * XXX: Would assert 'mutex'.
	 * Loading of the cache happens only once; don't check whether the
	 * cache is already loaded.
	 */
	LIST_INIT(&ev_cache);
	setauevent();	/* Rewind to beginning of entries. */
	do {
		aemp = audit_event_map_alloc();
		if (aemp == NULL) {
			flush_cache();
			return (-1);
		}
		ep = getauevent_r(&aemp->ev);
		if (ep != NULL)
			LIST_INSERT_HEAD(&ev_cache, aemp, ev_list);
		else
			audit_event_map_free(aemp);
	} while (ep != NULL);
	return (1);
}

/*
 * Read the event with the matching event number from the cache.
 */
static struct au_event_ent *
read_from_cache(au_event_t event)
{
	struct audit_event_map *elem;

	/* XXX: Would assert 'mutex'. */

	LIST_FOREACH(elem, &ev_cache, ev_list) {
		if (elem->ev.ae_number == event)
			return (&elem->ev);
	}

	return (NULL);
}

/*
 * Check if the audit event is preselected against the preselection mask.
 */
int
au_preselect(au_event_t event, au_mask_t *mask_p, int sorf, int flag)
{
	struct au_event_ent *ev;
	au_class_t effmask = 0;

	if (mask_p == NULL)
		return (-1);

#ifdef HAVE_PTHREAD_MUTEX_LOCK
	pthread_mutex_lock(&mutex);
#endif
	if (firsttime) {
		firsttime = 0;
		if (load_event_table() == -1) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
			pthread_mutex_unlock(&mutex);
#endif
			return (-1);
		}
	}
	switch (flag) {
	case AU_PRS_REREAD:
		flush_cache();
		if (load_event_table() == -1) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
			pthread_mutex_unlock(&mutex);
#endif
			return (-1);
		}
		ev = read_from_cache(event);
		break;
	case AU_PRS_USECACHE:
		ev = read_from_cache(event);
		break;
	default:
		ev = NULL;
	}
	if (ev == NULL) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
		pthread_mutex_unlock(&mutex);
#endif
		return (-1);
	}
	if (sorf & AU_PRS_SUCCESS)
		effmask |= (mask_p->am_success & ev->ae_class);
	if (sorf & AU_PRS_FAILURE)
		effmask |= (mask_p->am_failure & ev->ae_class);
#ifdef HAVE_PTHREAD_MUTEX_LOCK
	pthread_mutex_unlock(&mutex);
#endif
	if (effmask != 0)
		return (1);
	return (0);
}