152267f74SRobert Watson /*- 252267f74SRobert Watson * Copyright (c) 2004 Apple Inc. 3ca0716f5SRobert Watson * Copyright (c) 2005 Robert N. M. Watson 4ca0716f5SRobert Watson * All rights reserved. 5ca0716f5SRobert Watson * 6ca0716f5SRobert Watson * Redistribution and use in source and binary forms, with or without 7ca0716f5SRobert Watson * modification, are permitted provided that the following conditions 8ca0716f5SRobert Watson * are met: 9ca0716f5SRobert Watson * 1. Redistributions of source code must retain the above copyright 10ca0716f5SRobert Watson * notice, this list of conditions and the following disclaimer. 11ca0716f5SRobert Watson * 2. Redistributions in binary form must reproduce the above copyright 12ca0716f5SRobert Watson * notice, this list of conditions and the following disclaimer in the 13ca0716f5SRobert Watson * documentation and/or other materials provided with the distribution. 1452267f74SRobert Watson * 3. Neither the name of Apple Inc. ("Apple") nor the names of 15ca0716f5SRobert Watson * its contributors may be used to endorse or promote products derived 16ca0716f5SRobert Watson * from this software without specific prior written permission. 17ca0716f5SRobert Watson * 18ca0716f5SRobert Watson * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND 19ca0716f5SRobert Watson * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20ca0716f5SRobert Watson * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21ca0716f5SRobert Watson * ARE DISCLAIMED. 
 * IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_mask.c#14 $
 */

#include <sys/types.h>

#include <config/config.h>
#ifdef HAVE_FULL_QUEUE_H
#include <sys/queue.h>
#else /* !HAVE_FULL_QUEUE_H */
#include <compat/queue.h>
#endif /* !HAVE_FULL_QUEUE_H */

#include <bsm/libbsm.h>

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

/* MT-Safe */
/* Serializes all access to the event cache state below. */
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
/*
 * Non-zero until the first cache-load attempt has been made; cleared (under
 * 'mutex') before load_event_table() is first called from au_preselect().
 */
static int firsttime = 1;

/*
 * XXX ev_cache, once created, sticks around until the calling program exits.
54ca0716f5SRobert Watson * This may or may not be a problem as far as absolute memory usage goes, but 55ca0716f5SRobert Watson * at least there don't appear to be any leaks in using the cache. 56ca0716f5SRobert Watson * 57ca0716f5SRobert Watson * XXXRW: Note that despite (mutex), load_event_table() could race with 58ca0716f5SRobert Watson * other consumers of the getauevents() API. 59ca0716f5SRobert Watson */ 60ca0716f5SRobert Watson struct audit_event_map { 61ca0716f5SRobert Watson char ev_name[AU_EVENT_NAME_MAX]; 62ca0716f5SRobert Watson char ev_desc[AU_EVENT_DESC_MAX]; 63ca0716f5SRobert Watson struct au_event_ent ev; 64ca0716f5SRobert Watson LIST_ENTRY(audit_event_map) ev_list; 65ca0716f5SRobert Watson }; 66ca0716f5SRobert Watson static LIST_HEAD(, audit_event_map) ev_cache; 67ca0716f5SRobert Watson 68ca0716f5SRobert Watson static struct audit_event_map * 69ca0716f5SRobert Watson audit_event_map_alloc(void) 70ca0716f5SRobert Watson { 71ca0716f5SRobert Watson struct audit_event_map *aemp; 72ca0716f5SRobert Watson 73ca0716f5SRobert Watson aemp = malloc(sizeof(*aemp)); 74ca0716f5SRobert Watson if (aemp == NULL) 75ca0716f5SRobert Watson return (aemp); 76ca0716f5SRobert Watson bzero(aemp, sizeof(*aemp)); 77ca0716f5SRobert Watson aemp->ev.ae_name = aemp->ev_name; 78ca0716f5SRobert Watson aemp->ev.ae_desc = aemp->ev_desc; 79ca0716f5SRobert Watson return (aemp); 80ca0716f5SRobert Watson } 81ca0716f5SRobert Watson 82ca0716f5SRobert Watson static void 83ca0716f5SRobert Watson audit_event_map_free(struct audit_event_map *aemp) 84ca0716f5SRobert Watson { 85ca0716f5SRobert Watson 86ca0716f5SRobert Watson free(aemp); 87ca0716f5SRobert Watson } 88ca0716f5SRobert Watson 89ca0716f5SRobert Watson /* 90ca0716f5SRobert Watson * When reading into the cache fails, we need to flush the entire cache to 91ca0716f5SRobert Watson * prevent it from containing some but not all records. 
92ca0716f5SRobert Watson */ 93ca0716f5SRobert Watson static void 94ca0716f5SRobert Watson flush_cache(void) 95ca0716f5SRobert Watson { 96ca0716f5SRobert Watson struct audit_event_map *aemp; 97ca0716f5SRobert Watson 98ca0716f5SRobert Watson /* XXX: Would assert 'mutex'. */ 99ca0716f5SRobert Watson 100ca0716f5SRobert Watson while ((aemp = LIST_FIRST(&ev_cache)) != NULL) { 101ca0716f5SRobert Watson LIST_REMOVE(aemp, ev_list); 102ca0716f5SRobert Watson audit_event_map_free(aemp); 103ca0716f5SRobert Watson } 104ca0716f5SRobert Watson } 105ca0716f5SRobert Watson 106ca0716f5SRobert Watson static int 107ca0716f5SRobert Watson load_event_table(void) 108ca0716f5SRobert Watson { 109ca0716f5SRobert Watson struct audit_event_map *aemp; 110ca0716f5SRobert Watson struct au_event_ent *ep; 111ca0716f5SRobert Watson 112ca0716f5SRobert Watson /* 113ca0716f5SRobert Watson * XXX: Would assert 'mutex'. 114ca0716f5SRobert Watson * Loading of the cache happens only once; dont check if cache is 115ca0716f5SRobert Watson * already loaded. 116ca0716f5SRobert Watson */ 117ca0716f5SRobert Watson LIST_INIT(&ev_cache); 118ca0716f5SRobert Watson setauevent(); /* Rewind to beginning of entries. */ 119ca0716f5SRobert Watson do { 120ca0716f5SRobert Watson aemp = audit_event_map_alloc(); 121ca0716f5SRobert Watson if (aemp == NULL) { 122ca0716f5SRobert Watson flush_cache(); 123ca0716f5SRobert Watson return (-1); 124ca0716f5SRobert Watson } 125ca0716f5SRobert Watson ep = getauevent_r(&aemp->ev); 126ca0716f5SRobert Watson if (ep != NULL) 127ca0716f5SRobert Watson LIST_INSERT_HEAD(&ev_cache, aemp, ev_list); 128ca0716f5SRobert Watson else 129ca0716f5SRobert Watson audit_event_map_free(aemp); 130ca0716f5SRobert Watson } while (ep != NULL); 131ca0716f5SRobert Watson return (1); 132ca0716f5SRobert Watson } 133ca0716f5SRobert Watson 134ca0716f5SRobert Watson /* 135ca0716f5SRobert Watson * Read the event with the matching event number from the cache. 
136ca0716f5SRobert Watson */ 137ca0716f5SRobert Watson static struct au_event_ent * 138ca0716f5SRobert Watson read_from_cache(au_event_t event) 139ca0716f5SRobert Watson { 140ca0716f5SRobert Watson struct audit_event_map *elem; 141ca0716f5SRobert Watson 142ca0716f5SRobert Watson /* XXX: Would assert 'mutex'. */ 143ca0716f5SRobert Watson 144ca0716f5SRobert Watson LIST_FOREACH(elem, &ev_cache, ev_list) { 145ca0716f5SRobert Watson if (elem->ev.ae_number == event) 146ca0716f5SRobert Watson return (&elem->ev); 147ca0716f5SRobert Watson } 148ca0716f5SRobert Watson 149ca0716f5SRobert Watson return (NULL); 150ca0716f5SRobert Watson } 151ca0716f5SRobert Watson 152ca0716f5SRobert Watson /* 153ca0716f5SRobert Watson * Check if the audit event is preselected against the preselection mask. 154ca0716f5SRobert Watson */ 155ca0716f5SRobert Watson int 156ca0716f5SRobert Watson au_preselect(au_event_t event, au_mask_t *mask_p, int sorf, int flag) 157ca0716f5SRobert Watson { 158ca0716f5SRobert Watson struct au_event_ent *ev; 159ca0716f5SRobert Watson au_class_t effmask = 0; 160ca0716f5SRobert Watson 161ca0716f5SRobert Watson if (mask_p == NULL) 162ca0716f5SRobert Watson return (-1); 163ca0716f5SRobert Watson 164ca0716f5SRobert Watson 165ca0716f5SRobert Watson pthread_mutex_lock(&mutex); 166ca0716f5SRobert Watson if (firsttime) { 167ca0716f5SRobert Watson firsttime = 0; 168ca0716f5SRobert Watson if ( -1 == load_event_table()) { 169ca0716f5SRobert Watson pthread_mutex_unlock(&mutex); 170ca0716f5SRobert Watson return (-1); 171ca0716f5SRobert Watson } 172ca0716f5SRobert Watson } 173ca0716f5SRobert Watson switch (flag) { 174ca0716f5SRobert Watson case AU_PRS_REREAD: 175ca0716f5SRobert Watson flush_cache(); 176ca0716f5SRobert Watson if (load_event_table() == -1) { 177ca0716f5SRobert Watson pthread_mutex_unlock(&mutex); 178ca0716f5SRobert Watson return (-1); 179ca0716f5SRobert Watson } 180ca0716f5SRobert Watson ev = read_from_cache(event); 181ca0716f5SRobert Watson break; 182ca0716f5SRobert 
Watson case AU_PRS_USECACHE: 183ca0716f5SRobert Watson ev = read_from_cache(event); 184ca0716f5SRobert Watson break; 185ca0716f5SRobert Watson default: 186ca0716f5SRobert Watson ev = NULL; 187ca0716f5SRobert Watson } 188ca0716f5SRobert Watson if (ev == NULL) { 189ca0716f5SRobert Watson pthread_mutex_unlock(&mutex); 190ca0716f5SRobert Watson return (-1); 191ca0716f5SRobert Watson } 192ca0716f5SRobert Watson if (sorf & AU_PRS_SUCCESS) 193ca0716f5SRobert Watson effmask |= (mask_p->am_success & ev->ae_class); 194ca0716f5SRobert Watson if (sorf & AU_PRS_FAILURE) 195ca0716f5SRobert Watson effmask |= (mask_p->am_failure & ev->ae_class); 196ca0716f5SRobert Watson pthread_mutex_unlock(&mutex); 197ca0716f5SRobert Watson if (effmask != 0) 198ca0716f5SRobert Watson return (1); 199ca0716f5SRobert Watson return (0); 200ca0716f5SRobert Watson } 201