/*-
 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/un.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "agents/passwd.h"
#include "agents/group.h"
#include "agents/services.h"
#include "cachelib.h"
#include "config.h"
#include "debug.h"
#include "log.h"
#include "nscdcli.h"
#include "parser.h"
#include "query.h"
#include "singletons.h"

#ifndef CONFIG_PATH
#define CONFIG_PATH "/etc/nscd.conf"
#endif
#define DEFAULT_CONFIG_PATH "nscd.conf"

#define MAX_SOCKET_IO_SIZE 4096

struct processing_thread_args {
	cache the_cache;
	struct configuration *the_configuration;
	struct runtime_env *the_runtime_env;
};

static void accept_connection(struct kevent *, struct runtime_env *,
	struct configuration *);
static void destroy_cache_(cache);
static void destroy_runtime_env(struct runtime_env *);
static cache init_cache_(struct configuration *);
static struct runtime_env *init_runtime_env(struct configuration *);
static void processing_loop(cache, struct runtime_env *,
	struct configuration *);
static void process_socket_event(struct kevent *, struct runtime_env *,
	struct configuration *);
static void process_timer_event(struct kevent *, struct runtime_env *,
	struct configuration *);
static void *processing_thread(void *);
static void usage(void);

void get_time_func(struct timeval *);

static void
usage(void)
{
	fprintf(stderr,
	    "usage: nscd [-dnst] [-i cachename] [-I cachename]\n");
	exit(1);
}

static cache
init_cache_(struct configuration *config)
{
	struct cache_params params;
	cache retval;

	struct configuration_entry *config_entry;
	size_t size, i;
	int res;

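	/*
	 * Build the cache and register a positive and a negative cache
	 * entry for every configuration entry.  Multipart entries are
	 * registered lazily, while queries are being processed.
	 */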
	TRACE_IN(init_cache_);

	memset(&params, 0, sizeof(struct cache_params));
	params.get_time_func = get_time_func;
	retval = init_cache(&params);

	size = configuration_get_entries_size(config);
	for (i = 0; i < size; ++i) {
		config_entry = configuration_get_entry(config, i);
		/*
		 * We should register common entries now - multipart entries
		 * would be registered automatically during the queries.
		 */
		res = register_cache_entry(retval, (struct cache_entry_params *)
			&config_entry->positive_cache_params);
		config_entry->positive_cache_entry = find_cache_entry(retval,
			config_entry->positive_cache_params.cep.entry_name);
		assert(config_entry->positive_cache_entry !=
			INVALID_CACHE_ENTRY);

		res = register_cache_entry(retval, (struct cache_entry_params *)
			&config_entry->negative_cache_params);
		config_entry->negative_cache_entry = find_cache_entry(retval,
			config_entry->negative_cache_params.cep.entry_name);
		assert(config_entry->negative_cache_entry !=
			INVALID_CACHE_ENTRY);
	}

	LOG_MSG_2("cache", "cache was successfully initialized");
	TRACE_OUT(init_cache_);
	return (retval);
}

static void
destroy_cache_(cache the_cache)
{
	TRACE_IN(destroy_cache_);
	destroy_cache(the_cache);
	TRACE_OUT(destroy_cache_);
}

/*
 * The socket and the kqueue are prepared here.  We have one global queue
 * for both socket and timer events.
 */
static struct runtime_env *
init_runtime_env(struct configuration *config)
{
	int serv_addr_len;
	struct sockaddr_un serv_addr;

	struct kevent eventlist;
	struct timespec timeout;

	struct runtime_env *retval;

	TRACE_IN(init_runtime_env);
	retval = calloc(1, sizeof(*retval));
	assert(retval != NULL);

	retval->sockfd = socket(PF_LOCAL, SOCK_STREAM|SOCK_NONBLOCK, 0);

	if (config->force_unlink == 1)
		unlink(config->socket_path);

	memset(&serv_addr, 0, sizeof(struct sockaddr_un));
	serv_addr.sun_family = PF_LOCAL;
	strlcpy(serv_addr.sun_path, config->socket_path,
	    sizeof(serv_addr.sun_path));
	serv_addr_len = sizeof(serv_addr.sun_family) +
		strlen(serv_addr.sun_path) + 1;

	if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
		serv_addr_len) == -1) {
		close(retval->sockfd);
		free(retval);

		LOG_ERR_2("runtime environment", "can't bind socket to path: "
			"%s", config->socket_path);
		TRACE_OUT(init_runtime_env);
		return (NULL);
	}
	LOG_MSG_2("runtime environment", "using socket %s",
	    config->socket_path);

	/*
	 * Here we set the socket permissions and set its backlog to the
	 * maximum value (the socket itself was already created
	 * non-blocking via SOCK_NONBLOCK).
	 */
	chmod(config->socket_path, config->socket_mode);
	listen(retval->sockfd, -1);

	retval->queue = kqueue();
	assert(retval->queue != -1);

	EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
	    0, 0, 0);
	memset(&timeout, 0, sizeof(struct timespec));
	kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);

	LOG_MSG_2("runtime environment", "successfully initialized");
	TRACE_OUT(init_runtime_env);
	return (retval);
}

static void
destroy_runtime_env(struct runtime_env *env)
{
	TRACE_IN(destroy_runtime_env);
	close(env->queue);
	close(env->sockfd);
	free(env);
	TRACE_OUT(destroy_runtime_env);
}

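/*
 * Accepts a connection on the listening socket, checks the peer's
 * credentials with getpeereid(2) and creates a query_state for it.
 * A one-shot read event and a one-shot timer event (the query timeout)
 * are then registered in the global kqueue.
 */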
static void
accept_connection(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct kevent eventlist[2];
	struct timespec timeout;
	struct query_state *qstate;

	int fd;
	int res;

	uid_t euid;
	gid_t egid;

	TRACE_IN(accept_connection);
	fd = accept(event_data->ident, NULL, NULL);
	if (fd == -1) {
		LOG_ERR_2("accept_connection", "error %d during accept()",
		    errno);
		TRACE_OUT(accept_connection);
		return;
	}

	if (getpeereid(fd, &euid, &egid) != 0) {
		LOG_ERR_2("accept_connection", "error %d during getpeereid()",
		    errno);
		close(fd);
		TRACE_OUT(accept_connection);
		return;
	}

	qstate = init_query_state(fd, sizeof(int), euid, egid);
	if (qstate == NULL) {
		LOG_ERR_2("accept_connection", "can't init query_state");
		close(fd);
		TRACE_OUT(accept_connection);
		return;
	}

	memset(&timeout, 0, sizeof(struct timespec));
	EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
	    0, qstate->timeout.tv_sec * 1000, qstate);
	EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
	    NOTE_LOWAT, qstate->kevent_watermark, qstate);
	res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
	if (res < 0)
		LOG_ERR_2("accept_connection", "kevent error");

	TRACE_OUT(accept_connection);
}

static void
process_socket_event(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct kevent eventlist[2];
	struct timeval query_timeout;
	struct timespec kevent_timeout;
	int nevents;
	int eof_res, res;
	ssize_t io_res;
	struct query_state *qstate;

	TRACE_IN(process_socket_event);
	eof_res = event_data->flags & EV_EOF ? 1 : 0;
	res = 0;

	memset(&kevent_timeout, 0, sizeof(struct timespec));
	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
	    0, 0, NULL);
	nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
	if (nevents == -1) {
		if (errno == ENOENT) {
			/* the timer is already handling this event */
			TRACE_OUT(process_socket_event);
			return;
		} else {
			/* some other error happened */
			LOG_ERR_2("process_socket_event", "kevent error, errno"
			    " is %d", errno);
			TRACE_OUT(process_socket_event);
			return;
		}
	}
	qstate = (struct query_state *)event_data->udata;

	/*
	 * If the buffer that is to be sent/received is too large,
	 * we send it implicitly, by using the query_io_buffer_read and
	 * query_io_buffer_write functions of the query_state.  These
	 * functions use a temporary buffer, which is later sent/received
	 * in parts.  The code below implements buffer splitting/merging
	 * for send/receive operations.  It also does the actual socket
	 * IO operations.
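	 * A transfer larger than MAX_SOCKET_IO_SIZE is moved through the
	 * temporary io_buffer in MAX_SOCKET_IO_SIZE-sized chunks, one chunk
	 * per kevent notification.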
	 */
	if (((qstate->use_alternate_io == 0) &&
		(qstate->kevent_watermark <= (size_t)event_data->data)) ||
		((qstate->use_alternate_io != 0) &&
		(qstate->io_buffer_watermark <= (size_t)event_data->data))) {
		if (qstate->use_alternate_io != 0) {
			switch (qstate->io_buffer_filter) {
			case EVFILT_READ:
				io_res = query_socket_read(qstate,
					qstate->io_buffer_p,
					qstate->io_buffer_watermark);
				if (io_res < 0) {
					qstate->use_alternate_io = 0;
					qstate->process_func = NULL;
				} else {
					qstate->io_buffer_p += io_res;
					if (qstate->io_buffer_p ==
						qstate->io_buffer +
						qstate->io_buffer_size) {
						qstate->io_buffer_p =
							qstate->io_buffer;
						qstate->use_alternate_io = 0;
					}
				}
				break;
			default:
				break;
			}
		}

		if (qstate->use_alternate_io == 0) {
			do {
				res = qstate->process_func(qstate);
			} while ((qstate->kevent_watermark == 0) &&
				(qstate->process_func != NULL) &&
				(res == 0));

			if (res != 0)
				qstate->process_func = NULL;
		}

		if ((qstate->use_alternate_io != 0) &&
			(qstate->io_buffer_filter == EVFILT_WRITE)) {
			io_res = query_socket_write(qstate, qstate->io_buffer_p,
				qstate->io_buffer_watermark);
			if (io_res < 0) {
				qstate->use_alternate_io = 0;
				qstate->process_func = NULL;
			} else
				qstate->io_buffer_p += io_res;
		}
	} else {
		/* assuming that the socket was closed */
		qstate->process_func = NULL;
		qstate->use_alternate_io = 0;
	}

	if (((qstate->process_func == NULL) &&
		(qstate->use_alternate_io == 0)) ||
		(eof_res != 0) || (res != 0)) {
		destroy_query_state(qstate);
		close(event_data->ident);
		TRACE_OUT(process_socket_event);
		return;
	}

	/* updating the query_state lifetime variable */
	get_time_func(&query_timeout);
	query_timeout.tv_usec = 0;
	query_timeout.tv_sec -= qstate->creation_time.tv_sec;
	if (query_timeout.tv_sec > qstate->timeout.tv_sec)
		query_timeout.tv_sec = 0;
	else
		query_timeout.tv_sec = qstate->timeout.tv_sec -
			query_timeout.tv_sec;

	if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
		qstate->io_buffer + qstate->io_buffer_size))
		qstate->use_alternate_io = 0;

	if (qstate->use_alternate_io == 0) {
		/*
		 * If we must send/receive a large block of data,
		 * we should prepare the query_state's io_XXX fields.
		 * We should also substitute its write_func and read_func
		 * with query_io_buffer_write and query_io_buffer_read,
		 * which will allow us to implicitly send/receive this large
		 * buffer later (in the subsequent calls to
		 * process_socket_event).
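		 * The same one-shot NOTE_LOWAT registration is reused for
		 * the temporary buffer, with the watermark capped at
		 * MAX_SOCKET_IO_SIZE.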
		 */
		if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
			if (qstate->io_buffer != NULL)
				free(qstate->io_buffer);

			qstate->io_buffer = calloc(1,
				qstate->kevent_watermark);
			assert(qstate->io_buffer != NULL);

			qstate->io_buffer_p = qstate->io_buffer;
			qstate->io_buffer_size = qstate->kevent_watermark;
			qstate->io_buffer_filter = qstate->kevent_filter;

			qstate->write_func = query_io_buffer_write;
			qstate->read_func = query_io_buffer_read;

			if (qstate->kevent_filter == EVFILT_READ)
				qstate->use_alternate_io = 1;

			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
			EV_SET(&eventlist[1], event_data->ident,
			    qstate->kevent_filter, EV_ADD | EV_ONESHOT,
			    NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
		} else {
			EV_SET(&eventlist[1], event_data->ident,
			    qstate->kevent_filter, EV_ADD | EV_ONESHOT,
			    NOTE_LOWAT, qstate->kevent_watermark, qstate);
		}
	} else {
		if (qstate->io_buffer + qstate->io_buffer_size -
			qstate->io_buffer_p <
			MAX_SOCKET_IO_SIZE) {
			qstate->io_buffer_watermark = qstate->io_buffer +
				qstate->io_buffer_size - qstate->io_buffer_p;
			EV_SET(&eventlist[1], event_data->ident,
			    qstate->io_buffer_filter,
			    EV_ADD | EV_ONESHOT, NOTE_LOWAT,
			    qstate->io_buffer_watermark,
			    qstate);
		} else {
			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
			EV_SET(&eventlist[1], event_data->ident,
			    qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
			    NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
		}
	}
	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
	    EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
	kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);

	TRACE_OUT(process_socket_event);
}

/*
 * This routine is called if a timer event has been signaled in the kqueue.
 * It just closes the socket and destroys the query_state.
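 * The timer was armed with the remaining lifetime of the query, so its
 * expiration means the client failed to complete the query in time.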
 */
static void
process_timer_event(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct query_state *qstate;

	TRACE_IN(process_timer_event);
	qstate = (struct query_state *)event_data->udata;
	destroy_query_state(qstate);
	close(event_data->ident);
	TRACE_OUT(process_timer_event);
}

/*
 * The processing loop is the basic processing routine that forms the body
 * of each processing thread.
 */
static void
processing_loop(cache the_cache, struct runtime_env *env,
	struct configuration *config)
{
	struct timespec timeout;
	const int eventlist_size = 1;
	struct kevent eventlist[eventlist_size];
	int nevents, i;

	TRACE_MSG("=> processing_loop");
	memset(&timeout, 0, sizeof(struct timespec));
	memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);

	for (;;) {
		nevents = kevent(env->queue, NULL, 0, eventlist,
			eventlist_size, NULL);
		/*
		 * we can only receive 1 event on success
		 */
		if (nevents == 1) {
			struct kevent *event_data;
			event_data = &eventlist[0];

			if ((int)event_data->ident == env->sockfd) {
				for (i = 0; i < event_data->data; ++i)
					accept_connection(event_data, env, config);

				EV_SET(eventlist, s_runtime_env->sockfd,
				    EVFILT_READ, EV_ADD | EV_ONESHOT,
				    0, 0, 0);
				memset(&timeout, 0,
					sizeof(struct timespec));
				kevent(s_runtime_env->queue, eventlist,
					1, NULL, 0, &timeout);

			} else {
				switch (event_data->filter) {
				case EVFILT_READ:
				case EVFILT_WRITE:
					process_socket_event(event_data,
						env, config);
					break;
				case EVFILT_TIMER:
					process_timer_event(event_data,
						env, config);
					break;
				default:
					break;
				}
			}
		} else {
			/* this branch shouldn't currently be executed */
		}
	}

	TRACE_MSG("<= processing_loop");
}

/*
 * Wrapper around the processing loop function.  It sets the thread signal
 * mask to avoid SIGPIPE signals (which can happen if the client works
 * incorrectly).
 */
static void *
processing_thread(void *data)
{
	struct processing_thread_args *args;
	sigset_t new;

	TRACE_MSG("=> processing_thread");
	args = (struct processing_thread_args *)data;

	sigemptyset(&new);
	sigaddset(&new, SIGPIPE);
	if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
		LOG_ERR_1("processing thread",
			"thread can't block the SIGPIPE signal");

	processing_loop(args->the_cache, args->the_runtime_env,
		args->the_configuration);
	free(args);
	TRACE_MSG("<= processing_thread");

	return (NULL);
}

void
get_time_func(struct timeval *time)
{
	struct timespec res;
	memset(&res, 0, sizeof(struct timespec));
	clock_gettime(CLOCK_MONOTONIC, &res);

	time->tv_sec = res.tv_sec;
	time->tv_usec = 0;
}

/*
 * The idea of _nss_cache_cycle_prevention_function is that nsdispatch
 * will search for this symbol in the executable.  This symbol is the
 * attribute of the caching daemon.  So, if it exists, nsdispatch won't try
 * to connect to the caching daemon and will just ignore the 'cache'
 * source in nsswitch.conf.  This method helps to avoid cycles and
 * organize self-performing requests.
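 * The symbol is only defined here and is looked up by name at run time
 * (presumably via dlsym(3)); the daemon itself never uses it.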
 *
 * (This is not actually a function; it used to be, but it doesn't make any
 * difference, as long as it has external linkage.)
 */
void *_nss_cache_cycle_prevention_function;

int
main(int argc, char *argv[])
{
	struct processing_thread_args *thread_args;
	pthread_t *threads;

	struct pidfh *pidfile;
	pid_t pid;

	char const *config_file;
	char const *error_str;
	int error_line;
	int i, res;

	int trace_mode_enabled;
	int force_single_threaded;
	int do_not_daemonize;
	int clear_user_cache_entries, clear_all_cache_entries;
	char *user_config_entry_name, *global_config_entry_name;
	int show_statistics;
	int daemon_mode, interactive_mode;

	/* by default all debug messages are omitted */
	TRACE_OFF();

	/* parsing command line arguments */
	trace_mode_enabled = 0;
	force_single_threaded = 0;
	do_not_daemonize = 0;
	clear_user_cache_entries = 0;
	clear_all_cache_entries = 0;
	show_statistics = 0;
	user_config_entry_name = NULL;
	global_config_entry_name = NULL;
	while ((res = getopt(argc, argv, "nstdi:I:")) != -1) {
		switch (res) {
		case 'n':
			do_not_daemonize = 1;
			break;
		case 's':
			force_single_threaded = 1;
			break;
		case 't':
			trace_mode_enabled = 1;
			break;
		case 'i':
			clear_user_cache_entries = 1;
			if (optarg != NULL)
				if (strcmp(optarg, "all") != 0)
					user_config_entry_name = strdup(optarg);
			break;
		case 'I':
			clear_all_cache_entries = 1;
			if (optarg != NULL)
				if (strcmp(optarg, "all") != 0)
					global_config_entry_name =
						strdup(optarg);
			break;
		case 'd':
			show_statistics = 1;
			break;
		case '?':
		default:
			usage();
			/* NOT REACHED */
		}
	}

	daemon_mode = do_not_daemonize | force_single_threaded |
		trace_mode_enabled;
	interactive_mode = clear_user_cache_entries | clear_all_cache_entries |
		show_statistics;

	if ((daemon_mode != 0) && (interactive_mode != 0)) {
		LOG_ERR_1("main", "daemon mode and interactive mode arguments "
			"can't be used together");
		usage();
	}

	if (interactive_mode != 0) {
		FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r");
		char pidbuf[256];

		struct nscd_connection_params connection_params;
		nscd_connection connection;

		int result;

		if (pidfin == NULL)
			errx(EXIT_FAILURE, "There is no daemon running.");

		memset(pidbuf, 0, sizeof(pidbuf));
		fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin);
		if (ferror(pidfin) != 0)
			errx(EXIT_FAILURE, "Can't read from pidfile.");
		fclose(pidfin);

		if (sscanf(pidbuf, "%d", &pid) != 1)
			errx(EXIT_FAILURE, "Invalid pidfile.");
		LOG_MSG_1("main", "daemon PID is %d", pid);

		memset(&connection_params, 0,
			sizeof(struct nscd_connection_params));
		connection_params.socket_path = DEFAULT_SOCKET_PATH;
		connection = open_nscd_connection__(&connection_params);
		if (connection == INVALID_NSCD_CONNECTION)
			errx(EXIT_FAILURE, "Can't connect to the daemon.");

		if (clear_user_cache_entries != 0) {
			result = nscd_transform__(connection,
				user_config_entry_name, TT_USER);
			if (result != 0)
				LOG_MSG_1("main",
					"user cache transformation failed");
			else
				LOG_MSG_1("main",
					"user cache transformation "
					"succeeded");
		}

		if (clear_all_cache_entries != 0) {
			if (geteuid() != 0)
				errx(EXIT_FAILURE, "Only root can initiate "
					"global cache transformation.");
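
			/*
			 * Request a global cache transformation (the -I
			 * option); the check above ensures that only root
			 * can do this.
			 */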
			result = nscd_transform__(connection,
				global_config_entry_name, TT_ALL);
			if (result != 0)
				LOG_MSG_1("main",
					"global cache transformation "
					"failed");
			else
				LOG_MSG_1("main",
					"global cache transformation "
					"succeeded");
		}

		close_nscd_connection__(connection);

		free(user_config_entry_name);
		free(global_config_entry_name);
		return (EXIT_SUCCESS);
	}

	pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid);
	if (pidfile == NULL) {
		if (errno == EEXIST)
			errx(EXIT_FAILURE, "Daemon already running, pid: %d.",
				pid);
		warn("Cannot open or create pidfile");
	}

	if (trace_mode_enabled == 1)
		TRACE_ON();

	/* blocking the main thread from receiving SIGPIPE signal */
	sigblock(sigmask(SIGPIPE));

	/* daemonization */
	if (do_not_daemonize == 0) {
		res = daemon(0, trace_mode_enabled == 0 ? 0 : 1);
		if (res != 0) {
			LOG_ERR_1("main", "can't daemonize myself: %s",
				strerror(errno));
			pidfile_remove(pidfile);
			goto fin;
		} else
			LOG_MSG_1("main", "successfully daemonized");
	}

	pidfile_write(pidfile);

	s_agent_table = init_agent_table();
	register_agent(s_agent_table, init_passwd_agent());
	register_agent(s_agent_table, init_passwd_mp_agent());
	register_agent(s_agent_table, init_group_agent());
	register_agent(s_agent_table, init_group_mp_agent());
	register_agent(s_agent_table, init_services_agent());
	register_agent(s_agent_table, init_services_mp_agent());
	LOG_MSG_1("main", "request agents registered successfully");

	/*
	 * Hosts agent can't work properly until we have access to the
	 * appropriate dtab structures, which are used in nsdispatch
	 * calls
	 *
	 register_agent(s_agent_table, init_hosts_agent());
	 */

	/* configuration initialization */
	s_configuration = init_configuration();
	fill_configuration_defaults(s_configuration);

	error_str = NULL;
	error_line = 0;
	config_file = CONFIG_PATH;

	res = parse_config_file(s_configuration, config_file, &error_str,
		&error_line);
	if ((res != 0) && (error_str == NULL)) {
		config_file = DEFAULT_CONFIG_PATH;
		res = parse_config_file(s_configuration, config_file,
			&error_str, &error_line);
	}

	if (res != 0) {
		if (error_str != NULL) {
			LOG_ERR_1("main", "error in configuration file(%s, %d): %s\n",
				config_file, error_line, error_str);
		} else {
			LOG_ERR_1("main", "no configuration file found "
				"- was looking for %s and %s",
				CONFIG_PATH, DEFAULT_CONFIG_PATH);
		}
		destroy_configuration(s_configuration);
		return (-1);
	}

	if (force_single_threaded == 1)
		s_configuration->threads_num = 1;

	/* cache initialization */
	s_cache = init_cache_(s_configuration);
	if (s_cache == NULL) {
		LOG_ERR_1("main", "can't initialize the cache");
		destroy_configuration(s_configuration);
		return (-1);
	}

	/* runtime environment initialization */
	s_runtime_env = init_runtime_env(s_configuration);
	if (s_runtime_env == NULL) {
		LOG_ERR_1("main", "can't initialize the runtime environment");
		destroy_configuration(s_configuration);
		destroy_cache_(s_cache);
		return (-1);
	}

	if (s_configuration->threads_num > 1) {
		threads = calloc(1, sizeof(*threads) *
			s_configuration->threads_num);
		for (i = 0; i < s_configuration->threads_num; ++i) {
			thread_args = malloc(
				sizeof(*thread_args));

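			/*
			 * Each thread gets its own copy of the arguments;
			 * processing_thread() frees it when it is done.
			 */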
			thread_args->the_cache = s_cache;
			thread_args->the_runtime_env = s_runtime_env;
			thread_args->the_configuration = s_configuration;

			LOG_MSG_1("main", "thread #%d was successfully created",
				i);
			pthread_create(&threads[i], NULL, processing_thread,
				thread_args);

			thread_args = NULL;
		}

		for (i = 0; i < s_configuration->threads_num; ++i)
			pthread_join(threads[i], NULL);
	} else {
		LOG_MSG_1("main", "working in single-threaded mode");
		processing_loop(s_cache, s_runtime_env, s_configuration);
	}

fin:
	/* runtime environment destruction */
	destroy_runtime_env(s_runtime_env);

	/* cache destruction */
	destroy_cache_(s_cache);

	/* configuration destruction */
	destroy_configuration(s_configuration);

	/* agents table destruction */
	destroy_agent_table(s_agent_table);

	pidfile_remove(pidfile);
	return (EXIT_SUCCESS);
}