xref: /freebsd/usr.sbin/nscd/nscd.c (revision 66fd12cf4896eb08ad8e7a2627537f84ead84dd3)
1 /*-
2  * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/event.h>
33 #include <sys/socket.h>
34 #include <sys/stat.h>
35 #include <sys/time.h>
36 #include <sys/un.h>
37 
38 #include <assert.h>
39 #include <err.h>
40 #include <errno.h>
41 #include <fcntl.h>
42 #include <libutil.h>
43 #include <pthread.h>
44 #include <signal.h>
45 #include <stdio.h>
46 #include <stdlib.h>
47 #include <string.h>
48 #include <unistd.h>
49 
50 #include "agents/passwd.h"
51 #include "agents/group.h"
52 #include "agents/services.h"
53 #include "cachelib.h"
54 #include "config.h"
55 #include "debug.h"
56 #include "log.h"
57 #include "nscdcli.h"
58 #include "parser.h"
59 #include "query.h"
60 #include "singletons.h"
61 
62 #ifndef CONFIG_PATH
63 #define CONFIG_PATH "/etc/nscd.conf"
64 #endif
65 #define DEFAULT_CONFIG_PATH	"nscd.conf"
66 
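/*
 * The largest amount of data transferred over a client socket in a single
 * step; when a query needs to move a bigger buffer, process_socket_event()
 * uses this value as the kevent low-water mark for the transfer.
 */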
67 #define MAX_SOCKET_IO_SIZE	4096
68 
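/*
 * Arguments passed to each processing thread.  All threads share the same
 * cache, configuration and kqueue-based runtime environment.
 */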
69 struct processing_thread_args {
70 	cache	the_cache;
71 	struct configuration	*the_configuration;
72 	struct runtime_env		*the_runtime_env;
73 };
74 
75 static void accept_connection(struct kevent *, struct runtime_env *,
76 	struct configuration *);
77 static void destroy_cache_(cache);
78 static void destroy_runtime_env(struct runtime_env *);
79 static cache init_cache_(struct configuration *);
80 static struct runtime_env *init_runtime_env(struct configuration *);
81 static void processing_loop(cache, struct runtime_env *,
82 	struct configuration *);
83 static void process_socket_event(struct kevent *, struct runtime_env *,
84 	struct configuration *);
85 static void process_timer_event(struct kevent *, struct runtime_env *,
86 	struct configuration *);
87 static void *processing_thread(void *);
88 static void usage(void) __dead2;
89 
90 void get_time_func(struct timeval *);
91 
92 static void
93 usage(void)
94 {
95 	fprintf(stderr,
96 	    "usage: nscd [-dnst] [-i cachename] [-I cachename]\n");
97 	exit(1);
98 }
99 
100 static cache
101 init_cache_(struct configuration *config)
102 {
103 	struct cache_params params;
104 	cache retval;
105 
106 	struct configuration_entry *config_entry;
107 	size_t	size, i;
108 
109 	TRACE_IN(init_cache_);
110 
111 	memset(&params, 0, sizeof(struct cache_params));
112 	params.get_time_func = get_time_func;
113 	retval = init_cache(&params);
114 
115 	size = configuration_get_entries_size(config);
116 	for (i = 0; i < size; ++i) {
117 		config_entry = configuration_get_entry(config, i);
118 		/*
119 		 * Register the common entries now; multipart entries will be
120 		 * registered automatically during queries.
121 		 */
122 		register_cache_entry(retval, (struct cache_entry_params *)
123 			&config_entry->positive_cache_params);
124 		config_entry->positive_cache_entry = find_cache_entry(retval,
125 			config_entry->positive_cache_params.cep.entry_name);
126 		assert(config_entry->positive_cache_entry !=
127 			INVALID_CACHE_ENTRY);
128 
129 		register_cache_entry(retval, (struct cache_entry_params *)
130 			&config_entry->negative_cache_params);
131 		config_entry->negative_cache_entry = find_cache_entry(retval,
132 			config_entry->negative_cache_params.cep.entry_name);
133 		assert(config_entry->negative_cache_entry !=
134 			INVALID_CACHE_ENTRY);
135 	}
136 
137 	LOG_MSG_2("cache", "cache was successfully initialized");
138 	TRACE_OUT(init_cache_);
139 	return (retval);
140 }
141 
142 static void
143 destroy_cache_(cache the_cache)
144 {
145 	TRACE_IN(destroy_cache_);
146 	destroy_cache(the_cache);
147 	TRACE_OUT(destroy_cache_);
148 }
149 
150 /*
151  * The socket and the kqueue are prepared here. We have one global queue for
152  * both socket and timer events.
153  */
154 static struct runtime_env *
155 init_runtime_env(struct configuration *config)
156 {
157 	int serv_addr_len;
158 	struct sockaddr_un serv_addr;
159 
160 	struct kevent eventlist;
161 	struct timespec timeout;
162 
163 	struct runtime_env *retval;
164 
165 	TRACE_IN(init_runtime_env);
166 	retval = calloc(1, sizeof(*retval));
167 	assert(retval != NULL);
168 
169 	retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0);
170 
171 	if (config->force_unlink == 1)
172 		unlink(config->socket_path);
173 
174 	memset(&serv_addr, 0, sizeof(struct sockaddr_un));
175 	serv_addr.sun_family = PF_LOCAL;
176 	strlcpy(serv_addr.sun_path, config->socket_path,
177 		sizeof(serv_addr.sun_path));
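	/* address length: the family field plus the NUL-terminated path */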
178 	serv_addr_len = sizeof(serv_addr.sun_family) +
179 		strlen(serv_addr.sun_path) + 1;
180 
181 	if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
182 		serv_addr_len) == -1) {
183 		close(retval->sockfd);
184 		free(retval);
185 
186 		LOG_ERR_2("runtime environment", "can't bind socket to path: "
187 			"%s", config->socket_path);
188 		TRACE_OUT(init_runtime_env);
189 		return (NULL);
190 	}
191 	LOG_MSG_2("runtime environment", "using socket %s",
192 		config->socket_path);
193 
194 	/*
195 	 * Here we mark the socket as non-blocking and set its backlog to the
196 	 * maximum value.
197 	 */
198 	chmod(config->socket_path, config->socket_mode);
199 	listen(retval->sockfd, -1);
200 	fcntl(retval->sockfd, F_SETFL, O_NONBLOCK);
201 
202 	retval->queue = kqueue();
203 	assert(retval->queue != -1);
204 
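	/*
	 * Watch the listening socket for incoming connections.  The event is
	 * added with EV_ONESHOT, so processing_loop() re-registers it after
	 * each batch of accepts.
	 */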
205 	EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
206 		0, 0, 0);
207 	memset(&timeout, 0, sizeof(struct timespec));
208 	kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);
209 
210 	LOG_MSG_2("runtime environment", "successfully initialized");
211 	TRACE_OUT(init_runtime_env);
212 	return (retval);
213 }
214 
215 static void
216 destroy_runtime_env(struct runtime_env *env)
217 {
218 	TRACE_IN(destroy_runtime_env);
219 	close(env->queue);
220 	close(env->sockfd);
221 	free(env);
222 	TRACE_OUT(destroy_runtime_env);
223 }
224 
225 static void
226 accept_connection(struct kevent *event_data, struct runtime_env *env,
227 	struct configuration *config)
228 {
229 	struct kevent	eventlist[2];
230 	struct timespec	timeout;
231 	struct query_state	*qstate;
232 
233 	int	fd;
234 	int	res;
235 
236 	uid_t	euid;
237 	gid_t	egid;
238 
239 	TRACE_IN(accept_connection);
240 	fd = accept(event_data->ident, NULL, NULL);
241 	if (fd == -1) {
242 		LOG_ERR_2("accept_connection", "error %d during accept()",
243 		    errno);
244 		TRACE_OUT(accept_connection);
245 		return;
246 	}
247 
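	/* identify the connecting client by its effective uid and gid */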
248 	if (getpeereid(fd, &euid, &egid) != 0) {
249 		LOG_ERR_2("accept_connection", "error %d during getpeereid()",
250 			errno);
251 		TRACE_OUT(accept_connection);
252 		return;
253 	}
254 
255 	qstate = init_query_state(fd, sizeof(int), euid, egid);
256 	if (qstate == NULL) {
257 		LOG_ERR_2("accept_connection", "can't init query_state");
258 		TRACE_OUT(accept_connection);
259 		return;
260 	}
261 
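	/*
	 * Register two one-shot events for the new client: a timer bounding
	 * the lifetime of the query (kqueue timers are specified in
	 * milliseconds) and a read filter whose low-water mark is the
	 * initial watermark set up by init_query_state().
	 */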
262 	memset(&timeout, 0, sizeof(struct timespec));
263 	EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
264 		0, qstate->timeout.tv_sec * 1000, qstate);
265 	EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
266 		NOTE_LOWAT, qstate->kevent_watermark, qstate);
267 	res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
268 	if (res < 0)
269 		LOG_ERR_2("accept_connection", "kevent error");
270 
271 	TRACE_OUT(accept_connection);
272 }
273 
274 static void
275 process_socket_event(struct kevent *event_data, struct runtime_env *env,
276 	struct configuration *config)
277 {
278 	struct kevent	eventlist[2];
279 	struct timeval	query_timeout;
280 	struct timespec	kevent_timeout;
281 	int	nevents;
282 	int	eof_res, res;
283 	ssize_t	io_res;
284 	struct query_state *qstate;
285 
286 	TRACE_IN(process_socket_event);
287 	eof_res = event_data->flags & EV_EOF ? 1 : 0;
288 	res = 0;
289 
290 	memset(&kevent_timeout, 0, sizeof(struct timespec));
291 	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
292 		0, 0, NULL);
293 	nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
294 	if (nevents == -1) {
295 		if (errno == ENOENT) {
296 			/* the timer is already handling this event */
297 			TRACE_OUT(process_socket_event);
298 			return;
299 		} else {
300 			/* some other error happened */
301 			LOG_ERR_2("process_socket_event", "kevent error, errno"
302 				" is %d", errno);
303 			TRACE_OUT(process_socket_event);
304 			return;
305 		}
306 	}
307 	qstate = (struct query_state *)event_data->udata;
308 
309 	/*
310 	 * If the buffer that is to be sent/received is too large, we transfer
311 	 * it implicitly, by using the query_io_buffer_read and
312 	 * query_io_buffer_write functions in the query_state. These functions
313 	 * use a temporary buffer, which is later sent/received in parts.
314 	 * The code below implements buffer splitting/merging for send/receive
315 	 * operations. It also does the actual socket I/O.
316 	 */
317 	if (((qstate->use_alternate_io == 0) &&
318 		(qstate->kevent_watermark <= (size_t)event_data->data)) ||
319 		((qstate->use_alternate_io != 0) &&
320 		(qstate->io_buffer_watermark <= (size_t)event_data->data))) {
321 		if (qstate->use_alternate_io != 0) {
322 			switch (qstate->io_buffer_filter) {
323 			case EVFILT_READ:
324 				io_res = query_socket_read(qstate,
325 					qstate->io_buffer_p,
326 					qstate->io_buffer_watermark);
327 				if (io_res < 0) {
328 					qstate->use_alternate_io = 0;
329 					qstate->process_func = NULL;
330 				} else {
331 					qstate->io_buffer_p += io_res;
332 					if (qstate->io_buffer_p ==
333 					    	qstate->io_buffer +
334 						qstate->io_buffer_size) {
335 						qstate->io_buffer_p =
336 						    qstate->io_buffer;
337 						qstate->use_alternate_io = 0;
338 					}
339 				}
340 			break;
341 			default:
342 			break;
343 			}
344 		}
345 
346 		if (qstate->use_alternate_io == 0) {
347 			do {
348 				res = qstate->process_func(qstate);
349 			} while ((qstate->kevent_watermark == 0) &&
350 					(qstate->process_func != NULL) &&
351 					(res == 0));
352 
353 			if (res != 0)
354 				qstate->process_func = NULL;
355 		}
356 
357 		if ((qstate->use_alternate_io != 0) &&
358 			(qstate->io_buffer_filter == EVFILT_WRITE)) {
359 			io_res = query_socket_write(qstate, qstate->io_buffer_p,
360 				qstate->io_buffer_watermark);
361 			if (io_res < 0) {
362 				qstate->use_alternate_io = 0;
363 				qstate->process_func = NULL;
364 			} else
365 				qstate->io_buffer_p += io_res;
366 		}
367 	} else {
368 		/* assume that the socket was closed */
369 		qstate->process_func = NULL;
370 		qstate->use_alternate_io = 0;
371 	}
372 
373 	if (((qstate->process_func == NULL) &&
374 	    	(qstate->use_alternate_io == 0)) ||
375 		(eof_res != 0) || (res != 0)) {
376 		destroy_query_state(qstate);
377 		close(event_data->ident);
378 		TRACE_OUT(process_socket_event);
379 		return;
380 	}
381 
382 	/* updating the query_state lifetime variable */
383 	get_time_func(&query_timeout);
384 	query_timeout.tv_usec = 0;
385 	query_timeout.tv_sec -= qstate->creation_time.tv_sec;
386 	if (query_timeout.tv_sec > qstate->timeout.tv_sec)
387 		query_timeout.tv_sec = 0;
388 	else
389 		query_timeout.tv_sec = qstate->timeout.tv_sec -
390 			query_timeout.tv_sec;
391 
392 	if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
393 		qstate->io_buffer + qstate->io_buffer_size))
394 		qstate->use_alternate_io = 0;
395 
396 	if (qstate->use_alternate_io == 0) {
397 		/*
398 		 * If we must send/receive a large block of data, we prepare
399 		 * the query_state's io_XXX fields. We also substitute its
400 		 * write_func and read_func with query_io_buffer_write and
401 		 * query_io_buffer_read, which allow us to implicitly
402 		 * send/receive this large buffer later (in subsequent calls
403 		 * to process_socket_event).
405 		 */
406 		if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
407 #if 0
408 			/*
409 			 * XXX: Uncommenting this code makes nscd(8) fail for
410 			 *      entries larger than a few kB, causing a few seconds'
411 			 *      worth of delay for each call that retrieves them.
412 			 */
413 			if (qstate->io_buffer != NULL)
414 				free(qstate->io_buffer);
415 
416 			qstate->io_buffer = calloc(1,
417 				qstate->kevent_watermark);
418 			assert(qstate->io_buffer != NULL);
419 
420 			qstate->io_buffer_p = qstate->io_buffer;
421 			qstate->io_buffer_size = qstate->kevent_watermark;
422 			qstate->io_buffer_filter = qstate->kevent_filter;
423 
424 			qstate->write_func = query_io_buffer_write;
425 			qstate->read_func = query_io_buffer_read;
426 
427 			if (qstate->kevent_filter == EVFILT_READ)
428 				qstate->use_alternate_io = 1;
429 #endif
430 
431 			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
432 			EV_SET(&eventlist[1], event_data->ident,
433 				qstate->kevent_filter, EV_ADD | EV_ONESHOT,
434 				NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
435 		} else {
436 			EV_SET(&eventlist[1], event_data->ident,
437 		    		qstate->kevent_filter, EV_ADD | EV_ONESHOT,
438 		    		NOTE_LOWAT, qstate->kevent_watermark, qstate);
439 		}
440 	} else {
441 		if (qstate->io_buffer + qstate->io_buffer_size -
442 		    	qstate->io_buffer_p <
443 			MAX_SOCKET_IO_SIZE) {
444 			qstate->io_buffer_watermark = qstate->io_buffer +
445 				qstate->io_buffer_size - qstate->io_buffer_p;
446 			EV_SET(&eventlist[1], event_data->ident,
447 			    	qstate->io_buffer_filter,
448 				EV_ADD | EV_ONESHOT, NOTE_LOWAT,
449 				qstate->io_buffer_watermark,
450 				qstate);
451 		} else {
452 			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
453 			EV_SET(&eventlist[1], event_data->ident,
454 		    		qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
455 		    		NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
456 		}
457 	}
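	/*
	 * Re-arm the one-shot lifetime timer with the time the query has left
	 * and submit it together with the socket filter chosen above.
	 */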
458 	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
459 		EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
460 	kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);
461 
462 	TRACE_OUT(process_socket_event);
463 }
464 
465 /*
466  * This routine is called when a timer event has been signaled in the kqueue. It
467  * just closes the socket and destroys the query_state.
468  */
469 static void
470 process_timer_event(struct kevent *event_data, struct runtime_env *env,
471 	struct configuration *config)
472 {
473 	struct query_state	*qstate;
474 
475 	TRACE_IN(process_timer_event);
476 	qstate = (struct query_state *)event_data->udata;
477 	destroy_query_state(qstate);
478 	close(event_data->ident);
479 	TRACE_OUT(process_timer_event);
480 }
481 
482 /*
483  * The processing loop is the basic processing routine that forms the body of
484  * each processing thread.
485  */
486 static void
487 processing_loop(cache the_cache, struct runtime_env *env,
488 	struct configuration *config)
489 {
490 	struct timespec timeout;
491 	const int eventlist_size = 1;
492 	struct kevent eventlist[eventlist_size];
493 	int nevents, i;
494 
495 	TRACE_MSG("=> processing_loop");
496 	memset(&timeout, 0, sizeof(struct timespec));
497 	memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);
498 
499 	for (;;) {
500 		nevents = kevent(env->queue, NULL, 0, eventlist,
501 	    		eventlist_size, NULL);
502 		/*
503 		 * we can only receive 1 event on success
504 		 */
505 		if (nevents == 1) {
506 			struct kevent *event_data;
507 			event_data = &eventlist[0];
508 
509 			if ((int)event_data->ident == env->sockfd) {
510 				for (i = 0; i < event_data->data; ++i)
511 				    accept_connection(event_data, env, config);
512 
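				/*
				 * The listen event was added with EV_ONESHOT,
				 * so re-register it now that the pending
				 * connections have been accepted.
				 */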
513 				EV_SET(eventlist, s_runtime_env->sockfd,
514 				    EVFILT_READ, EV_ADD | EV_ONESHOT,
515 				    0, 0, 0);
516 				memset(&timeout, 0,
517 				    sizeof(struct timespec));
518 				kevent(s_runtime_env->queue, eventlist,
519 				    1, NULL, 0, &timeout);
520 
521 			} else {
522 				switch (event_data->filter) {
523 				case EVFILT_READ:
524 				case EVFILT_WRITE:
525 					process_socket_event(event_data,
526 						env, config);
527 					break;
528 				case EVFILT_TIMER:
529 					process_timer_event(event_data,
530 						env, config);
531 					break;
532 				default:
533 					break;
534 				}
535 			}
536 		} else {
537 			/* this branch should currently never be reached */
538 		}
539 	}
540 
541 	TRACE_MSG("<= processing_loop");
542 }
543 
544 /*
545  * Wrapper around the processing loop function. It sets the thread signal mask
546  * to block SIGPIPE signals (which can occur if a client misbehaves).
547  */
548 static void *
549 processing_thread(void *data)
550 {
551 	struct processing_thread_args	*args;
552 	sigset_t new;
553 
554 	TRACE_MSG("=> processing_thread");
555 	args = (struct processing_thread_args *)data;
556 
557 	sigemptyset(&new);
558 	sigaddset(&new, SIGPIPE);
559 	if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
560 		LOG_ERR_1("processing thread",
561 			"thread can't block the SIGPIPE signal");
562 
563 	processing_loop(args->the_cache, args->the_runtime_env,
564 		args->the_configuration);
565 	free(args);
566 	TRACE_MSG("<= processing_thread");
567 
568 	return (NULL);
569 }
570 
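/*
 * Time source used by the cache: a monotonic clock truncated to whole
 * seconds, so entry lifetimes are not affected by wall-clock adjustments.
 */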
571 void
572 get_time_func(struct timeval *time)
573 {
574 	struct timespec res;
575 	memset(&res, 0, sizeof(struct timespec));
576 	clock_gettime(CLOCK_MONOTONIC, &res);
577 
578 	time->tv_sec = res.tv_sec;
579 	time->tv_usec = 0;
580 }
581 
582 /*
583  * The idea of _nss_cache_cycle_prevention_function is that nsdispatch
584  * will search for this symbol in the executable. The symbol is an
585  * attribute of the caching daemon itself. So, if it exists, nsdispatch
586  * won't try to connect to the caching daemon and will just ignore the
587  * 'cache' source in nsswitch.conf. This prevents cycles when the daemon
588  * performs lookups on its own behalf.
589  *
590  * (not actually a function; it used to be, but it doesn't make any
591  * difference, as long as it has external linkage)
592  */
593 void *_nss_cache_cycle_prevention_function;
594 
595 int
596 main(int argc, char *argv[])
597 {
598 	struct processing_thread_args *thread_args;
599 	pthread_t *threads;
600 
601 	struct pidfh *pidfile;
602 	pid_t pid;
603 
604 	char const *config_file;
605 	char const *error_str;
606 	int error_line;
607 	int i, res;
608 
609 	int trace_mode_enabled;
610 	int force_single_threaded;
611 	int do_not_daemonize;
612 	int clear_user_cache_entries, clear_all_cache_entries;
613 	char *user_config_entry_name, *global_config_entry_name;
614 	int show_statistics;
615 	int daemon_mode, interactive_mode;
616 
617 
618 	/* by default all debug messages are omitted */
619 	TRACE_OFF();
620 
621 	/* parsing command line arguments */
622 	trace_mode_enabled = 0;
623 	force_single_threaded = 0;
624 	do_not_daemonize = 0;
625 	clear_user_cache_entries = 0;
626 	clear_all_cache_entries = 0;
627 	show_statistics = 0;
628 	user_config_entry_name = NULL;
629 	global_config_entry_name = NULL;
630 	while ((res = getopt(argc, argv, "nstdi:I:")) != -1) {
631 		switch (res) {
632 		case 'n':
633 			do_not_daemonize = 1;
634 			break;
635 		case 's':
636 			force_single_threaded = 1;
637 			break;
638 		case 't':
639 			trace_mode_enabled = 1;
640 			break;
641 		case 'i':
642 			clear_user_cache_entries = 1;
643 			if (optarg != NULL)
644 				if (strcmp(optarg, "all") != 0)
645 					user_config_entry_name = strdup(optarg);
646 			break;
647 		case 'I':
648 			clear_all_cache_entries = 1;
649 			if (optarg != NULL)
650 				if (strcmp(optarg, "all") != 0)
651 					global_config_entry_name =
652 						strdup(optarg);
653 			break;
654 		case 'd':
655 			show_statistics = 1;
656 			break;
657 		case '?':
658 		default:
659 			usage();
660 			/* NOT REACHED */
661 		}
662 	}
663 
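	/*
	 * The -n, -s and -t options configure the daemon itself, while -i, -I
	 * and -d select the interactive (administrative) mode, which talks to
	 * an already running daemon.  The two groups can't be combined.
	 */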
664 	daemon_mode = do_not_daemonize | force_single_threaded |
665 		trace_mode_enabled;
666 	interactive_mode = clear_user_cache_entries | clear_all_cache_entries |
667 		show_statistics;
668 
669 	if ((daemon_mode != 0) && (interactive_mode != 0)) {
670 		LOG_ERR_1("main", "daemon-mode and interactive-mode arguments "
671 			"can't be used together");
672 		usage();
673 	}
674 
675 	if (interactive_mode != 0) {
676 		FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r");
677 		char pidbuf[256];
678 
679 		struct nscd_connection_params connection_params;
680 		nscd_connection connection;
681 
682 		int result;
683 
684 		if (pidfin == NULL)
685 			errx(EXIT_FAILURE, "There is no daemon running.");
686 
687 		memset(pidbuf, 0, sizeof(pidbuf));
688 		fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin);
689 
690 		if (ferror(pidfin) != 0)
691 			errx(EXIT_FAILURE, "Can't read from pidfile.");
692 		fclose(pidfin);
693 
694 		if (sscanf(pidbuf, "%d", &pid) != 1)
695 			errx(EXIT_FAILURE, "Invalid pidfile.");
696 		LOG_MSG_1("main", "daemon PID is %d", pid);
697 
698 
699 		memset(&connection_params, 0,
700 			sizeof(struct nscd_connection_params));
701 		connection_params.socket_path = DEFAULT_SOCKET_PATH;
702 		connection = open_nscd_connection__(&connection_params);
703 		if (connection == INVALID_NSCD_CONNECTION)
704 			errx(EXIT_FAILURE, "Can't connect to the daemon.");
705 
706 		if (clear_user_cache_entries != 0) {
707 			result = nscd_transform__(connection,
708 				user_config_entry_name, TT_USER);
709 			if (result != 0)
710 				LOG_MSG_1("main",
711 					"user cache transformation failed");
712 			else
713 				LOG_MSG_1("main",
714 					"user cache_transformation "
715 					"succeeded");
716 		}
717 
718 		if (clear_all_cache_entries != 0) {
719 			if (geteuid() != 0)
720 				errx(EXIT_FAILURE, "Only root can initiate "
721 					"global cache transformation.");
722 
723 			result = nscd_transform__(connection,
724 				global_config_entry_name, TT_ALL);
725 			if (result != 0)
726 				LOG_MSG_1("main",
727 					"global cache transformation "
728 					"failed");
729 			else
730 				LOG_MSG_1("main",
731 					"global cache transformation "
732 					"succeeded");
733 		}
734 
735 		close_nscd_connection__(connection);
736 
737 		free(user_config_entry_name);
738 		free(global_config_entry_name);
739 		return (EXIT_SUCCESS);
740 	}
741 
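	/*
	 * Daemon start-up: acquire the pidfile, block SIGPIPE, optionally
	 * daemonize, then register the lookup agents and initialize the
	 * configuration, the cache and the runtime environment.
	 */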
742 	pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid);
743 	if (pidfile == NULL) {
744 		if (errno == EEXIST)
745 			errx(EXIT_FAILURE, "Daemon already running, pid: %d.",
746 				pid);
747 		warn("Cannot open or create pidfile");
748 	}
749 
750 	if (trace_mode_enabled == 1)
751 		TRACE_ON();
752 
753 	/* blocking the main thread from receiving SIGPIPE signal */
754 	sigblock(sigmask(SIGPIPE));
755 
756 	/* daemonization */
757 	if (do_not_daemonize == 0) {
758 		res = daemon(0, trace_mode_enabled == 0 ? 0 : 1);
759 		if (res != 0) {
760 			LOG_ERR_1("main", "can't daemonize myself: %s",
761 		    		strerror(errno));
762 			pidfile_remove(pidfile);
763 			goto fin;
764 		} else
765 			LOG_MSG_1("main", "successfully daemonized");
766 	}
767 
768 	pidfile_write(pidfile);
769 
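	/*
	 * Register the nsswitch lookup agents.  Each database is served by a
	 * plain agent and a multipart ("mp") counterpart, which handles the
	 * multipart queries mentioned in init_cache_() above.
	 */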
770 	s_agent_table = init_agent_table();
771 	register_agent(s_agent_table, init_passwd_agent());
772 	register_agent(s_agent_table, init_passwd_mp_agent());
773 	register_agent(s_agent_table, init_group_agent());
774 	register_agent(s_agent_table, init_group_mp_agent());
775 	register_agent(s_agent_table, init_services_agent());
776 	register_agent(s_agent_table, init_services_mp_agent());
777 	LOG_MSG_1("main", "request agents registered successfully");
778 
779 	/*
780 	 * The hosts agent can't work properly until we have access to the
781 	 * appropriate dtab structures, which are used in nsdispatch
782 	 * calls
783 	 *
784 	 register_agent(s_agent_table, init_hosts_agent());
785 	*/
786 
787 	/* configuration initialization */
788 	s_configuration = init_configuration();
789 	fill_configuration_defaults(s_configuration);
790 
791 	error_str = NULL;
792 	error_line = 0;
793 	config_file = CONFIG_PATH;
794 
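	/*
	 * Try the system-wide configuration file first; if it can't be opened
	 * (a failure with no parse error reported), fall back to a local
	 * nscd.conf in the current directory.
	 */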
795 	res = parse_config_file(s_configuration, config_file, &error_str,
796 		&error_line);
797 	if ((res != 0) && (error_str == NULL)) {
798 		config_file = DEFAULT_CONFIG_PATH;
799 		res = parse_config_file(s_configuration, config_file,
800 			&error_str, &error_line);
801 	}
802 
803 	if (res != 0) {
804 		if (error_str != NULL) {
805 			LOG_ERR_1("main", "error in configuration file(%s, %d): %s\n",
806 				config_file, error_line, error_str);
807 		} else {
808 			LOG_ERR_1("main", "no configuration file found "
809 				"- was looking for %s and %s",
810 				CONFIG_PATH, DEFAULT_CONFIG_PATH);
811 		}
812 		destroy_configuration(s_configuration);
813 		return (-1);
814 	}
815 
816 	if (force_single_threaded == 1)
817 		s_configuration->threads_num = 1;
818 
819 	/* cache initialization */
820 	s_cache = init_cache_(s_configuration);
821 	if (s_cache == NULL) {
822 		LOG_ERR_1("main", "can't initialize the cache");
823 		destroy_configuration(s_configuration);
824 		return (-1);
825 	}
826 
827 	/* runtime environment initialization */
828 	s_runtime_env = init_runtime_env(s_configuration);
829 	if (s_runtime_env == NULL) {
830 		LOG_ERR_1("main", "can't initialize the runtime environment");
831 		destroy_configuration(s_configuration);
832 		destroy_cache_(s_cache);
833 		return (-1);
834 	}
835 
836 	if (s_configuration->threads_num > 1) {
837 		threads = calloc(s_configuration->threads_num,
838 			sizeof(*threads));
839 		for (i = 0; i < s_configuration->threads_num; ++i) {
840 			thread_args = malloc(
841 				sizeof(*thread_args));
842 			thread_args->the_cache = s_cache;
843 			thread_args->the_runtime_env = s_runtime_env;
844 			thread_args->the_configuration = s_configuration;
845 
846 			pthread_create(&threads[i], NULL, processing_thread,
847 				thread_args);
848 			LOG_MSG_1("main", "thread #%d was successfully created",
849 				i);
850 
851 			thread_args = NULL;
852 		}
853 
854 		for (i = 0; i < s_configuration->threads_num; ++i)
855 			pthread_join(threads[i], NULL);
856 	} else {
857 		LOG_MSG_1("main", "working in single-threaded mode");
858 		processing_loop(s_cache, s_runtime_env, s_configuration);
859 	}
860 
861 fin:
862 	/* runtime environment destruction */
863 	destroy_runtime_env(s_runtime_env);
864 
865 	/* cache destruction */
866 	destroy_cache_(s_cache);
867 
868 	/* configuration destruction */
869 	destroy_configuration(s_configuration);
870 
871 	/* agents table destruction */
872 	destroy_agent_table(s_agent_table);
873 
874 	pidfile_remove(pidfile);
875 	return (EXIT_SUCCESS);
876 }
877