xref: /freebsd/usr.sbin/nscd/nscd.c (revision d0b2dbfa0ecf2bbc9709efc5e20baf8e4b44bbbf)
1 /*-
2  * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include <sys/cdefs.h>
29 #include <sys/param.h>
30 #include <sys/event.h>
31 #include <sys/socket.h>
32 #include <sys/stat.h>
33 #include <sys/time.h>
34 #include <sys/un.h>
35 
36 #include <assert.h>
37 #include <err.h>
38 #include <errno.h>
39 #include <fcntl.h>
40 #include <libutil.h>
41 #include <pthread.h>
42 #include <signal.h>
43 #include <stdio.h>
44 #include <stdlib.h>
45 #include <string.h>
46 #include <unistd.h>
47 
48 #include "agents/passwd.h"
49 #include "agents/group.h"
50 #include "agents/services.h"
51 #include "cachelib.h"
52 #include "config.h"
53 #include "debug.h"
54 #include "log.h"
55 #include "nscdcli.h"
56 #include "parser.h"
57 #include "query.h"
58 #include "singletons.h"
59 
60 #ifndef CONFIG_PATH
61 #define CONFIG_PATH "/etc/nscd.conf"
62 #endif
63 #define DEFAULT_CONFIG_PATH	"nscd.conf"
64 
65 #define MAX_SOCKET_IO_SIZE	4096
66 
67 struct processing_thread_args {
68 	cache	the_cache;
69 	struct configuration	*the_configuration;
70 	struct runtime_env		*the_runtime_env;
71 };
72 
73 static void accept_connection(struct kevent *, struct runtime_env *,
74 	struct configuration *);
75 static void destroy_cache_(cache);
76 static void destroy_runtime_env(struct runtime_env *);
77 static cache init_cache_(struct configuration *);
78 static struct runtime_env *init_runtime_env(struct configuration *);
79 static void processing_loop(cache, struct runtime_env *,
80 	struct configuration *);
81 static void process_socket_event(struct kevent *, struct runtime_env *,
82 	struct configuration *);
83 static void process_timer_event(struct kevent *, struct runtime_env *,
84 	struct configuration *);
85 static void *processing_thread(void *);
86 static void usage(void) __dead2;
87 
88 void get_time_func(struct timeval *);
89 
90 static void
91 usage(void)
92 {
93 	fprintf(stderr,
94 	    "usage: nscd [-dnst] [-i cachename] [-I cachename]\n");
95 	exit(1);
96 }
97 
98 static cache
99 init_cache_(struct configuration *config)
100 {
101 	struct cache_params params;
102 	cache retval;
103 
104 	struct configuration_entry *config_entry;
105 	size_t	size, i;
106 
107 	TRACE_IN(init_cache_);
108 
109 	memset(&params, 0, sizeof(struct cache_params));
110 	params.get_time_func = get_time_func;
111 	retval = init_cache(&params);
112 
113 	size = configuration_get_entries_size(config);
114 	for (i = 0; i < size; ++i) {
115 		config_entry = configuration_get_entry(config, i);
116 		/*
117 		 * Register common entries now; multipart entries will be
118 		 * registered automatically during queries.
119 		 */
120 		register_cache_entry(retval, (struct cache_entry_params *)
121 			&config_entry->positive_cache_params);
122 		config_entry->positive_cache_entry = find_cache_entry(retval,
123 			config_entry->positive_cache_params.cep.entry_name);
124 		assert(config_entry->positive_cache_entry !=
125 			INVALID_CACHE_ENTRY);
126 
127 		register_cache_entry(retval, (struct cache_entry_params *)
128 			&config_entry->negative_cache_params);
129 		config_entry->negative_cache_entry = find_cache_entry(retval,
130 			config_entry->negative_cache_params.cep.entry_name);
131 		assert(config_entry->negative_cache_entry !=
132 			INVALID_CACHE_ENTRY);
133 	}
134 
135 	LOG_MSG_2("cache", "cache was successfully initialized");
136 	TRACE_OUT(init_cache_);
137 	return (retval);
138 }
139 
140 static void
141 destroy_cache_(cache the_cache)
142 {
143 	TRACE_IN(destroy_cache_);
144 	destroy_cache(the_cache);
145 	TRACE_OUT(destroy_cache_);
146 }
147 
148 /*
149  * The socket and the kqueue are prepared here. We have one global queue for
150  * both socket and timer events.
151  */
152 static struct runtime_env *
153 init_runtime_env(struct configuration *config)
154 {
155 	int serv_addr_len;
156 	struct sockaddr_un serv_addr;
157 
158 	struct kevent eventlist;
159 	struct timespec timeout;
160 
161 	struct runtime_env *retval;
162 
163 	TRACE_IN(init_runtime_env);
164 	retval = calloc(1, sizeof(*retval));
165 	assert(retval != NULL);
166 
167 	retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0);
168 
169 	if (config->force_unlink == 1)
170 		unlink(config->socket_path);
171 
172 	memset(&serv_addr, 0, sizeof(struct sockaddr_un));
173 	serv_addr.sun_family = PF_LOCAL;
174 	strlcpy(serv_addr.sun_path, config->socket_path,
175 		sizeof(serv_addr.sun_path));
176 	serv_addr_len = sizeof(serv_addr.sun_family) +
177 		strlen(serv_addr.sun_path) + 1;
178 
179 	if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
180 		serv_addr_len) == -1) {
181 		close(retval->sockfd);
182 		free(retval);
183 
184 		LOG_ERR_2("runtime environment", "can't bind socket to path: "
185 			"%s", config->socket_path);
186 		TRACE_OUT(init_runtime_env);
187 		return (NULL);
188 	}
189 	LOG_MSG_2("runtime environment", "using socket %s",
190 		config->socket_path);
191 
192 	/*
193 	 * Here we mark the socket as non-blocking and set its backlog to the
194 	 * maximum value.
195 	 */
196 	chmod(config->socket_path, config->socket_mode);
197 	listen(retval->sockfd, -1);
198 	fcntl(retval->sockfd, F_SETFL, O_NONBLOCK);
199 
200 	retval->queue = kqueue();
201 	assert(retval->queue != -1);
202 
203 	EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
204 		0, 0, 0);
205 	memset(&timeout, 0, sizeof(struct timespec));
206 	kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);
207 
208 	LOG_MSG_2("runtime environment", "successfully initialized");
209 	TRACE_OUT(init_runtime_env);
210 	return (retval);
211 }
212 
213 static void
214 destroy_runtime_env(struct runtime_env *env)
215 {
216 	TRACE_IN(destroy_runtime_env);
217 	close(env->queue);
218 	close(env->sockfd);
219 	free(env);
220 	TRACE_OUT(destroy_runtime_env);
221 }
222 
223 static void
224 accept_connection(struct kevent *event_data, struct runtime_env *env,
225 	struct configuration *config)
226 {
227 	struct kevent	eventlist[2];
228 	struct timespec	timeout;
229 	struct query_state	*qstate;
230 
231 	int	fd;
232 	int	res;
233 
234 	uid_t	euid;
235 	gid_t	egid;
236 
237 	TRACE_IN(accept_connection);
238 	fd = accept(event_data->ident, NULL, NULL);
239 	if (fd == -1) {
240 		LOG_ERR_2("accept_connection", "error %d during accept()",
241 		    errno);
242 		TRACE_OUT(accept_connection);
243 		return;
244 	}
245 
246 	if (getpeereid(fd, &euid, &egid) != 0) {
247 		LOG_ERR_2("accept_connection", "error %d during getpeereid()",
248 			errno);
249 		TRACE_OUT(accept_connection);
250 		return;
251 	}
252 
253 	qstate = init_query_state(fd, sizeof(int), euid, egid);
254 	if (qstate == NULL) {
255 		LOG_ERR_2("accept_connection", "can't init query_state");
256 		TRACE_OUT(accept_connection);
257 		return;
258 	}
259 
260 	memset(&timeout, 0, sizeof(struct timespec));
261 	EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
262 		0, qstate->timeout.tv_sec * 1000, qstate);
263 	EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
264 		NOTE_LOWAT, qstate->kevent_watermark, qstate);
265 	res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
266 	if (res < 0)
267 		LOG_ERR_2("accept_connection", "kevent error");
268 
269 	TRACE_OUT(accept_connection);
270 }
271 
272 static void
273 process_socket_event(struct kevent *event_data, struct runtime_env *env,
274 	struct configuration *config)
275 {
276 	struct kevent	eventlist[2];
277 	struct timeval	query_timeout;
278 	struct timespec	kevent_timeout;
279 	int	nevents;
280 	int	eof_res, res;
281 	ssize_t	io_res;
282 	struct query_state *qstate;
283 
284 	TRACE_IN(process_socket_event);
285 	eof_res = event_data->flags & EV_EOF ? 1 : 0;
286 	res = 0;
287 
288 	memset(&kevent_timeout, 0, sizeof(struct timespec));
289 	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
290 		0, 0, NULL);
291 	nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
292 	if (nevents == -1) {
293 		if (errno == ENOENT) {
294 			/* the timer is already handling this event */
295 			TRACE_OUT(process_socket_event);
296 			return;
297 		} else {
298 			/* some other error happened */
299 			LOG_ERR_2("process_socket_event", "kevent error, errno"
300 				" is %d", errno);
301 			TRACE_OUT(process_socket_event);
302 			return;
303 		}
304 	}
305 	qstate = (struct query_state *)event_data->udata;
306 
307 	/*
308 	 * If the buffer that is to be sent/received is too large, we
309 	 * transfer it implicitly, using the query_io_buffer_read and
310 	 * query_io_buffer_write functions of the query_state. These functions
311 	 * use a temporary buffer, which is later sent/received in parts.
312 	 * The code below implements buffer splitting/merging for send/receive
313 	 * operations. It also performs the actual socket I/O.
314 	 */
315 	if (((qstate->use_alternate_io == 0) &&
316 		(qstate->kevent_watermark <= (size_t)event_data->data)) ||
317 		((qstate->use_alternate_io != 0) &&
318 		(qstate->io_buffer_watermark <= (size_t)event_data->data))) {
319 		if (qstate->use_alternate_io != 0) {
320 			switch (qstate->io_buffer_filter) {
321 			case EVFILT_READ:
322 				io_res = query_socket_read(qstate,
323 					qstate->io_buffer_p,
324 					qstate->io_buffer_watermark);
325 				if (io_res < 0) {
326 					qstate->use_alternate_io = 0;
327 					qstate->process_func = NULL;
328 				} else {
329 					qstate->io_buffer_p += io_res;
330 					if (qstate->io_buffer_p ==
331 					    	qstate->io_buffer +
332 						qstate->io_buffer_size) {
333 						qstate->io_buffer_p =
334 						    qstate->io_buffer;
335 						qstate->use_alternate_io = 0;
336 					}
337 				}
338 			break;
339 			default:
340 			break;
341 			}
342 		}
343 
344 		if (qstate->use_alternate_io == 0) {
345 			do {
346 				res = qstate->process_func(qstate);
347 			} while ((qstate->kevent_watermark == 0) &&
348 					(qstate->process_func != NULL) &&
349 					(res == 0));
350 
351 			if (res != 0)
352 				qstate->process_func = NULL;
353 		}
354 
355 		if ((qstate->use_alternate_io != 0) &&
356 			(qstate->io_buffer_filter == EVFILT_WRITE)) {
357 			io_res = query_socket_write(qstate, qstate->io_buffer_p,
358 				qstate->io_buffer_watermark);
359 			if (io_res < 0) {
360 				qstate->use_alternate_io = 0;
361 				qstate->process_func = NULL;
362 			} else
363 				qstate->io_buffer_p += io_res;
364 		}
365 	} else {
366 		/* assuming that socket was closed */
367 		qstate->process_func = NULL;
368 		qstate->use_alternate_io = 0;
369 	}
370 
371 	if (((qstate->process_func == NULL) &&
372 	    	(qstate->use_alternate_io == 0)) ||
373 		(eof_res != 0) || (res != 0)) {
374 		destroy_query_state(qstate);
375 		close(event_data->ident);
376 		TRACE_OUT(process_socket_event);
377 		return;
378 	}
379 
380 	/* updating the query_state lifetime variable */
381 	get_time_func(&query_timeout);
382 	query_timeout.tv_usec = 0;
383 	query_timeout.tv_sec -= qstate->creation_time.tv_sec;
384 	if (query_timeout.tv_sec > qstate->timeout.tv_sec)
385 		query_timeout.tv_sec = 0;
386 	else
387 		query_timeout.tv_sec = qstate->timeout.tv_sec -
388 			query_timeout.tv_sec;
389 
390 	if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
391 		qstate->io_buffer + qstate->io_buffer_size))
392 		qstate->use_alternate_io = 0;
393 
394 	if (qstate->use_alternate_io == 0) {
395 		/*
396 		 * If we must send/receive a large block of data, we
397 		 * prepare the query_state's io_XXX fields. We also
398 		 * substitute its write_func and read_func with
399 		 * query_io_buffer_write and query_io_buffer_read,
400 		 * which allow us to implicitly send/receive this large
401 		 * buffer later (in the subsequent calls to
402 		 * process_socket_event).
403 		 */
404 		if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
405 #if 0
406 			/*
407 			 * XXX: Uncommenting this code makes nscd(8) fail for
408 			 *      entries larger than a few kB, causing a few seconds'
409 			 *      worth of delay for each call to retrieve them.
410 			 */
411 			if (qstate->io_buffer != NULL)
412 				free(qstate->io_buffer);
413 
414 			qstate->io_buffer = calloc(1,
415 				qstate->kevent_watermark);
416 			assert(qstate->io_buffer != NULL);
417 
418 			qstate->io_buffer_p = qstate->io_buffer;
419 			qstate->io_buffer_size = qstate->kevent_watermark;
420 			qstate->io_buffer_filter = qstate->kevent_filter;
421 
422 			qstate->write_func = query_io_buffer_write;
423 			qstate->read_func = query_io_buffer_read;
424 
425 			if (qstate->kevent_filter == EVFILT_READ)
426 				qstate->use_alternate_io = 1;
427 #endif
428 
429 			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
430 			EV_SET(&eventlist[1], event_data->ident,
431 				qstate->kevent_filter, EV_ADD | EV_ONESHOT,
432 				NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
433 		} else {
434 			EV_SET(&eventlist[1], event_data->ident,
435 		    		qstate->kevent_filter, EV_ADD | EV_ONESHOT,
436 		    		NOTE_LOWAT, qstate->kevent_watermark, qstate);
437 		}
438 	} else {
439 		if (qstate->io_buffer + qstate->io_buffer_size -
440 		    	qstate->io_buffer_p <
441 			MAX_SOCKET_IO_SIZE) {
442 			qstate->io_buffer_watermark = qstate->io_buffer +
443 				qstate->io_buffer_size - qstate->io_buffer_p;
444 			EV_SET(&eventlist[1], event_data->ident,
445 			    	qstate->io_buffer_filter,
446 				EV_ADD | EV_ONESHOT, NOTE_LOWAT,
447 				qstate->io_buffer_watermark,
448 				qstate);
449 		} else {
450 			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
451 			EV_SET(&eventlist[1], event_data->ident,
452 		    		qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
453 		    		NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
454 		}
455 	}
456 	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
457 		EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
458 	kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);
459 
460 	TRACE_OUT(process_socket_event);
461 }
462 
463 /*
464  * This routine is called if a timer event has been signaled in the kqueue.
465  * It just closes the socket and destroys the query_state.
466  */
467 static void
468 process_timer_event(struct kevent *event_data, struct runtime_env *env,
469 	struct configuration *config)
470 {
471 	struct query_state	*qstate;
472 
473 	TRACE_IN(process_timer_event);
474 	qstate = (struct query_state *)event_data->udata;
475 	destroy_query_state(qstate);
476 	close(event_data->ident);
477 	TRACE_OUT(process_timer_event);
478 }
479 
480 /*
481  * The processing loop is the basic processing routine that forms the body of
482  * each processing thread.
483  */
484 static void
485 processing_loop(cache the_cache, struct runtime_env *env,
486 	struct configuration *config)
487 {
488 	struct timespec timeout;
489 	const int eventlist_size = 1;
490 	struct kevent eventlist[eventlist_size];
491 	int nevents, i;
492 
493 	TRACE_MSG("=> processing_loop");
494 	memset(&timeout, 0, sizeof(struct timespec));
495 	memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);
496 
497 	for (;;) {
498 		nevents = kevent(env->queue, NULL, 0, eventlist,
499 	    		eventlist_size, NULL);
500 		/*
501 		 * we can only receive 1 event on success
502 		 */
503 		if (nevents == 1) {
504 			struct kevent *event_data;
505 			event_data = &eventlist[0];
506 
507 			if ((int)event_data->ident == env->sockfd) {
508 				for (i = 0; i < event_data->data; ++i)
509 				    accept_connection(event_data, env, config);
510 
511 				EV_SET(eventlist, s_runtime_env->sockfd,
512 				    EVFILT_READ, EV_ADD | EV_ONESHOT,
513 				    0, 0, 0);
514 				memset(&timeout, 0,
515 				    sizeof(struct timespec));
516 				kevent(s_runtime_env->queue, eventlist,
517 				    1, NULL, 0, &timeout);
518 
519 			} else {
520 				switch (event_data->filter) {
521 				case EVFILT_READ:
522 				case EVFILT_WRITE:
523 					process_socket_event(event_data,
524 						env, config);
525 					break;
526 				case EVFILT_TIMER:
527 					process_timer_event(event_data,
528 						env, config);
529 					break;
530 				default:
531 					break;
532 				}
533 			}
534 		} else {
535 			/* this branch should not currently be executed */
536 		}
537 	}
538 
539 	TRACE_MSG("<= processing_loop");
540 }
541 
542 /*
543  * Wrapper around the processing loop function. It sets the thread signal mask
544  * to avoid SIGPIPE signals (which can occur if the client misbehaves).
545  */
546 static void *
547 processing_thread(void *data)
548 {
549 	struct processing_thread_args	*args;
550 	sigset_t new;
551 
552 	TRACE_MSG("=> processing_thread");
553 	args = (struct processing_thread_args *)data;
554 
555 	sigemptyset(&new);
556 	sigaddset(&new, SIGPIPE);
557 	if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
558 		LOG_ERR_1("processing thread",
559 			"thread can't block the SIGPIPE signal");
560 
561 	processing_loop(args->the_cache, args->the_runtime_env,
562 		args->the_configuration);
563 	free(args);
564 	TRACE_MSG("<= processing_thread");
565 
566 	return (NULL);
567 }
568 
569 void
570 get_time_func(struct timeval *time)
571 {
572 	struct timespec res;
573 	memset(&res, 0, sizeof(struct timespec));
574 	clock_gettime(CLOCK_MONOTONIC, &res);
575 
576 	time->tv_sec = res.tv_sec;
577 	time->tv_usec = 0;
578 }
579 
580 /*
581  * The idea of _nss_cache_cycle_prevention_function is that nsdispatch
582  * will search for this symbol in the executable. The symbol marks the
583  * process as the caching daemon itself. So, if it exists, nsdispatch won't
584  * try to connect to the caching daemon and will just ignore the 'cache'
585  * source in nsswitch.conf. This helps to avoid cycles when the daemon
586  * performs its own nsswitch lookups.
587  *
588  * (not actually a function; it used to be, but it doesn't make any
589  * difference, as long as it has external linkage)
590  */
591 void *_nss_cache_cycle_prevention_function;
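
/*
 * Illustrative sketch (not part of nscd and not compiled): a consumer such
 * as nsdispatch could detect the symbol above with a run-time lookup along
 * the lines below. The dlsym(RTLD_DEFAULT, ...) lookup and the helper name
 * are assumptions for illustration only; the actual check in libc may
 * differ.
 */
#if 0
#include <dlfcn.h>

static int
running_inside_caching_daemon(void)
{
	/* The symbol resolves only when the current process is nscd itself. */
	return (dlsym(RTLD_DEFAULT,
	    "_nss_cache_cycle_prevention_function") != NULL);
}
#endif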
592 
593 int
594 main(int argc, char *argv[])
595 {
596 	struct processing_thread_args *thread_args;
597 	pthread_t *threads;
598 
599 	struct pidfh *pidfile;
600 	pid_t pid;
601 
602 	char const *config_file;
603 	char const *error_str;
604 	int error_line;
605 	int i, res;
606 
607 	int trace_mode_enabled;
608 	int force_single_threaded;
609 	int do_not_daemonize;
610 	int clear_user_cache_entries, clear_all_cache_entries;
611 	char *user_config_entry_name, *global_config_entry_name;
612 	int show_statistics;
613 	int daemon_mode, interactive_mode;
614 
615 
616 	/* by default all debug messages are omitted */
617 	TRACE_OFF();
618 
619 	/* parsing command line arguments */
620 	trace_mode_enabled = 0;
621 	force_single_threaded = 0;
622 	do_not_daemonize = 0;
623 	clear_user_cache_entries = 0;
624 	clear_all_cache_entries = 0;
625 	show_statistics = 0;
626 	user_config_entry_name = NULL;
627 	global_config_entry_name = NULL;
628 	while ((res = getopt(argc, argv, "nstdi:I:")) != -1) {
629 		switch (res) {
630 		case 'n':
631 			do_not_daemonize = 1;
632 			break;
633 		case 's':
634 			force_single_threaded = 1;
635 			break;
636 		case 't':
637 			trace_mode_enabled = 1;
638 			break;
639 		case 'i':
640 			clear_user_cache_entries = 1;
641 			if (optarg != NULL)
642 				if (strcmp(optarg, "all") != 0)
643 					user_config_entry_name = strdup(optarg);
644 			break;
645 		case 'I':
646 			clear_all_cache_entries = 1;
647 			if (optarg != NULL)
648 				if (strcmp(optarg, "all") != 0)
649 					global_config_entry_name =
650 						strdup(optarg);
651 			break;
652 		case 'd':
653 			show_statistics = 1;
654 			break;
655 		case '?':
656 		default:
657 			usage();
658 			/* NOT REACHED */
659 		}
660 	}
661 
662 	daemon_mode = do_not_daemonize | force_single_threaded |
663 		trace_mode_enabled;
664 	interactive_mode = clear_user_cache_entries | clear_all_cache_entries |
665 		show_statistics;
666 
667 	if ((daemon_mode != 0) && (interactive_mode != 0)) {
668 		LOG_ERR_1("main", "daemon mode and interactive mode arguments "
669 			"can't be used together");
670 		usage();
671 	}
672 
673 	if (interactive_mode != 0) {
674 		FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r");
675 		char pidbuf[256];
676 
677 		struct nscd_connection_params connection_params;
678 		nscd_connection connection;
679 
680 		int result;
681 
682 		if (pidfin == NULL)
683 			errx(EXIT_FAILURE, "There is no daemon running.");
684 
685 		memset(pidbuf, 0, sizeof(pidbuf));
686 		fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin);
687 		/* check for read errors before the stream is closed */
688 		if (ferror(pidfin) != 0)
689 			errx(EXIT_FAILURE, "Can't read from pidfile.");
690 		fclose(pidfin);
691 
692 		if (sscanf(pidbuf, "%d", &pid) != 1)
693 			errx(EXIT_FAILURE, "Invalid pidfile.");
694 		LOG_MSG_1("main", "daemon PID is %d", pid);
695 
696 
697 		memset(&connection_params, 0,
698 			sizeof(struct nscd_connection_params));
699 		connection_params.socket_path = DEFAULT_SOCKET_PATH;
700 		connection = open_nscd_connection__(&connection_params);
701 		if (connection == INVALID_NSCD_CONNECTION)
702 			errx(EXIT_FAILURE, "Can't connect to the daemon.");
703 
704 		if (clear_user_cache_entries != 0) {
705 			result = nscd_transform__(connection,
706 				user_config_entry_name, TT_USER);
707 			if (result != 0)
708 				LOG_MSG_1("main",
709 					"user cache transformation failed");
710 			else
711 				LOG_MSG_1("main",
712 					"user cache transformation "
713 					"succeeded");
714 		}
715 
716 		if (clear_all_cache_entries != 0) {
717 			if (geteuid() != 0)
718 				errx(EXIT_FAILURE, "Only root can initiate "
719 					"global cache transformation.");
720 
721 			result = nscd_transform__(connection,
722 				global_config_entry_name, TT_ALL);
723 			if (result != 0)
724 				LOG_MSG_1("main",
725 					"global cache transformation "
726 					"failed");
727 			else
728 				LOG_MSG_1("main",
729 					"global cache transformation "
730 					"succeeded");
731 		}
732 
733 		close_nscd_connection__(connection);
734 
735 		free(user_config_entry_name);
736 		free(global_config_entry_name);
737 		return (EXIT_SUCCESS);
738 	}
739 
740 	pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid);
741 	if (pidfile == NULL) {
742 		if (errno == EEXIST)
743 			errx(EXIT_FAILURE, "Daemon already running, pid: %d.",
744 				pid);
745 		warn("Cannot open or create pidfile");
746 	}
747 
748 	if (trace_mode_enabled == 1)
749 		TRACE_ON();
750 
751 	/* blocking the main thread from receiving SIGPIPE signal */
752 	sigblock(sigmask(SIGPIPE));
753 
754 	/* daemonization */
755 	if (do_not_daemonize == 0) {
756 		res = daemon(0, trace_mode_enabled == 0 ? 0 : 1);
757 		if (res != 0) {
758 			LOG_ERR_1("main", "can't daemonize myself: %s",
759 		    		strerror(errno));
760 			pidfile_remove(pidfile);
761 			goto fin;
762 		} else
763 			LOG_MSG_1("main", "successfully daemonized");
764 	}
765 
766 	pidfile_write(pidfile);
767 
768 	s_agent_table = init_agent_table();
769 	register_agent(s_agent_table, init_passwd_agent());
770 	register_agent(s_agent_table, init_passwd_mp_agent());
771 	register_agent(s_agent_table, init_group_agent());
772 	register_agent(s_agent_table, init_group_mp_agent());
773 	register_agent(s_agent_table, init_services_agent());
774 	register_agent(s_agent_table, init_services_mp_agent());
775 	LOG_MSG_1("main", "request agents registered successfully");
776 
777 	/*
778 	 * The hosts agent can't work properly until we have access to the
779 	 * appropriate dtab structures, which are used in nsdispatch
780 	 * calls
781 	 *
782 	 register_agent(s_agent_table, init_hosts_agent());
783 	*/
784 
785 	/* configuration initialization */
786 	s_configuration = init_configuration();
787 	fill_configuration_defaults(s_configuration);
788 
789 	error_str = NULL;
790 	error_line = 0;
791 	config_file = CONFIG_PATH;
792 
793 	res = parse_config_file(s_configuration, config_file, &error_str,
794 		&error_line);
795 	if ((res != 0) && (error_str == NULL)) {
796 		config_file = DEFAULT_CONFIG_PATH;
797 		res = parse_config_file(s_configuration, config_file,
798 			&error_str, &error_line);
799 	}
800 
801 	if (res != 0) {
802 		if (error_str != NULL) {
803 			LOG_ERR_1("main", "error in configuration file (%s, %d): "
804 				"%s\n", config_file, error_line, error_str);
805 		} else {
806 			LOG_ERR_1("main", "no configuration file found "
807 				"- was looking for %s and %s",
808 				CONFIG_PATH, DEFAULT_CONFIG_PATH);
809 		}
810 		destroy_configuration(s_configuration);
811 		return (-1);
812 	}
813 
814 	if (force_single_threaded == 1)
815 		s_configuration->threads_num = 1;
816 
817 	/* cache initialization */
818 	s_cache = init_cache_(s_configuration);
819 	if (s_cache == NULL) {
820 		LOG_ERR_1("main", "can't initialize the cache");
821 		destroy_configuration(s_configuration);
822 		return (-1);
823 	}
824 
825 	/* runtime environment initialization */
826 	s_runtime_env = init_runtime_env(s_configuration);
827 	if (s_runtime_env == NULL) {
828 		LOG_ERR_1("main", "can't initialize the runtime environment");
829 		destroy_configuration(s_configuration);
830 		destroy_cache_(s_cache);
831 		return (-1);
832 	}
833 
834 	if (s_configuration->threads_num > 1) {
835 		threads = calloc(s_configuration->threads_num,
836 			sizeof(*threads));
837 		for (i = 0; i < s_configuration->threads_num; ++i) {
838 			thread_args = malloc(
839 				sizeof(*thread_args));
840 			thread_args->the_cache = s_cache;
841 			thread_args->the_runtime_env = s_runtime_env;
842 			thread_args->the_configuration = s_configuration;
843 
844 			pthread_create(&threads[i], NULL, processing_thread,
845 				thread_args);
846 			LOG_MSG_1("main", "thread #%d was successfully created",
847 				i);
848 
849 			thread_args = NULL;
850 		}
851 
852 		for (i = 0; i < s_configuration->threads_num; ++i)
853 			pthread_join(threads[i], NULL);
854 	} else {
855 		LOG_MSG_1("main", "working in single-threaded mode");
856 		processing_loop(s_cache, s_runtime_env, s_configuration);
857 	}
858 
859 fin:
860 	/* runtime environment destruction */
861 	destroy_runtime_env(s_runtime_env);
862 
863 	/* cache destruction */
864 	destroy_cache_(s_cache);
865 
866 	/* configuration destruction */
867 	destroy_configuration(s_configuration);
868 
869 	/* agents table destruction */
870 	destroy_agent_table(s_agent_table);
871 
872 	pidfile_remove(pidfile);
873 	return (EXIT_SUCCESS);
874 }
875