xref: /freebsd/usr.sbin/nscd/query.c (revision 744bfb213144c63cbaf38d91a1c4f7aebb9b9fbc)
1 /*-
2  * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/types.h>
32 #include <sys/event.h>
33 #include <sys/socket.h>
34 #include <sys/time.h>
35 
36 #include <assert.h>
37 #include <errno.h>
38 #include <nsswitch.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <string.h>
42 #include <unistd.h>
43 
44 #include "config.h"
45 #include "debug.h"
46 #include "query.h"
47 #include "log.h"
48 #include "mp_ws_query.h"
49 #include "mp_rs_query.h"
50 #include "singletons.h"
51 
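/*
 * One-byte placeholder stored in the negative cache to mark a failed lookup;
 * it is written both for client negative-write requests (data_size == 0) and
 * when an actual lookup performed by nscd itself returns "not found".
 */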
52 static const char negative_data[1] = { 0 };
53 
54 extern	void get_time_func(struct timeval *);
55 
56 static 	void clear_config_entry(struct configuration_entry *);
57 static 	void clear_config_entry_part(struct configuration_entry *,
58 	const char *, size_t);
59 
60 static	int on_query_startup(struct query_state *);
61 static	void on_query_destroy(struct query_state *);
62 
63 static	int on_read_request_read1(struct query_state *);
64 static	int on_read_request_read2(struct query_state *);
65 static	int on_read_request_process(struct query_state *);
66 static	int on_read_response_write1(struct query_state *);
67 static	int on_read_response_write2(struct query_state *);
68 
69 static	int on_rw_mapper(struct query_state *);
70 
71 static	int on_transform_request_read1(struct query_state *);
72 static	int on_transform_request_read2(struct query_state *);
73 static	int on_transform_request_process(struct query_state *);
74 static	int on_transform_response_write1(struct query_state *);
75 
76 static	int on_write_request_read1(struct query_state *);
77 static	int on_write_request_read2(struct query_state *);
78 static	int on_negative_write_request_process(struct query_state *);
79 static	int on_write_request_process(struct query_state *);
80 static	int on_write_response_write1(struct query_state *);
81 
82 /*
83  * Clears the specified configuration entry (clears the cache for positive
84  * and negative entries) and also for all multipart entries.
85  */
86 static void
87 clear_config_entry(struct configuration_entry *config_entry)
88 {
89 	size_t i;
90 
91 	TRACE_IN(clear_config_entry);
92 	configuration_lock_entry(config_entry, CELT_POSITIVE);
93 	if (config_entry->positive_cache_entry != NULL)
94 		transform_cache_entry(
95 			config_entry->positive_cache_entry,
96 			CTT_CLEAR);
97 	configuration_unlock_entry(config_entry, CELT_POSITIVE);
98 
99 	configuration_lock_entry(config_entry, CELT_NEGATIVE);
100 	if (config_entry->negative_cache_entry != NULL)
101 		transform_cache_entry(
102 			config_entry->negative_cache_entry,
103 			CTT_CLEAR);
104 	configuration_unlock_entry(config_entry, CELT_NEGATIVE);
105 
106 	configuration_lock_entry(config_entry, CELT_MULTIPART);
107 	for (i = 0; i < config_entry->mp_cache_entries_size; ++i)
108 		transform_cache_entry(
109 			config_entry->mp_cache_entries[i],
110 			CTT_CLEAR);
111 	configuration_unlock_entry(config_entry, CELT_MULTIPART);
112 
113 	TRACE_OUT(clear_config_entry);
114 }
115 
116 /*
117  * Clears the specified configuration entry by deleting only the elements
118  * that are owned by the user with the specified eid_str.
119  */
120 static void
121 clear_config_entry_part(struct configuration_entry *config_entry,
122 	const char *eid_str, size_t eid_str_length)
123 {
124 	cache_entry *start, *finish, *mp_entry;
125 	TRACE_IN(clear_config_entry_part);
126 	configuration_lock_entry(config_entry, CELT_POSITIVE);
127 	if (config_entry->positive_cache_entry != NULL)
128 		transform_cache_entry_part(
129 			config_entry->positive_cache_entry,
130 			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
131 	configuration_unlock_entry(config_entry, CELT_POSITIVE);
132 
133 	configuration_lock_entry(config_entry, CELT_NEGATIVE);
134 	if (config_entry->negative_cache_entry != NULL)
135 		transform_cache_entry_part(
136 			config_entry->negative_cache_entry,
137 			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
138 	configuration_unlock_entry(config_entry, CELT_NEGATIVE);
139 
140 	configuration_lock_entry(config_entry, CELT_MULTIPART);
141 	if (configuration_entry_find_mp_cache_entries(config_entry,
142 		eid_str, &start, &finish) == 0) {
143 		for (mp_entry = start; mp_entry != finish; ++mp_entry)
144 			transform_cache_entry(*mp_entry, CTT_CLEAR);
145 	}
146 	configuration_unlock_entry(config_entry, CELT_MULTIPART);
147 
148 	TRACE_OUT(clear_config_entry_part);
149 }
150 
151 /*
152  * This function is assigned to the query_state structure on its creation.
153  * Its main purpose is to receive credentials from the client.
154  */
155 static int
156 on_query_startup(struct query_state *qstate)
157 {
158 	union {
159 		struct cmsghdr hdr;
160 		char pad[CMSG_SPACE(sizeof(struct cmsgcred))];
161 	} cmsg;
162 	struct msghdr mhdr;
163 	struct iovec iov;
164 	struct cmsgcred *cred;
165 	int elem_type;
166 
167 	TRACE_IN(on_query_startup);
168 	assert(qstate != NULL);
169 
170 	memset(&mhdr, 0, sizeof(mhdr));
171 	mhdr.msg_iov = &iov;
172 	mhdr.msg_iovlen = 1;
173 	mhdr.msg_control = &cmsg;
174 	mhdr.msg_controllen = sizeof(cmsg);
175 
176 	memset(&iov, 0, sizeof(iov));
177 	iov.iov_base = &elem_type;
178 	iov.iov_len = sizeof(elem_type);
179 
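	/*
	 * The first int of a session (the request type) is expected to arrive
	 * together with an SCM_CREDS control message, which carries the
	 * client's real uid and gid as filled in by the kernel.
	 */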
180 	if (recvmsg(qstate->sockfd, &mhdr, 0) == -1) {
181 		TRACE_OUT(on_query_startup);
182 		return (-1);
183 	}
184 
185 	if (mhdr.msg_controllen != CMSG_SPACE(sizeof(struct cmsgcred)) ||
186 	    cmsg.hdr.cmsg_len != CMSG_LEN(sizeof(struct cmsgcred)) ||
187 	    cmsg.hdr.cmsg_level != SOL_SOCKET ||
188 	    cmsg.hdr.cmsg_type != SCM_CREDS) {
189 		TRACE_OUT(on_query_startup);
190 		return (-1);
191 	}
192 
193 	cred = (struct cmsgcred *)CMSG_DATA(&cmsg);
194 	qstate->uid = cred->cmcred_uid;
195 	qstate->gid = cred->cmcred_gid;
196 
197 #if defined(NS_NSCD_EID_CHECKING) || defined(NS_STRICT_NSCD_EID_CHECKING)
198 /*
199  * This check is probably a bit redundant - the per-user cache is always
200  * separated by the euid/egid pair.
201  */
202 	if (check_query_eids(qstate) != 0) {
203 #ifdef NS_STRICT_NSCD_EID_CHECKING
204 		TRACE_OUT(on_query_startup);
205 		return (-1);
206 #else
207 		if ((elem_type != CET_READ_REQUEST) &&
208 		    (elem_type != CET_MP_READ_SESSION_REQUEST) &&
209 		    (elem_type != CET_WRITE_REQUEST) &&
210 		    (elem_type != CET_MP_WRITE_SESSION_REQUEST)) {
211 			TRACE_OUT(on_query_startup);
212 			return (-1);
213 		}
214 #endif
215 	}
216 #endif
217 
218 	switch (elem_type) {
219 	case CET_WRITE_REQUEST:
220 		qstate->process_func = on_write_request_read1;
221 		break;
222 	case CET_READ_REQUEST:
223 		qstate->process_func = on_read_request_read1;
224 		break;
225 	case CET_TRANSFORM_REQUEST:
226 		qstate->process_func = on_transform_request_read1;
227 		break;
228 	case CET_MP_WRITE_SESSION_REQUEST:
229 		qstate->process_func = on_mp_write_session_request_read1;
230 		break;
231 	case CET_MP_READ_SESSION_REQUEST:
232 		qstate->process_func = on_mp_read_session_request_read1;
233 		break;
234 	default:
235 		TRACE_OUT(on_query_startup);
236 		return (-1);
237 	}
238 
239 	qstate->kevent_watermark = 0;
240 	TRACE_OUT(on_query_startup);
241 	return (0);
242 }
243 
244 /*
245  * on_rw_mapper is used to process multiple read/write requests during
246  * one connection session. It is never called at the beginning (on query_state
247  * creation), as it does not process multipart requests and does not
248  * receive credentials.
249  */
250 static int
251 on_rw_mapper(struct query_state *qstate)
252 {
253 	ssize_t	result;
254 	int	elem_type;
255 
256 	TRACE_IN(on_rw_mapper);
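	/*
	 * Two-pass pattern used by the request handlers in this file: when
	 * kevent_watermark is 0 the handler only announces how many bytes it
	 * needs next; the caller is expected to invoke process_func again
	 * once at least that many bytes can be read, and the second pass
	 * does the actual reading.
	 */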
257 	if (qstate->kevent_watermark == 0) {
258 		qstate->kevent_watermark = sizeof(int);
259 	} else {
260 		result = qstate->read_func(qstate, &elem_type, sizeof(int));
261 		if (result != sizeof(int)) {
262 			TRACE_OUT(on_rw_mapper);
263 			return (-1);
264 		}
265 
266 		switch (elem_type) {
267 		case CET_WRITE_REQUEST:
268 			qstate->kevent_watermark = sizeof(size_t);
269 			qstate->process_func = on_write_request_read1;
270 		break;
271 		case CET_READ_REQUEST:
272 			qstate->kevent_watermark = sizeof(size_t);
273 			qstate->process_func = on_read_request_read1;
274 		break;
275 		default:
276 			TRACE_OUT(on_rw_mapper);
277 			return (-1);
278 		break;
279 		}
280 	}
281 	TRACE_OUT(on_rw_mapper);
282 	return (0);
283 }
284 
285 /*
286  * The default query_destroy function
287  */
288 static void
289 on_query_destroy(struct query_state *qstate)
290 {
291 
292 	TRACE_IN(on_query_destroy);
293 	finalize_comm_element(&qstate->response);
294 	finalize_comm_element(&qstate->request);
295 	TRACE_OUT(on_query_destroy);
296 }
297 
298 /*
299  * The functions below are used to process write requests.
300  * - on_write_request_read1 and on_write_request_read2 read the request itself
301  * - on_write_request_process processes it (if the client asks to cache
302  *    a negative result, on_negative_write_request_process is used instead)
303  * - on_write_response_write1 sends the response
304  */
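/*
 * Wire layout of a write request, as implied by the reads in
 * on_write_request_read1/on_write_request_read2 (the leading int request
 * type has already been consumed by on_query_startup or on_rw_mapper):
 *	size_t	entry_length
 *	size_t	cache_key_size
 *	size_t	data_size
 *	char	entry[entry_length]
 *	char	cache_key[cache_key_size]
 *	char	data[data_size]		(absent for negative writes)
 * The response is a single int error_code.
 */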
305 static int
306 on_write_request_read1(struct query_state *qstate)
307 {
308 	struct cache_write_request	*write_request;
309 	ssize_t	result;
310 
311 	TRACE_IN(on_write_request_read1);
312 	if (qstate->kevent_watermark == 0)
313 		qstate->kevent_watermark = sizeof(size_t) * 3;
314 	else {
315 		init_comm_element(&qstate->request, CET_WRITE_REQUEST);
316 		write_request = get_cache_write_request(&qstate->request);
317 
318 		result = qstate->read_func(qstate, &write_request->entry_length,
319 	    		sizeof(size_t));
320 		result += qstate->read_func(qstate,
321 	    		&write_request->cache_key_size, sizeof(size_t));
322 		result += qstate->read_func(qstate,
323 	    		&write_request->data_size, sizeof(size_t));
324 
325 		if (result != sizeof(size_t) * 3) {
326 			TRACE_OUT(on_write_request_read1);
327 			return (-1);
328 		}
329 
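		/*
		 * Sanity-check the sizes announced by the client before
		 * allocating buffers for them; data_size may legitimately be
		 * zero for a negative write request.
		 */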
330 		if (BUFSIZE_INVALID(write_request->entry_length) ||
331 			BUFSIZE_INVALID(write_request->cache_key_size) ||
332 			(BUFSIZE_INVALID(write_request->data_size) &&
333 			(write_request->data_size != 0))) {
334 			TRACE_OUT(on_write_request_read1);
335 			return (-1);
336 		}
337 
338 		write_request->entry = calloc(1,
339 			write_request->entry_length + 1);
340 		assert(write_request->entry != NULL);
341 
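		/*
		 * The cache key is stored with the client's eid_str
		 * ("<euid>_<egid>_") prepended, so that per-user entries of
		 * different clients never collide.
		 */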
342 		write_request->cache_key = calloc(1,
343 			write_request->cache_key_size +
344 			qstate->eid_str_length);
345 		assert(write_request->cache_key != NULL);
346 		memcpy(write_request->cache_key, qstate->eid_str,
347 			qstate->eid_str_length);
348 
349 		if (write_request->data_size != 0) {
350 			write_request->data = calloc(1,
351 				write_request->data_size);
352 			assert(write_request->data != NULL);
353 		}
354 
355 		qstate->kevent_watermark = write_request->entry_length +
356 			write_request->cache_key_size +
357 			write_request->data_size;
358 		qstate->process_func = on_write_request_read2;
359 	}
360 
361 	TRACE_OUT(on_write_request_read1);
362 	return (0);
363 }
364 
365 static int
366 on_write_request_read2(struct query_state *qstate)
367 {
368 	struct cache_write_request	*write_request;
369 	ssize_t	result;
370 
371 	TRACE_IN(on_write_request_read2);
372 	write_request = get_cache_write_request(&qstate->request);
373 
374 	result = qstate->read_func(qstate, write_request->entry,
375 		write_request->entry_length);
376 	result += qstate->read_func(qstate, write_request->cache_key +
377 		qstate->eid_str_length, write_request->cache_key_size);
378 	if (write_request->data_size != 0)
379 		result += qstate->read_func(qstate, write_request->data,
380 			write_request->data_size);
381 
382 	if (result != (ssize_t)qstate->kevent_watermark) {
383 		TRACE_OUT(on_write_request_read2);
384 		return (-1);
385 	}
386 	write_request->cache_key_size += qstate->eid_str_length;
387 
388 	qstate->kevent_watermark = 0;
389 	if (write_request->data_size != 0)
390 		qstate->process_func = on_write_request_process;
391 	else
392 	    	qstate->process_func = on_negative_write_request_process;
393 	TRACE_OUT(on_write_request_read2);
394 	return (0);
395 }
396 
397 static	int
398 on_write_request_process(struct query_state *qstate)
399 {
400 	struct cache_write_request	*write_request;
401 	struct cache_write_response	*write_response;
402 	cache_entry c_entry;
403 
404 	TRACE_IN(on_write_request_process);
405 	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
406 	write_response = get_cache_write_response(&qstate->response);
407 	write_request = get_cache_write_request(&qstate->request);
408 
409 	qstate->config_entry = configuration_find_entry(
410 		s_configuration, write_request->entry);
411 
412 	if (qstate->config_entry == NULL) {
413 		write_response->error_code = ENOENT;
414 
415 		LOG_ERR_2("write_request", "can't find configuration"
416 		    " entry '%s'. aborting request", write_request->entry);
417 		goto fin;
418 	}
419 
420 	if (qstate->config_entry->enabled == 0) {
421 		write_response->error_code = EACCES;
422 
423 		LOG_ERR_2("write_request",
424 			"configuration entry '%s' is disabled",
425 			write_request->entry);
426 		goto fin;
427 	}
428 
429 	if (qstate->config_entry->perform_actual_lookups != 0) {
430 		write_response->error_code = EOPNOTSUPP;
431 
432 		LOG_ERR_2("write_request",
433 			"entry '%s' performs lookups by itself: "
434 			"can't write to it", write_request->entry);
435 		goto fin;
436 	}
437 
438 	configuration_lock_rdlock(s_configuration);
439 	c_entry = find_cache_entry(s_cache,
440 		qstate->config_entry->positive_cache_params.cep.entry_name);
441 	configuration_unlock(s_configuration);
442 	if (c_entry != NULL) {
443 		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
444 		qstate->config_entry->positive_cache_entry = c_entry;
445 		write_response->error_code = cache_write(c_entry,
446 			write_request->cache_key,
447 	    		write_request->cache_key_size,
448 	    		write_request->data,
449 			write_request->data_size);
450 		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
451 
452 		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
453 		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
454 			memcpy(&qstate->timeout,
455 				&qstate->config_entry->common_query_timeout,
456 				sizeof(struct timeval));
457 
458 	} else
459 		write_response->error_code = -1;
460 
461 fin:
462 	qstate->kevent_filter = EVFILT_WRITE;
463 	qstate->kevent_watermark = sizeof(int);
464 	qstate->process_func = on_write_response_write1;
465 
466 	TRACE_OUT(on_write_request_process);
467 	return (0);
468 }
469 
470 static int
471 on_negative_write_request_process(struct query_state *qstate)
472 {
473 	struct cache_write_request	*write_request;
474 	struct cache_write_response	*write_response;
475 	cache_entry c_entry;
476 
477 	TRACE_IN(on_negative_write_request_process);
478 	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
479 	write_response = get_cache_write_response(&qstate->response);
480 	write_request = get_cache_write_request(&qstate->request);
481 
482 	qstate->config_entry = configuration_find_entry(
483 		s_configuration, write_request->entry);
484 
485 	if (qstate->config_entry == NULL) {
486 		write_response->error_code = ENOENT;
487 
488 		LOG_ERR_2("negative_write_request",
489 			"can't find configuration"
490 		   	" entry '%s'. aborting request", write_request->entry);
491 		goto fin;
492 	}
493 
494 	if (qstate->config_entry->enabled == 0) {
495 		write_response->error_code = EACCES;
496 
497 		LOG_ERR_2("negative_write_request",
498 			"configuration entry '%s' is disabled",
499 			write_request->entry);
500 		goto fin;
501 	}
502 
503 	if (qstate->config_entry->perform_actual_lookups != 0) {
504 		write_response->error_code = EOPNOTSUPP;
505 
506 		LOG_ERR_2("negative_write_request",
507 			"entry '%s' performs lookups by itself: "
508 			"can't write to it", write_request->entry);
509 		goto fin;
510 	} else {
511 #ifdef NS_NSCD_EID_CHECKING
512 		if (check_query_eids(qstate) != 0) {
513 			write_response->error_code = EPERM;
514 			goto fin;
515 		}
516 #endif
517 	}
518 
519 	configuration_lock_rdlock(s_configuration);
520 	c_entry = find_cache_entry(s_cache,
521 		qstate->config_entry->negative_cache_params.cep.entry_name);
522 	configuration_unlock(s_configuration);
523 	if (c_entry != NULL) {
524 		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
525 		qstate->config_entry->negative_cache_entry = c_entry;
526 		write_response->error_code = cache_write(c_entry,
527 			write_request->cache_key,
528 	    		write_request->cache_key_size,
529 	    		negative_data,
530 			sizeof(negative_data));
531 		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
532 
533 		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
534 		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
535 			memcpy(&qstate->timeout,
536 				&qstate->config_entry->common_query_timeout,
537 				sizeof(struct timeval));
538 	} else
539 		write_response->error_code = -1;
540 
541 fin:
542 	qstate->kevent_filter = EVFILT_WRITE;
543 	qstate->kevent_watermark = sizeof(int);
544 	qstate->process_func = on_write_response_write1;
545 
546 	TRACE_OUT(on_negative_write_request_process);
547 	return (0);
548 }
549 
550 static int
551 on_write_response_write1(struct query_state *qstate)
552 {
553 	struct cache_write_response	*write_response;
554 	ssize_t	result;
555 
556 	TRACE_IN(on_write_response_write1);
557 	write_response = get_cache_write_response(&qstate->response);
558 	result = qstate->write_func(qstate, &write_response->error_code,
559 		sizeof(int));
560 	if (result != sizeof(int)) {
561 		TRACE_OUT(on_write_response_write1);
562 		return (-1);
563 	}
564 
565 	finalize_comm_element(&qstate->request);
566 	finalize_comm_element(&qstate->response);
567 
568 	qstate->kevent_watermark = sizeof(int);
569 	qstate->kevent_filter = EVFILT_READ;
570 	qstate->process_func = on_rw_mapper;
571 
572 	TRACE_OUT(on_write_response_write1);
573 	return (0);
574 }
575 
576 /*
577  * The functions below are used to process read requests.
578  * - on_read_request_read1 and on_read_request_read2 read the request itself
579  * - on_read_request_process processes it
580  * - on_read_response_write1 and on_read_response_write2 send the response
581  */
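/*
 * Wire layout of a read request, as implied by the reads in
 * on_read_request_read1/on_read_request_read2:
 *	size_t	entry_length
 *	size_t	cache_key_size
 *	char	entry[entry_length]
 *	char	cache_key[cache_key_size]
 * The response is an int error_code followed, on success, by a size_t
 * data_size and the data itself.
 */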
582 static int
583 on_read_request_read1(struct query_state *qstate)
584 {
585 	struct cache_read_request *read_request;
586 	ssize_t	result;
587 
588 	TRACE_IN(on_read_request_read1);
589 	if (qstate->kevent_watermark == 0)
590 		qstate->kevent_watermark = sizeof(size_t) * 2;
591 	else {
592 		init_comm_element(&qstate->request, CET_READ_REQUEST);
593 		read_request = get_cache_read_request(&qstate->request);
594 
595 		result = qstate->read_func(qstate,
596 	    		&read_request->entry_length, sizeof(size_t));
597 		result += qstate->read_func(qstate,
598 	    		&read_request->cache_key_size, sizeof(size_t));
599 
600 		if (result != sizeof(size_t) * 2) {
601 			TRACE_OUT(on_read_request_read1);
602 			return (-1);
603 		}
604 
605 		if (BUFSIZE_INVALID(read_request->entry_length) ||
606 			BUFSIZE_INVALID(read_request->cache_key_size)) {
607 			TRACE_OUT(on_read_request_read1);
608 			return (-1);
609 		}
610 
611 		read_request->entry = calloc(1,
612 			read_request->entry_length + 1);
613 		assert(read_request->entry != NULL);
614 
615 		read_request->cache_key = calloc(1,
616 			read_request->cache_key_size +
617 			qstate->eid_str_length);
618 		assert(read_request->cache_key != NULL);
619 		memcpy(read_request->cache_key, qstate->eid_str,
620 			qstate->eid_str_length);
621 
622 		qstate->kevent_watermark = read_request->entry_length +
623 			read_request->cache_key_size;
624 		qstate->process_func = on_read_request_read2;
625 	}
626 
627 	TRACE_OUT(on_read_request_read1);
628 	return (0);
629 }
630 
631 static int
632 on_read_request_read2(struct query_state *qstate)
633 {
634 	struct cache_read_request	*read_request;
635 	ssize_t	result;
636 
637 	TRACE_IN(on_read_request_read2);
638 	read_request = get_cache_read_request(&qstate->request);
639 
640 	result = qstate->read_func(qstate, read_request->entry,
641 		read_request->entry_length);
642 	result += qstate->read_func(qstate,
643 		read_request->cache_key + qstate->eid_str_length,
644 		read_request->cache_key_size);
645 
646 	if (result != (ssize_t)qstate->kevent_watermark) {
647 		TRACE_OUT(on_read_request_read2);
648 		return (-1);
649 	}
650 	read_request->cache_key_size += qstate->eid_str_length;
651 
652 	qstate->kevent_watermark = 0;
653 	qstate->process_func = on_read_request_process;
654 
655 	TRACE_OUT(on_read_request_read2);
656 	return (0);
657 }
658 
659 static int
660 on_read_request_process(struct query_state *qstate)
661 {
662 	struct cache_read_request *read_request;
663 	struct cache_read_response *read_response;
664 	cache_entry	c_entry, neg_c_entry;
665 
666 	struct agent	*lookup_agent;
667 	struct common_agent *c_agent;
668 	int res;
669 
670 	TRACE_IN(on_read_request_process);
671 	init_comm_element(&qstate->response, CET_READ_RESPONSE);
672 	read_response = get_cache_read_response(&qstate->response);
673 	read_request = get_cache_read_request(&qstate->request);
674 
675 	qstate->config_entry = configuration_find_entry(
676 		s_configuration, read_request->entry);
677 	if (qstate->config_entry == NULL) {
678 		read_response->error_code = ENOENT;
679 
680 		LOG_ERR_2("read_request",
681 			"can't find configuration "
682 	    		"entry '%s'. aborting request", read_request->entry);
683 	    	goto fin;
684 	}
685 
686 	if (qstate->config_entry->enabled == 0) {
687 		read_response->error_code = EACCES;
688 
689 		LOG_ERR_2("read_request",
690 			"configuration entry '%s' is disabled",
691 			read_request->entry);
692 		goto fin;
693 	}
694 
695 	/*
696 	 * If we perform lookups by ourselves, then we don't need to separate
697 	 * cache entries by euid and egid.
698 	 */
699 	if (qstate->config_entry->perform_actual_lookups != 0)
700 		memset(read_request->cache_key, 0, qstate->eid_str_length);
701 	else {
702 #ifdef NS_NSCD_EID_CHECKING
703 		if (check_query_eids(qstate) != 0) {
704 		/* if the lookup is not self-performing, check the client's euid/egid */
705 			read_response->error_code = EPERM;
706 			goto fin;
707 		}
708 #endif
709 	}
710 
711 	configuration_lock_rdlock(s_configuration);
712 	c_entry = find_cache_entry(s_cache,
713 		qstate->config_entry->positive_cache_params.cep.entry_name);
714 	neg_c_entry = find_cache_entry(s_cache,
715 		qstate->config_entry->negative_cache_params.cep.entry_name);
716 	configuration_unlock(s_configuration);
717 	if ((c_entry != NULL) && (neg_c_entry != NULL)) {
718 		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
719 		qstate->config_entry->positive_cache_entry = c_entry;
720 		read_response->error_code = cache_read(c_entry,
721 	    		read_request->cache_key,
722 	    		read_request->cache_key_size, NULL,
723 	    		&read_response->data_size);
724 
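		/*
		 * A result of -2 means the supplied buffer (NULL above) was
		 * too small; data_size now holds the required length, so
		 * allocate it and read again.
		 */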
725 		if (read_response->error_code == -2) {
726 			read_response->data = malloc(
727 				read_response->data_size);
728 			assert(read_response->data != NULL);
729 			read_response->error_code = cache_read(c_entry,
730 				read_request->cache_key,
731 		    		read_request->cache_key_size,
732 		    		read_response->data,
733 		    		&read_response->data_size);
734 		}
735 		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
736 
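		/*
		 * A miss (-1) in the positive cache falls through to the
		 * negative cache, which records keys whose lookups are known
		 * to fail.
		 */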
737 		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
738 		qstate->config_entry->negative_cache_entry = neg_c_entry;
739 		if (read_response->error_code == -1) {
740 			read_response->error_code = cache_read(neg_c_entry,
741 				read_request->cache_key,
742 				read_request->cache_key_size, NULL,
743 				&read_response->data_size);
744 
745 			if (read_response->error_code == -2) {
746 				read_response->data = malloc(
747 					read_response->data_size);
748 				assert(read_response->data != NULL);
749 				read_response->error_code = cache_read(neg_c_entry,
750 					read_request->cache_key,
751 		    			read_request->cache_key_size,
752 		    			read_response->data,
753 		    			&read_response->data_size);
754 			}
755 		}
756 		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
757 
758 		if ((read_response->error_code == -1) &&
759 			(qstate->config_entry->perform_actual_lookups != 0)) {
760 			free(read_response->data);
761 			read_response->data = NULL;
762 			read_response->data_size = 0;
763 
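			/*
			 * Both caches missed and this entry performs actual
			 * lookups itself: invoke the matching agent and store
			 * its answer in the positive or negative cache.
			 */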
764 			lookup_agent = find_agent(s_agent_table,
765 				read_request->entry, COMMON_AGENT);
766 
767 			if ((lookup_agent != NULL) &&
768 			(lookup_agent->type == COMMON_AGENT)) {
769 				c_agent = (struct common_agent *)lookup_agent;
770 				res = c_agent->lookup_func(
771 					read_request->cache_key +
772 						qstate->eid_str_length,
773 					read_request->cache_key_size -
774 						qstate->eid_str_length,
775 					&read_response->data,
776 					&read_response->data_size);
777 
778 				if (res == NS_SUCCESS) {
779 					read_response->error_code = 0;
780 					configuration_lock_entry(
781 						qstate->config_entry,
782 						CELT_POSITIVE);
783 					cache_write(c_entry,
784 						read_request->cache_key,
785 	    					read_request->cache_key_size,
786 	    					read_response->data,
787 						read_response->data_size);
788 					configuration_unlock_entry(
789 						qstate->config_entry,
790 						CELT_POSITIVE);
791 				} else if ((res == NS_NOTFOUND) ||
792 					  (res == NS_RETURN)) {
793 					configuration_lock_entry(
794 						  qstate->config_entry,
795 						  CELT_NEGATIVE);
796 					cache_write(neg_c_entry,
797 						read_request->cache_key,
798 						read_request->cache_key_size,
799 						negative_data,
800 						sizeof(negative_data));
801 					configuration_unlock_entry(
802 						  qstate->config_entry,
803 						  CELT_NEGATIVE);
804 
805 					read_response->error_code = 0;
806 					read_response->data = NULL;
807 					read_response->data_size = 0;
808 				}
809 			}
810 		}
811 
812 		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
813 		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
814 			memcpy(&qstate->timeout,
815 				&qstate->config_entry->common_query_timeout,
816 				sizeof(struct timeval));
817 	} else
818 		read_response->error_code = -1;
819 
820 fin:
821 	qstate->kevent_filter = EVFILT_WRITE;
822 	if (read_response->error_code == 0)
823 		qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
824 	else
825 		qstate->kevent_watermark = sizeof(int);
826 	qstate->process_func = on_read_response_write1;
827 
828 	TRACE_OUT(on_read_request_process);
829 	return (0);
830 }
831 
832 static int
833 on_read_response_write1(struct query_state *qstate)
834 {
835 	struct cache_read_response	*read_response;
836 	ssize_t	result;
837 
838 	TRACE_IN(on_read_response_write1);
839 	read_response = get_cache_read_response(&qstate->response);
840 
841 	result = qstate->write_func(qstate, &read_response->error_code,
842 		sizeof(int));
843 
844 	if (read_response->error_code == 0) {
845 		result += qstate->write_func(qstate, &read_response->data_size,
846 			sizeof(size_t));
847 		if (result != (ssize_t)qstate->kevent_watermark) {
848 			TRACE_OUT(on_read_response_write1);
849 			return (-1);
850 		}
851 
852 		qstate->kevent_watermark = read_response->data_size;
853 		qstate->process_func = on_read_response_write2;
854 	} else {
855 		if (result != (ssize_t)qstate->kevent_watermark) {
856 			TRACE_OUT(on_read_response_write1);
857 			return (-1);
858 		}
859 
860 		qstate->kevent_watermark = 0;
861 		qstate->process_func = NULL;
862 	}
863 
864 	TRACE_OUT(on_read_response_write1);
865 	return (0);
866 }
867 
868 static int
869 on_read_response_write2(struct query_state *qstate)
870 {
871 	struct cache_read_response	*read_response;
872 	ssize_t	result;
873 
874 	TRACE_IN(on_read_response_write2);
875 	read_response = get_cache_read_response(&qstate->response);
876 	if (read_response->data_size > 0) {
877 		result = qstate->write_func(qstate, read_response->data,
878 			read_response->data_size);
879 		if (result != (ssize_t)qstate->kevent_watermark) {
880 			TRACE_OUT(on_read_response_write2);
881 			return (-1);
882 		}
883 	}
884 
885 	finalize_comm_element(&qstate->request);
886 	finalize_comm_element(&qstate->response);
887 
888 	qstate->kevent_watermark = sizeof(int);
889 	qstate->kevent_filter = EVFILT_READ;
890 	qstate->process_func = on_rw_mapper;
891 	TRACE_OUT(on_read_response_write2);
892 	return (0);
893 }
894 
895 /*
896  * The functions below are used to process transform requests.
897  * - on_transform_request_read1 and on_transform_request_read2 read the
898  *   request itself
899  * - on_transform_request_process processes it
900  * - on_transform_response_write1 sends the response
901  */
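/*
 * Wire layout of a transform request, as implied by the reads in
 * on_transform_request_read1/on_transform_request_read2:
 *	size_t	entry_length
 *	int	transformation_type	(TT_USER or TT_ALL)
 *	char	entry[entry_length]	(absent when entry_length is 0,
 *					 meaning "all entries")
 * The response is a single int error_code.
 */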
902 static int
903 on_transform_request_read1(struct query_state *qstate)
904 {
905 	struct cache_transform_request *transform_request;
906 	ssize_t	result;
907 
908 	TRACE_IN(on_transform_request_read1);
909 	if (qstate->kevent_watermark == 0)
910 		qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
911 	else {
912 		init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST);
913 		transform_request =
914 			get_cache_transform_request(&qstate->request);
915 
916 		result = qstate->read_func(qstate,
917 	    		&transform_request->entry_length, sizeof(size_t));
918 		result += qstate->read_func(qstate,
919 	    		&transform_request->transformation_type, sizeof(int));
920 
921 		if (result != sizeof(size_t) + sizeof(int)) {
922 			TRACE_OUT(on_transform_request_read1);
923 			return (-1);
924 		}
925 
926 		if ((transform_request->transformation_type != TT_USER) &&
927 		    (transform_request->transformation_type != TT_ALL)) {
928 			TRACE_OUT(on_transform_request_read1);
929 			return (-1);
930 		}
931 
932 		if (transform_request->entry_length != 0) {
933 			if (BUFSIZE_INVALID(transform_request->entry_length)) {
934 				TRACE_OUT(on_transform_request_read1);
935 				return (-1);
936 			}
937 
938 			transform_request->entry = calloc(1,
939 				transform_request->entry_length + 1);
940 			assert(transform_request->entry != NULL);
941 
942 			qstate->process_func = on_transform_request_read2;
943 		} else
944 			qstate->process_func = on_transform_request_process;
945 
946 		qstate->kevent_watermark = transform_request->entry_length;
947 	}
948 
949 	TRACE_OUT(on_transform_request_read1);
950 	return (0);
951 }
952 
953 static int
954 on_transform_request_read2(struct query_state *qstate)
955 {
956 	struct cache_transform_request	*transform_request;
957 	ssize_t	result;
958 
959 	TRACE_IN(on_transform_request_read2);
960 	transform_request = get_cache_transform_request(&qstate->request);
961 
962 	result = qstate->read_func(qstate, transform_request->entry,
963 		transform_request->entry_length);
964 
965 	if (result != (ssize_t)qstate->kevent_watermark) {
966 		TRACE_OUT(on_transform_request_read2);
967 		return (-1);
968 	}
969 
970 	qstate->kevent_watermark = 0;
971 	qstate->process_func = on_transform_request_process;
972 
973 	TRACE_OUT(on_transform_request_read2);
974 	return (0);
975 }
976 
977 static int
978 on_transform_request_process(struct query_state *qstate)
979 {
980 	struct cache_transform_request *transform_request;
981 	struct cache_transform_response *transform_response;
982 	struct configuration_entry *config_entry;
983 	size_t	i, size;
984 
985 	TRACE_IN(on_transform_request_process);
986 	init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE);
987 	transform_response = get_cache_transform_response(&qstate->response);
988 	transform_request = get_cache_transform_request(&qstate->request);
989 
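	/*
	 * TT_USER clears only the entries owned by the calling user (selected
	 * by its eid_str prefix); TT_ALL clears whole cache entries and is
	 * permitted only for root (euid == 0).
	 */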
990 	switch (transform_request->transformation_type) {
991 	case TT_USER:
992 		if (transform_request->entry == NULL) {
993 			size = configuration_get_entries_size(s_configuration);
994 			for (i = 0; i < size; ++i) {
995 			    config_entry = configuration_get_entry(
996 				s_configuration, i);
997 
998 			    if (config_entry->perform_actual_lookups == 0)
999 			    	clear_config_entry_part(config_entry,
1000 				    qstate->eid_str, qstate->eid_str_length);
1001 			}
1002 		} else {
1003 			qstate->config_entry = configuration_find_entry(
1004 				s_configuration, transform_request->entry);
1005 
1006 			if (qstate->config_entry == NULL) {
1007 				LOG_ERR_2("transform_request",
1008 					"can't find configuration"
1009 		   			" entry '%s'. aborting request",
1010 					transform_request->entry);
1011 				transform_response->error_code = -1;
1012 				goto fin;
1013 			}
1014 
1015 			if (qstate->config_entry->perform_actual_lookups != 0) {
1016 				LOG_ERR_2("transform_request",
1017 					"can't transform the cache entry %s"
1018 					", because it is used for actual lookups",
1019 					transform_request->entry);
1020 				transform_response->error_code = -1;
1021 				goto fin;
1022 			}
1023 
1024 			clear_config_entry_part(qstate->config_entry,
1025 				qstate->eid_str, qstate->eid_str_length);
1026 		}
1027 		break;
1028 	case TT_ALL:
1029 		if (qstate->euid != 0)
1030 			transform_response->error_code = -1;
1031 		else {
1032 			if (transform_request->entry == NULL) {
1033 				size = configuration_get_entries_size(
1034 					s_configuration);
1035 				for (i = 0; i < size; ++i) {
1036 				    clear_config_entry(
1037 					configuration_get_entry(
1038 						s_configuration, i));
1039 				}
1040 			} else {
1041 				qstate->config_entry = configuration_find_entry(
1042 					s_configuration,
1043 					transform_request->entry);
1044 
1045 				if (qstate->config_entry == NULL) {
1046 					LOG_ERR_2("transform_request",
1047 						"can't find configuration"
1048 		   				" entry '%s'. aborting request",
1049 						transform_request->entry);
1050 					transform_response->error_code = -1;
1051 					goto fin;
1052 				}
1053 
1054 				clear_config_entry(qstate->config_entry);
1055 			}
1056 		}
1057 		break;
1058 	default:
1059 		transform_response->error_code = -1;
1060 	}
1061 
1062 fin:
1063 	qstate->kevent_watermark = 0;
1064 	qstate->process_func = on_transform_response_write1;
1065 	TRACE_OUT(on_transform_request_process);
1066 	return (0);
1067 }
1068 
1069 static int
1070 on_transform_response_write1(struct query_state *qstate)
1071 {
1072 	struct cache_transform_response	*transform_response;
1073 	ssize_t	result;
1074 
1075 	TRACE_IN(on_transform_response_write1);
1076 	transform_response = get_cache_transform_response(&qstate->response);
1077 	result = qstate->write_func(qstate, &transform_response->error_code,
1078 		sizeof(int));
1079 	if (result != sizeof(int)) {
1080 		TRACE_OUT(on_transform_response_write1);
1081 		return (-1);
1082 	}
1083 
1084 	finalize_comm_element(&qstate->request);
1085 	finalize_comm_element(&qstate->response);
1086 
1087 	qstate->kevent_watermark = 0;
1088 	qstate->process_func = NULL;
1089 	TRACE_OUT(on_transform_response_write1);
1090 	return (0);
1091 }
1092 
1093 /*
1094  * Checks that the client's euid and egid match its uid and gid.
1095  * Returns 0 on success.
1096  */
1097 int
1098 check_query_eids(struct query_state *qstate)
1099 {
1100 
1101 	return ((qstate->uid != qstate->euid) || (qstate->gid != qstate->egid) ? -1 : 0);
1102 }
1103 
1104 /*
1105  * Uses the qstate fields to process an "alternate" read - when the buffer is
1106  * too large to be received during one socket read operation
1107  */
1108 ssize_t
1109 query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes)
1110 {
1111 	size_t remaining;
1112 	ssize_t	result;
1113 
1114 	TRACE_IN(query_io_buffer_read);
1115 	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1116 		return (-1);
1117 
1118 	assert(qstate->io_buffer_p <=
1119 		qstate->io_buffer + qstate->io_buffer_size);
1120 	remaining = qstate->io_buffer + qstate->io_buffer_size -
1121 		qstate->io_buffer_p;
1122 	if (nbytes < remaining)
1123 		result = nbytes;
1124 	else
1125 		result = remaining;
1126 
1127 	memcpy(buf, qstate->io_buffer_p, result);
1128 	qstate->io_buffer_p += result;
1129 
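	/*
	 * Once a call finds the buffer already drained, release it and switch
	 * back to plain socket I/O.
	 */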
1130 	if (remaining == 0) {
1131 		free(qstate->io_buffer);
1132 		qstate->io_buffer = NULL;
1133 
1134 		qstate->write_func = query_socket_write;
1135 		qstate->read_func = query_socket_read;
1136 	}
1137 
1138 	TRACE_OUT(query_io_buffer_read);
1139 	return (result);
1140 }
1141 
1142 /*
1143  * Uses the qstate fields to process an "alternate" write - when the buffer is
1144  * too large to be sent during one socket write operation
1145  */
1146 ssize_t
1147 query_io_buffer_write(struct query_state *qstate, const void *buf,
1148 	size_t nbytes)
1149 {
1150 	size_t remaining;
1151 	ssize_t	result;
1152 
1153 	TRACE_IN(query_io_buffer_write);
1154 	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1155 		return (-1);
1156 
1157 	assert(qstate->io_buffer_p <=
1158 		qstate->io_buffer + qstate->io_buffer_size);
1159 	remaining = qstate->io_buffer + qstate->io_buffer_size -
1160 		qstate->io_buffer_p;
1161 	if (nbytes < remaining)
1162 		result = nbytes;
1163 	else
1164 		result = remaining;
1165 
1166 	memcpy(qstate->io_buffer_p, buf, result);
1167 	qstate->io_buffer_p += result;
1168 
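	/*
	 * Once the staging buffer is full, rewind it and flag the query for
	 * alternate I/O so that the buffered data can be flushed later.
	 */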
1169 	if (remaining == 0) {
1170 		qstate->use_alternate_io = 1;
1171 		qstate->io_buffer_p = qstate->io_buffer;
1172 
1173 		qstate->write_func = query_socket_write;
1174 		qstate->read_func = query_socket_read;
1175 	}
1176 
1177 	TRACE_OUT(query_io_buffer_write);
1178 	return (result);
1179 }
1180 
1181 /*
1182  * The default "read" function, which reads data directly from the socket.
1183  */
1184 ssize_t
1185 query_socket_read(struct query_state *qstate, void *buf, size_t nbytes)
1186 {
1187 	ssize_t	result;
1188 
1189 	TRACE_IN(query_socket_read);
1190 	if (qstate->socket_failed != 0) {
1191 		TRACE_OUT(query_socket_read);
1192 		return (-1);
1193 	}
1194 
1195 	result = read(qstate->sockfd, buf, nbytes);
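	/* An error or a short read marks the socket as failed for this query. */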
1196 	if (result < 0 || (size_t)result < nbytes)
1197 		qstate->socket_failed = 1;
1198 
1199 	TRACE_OUT(query_socket_read);
1200 	return (result);
1201 }
1202 
1203 /*
1204  * The default "write" function, which writes data directly to the socket.
1205  */
1206 ssize_t
1207 query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes)
1208 {
1209 	ssize_t	result;
1210 
1211 	TRACE_IN(query_socket_write);
1212 	if (qstate->socket_failed != 0) {
1213 		TRACE_OUT(query_socket_write);
1214 		return (-1);
1215 	}
1216 
1217 	result = write(qstate->sockfd, buf, nbytes);
1218 	if (result < 0 || (size_t)result < nbytes)
1219 		qstate->socket_failed = 1;
1220 
1221 	TRACE_OUT(query_socket_write);
1222 	return (result);
1223 }
1224 
1225 /*
1226  * Initializes the query_state structure by filling it with the default values.
1227  */
1228 struct query_state *
1229 init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid)
1230 {
1231 	struct query_state	*retval;
1232 
1233 	TRACE_IN(init_query_state);
1234 	retval = calloc(1, sizeof(*retval));
1235 	assert(retval != NULL);
1236 
1237 	retval->sockfd = sockfd;
1238 	retval->kevent_filter = EVFILT_READ;
1239 	retval->kevent_watermark = kevent_watermark;
1240 
1241 	retval->euid = euid;
1242 	retval->egid = egid;
1243 	retval->uid = retval->gid = -1;
1244 
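	/*
	 * eid_str ("<euid>_<egid>_") is prepended to every cache key to keep
	 * the per-user caches of different clients separate.
	 */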
1245 	if (asprintf(&retval->eid_str, "%d_%d_", retval->euid,
1246 		retval->egid) == -1) {
1247 		free(retval);
1248 		return (NULL);
1249 	}
1250 	retval->eid_str_length = strlen(retval->eid_str);
1251 
1252 	init_comm_element(&retval->request, CET_UNDEFINED);
1253 	init_comm_element(&retval->response, CET_UNDEFINED);
1254 	retval->process_func = on_query_startup;
1255 	retval->destroy_func = on_query_destroy;
1256 
1257 	retval->write_func = query_socket_write;
1258 	retval->read_func = query_socket_read;
1259 
1260 	get_time_func(&retval->creation_time);
1261 	retval->timeout.tv_sec = s_configuration->query_timeout;
1262 	retval->timeout.tv_usec = 0;
1263 
1264 	TRACE_OUT(init_query_state);
1265 	return (retval);
1266 }
1267 
1268 void
1269 destroy_query_state(struct query_state *qstate)
1270 {
1271 
1272 	TRACE_IN(destroy_query_state);
1273 	if (qstate->eid_str != NULL)
1274 	    free(qstate->eid_str);
1275 
1276 	if (qstate->io_buffer != NULL)
1277 		free(qstate->io_buffer);
1278 
1279 	qstate->destroy_func(qstate);
1280 	free(qstate);
1281 	TRACE_OUT(destroy_query_state);
1282 }
1283