/*-
 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/time.h>

#include <assert.h>
#include <errno.h>
#include <nsswitch.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "config.h"
#include "debug.h"
#include "query.h"
#include "log.h"
#include "mp_ws_query.h"
#include "mp_rs_query.h"
#include "singletons.h"

static const char negative_data[1] = { 0 };

extern	void get_time_func(struct timeval *);

static	void clear_config_entry(struct configuration_entry *);
static	void clear_config_entry_part(struct configuration_entry *,
	const char *, size_t);

static	int on_query_startup(struct query_state *);
static	void on_query_destroy(struct query_state *);

static	int on_read_request_read1(struct query_state *);
static	int on_read_request_read2(struct query_state *);
static	int on_read_request_process(struct query_state *);
static	int on_read_response_write1(struct query_state *);
static	int on_read_response_write2(struct query_state *);

static	int on_rw_mapper(struct query_state *);

static	int on_transform_request_read1(struct query_state *);
static	int on_transform_request_read2(struct query_state *);
static	int on_transform_request_process(struct query_state *);
static	int on_transform_response_write1(struct query_state *);

static	int on_write_request_read1(struct query_state *);
static	int on_write_request_read2(struct query_state *);
static	int on_negative_write_request_process(struct query_state *);
static	int on_write_request_process(struct query_state *);
static	int on_write_response_write1(struct query_state *);
/*
 * Clears the specified configuration entry (clears the cache for positive
 * and negative entries) and also all multipart entries.
 */
static void
clear_config_entry(struct configuration_entry *config_entry)
{
	size_t i;

	TRACE_IN(clear_config_entry);
	configuration_lock_entry(config_entry, CELT_POSITIVE);
	if (config_entry->positive_cache_entry != NULL)
		transform_cache_entry(
			config_entry->positive_cache_entry,
			CTT_CLEAR);
	configuration_unlock_entry(config_entry, CELT_POSITIVE);

	configuration_lock_entry(config_entry, CELT_NEGATIVE);
	if (config_entry->negative_cache_entry != NULL)
		transform_cache_entry(
			config_entry->negative_cache_entry,
			CTT_CLEAR);
	configuration_unlock_entry(config_entry, CELT_NEGATIVE);

	configuration_lock_entry(config_entry, CELT_MULTIPART);
	for (i = 0; i < config_entry->mp_cache_entries_size; ++i)
		transform_cache_entry(
			config_entry->mp_cache_entries[i],
			CTT_CLEAR);
	configuration_unlock_entry(config_entry, CELT_MULTIPART);

	TRACE_OUT(clear_config_entry);
}

/*
 * Clears the specified configuration entry by deleting only the elements
 * that are owned by the user with the specified eid_str.
 */
static void
clear_config_entry_part(struct configuration_entry *config_entry,
	const char *eid_str, size_t eid_str_length)
{
	cache_entry *start, *finish, *mp_entry;
	TRACE_IN(clear_config_entry_part);
	configuration_lock_entry(config_entry, CELT_POSITIVE);
	if (config_entry->positive_cache_entry != NULL)
		transform_cache_entry_part(
			config_entry->positive_cache_entry,
			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
	configuration_unlock_entry(config_entry, CELT_POSITIVE);

	configuration_lock_entry(config_entry, CELT_NEGATIVE);
	if (config_entry->negative_cache_entry != NULL)
		transform_cache_entry_part(
			config_entry->negative_cache_entry,
			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
	configuration_unlock_entry(config_entry, CELT_NEGATIVE);

	configuration_lock_entry(config_entry, CELT_MULTIPART);
	if (configuration_entry_find_mp_cache_entries(config_entry,
		eid_str, &start, &finish) == 0) {
		for (mp_entry = start; mp_entry != finish; ++mp_entry)
			transform_cache_entry(*mp_entry, CTT_CLEAR);
	}
	configuration_unlock_entry(config_entry, CELT_MULTIPART);

	TRACE_OUT(clear_config_entry_part);
}

/*
 * This function is assigned to the query_state structure on its creation.
 * Its main purpose is to receive credentials from the client.
 */
static int
on_query_startup(struct query_state *qstate)
{
	struct msghdr	cred_hdr;
	struct iovec	iov;
	struct cmsgcred *cred;
	int elem_type;

	union {
		struct cmsghdr	hdr;
		char cred[CMSG_SPACE(sizeof(struct cmsgcred))];
	} cmsg;

	TRACE_IN(on_query_startup);
	assert(qstate != NULL);

	memset(&cred_hdr, 0, sizeof(struct msghdr));
	cred_hdr.msg_iov = &iov;
	cred_hdr.msg_iovlen = 1;
	cred_hdr.msg_control = &cmsg;
	cred_hdr.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));

	memset(&iov, 0, sizeof(struct iovec));
	iov.iov_base = &elem_type;
	iov.iov_len = sizeof(int);

	if (recvmsg(qstate->sockfd, &cred_hdr, 0) == -1) {
		TRACE_OUT(on_query_startup);
		return (-1);
	}

	if (cred_hdr.msg_controllen < CMSG_LEN(sizeof(struct cmsgcred))
		|| cmsg.hdr.cmsg_len < CMSG_LEN(sizeof(struct cmsgcred))
		|| cmsg.hdr.cmsg_level != SOL_SOCKET
		|| cmsg.hdr.cmsg_type != SCM_CREDS) {
		TRACE_OUT(on_query_startup);
		return (-1);
	}

	cred = (struct cmsgcred *)CMSG_DATA(&cmsg);
	qstate->uid = cred->cmcred_uid;
	qstate->gid = cred->cmcred_gid;

#if defined(NS_NSCD_EID_CHECKING) || defined(NS_STRICT_NSCD_EID_CHECKING)
/*
 * This check is probably a bit redundant - per-user cache is always separated
 * by the euid/egid pair
 */
	if (check_query_eids(qstate) != 0) {
#ifdef NS_STRICT_NSCD_EID_CHECKING
		TRACE_OUT(on_query_startup);
		return (-1);
#else
		if ((elem_type != CET_READ_REQUEST) &&
			(elem_type != CET_MP_READ_SESSION_REQUEST) &&
			(elem_type != CET_WRITE_REQUEST) &&
			(elem_type != CET_MP_WRITE_SESSION_REQUEST)) {
			TRACE_OUT(on_query_startup);
			return (-1);
		}
#endif
	}
#endif

	switch (elem_type) {
	case CET_WRITE_REQUEST:
		qstate->process_func = on_write_request_read1;
		break;
	case CET_READ_REQUEST:
		qstate->process_func = on_read_request_read1;
		break;
	case CET_TRANSFORM_REQUEST:
		qstate->process_func = on_transform_request_read1;
		break;
	case CET_MP_WRITE_SESSION_REQUEST:
		qstate->process_func = on_mp_write_session_request_read1;
		break;
	case CET_MP_READ_SESSION_REQUEST:
		qstate->process_func = on_mp_read_session_request_read1;
		break;
	default:
		TRACE_OUT(on_query_startup);
		return (-1);
	}

	qstate->kevent_watermark = 0;
	TRACE_OUT(on_query_startup);
	return (0);
}

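/*
 * For illustration only: a minimal sketch of the client side of the handshake
 * that on_query_startup() expects - the initial element type is sent together
 * with an SCM_CREDS control message so that nscd can learn the caller's
 * uid/gid.  The CET_READ_REQUEST constant is assumed to come from the nscd
 * protocol headers; the real client code may differ in detail.
 */
#if 0
static int
send_query_startup(int sockfd)
{
	struct msghdr	hdr;
	struct iovec	iov;
	union {
		struct cmsghdr	hdr;
		char	space[CMSG_SPACE(sizeof(struct cmsgcred))];
	} cmsg;
	struct cmsghdr	*cmsgp;
	int	elem_type = CET_READ_REQUEST;

	memset(&hdr, 0, sizeof(hdr));
	memset(&cmsg, 0, sizeof(cmsg));
	iov.iov_base = &elem_type;
	iov.iov_len = sizeof(int);
	hdr.msg_iov = &iov;
	hdr.msg_iovlen = 1;
	hdr.msg_control = &cmsg;
	hdr.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));

	cmsgp = CMSG_FIRSTHDR(&hdr);
	cmsgp->cmsg_len = CMSG_LEN(sizeof(struct cmsgcred));
	cmsgp->cmsg_level = SOL_SOCKET;
	cmsgp->cmsg_type = SCM_CREDS;	/* contents are filled in by the kernel */

	return (sendmsg(sockfd, &hdr, 0) == -1 ? -1 : 0);
}
#endif
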
/*
 * on_rw_mapper is used to process multiple read/write requests during
 * one connection session.  It is never called at the start (on query_state
 * creation), as it neither processes multipart requests nor receives
 * credentials.
 */
static int
on_rw_mapper(struct query_state *qstate)
{
	ssize_t	result;
	int	elem_type;

	TRACE_IN(on_rw_mapper);
	if (qstate->kevent_watermark == 0) {
		qstate->kevent_watermark = sizeof(int);
	} else {
		result = qstate->read_func(qstate, &elem_type, sizeof(int));
		if (result != sizeof(int)) {
			TRACE_OUT(on_rw_mapper);
			return (-1);
		}

		switch (elem_type) {
		case CET_WRITE_REQUEST:
			qstate->kevent_watermark = sizeof(size_t);
			qstate->process_func = on_write_request_read1;
			break;
		case CET_READ_REQUEST:
			qstate->kevent_watermark = sizeof(size_t);
			qstate->process_func = on_read_request_read1;
			break;
		default:
			TRACE_OUT(on_rw_mapper);
			return (-1);
		}
	}
	TRACE_OUT(on_rw_mapper);
	return (0);
}

/*
 * The default query_destroy function
 */
static void
on_query_destroy(struct query_state *qstate)
{

	TRACE_IN(on_query_destroy);
	finalize_comm_element(&qstate->response);
	finalize_comm_element(&qstate->request);
	TRACE_OUT(on_query_destroy);
}

/*
 * The functions below are used to process write requests.
 * - on_write_request_read1 and on_write_request_read2 read the request itself
 * - on_write_request_process processes it (if the client asks to cache a
 *   negative result, on_negative_write_request_process is used instead)
 * - on_write_response_write1 sends the response
 */
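/*
 * A sketch of the wire format parsed by the handlers below, seen from a
 * hypothetical client (not part of nscd): the CET_WRITE_REQUEST element type
 * is assumed to have been sent already (see on_query_startup/on_rw_mapper),
 * after which the client sends three size_t fields, then the entry name, the
 * cache key and the data, and finally reads back a single int error code.
 * Short-write handling is omitted for brevity.
 */
#if 0
static int
send_write_request(int fd, const char *entry, const void *key, size_t key_size,
	const void *data, size_t data_size)
{
	size_t	entry_length = strlen(entry);
	int	error_code;

	write(fd, &entry_length, sizeof(size_t));
	write(fd, &key_size, sizeof(size_t));
	write(fd, &data_size, sizeof(size_t));

	write(fd, entry, entry_length);
	write(fd, key, key_size);
	if (data_size != 0)
		write(fd, data, data_size);

	if (read(fd, &error_code, sizeof(int)) != sizeof(int))
		return (-1);
	return (error_code);
}
#endif
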
static int
on_write_request_read1(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	ssize_t	result;

	TRACE_IN(on_write_request_read1);
	if (qstate->kevent_watermark == 0)
		qstate->kevent_watermark = sizeof(size_t) * 3;
	else {
		init_comm_element(&qstate->request, CET_WRITE_REQUEST);
		write_request = get_cache_write_request(&qstate->request);

		result = qstate->read_func(qstate, &write_request->entry_length,
			sizeof(size_t));
		result += qstate->read_func(qstate,
			&write_request->cache_key_size, sizeof(size_t));
		result += qstate->read_func(qstate,
			&write_request->data_size, sizeof(size_t));

		if (result != sizeof(size_t) * 3) {
			TRACE_OUT(on_write_request_read1);
			return (-1);
		}

		if (BUFSIZE_INVALID(write_request->entry_length) ||
			BUFSIZE_INVALID(write_request->cache_key_size) ||
			(BUFSIZE_INVALID(write_request->data_size) &&
			(write_request->data_size != 0))) {
			TRACE_OUT(on_write_request_read1);
			return (-1);
		}

		write_request->entry = calloc(1,
			write_request->entry_length + 1);
		assert(write_request->entry != NULL);

		write_request->cache_key = calloc(1,
			write_request->cache_key_size +
			qstate->eid_str_length);
		assert(write_request->cache_key != NULL);
		memcpy(write_request->cache_key, qstate->eid_str,
			qstate->eid_str_length);

		if (write_request->data_size != 0) {
			write_request->data = calloc(1,
				write_request->data_size);
			assert(write_request->data != NULL);
		}

		qstate->kevent_watermark = write_request->entry_length +
			write_request->cache_key_size +
			write_request->data_size;
		qstate->process_func = on_write_request_read2;
	}

	TRACE_OUT(on_write_request_read1);
	return (0);
}

static int
on_write_request_read2(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	ssize_t	result;

	TRACE_IN(on_write_request_read2);
	write_request = get_cache_write_request(&qstate->request);

	result = qstate->read_func(qstate, write_request->entry,
		write_request->entry_length);
	result += qstate->read_func(qstate, write_request->cache_key +
		qstate->eid_str_length, write_request->cache_key_size);
	if (write_request->data_size != 0)
		result += qstate->read_func(qstate, write_request->data,
			write_request->data_size);

	if (result != (ssize_t)qstate->kevent_watermark) {
		TRACE_OUT(on_write_request_read2);
		return (-1);
	}
	write_request->cache_key_size += qstate->eid_str_length;

	qstate->kevent_watermark = 0;
	if (write_request->data_size != 0)
		qstate->process_func = on_write_request_process;
	else
		qstate->process_func = on_negative_write_request_process;
	TRACE_OUT(on_write_request_read2);
	return (0);
}

static int
on_write_request_process(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	struct cache_write_response	*write_response;
	cache_entry c_entry;

	TRACE_IN(on_write_request_process);
	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
	write_response = get_cache_write_response(&qstate->response);
	write_request = get_cache_write_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, write_request->entry);

	if (qstate->config_entry == NULL) {
		write_response->error_code = ENOENT;

		LOG_ERR_2("write_request", "can't find configuration"
		    " entry '%s'. aborting request", write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		write_response->error_code = EACCES;

		LOG_ERR_2("write_request",
			"configuration entry '%s' is disabled",
			write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->perform_actual_lookups != 0) {
		write_response->error_code = EOPNOTSUPP;

		LOG_ERR_2("write_request",
			"entry '%s' performs lookups by itself: "
			"can't write to it", write_request->entry);
		goto fin;
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->positive_cache_params.cep.entry_name);
	configuration_unlock(s_configuration);
	if (c_entry != NULL) {
		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
		qstate->config_entry->positive_cache_entry = c_entry;
		write_response->error_code = cache_write(c_entry,
			write_request->cache_key,
			write_request->cache_key_size,
			write_request->data,
			write_request->data_size);
		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);

		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));

	} else
		write_response->error_code = -1;

fin:
	qstate->kevent_filter = EVFILT_WRITE;
	qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_write_response_write1;

	TRACE_OUT(on_write_request_process);
	return (0);
}

static int
on_negative_write_request_process(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	struct cache_write_response	*write_response;
	cache_entry c_entry;

	TRACE_IN(on_negative_write_request_process);
	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
	write_response = get_cache_write_response(&qstate->response);
	write_request = get_cache_write_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, write_request->entry);

	if (qstate->config_entry == NULL) {
		write_response->error_code = ENOENT;

		LOG_ERR_2("negative_write_request",
			"can't find configuration"
			" entry '%s'. aborting request", write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		write_response->error_code = EACCES;

		LOG_ERR_2("negative_write_request",
			"configuration entry '%s' is disabled",
			write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->perform_actual_lookups != 0) {
		write_response->error_code = EOPNOTSUPP;

		LOG_ERR_2("negative_write_request",
			"entry '%s' performs lookups by itself: "
			"can't write to it", write_request->entry);
		goto fin;
	} else {
#ifdef NS_NSCD_EID_CHECKING
		if (check_query_eids(qstate) != 0) {
			write_response->error_code = EPERM;
			goto fin;
		}
#endif
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->negative_cache_params.cep.entry_name);
	configuration_unlock(s_configuration);
	if (c_entry != NULL) {
		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
		qstate->config_entry->negative_cache_entry = c_entry;
		write_response->error_code = cache_write(c_entry,
			write_request->cache_key,
			write_request->cache_key_size,
			negative_data,
			sizeof(negative_data));
		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);

		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));
	} else
		write_response->error_code = -1;

fin:
	qstate->kevent_filter = EVFILT_WRITE;
	qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_write_response_write1;

	TRACE_OUT(on_negative_write_request_process);
	return (0);
}

static int
on_write_response_write1(struct query_state *qstate)
{
	struct cache_write_response	*write_response;
	ssize_t	result;

	TRACE_IN(on_write_response_write1);
	write_response = get_cache_write_response(&qstate->response);
	result = qstate->write_func(qstate, &write_response->error_code,
		sizeof(int));
	if (result != sizeof(int)) {
		TRACE_OUT(on_write_response_write1);
		return (-1);
	}

	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	qstate->kevent_watermark = sizeof(int);
	qstate->kevent_filter = EVFILT_READ;
	qstate->process_func = on_rw_mapper;

	TRACE_OUT(on_write_response_write1);
	return (0);
}

/*
 * The functions below are used to process read requests.
 * - on_read_request_read1 and on_read_request_read2 read the request itself
 * - on_read_request_process processes it
 * - on_read_response_write1 and on_read_response_write2 send the response
 */
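/*
 * Illustrative client-side view of the read request protocol handled below
 * (hypothetical helper, not part of nscd): after the CET_READ_REQUEST element
 * type has been sent, the client writes two size_t fields followed by the
 * entry name and the cache key, then reads an int error code and, on success,
 * a size_t data size followed by the data itself.
 */
#if 0
static int
send_read_request(int fd, const char *entry, const void *key, size_t key_size,
	void **data, size_t *data_size)
{
	size_t	entry_length = strlen(entry);
	int	error_code;

	write(fd, &entry_length, sizeof(size_t));
	write(fd, &key_size, sizeof(size_t));
	write(fd, entry, entry_length);
	write(fd, key, key_size);

	if (read(fd, &error_code, sizeof(int)) != sizeof(int))
		return (-1);
	if (error_code != 0)
		return (error_code);

	if (read(fd, data_size, sizeof(size_t)) != sizeof(size_t))
		return (-1);
	*data = malloc(*data_size);
	if (*data == NULL)
		return (-1);
	if (read(fd, *data, *data_size) != (ssize_t)*data_size) {
		free(*data);
		return (-1);
	}
	return (0);
}
#endif
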
static int
on_read_request_read1(struct query_state *qstate)
{
	struct cache_read_request *read_request;
	ssize_t	result;

	TRACE_IN(on_read_request_read1);
	if (qstate->kevent_watermark == 0)
		qstate->kevent_watermark = sizeof(size_t) * 2;
	else {
		init_comm_element(&qstate->request, CET_READ_REQUEST);
		read_request = get_cache_read_request(&qstate->request);

		result = qstate->read_func(qstate,
			&read_request->entry_length, sizeof(size_t));
		result += qstate->read_func(qstate,
			&read_request->cache_key_size, sizeof(size_t));

		if (result != sizeof(size_t) * 2) {
			TRACE_OUT(on_read_request_read1);
			return (-1);
		}

		if (BUFSIZE_INVALID(read_request->entry_length) ||
			BUFSIZE_INVALID(read_request->cache_key_size)) {
			TRACE_OUT(on_read_request_read1);
			return (-1);
		}

		read_request->entry = calloc(1,
			read_request->entry_length + 1);
		assert(read_request->entry != NULL);

		read_request->cache_key = calloc(1,
			read_request->cache_key_size +
			qstate->eid_str_length);
		assert(read_request->cache_key != NULL);
		memcpy(read_request->cache_key, qstate->eid_str,
			qstate->eid_str_length);

		qstate->kevent_watermark = read_request->entry_length +
			read_request->cache_key_size;
		qstate->process_func = on_read_request_read2;
	}

	TRACE_OUT(on_read_request_read1);
	return (0);
}

static int
on_read_request_read2(struct query_state *qstate)
{
	struct cache_read_request	*read_request;
	ssize_t	result;

	TRACE_IN(on_read_request_read2);
	read_request = get_cache_read_request(&qstate->request);

	result = qstate->read_func(qstate, read_request->entry,
		read_request->entry_length);
	result += qstate->read_func(qstate,
		read_request->cache_key + qstate->eid_str_length,
		read_request->cache_key_size);

	if (result != (ssize_t)qstate->kevent_watermark) {
		TRACE_OUT(on_read_request_read2);
		return (-1);
	}
	read_request->cache_key_size += qstate->eid_str_length;

	qstate->kevent_watermark = 0;
	qstate->process_func = on_read_request_process;

	TRACE_OUT(on_read_request_read2);
	return (0);
}

static int
on_read_request_process(struct query_state *qstate)
{
	struct cache_read_request *read_request;
	struct cache_read_response *read_response;
	cache_entry	c_entry, neg_c_entry;

	struct agent	*lookup_agent;
	struct common_agent *c_agent;
	int res;

	TRACE_IN(on_read_request_process);
	init_comm_element(&qstate->response, CET_READ_RESPONSE);
	read_response = get_cache_read_response(&qstate->response);
	read_request = get_cache_read_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, read_request->entry);
	if (qstate->config_entry == NULL) {
		read_response->error_code = ENOENT;

		LOG_ERR_2("read_request",
			"can't find configuration "
			"entry '%s'. aborting request", read_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		read_response->error_code = EACCES;

		LOG_ERR_2("read_request",
			"configuration entry '%s' is disabled",
			read_request->entry);
		goto fin;
	}

	/*
	 * if we perform lookups by ourselves, then we don't need to separate
	 * cache entries by euid and egid
	 */
	if (qstate->config_entry->perform_actual_lookups != 0)
		memset(read_request->cache_key, 0, qstate->eid_str_length);
	else {
#ifdef NS_NSCD_EID_CHECKING
		if (check_query_eids(qstate) != 0) {
		/* if the lookup is not self-performing, we check the client's euid/egid */
			read_response->error_code = EPERM;
			goto fin;
		}
#endif
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->positive_cache_params.cep.entry_name);
	neg_c_entry = find_cache_entry(s_cache,
		qstate->config_entry->negative_cache_params.cep.entry_name);
	configuration_unlock(s_configuration);
	if ((c_entry != NULL) && (neg_c_entry != NULL)) {
		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
		qstate->config_entry->positive_cache_entry = c_entry;
		read_response->error_code = cache_read(c_entry,
			read_request->cache_key,
			read_request->cache_key_size, NULL,
			&read_response->data_size);

		if (read_response->error_code == -2) {
			read_response->data = malloc(
				read_response->data_size);
			assert(read_response->data != NULL);
			read_response->error_code = cache_read(c_entry,
				read_request->cache_key,
				read_request->cache_key_size,
				read_response->data,
				&read_response->data_size);
		}
		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);

		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
		qstate->config_entry->negative_cache_entry = neg_c_entry;
		if (read_response->error_code == -1) {
			read_response->error_code = cache_read(neg_c_entry,
				read_request->cache_key,
				read_request->cache_key_size, NULL,
				&read_response->data_size);

			if (read_response->error_code == -2) {
				read_response->data = malloc(
					read_response->data_size);
				assert(read_response->data != NULL);
				read_response->error_code = cache_read(neg_c_entry,
					read_request->cache_key,
					read_request->cache_key_size,
					read_response->data,
					&read_response->data_size);
			}
		}
		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);

		if ((read_response->error_code == -1) &&
			(qstate->config_entry->perform_actual_lookups != 0)) {
			free(read_response->data);
			read_response->data = NULL;
			read_response->data_size = 0;

			lookup_agent = find_agent(s_agent_table,
				read_request->entry, COMMON_AGENT);

			if ((lookup_agent != NULL) &&
			(lookup_agent->type == COMMON_AGENT)) {
				c_agent = (struct common_agent *)lookup_agent;
				res = c_agent->lookup_func(
					read_request->cache_key +
						qstate->eid_str_length,
					read_request->cache_key_size -
						qstate->eid_str_length,
					&read_response->data,
					&read_response->data_size);

				if (res == NS_SUCCESS) {
					read_response->error_code = 0;
					configuration_lock_entry(
						qstate->config_entry,
						CELT_POSITIVE);
					cache_write(c_entry,
						read_request->cache_key,
						read_request->cache_key_size,
						read_response->data,
						read_response->data_size);
					configuration_unlock_entry(
						qstate->config_entry,
						CELT_POSITIVE);
				} else if ((res == NS_NOTFOUND) ||
					  (res == NS_RETURN)) {
					configuration_lock_entry(
						  qstate->config_entry,
						  CELT_NEGATIVE);
					cache_write(neg_c_entry,
						read_request->cache_key,
						read_request->cache_key_size,
						negative_data,
						sizeof(negative_data));
					configuration_unlock_entry(
						  qstate->config_entry,
						  CELT_NEGATIVE);

					read_response->error_code = 0;
					read_response->data = NULL;
					read_response->data_size = 0;
				}
			}
		}

		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));
	} else
		read_response->error_code = -1;

fin:
	qstate->kevent_filter = EVFILT_WRITE;
	if (read_response->error_code == 0)
		qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
	else
		qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_read_response_write1;

	TRACE_OUT(on_read_request_process);
	return (0);
}

static int
on_read_response_write1(struct query_state *qstate)
{
	struct cache_read_response	*read_response;
	ssize_t	result;

	TRACE_IN(on_read_response_write1);
	read_response = get_cache_read_response(&qstate->response);

	result = qstate->write_func(qstate, &read_response->error_code,
		sizeof(int));

	if (read_response->error_code == 0) {
		result += qstate->write_func(qstate, &read_response->data_size,
			sizeof(size_t));
		if (result != (ssize_t)qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write1);
			return (-1);
		}

		qstate->kevent_watermark = read_response->data_size;
		qstate->process_func = on_read_response_write2;
	} else {
		if (result != (ssize_t)qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write1);
			return (-1);
		}

		qstate->kevent_watermark = 0;
		qstate->process_func = NULL;
	}

	TRACE_OUT(on_read_response_write1);
	return (0);
}

static int
on_read_response_write2(struct query_state *qstate)
{
	struct cache_read_response	*read_response;
	ssize_t	result;

	TRACE_IN(on_read_response_write2);
	read_response = get_cache_read_response(&qstate->response);
	if (read_response->data_size > 0) {
		result = qstate->write_func(qstate, read_response->data,
			read_response->data_size);
		if (result != (ssize_t)qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write2);
			return (-1);
		}
	}

	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	qstate->kevent_watermark = sizeof(int);
	qstate->kevent_filter = EVFILT_READ;
	qstate->process_func = on_rw_mapper;
	TRACE_OUT(on_read_response_write2);
	return (0);
}

/*
 * The functions below are used to process transform requests.
 * - on_transform_request_read1 and on_transform_request_read2 read the
 *   request itself
 * - on_transform_request_process processes it
 * - on_transform_response_write1 sends the response
 */
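/*
 * Sketch of the corresponding client side (hypothetical, for illustration):
 * after the CET_TRANSFORM_REQUEST element type has been sent, a transform
 * request consists of a size_t entry name length and an int transformation
 * type (TT_USER or TT_ALL), optionally followed by the entry name; the reply
 * is a single int error code.
 */
#if 0
static int
send_transform_request(int fd, const char *entry, int transformation_type)
{
	size_t	entry_length = (entry == NULL) ? 0 : strlen(entry);
	int	error_code;

	write(fd, &entry_length, sizeof(size_t));
	write(fd, &transformation_type, sizeof(int));
	if (entry_length != 0)
		write(fd, entry, entry_length);

	if (read(fd, &error_code, sizeof(int)) != sizeof(int))
		return (-1);
	return (error_code);
}
#endif
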
static int
on_transform_request_read1(struct query_state *qstate)
{
	struct cache_transform_request *transform_request;
	ssize_t	result;

	TRACE_IN(on_transform_request_read1);
	if (qstate->kevent_watermark == 0)
		qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
	else {
		init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST);
		transform_request =
			get_cache_transform_request(&qstate->request);

		result = qstate->read_func(qstate,
			&transform_request->entry_length, sizeof(size_t));
		result += qstate->read_func(qstate,
			&transform_request->transformation_type, sizeof(int));

		if (result != sizeof(size_t) + sizeof(int)) {
			TRACE_OUT(on_transform_request_read1);
			return (-1);
		}

		if ((transform_request->transformation_type != TT_USER) &&
		    (transform_request->transformation_type != TT_ALL)) {
			TRACE_OUT(on_transform_request_read1);
			return (-1);
		}

		if (transform_request->entry_length != 0) {
			if (BUFSIZE_INVALID(transform_request->entry_length)) {
				TRACE_OUT(on_transform_request_read1);
				return (-1);
			}

			transform_request->entry = calloc(1,
				transform_request->entry_length + 1);
			assert(transform_request->entry != NULL);

			qstate->process_func = on_transform_request_read2;
		} else
			qstate->process_func = on_transform_request_process;

		qstate->kevent_watermark = transform_request->entry_length;
	}

	TRACE_OUT(on_transform_request_read1);
	return (0);
}

static int
on_transform_request_read2(struct query_state *qstate)
{
	struct cache_transform_request	*transform_request;
	ssize_t	result;

	TRACE_IN(on_transform_request_read2);
	transform_request = get_cache_transform_request(&qstate->request);

	result = qstate->read_func(qstate, transform_request->entry,
		transform_request->entry_length);

	if (result != (ssize_t)qstate->kevent_watermark) {
		TRACE_OUT(on_transform_request_read2);
		return (-1);
	}

	qstate->kevent_watermark = 0;
	qstate->process_func = on_transform_request_process;

	TRACE_OUT(on_transform_request_read2);
	return (0);
}

static int
on_transform_request_process(struct query_state *qstate)
{
	struct cache_transform_request *transform_request;
	struct cache_transform_response *transform_response;
	struct configuration_entry *config_entry;
	size_t	i, size;

	TRACE_IN(on_transform_request_process);
	init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE);
	transform_response = get_cache_transform_response(&qstate->response);
	transform_request = get_cache_transform_request(&qstate->request);

	switch (transform_request->transformation_type) {
	case TT_USER:
		if (transform_request->entry == NULL) {
			size = configuration_get_entries_size(s_configuration);
			for (i = 0; i < size; ++i) {
			    config_entry = configuration_get_entry(
				s_configuration, i);

			    if (config_entry->perform_actual_lookups == 0)
			    	clear_config_entry_part(config_entry,
				    qstate->eid_str, qstate->eid_str_length);
			}
		} else {
			qstate->config_entry = configuration_find_entry(
				s_configuration, transform_request->entry);

			if (qstate->config_entry == NULL) {
				LOG_ERR_2("transform_request",
					"can't find configuration"
					" entry '%s'. aborting request",
					transform_request->entry);
				transform_response->error_code = -1;
				goto fin;
			}

			if (qstate->config_entry->perform_actual_lookups != 0) {
				LOG_ERR_2("transform_request",
					"can't transform the cache entry %s"
					", because it is used for actual lookups",
					transform_request->entry);
				transform_response->error_code = -1;
				goto fin;
			}

			clear_config_entry_part(qstate->config_entry,
				qstate->eid_str, qstate->eid_str_length);
		}
		break;
	case TT_ALL:
		if (qstate->euid != 0)
			transform_response->error_code = -1;
		else {
			if (transform_request->entry == NULL) {
				size = configuration_get_entries_size(
					s_configuration);
				for (i = 0; i < size; ++i) {
				    clear_config_entry(
					configuration_get_entry(
						s_configuration, i));
				}
			} else {
				qstate->config_entry = configuration_find_entry(
					s_configuration,
					transform_request->entry);

				if (qstate->config_entry == NULL) {
					LOG_ERR_2("transform_request",
						"can't find configuration"
						" entry '%s'. aborting request",
						transform_request->entry);
					transform_response->error_code = -1;
					goto fin;
				}

				clear_config_entry(qstate->config_entry);
			}
		}
		break;
	default:
		transform_response->error_code = -1;
	}

fin:
	qstate->kevent_watermark = 0;
	qstate->process_func = on_transform_response_write1;
	TRACE_OUT(on_transform_request_process);
	return (0);
}

static int
on_transform_response_write1(struct query_state *qstate)
{
	struct cache_transform_response	*transform_response;
	ssize_t	result;

	TRACE_IN(on_transform_response_write1);
	transform_response = get_cache_transform_response(&qstate->response);
	result = qstate->write_func(qstate, &transform_response->error_code,
		sizeof(int));
	if (result != sizeof(int)) {
		TRACE_OUT(on_transform_response_write1);
		return (-1);
	}

	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	qstate->kevent_watermark = 0;
	qstate->process_func = NULL;
	TRACE_OUT(on_transform_response_write1);
	return (0);
}

/*
 * Checks if the client's euid and egid do not differ from its uid and gid.
 * Returns 0 on success.
 */
int
check_query_eids(struct query_state *qstate)
{

	return ((qstate->uid != qstate->euid) || (qstate->gid != qstate->egid) ? -1 : 0);
}

/*
 * Uses the qstate fields to process an "alternate" read - when the buffer is
 * too large to be received during one socket read operation
 */
ssize_t
query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes)
{
	size_t remaining;
	ssize_t	result;

	TRACE_IN(query_io_buffer_read);
	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
		return (-1);

	assert(qstate->io_buffer_p <=
		qstate->io_buffer + qstate->io_buffer_size);
	remaining = qstate->io_buffer + qstate->io_buffer_size -
		qstate->io_buffer_p;
	if (nbytes < remaining)
		result = nbytes;
	else
		result = remaining;

	memcpy(buf, qstate->io_buffer_p, result);
	qstate->io_buffer_p += result;

	if (remaining == 0) {
		free(qstate->io_buffer);
		qstate->io_buffer = NULL;

		qstate->write_func = query_socket_write;
		qstate->read_func = query_socket_read;
	}

	TRACE_OUT(query_io_buffer_read);
	return (result);
}

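/*
 * Hedged sketch (not part of nscd) of how a caller might switch a query_state
 * into the "alternate" buffered-read mode served by query_io_buffer_read()
 * above: the whole payload is read from the socket into io_buffer first, and
 * read_func is repointed so the process_func handlers consume it from memory.
 */
#if 0
static int
switch_to_buffered_read(struct query_state *qstate, size_t total_size)
{
	qstate->io_buffer = malloc(total_size);
	if (qstate->io_buffer == NULL)
		return (-1);
	qstate->io_buffer_size = total_size;
	qstate->io_buffer_p = qstate->io_buffer;

	if (query_socket_read(qstate, qstate->io_buffer,
		total_size) != (ssize_t)total_size) {
		free(qstate->io_buffer);
		qstate->io_buffer = NULL;
		return (-1);
	}

	qstate->read_func = query_io_buffer_read;
	return (0);
}
#endif
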
/*
 * Uses the qstate fields to process an "alternate" write - when the buffer is
 * too large to be sent during one socket write operation
 */
ssize_t
query_io_buffer_write(struct query_state *qstate, const void *buf,
	size_t nbytes)
{
	size_t remaining;
	ssize_t	result;

	TRACE_IN(query_io_buffer_write);
	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
		return (-1);

	assert(qstate->io_buffer_p <=
		qstate->io_buffer + qstate->io_buffer_size);
	remaining = qstate->io_buffer + qstate->io_buffer_size -
		qstate->io_buffer_p;
	if (nbytes < remaining)
		result = nbytes;
	else
		result = remaining;

	memcpy(qstate->io_buffer_p, buf, result);
	qstate->io_buffer_p += result;

	if (remaining == 0) {
		qstate->use_alternate_io = 1;
		qstate->io_buffer_p = qstate->io_buffer;

		qstate->write_func = query_socket_write;
		qstate->read_func = query_socket_read;
	}

	TRACE_OUT(query_io_buffer_write);
	return (result);
}

/*
 * The default "read" function, which reads data directly from socket
 */
ssize_t
query_socket_read(struct query_state *qstate, void *buf, size_t nbytes)
{
	ssize_t	result;

	TRACE_IN(query_socket_read);
	if (qstate->socket_failed != 0) {
		TRACE_OUT(query_socket_read);
		return (-1);
	}

	result = read(qstate->sockfd, buf, nbytes);
	if (result < 0 || (size_t)result < nbytes)
		qstate->socket_failed = 1;

	TRACE_OUT(query_socket_read);
	return (result);
}

/*
 * The default "write" function, which writes data directly to socket
 */
ssize_t
query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes)
{
	ssize_t	result;

	TRACE_IN(query_socket_write);
	if (qstate->socket_failed != 0) {
		TRACE_OUT(query_socket_write);
		return (-1);
	}

	result = write(qstate->sockfd, buf, nbytes);
	if (result < 0 || (size_t)result < nbytes)
		qstate->socket_failed = 1;

	TRACE_OUT(query_socket_write);
	return (result);
}

/*
 * Initializes the query_state structure by filling it with the default values.
 */
struct query_state *
init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid)
{
	struct query_state	*retval;

	TRACE_IN(init_query_state);
	retval = calloc(1, sizeof(*retval));
	assert(retval != NULL);

	retval->sockfd = sockfd;
	retval->kevent_filter = EVFILT_READ;
	retval->kevent_watermark = kevent_watermark;

	retval->euid = euid;
	retval->egid = egid;
	retval->uid = retval->gid = -1;

	if (asprintf(&retval->eid_str, "%d_%d_", retval->euid,
		retval->egid) == -1) {
		free(retval);
		return (NULL);
	}
	retval->eid_str_length = strlen(retval->eid_str);

	init_comm_element(&retval->request, CET_UNDEFINED);
	init_comm_element(&retval->response, CET_UNDEFINED);
	retval->process_func = on_query_startup;
	retval->destroy_func = on_query_destroy;

	retval->write_func = query_socket_write;
	retval->read_func = query_socket_read;

	get_time_func(&retval->creation_time);
	retval->timeout.tv_sec = s_configuration->query_timeout;
	retval->timeout.tv_usec = 0;

	TRACE_OUT(init_query_state);
	return (retval);
}

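/*
 * Hypothetical usage sketch (the real call site lives elsewhere in nscd and
 * may differ): a freshly accepted connection is wrapped into a query_state
 * whose first expected read is the int-sized element type consumed by
 * on_query_startup().
 */
#if 0
	struct query_state *qstate;
	uid_t peer_euid;
	gid_t peer_egid;

	if (getpeereid(connfd, &peer_euid, &peer_egid) != 0)
		close(connfd);
	else {
		qstate = init_query_state(connfd, sizeof(int), peer_euid,
			peer_egid);
		if (qstate == NULL)
			close(connfd);
	}
#endif
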
void
destroy_query_state(struct query_state *qstate)
{

	TRACE_IN(destroy_query_state);
	if (qstate->eid_str != NULL)
	    free(qstate->eid_str);

	if (qstate->io_buffer != NULL)
		free(qstate->io_buffer);

	qstate->destroy_func(qstate);
	free(qstate);
	TRACE_OUT(destroy_query_state);
}