xref: /freebsd/usr.sbin/nscd/query.c (revision 35a04710d7286aa9538917fd7f8e417dbee95b82)
1 /*-
2  * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/types.h>
32 #include <sys/socket.h>
33 #include <sys/time.h>
34 #include <sys/event.h>
35 #include <assert.h>
36 #include <errno.h>
37 #include <nsswitch.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include "config.h"
42 #include "debug.h"
43 #include "query.h"
44 #include "log.h"
45 #include "mp_ws_query.h"
46 #include "mp_rs_query.h"
47 #include "singletons.h"
48 
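/*
 * Placeholder data written to the cache for negative (failed) lookups.
 */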
49 static const char negative_data[1] = { 0 };
50 
51 extern	void get_time_func(struct timeval *);
52 
53 static 	void clear_config_entry(struct configuration_entry *);
54 static 	void clear_config_entry_part(struct configuration_entry *,
55 	const char *, size_t);
56 
57 static	int on_query_startup(struct query_state *);
58 static	void on_query_destroy(struct query_state *);
59 
60 static	int on_read_request_read1(struct query_state *);
61 static	int on_read_request_read2(struct query_state *);
62 static	int on_read_request_process(struct query_state *);
63 static	int on_read_response_write1(struct query_state *);
64 static	int on_read_response_write2(struct query_state *);
65 
66 static	int on_rw_mapper(struct query_state *);
67 
68 static	int on_transform_request_read1(struct query_state *);
69 static	int on_transform_request_read2(struct query_state *);
70 static	int on_transform_request_process(struct query_state *);
71 static	int on_transform_response_write1(struct query_state *);
72 
73 static	int on_write_request_read1(struct query_state *);
74 static	int on_write_request_read2(struct query_state *);
75 static	int on_negative_write_request_process(struct query_state *);
76 static	int on_write_request_process(struct query_state *);
77 static	int on_write_response_write1(struct query_state *);
78 
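/*
 * Every handler below implements one step of a per-connection state machine.
 * A handler reads or writes its portion of data and then arranges for the
 * next step by setting qstate->process_func (the next handler),
 * qstate->kevent_watermark (how much data is expected before that handler
 * should run) and qstate->kevent_filter (EVFILT_READ or EVFILT_WRITE); the
 * kqueue-based main loop outside this file invokes the handlers accordingly.
 */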
79 /*
80  * Clears the specified configuration entry (clears the cache for positive and
81  * negative entries) and also for all multipart entries.
82  */
83 static void
84 clear_config_entry(struct configuration_entry *config_entry)
85 {
86 	size_t i;
87 
88 	TRACE_IN(clear_config_entry);
89 	configuration_lock_entry(config_entry, CELT_POSITIVE);
90 	if (config_entry->positive_cache_entry != NULL)
91 		transform_cache_entry(
92 			config_entry->positive_cache_entry,
93 			CTT_CLEAR);
94 	configuration_unlock_entry(config_entry, CELT_POSITIVE);
95 
96 	configuration_lock_entry(config_entry, CELT_NEGATIVE);
97 	if (config_entry->negative_cache_entry != NULL)
98 		transform_cache_entry(
99 			config_entry->negative_cache_entry,
100 			CTT_CLEAR);
101 	configuration_unlock_entry(config_entry, CELT_NEGATIVE);
102 
103 	configuration_lock_entry(config_entry, CELT_MULTIPART);
104 	for (i = 0; i < config_entry->mp_cache_entries_size; ++i)
105 		transform_cache_entry(
106 			config_entry->mp_cache_entries[i],
107 			CTT_CLEAR);
108 	configuration_unlock_entry(config_entry, CELT_MULTIPART);
109 
110 	TRACE_OUT(clear_config_entry);
111 }
112 
113 /*
114  * Clears the specified configuration entry by deleting only the elements
115  * that are owned by the user with the specified eid_str.
116  */
117 static void
118 clear_config_entry_part(struct configuration_entry *config_entry,
119 	const char *eid_str, size_t eid_str_length)
120 {
121 	cache_entry *start, *finish, *mp_entry;
122 	TRACE_IN(clear_config_entry_part);
123 	configuration_lock_entry(config_entry, CELT_POSITIVE);
124 	if (config_entry->positive_cache_entry != NULL)
125 		transform_cache_entry_part(
126 			config_entry->positive_cache_entry,
127 			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
128 	configuration_unlock_entry(config_entry, CELT_POSITIVE);
129 
130 	configuration_lock_entry(config_entry, CELT_NEGATIVE);
131 	if (config_entry->negative_cache_entry != NULL)
132 		transform_cache_entry_part(
133 			config_entry->negative_cache_entry,
134 			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
135 	configuration_unlock_entry(config_entry, CELT_NEGATIVE);
136 
137 	configuration_lock_entry(config_entry, CELT_MULTIPART);
138 	if (configuration_entry_find_mp_cache_entries(config_entry,
139 		eid_str, &start, &finish) == 0) {
140 		for (mp_entry = start; mp_entry != finish; ++mp_entry)
141 			transform_cache_entry(*mp_entry, CTT_CLEAR);
142 	}
143 	configuration_unlock_entry(config_entry, CELT_MULTIPART);
144 
145 	TRACE_OUT(clear_config_entry_part);
146 }
147 
148 /*
149  * This function is assigned to the query_state structure on its creation.
150  * Its main purpose is to receive credentials from the client.
151  */
152 static int
153 on_query_startup(struct query_state *qstate)
154 {
155 	struct msghdr	cred_hdr;
156 	struct iovec	iov;
157 	struct cmsgcred *cred;
158 	int elem_type;
159 
160 	struct {
161 		struct cmsghdr	hdr;
162 		char cred[CMSG_SPACE(sizeof(struct cmsgcred))];
163 	} cmsg;
164 
165 	TRACE_IN(on_query_startup);
166 	assert(qstate != NULL);
167 
168 	memset(&cred_hdr, 0, sizeof(struct msghdr));
169 	cred_hdr.msg_iov = &iov;
170 	cred_hdr.msg_iovlen = 1;
171 	cred_hdr.msg_control = (caddr_t)&cmsg;
172 	cred_hdr.msg_controllen = CMSG_LEN(sizeof(struct cmsgcred));
173 
174 	memset(&iov, 0, sizeof(struct iovec));
175 	iov.iov_base = &elem_type;
176 	iov.iov_len = sizeof(int);
177 
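	/*
	 * Receive the request type (an int) along with the client's
	 * credentials, which arrive as an SCM_CREDS control message.
	 */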
178 	if (recvmsg(qstate->sockfd, &cred_hdr, 0) == -1) {
179 		TRACE_OUT(on_query_startup);
180 		return (-1);
181 	}
182 
183 	if (cmsg.hdr.cmsg_len < CMSG_LEN(sizeof(struct cmsgcred))
184 		|| cmsg.hdr.cmsg_level != SOL_SOCKET
185 		|| cmsg.hdr.cmsg_type != SCM_CREDS) {
186 		TRACE_OUT(on_query_startup);
187 		return (-1);
188 	}
189 
190 	cred = (struct cmsgcred *)CMSG_DATA(&cmsg);
191 	qstate->uid = cred->cmcred_uid;
192 	qstate->gid = cred->cmcred_gid;
193 
194 #if defined(NS_NSCD_EID_CHECKING) || defined(NS_STRICT_NSCD_EID_CHECKING)
195 /*
196  * This check is probably a bit redundant - the per-user cache is always
197  * separated by the euid/egid pair.
198  */
199 	if (check_query_eids(qstate) != 0) {
200 #ifdef NS_STRICT_NSCD_EID_CHECKING
201 		TRACE_OUT(on_query_startup);
202 		return (-1);
203 #else
204 		if ((elem_type != CET_READ_REQUEST) &&
205 			(elem_type != CET_MP_READ_SESSION_REQUEST) &&
206 			(elem_type != CET_WRITE_REQUEST) &&
207 			(elem_type != CET_MP_WRITE_SESSION_REQUEST)) {
208 			TRACE_OUT(on_query_startup);
209 			return (-1);
210 		}
211 #endif
212 	}
213 #endif
214 
215 	switch (elem_type) {
216 	case CET_WRITE_REQUEST:
217 		qstate->process_func = on_write_request_read1;
218 		break;
219 	case CET_READ_REQUEST:
220 		qstate->process_func = on_read_request_read1;
221 		break;
222 	case CET_TRANSFORM_REQUEST:
223 		qstate->process_func = on_transform_request_read1;
224 		break;
225 	case CET_MP_WRITE_SESSION_REQUEST:
226 		qstate->process_func = on_mp_write_session_request_read1;
227 		break;
228 	case CET_MP_READ_SESSION_REQUEST:
229 		qstate->process_func = on_mp_read_session_request_read1;
230 		break;
231 	default:
232 		TRACE_OUT(on_query_startup);
233 		return (-1);
234 	}
235 
236 	qstate->kevent_watermark = 0;
237 	TRACE_OUT(on_query_startup);
238 	return (0);
239 }
240 
241 /*
242  * on_rw_mapper is used to process multiple read/write requests during
243  * one connection session. It is never called at the beginning (on query_state
244  * creation), as it does not process multipart requests and does not
245  * receive credentials.
246  */
247 static int
248 on_rw_mapper(struct query_state *qstate)
249 {
250 	ssize_t	result;
251 	int	elem_type;
252 
253 	TRACE_IN(on_rw_mapper);
254 	if (qstate->kevent_watermark == 0) {
255 		qstate->kevent_watermark = sizeof(int);
256 	} else {
257 		result = qstate->read_func(qstate, &elem_type, sizeof(int));
258 		if (result != sizeof(int)) {
259 			TRACE_OUT(on_rw_mapper);
260 			return (-1);
261 		}
262 
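		/*
		 * Only plain read and write requests may follow on an
		 * already established connection; anything else is an error.
		 */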
263 		switch (elem_type) {
264 		case CET_WRITE_REQUEST:
265 			qstate->kevent_watermark = sizeof(size_t);
266 			qstate->process_func = on_write_request_read1;
267 		break;
268 		case CET_READ_REQUEST:
269 			qstate->kevent_watermark = sizeof(size_t);
270 			qstate->process_func = on_read_request_read1;
271 		break;
272 		default:
273 			TRACE_OUT(on_rw_mapper);
274 			return (-1);
275 		break;
276 		}
277 	}
278 	TRACE_OUT(on_rw_mapper);
279 	return (0);
280 }
281 
282 /*
283  * The default query_destroy function
284  */
285 static void
286 on_query_destroy(struct query_state *qstate)
287 {
288 
289 	TRACE_IN(on_query_destroy);
290 	finalize_comm_element(&qstate->response);
291 	finalize_comm_element(&qstate->request);
292 	TRACE_OUT(on_query_destroy);
293 }
294 
295 /*
296  * The functions below are used to process write requests.
297  * - on_write_request_read1 and on_write_request_read2 read the request itself
298  * - on_write_request_process processes it (if the client requests to
299  *    cache a negative result, on_negative_write_request_process is used)
300  * - on_write_response_write1 sends the response
301  */
302 static int
303 on_write_request_read1(struct query_state *qstate)
304 {
305 	struct cache_write_request	*write_request;
306 	ssize_t	result;
307 
308 	TRACE_IN(on_write_request_read1);
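	/*
	 * On the first pass only the watermark for the three size_t header
	 * fields is set; the header itself is read on the next invocation.
	 */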
309 	if (qstate->kevent_watermark == 0)
310 		qstate->kevent_watermark = sizeof(size_t) * 3;
311 	else {
312 		init_comm_element(&qstate->request, CET_WRITE_REQUEST);
313 		write_request = get_cache_write_request(&qstate->request);
314 
315 		result = qstate->read_func(qstate, &write_request->entry_length,
316 	    		sizeof(size_t));
317 		result += qstate->read_func(qstate,
318 	    		&write_request->cache_key_size, sizeof(size_t));
319 		result += qstate->read_func(qstate,
320 	    		&write_request->data_size, sizeof(size_t));
321 
322 		if (result != sizeof(size_t) * 3) {
323 			TRACE_OUT(on_write_request_read1);
324 			return (-1);
325 		}
326 
327 		if (BUFSIZE_INVALID(write_request->entry_length) ||
328 			BUFSIZE_INVALID(write_request->cache_key_size) ||
329 			(BUFSIZE_INVALID(write_request->data_size) &&
330 			(write_request->data_size != 0))) {
331 			TRACE_OUT(on_write_request_read1);
332 			return (-1);
333 		}
334 
335 		write_request->entry = (char *)malloc(
336 			write_request->entry_length + 1);
337 		assert(write_request->entry != NULL);
338 		memset(write_request->entry, 0,
339 			write_request->entry_length + 1);
340 
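		/*
		 * The cache key is prefixed with the client's "euid_egid_"
		 * string so that entries of different users do not collide.
		 */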
341 		write_request->cache_key = (char *)malloc(
342 			write_request->cache_key_size +
343 			qstate->eid_str_length);
344 		assert(write_request->cache_key != NULL);
345 		memcpy(write_request->cache_key, qstate->eid_str,
346 			qstate->eid_str_length);
347 		memset(write_request->cache_key + qstate->eid_str_length, 0,
348 			write_request->cache_key_size);
349 
350 		if (write_request->data_size != 0) {
351 			write_request->data = (char *)malloc(
352 				write_request->data_size);
353 			assert(write_request->data != NULL);
354 			memset(write_request->data, 0,
355 				write_request->data_size);
356 		}
357 
358 		qstate->kevent_watermark = write_request->entry_length +
359 			write_request->cache_key_size +
360 			write_request->data_size;
361 		qstate->process_func = on_write_request_read2;
362 	}
363 
364 	TRACE_OUT(on_write_request_read1);
365 	return (0);
366 }
367 
368 static int
369 on_write_request_read2(struct query_state *qstate)
370 {
371 	struct cache_write_request	*write_request;
372 	ssize_t	result;
373 
374 	TRACE_IN(on_write_request_read2);
375 	write_request = get_cache_write_request(&qstate->request);
376 
377 	result = qstate->read_func(qstate, write_request->entry,
378 		write_request->entry_length);
379 	result += qstate->read_func(qstate, write_request->cache_key +
380 		qstate->eid_str_length, write_request->cache_key_size);
381 	if (write_request->data_size != 0)
382 		result += qstate->read_func(qstate, write_request->data,
383 			write_request->data_size);
384 
385 	if (result != qstate->kevent_watermark) {
386 		TRACE_OUT(on_write_request_read2);
387 		return (-1);
388 	}
389 	write_request->cache_key_size += qstate->eid_str_length;
390 
391 	qstate->kevent_watermark = 0;
392 	if (write_request->data_size != 0)
393 		qstate->process_func = on_write_request_process;
394 	else
395 	    	qstate->process_func = on_negative_write_request_process;
396 	TRACE_OUT(on_write_request_read2);
397 	return (0);
398 }
399 
400 static	int
401 on_write_request_process(struct query_state *qstate)
402 {
403 	struct cache_write_request	*write_request;
404 	struct cache_write_response	*write_response;
405 	cache_entry c_entry;
406 
407 	TRACE_IN(on_write_request_process);
408 	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
409 	write_response = get_cache_write_response(&qstate->response);
410 	write_request = get_cache_write_request(&qstate->request);
411 
412 	qstate->config_entry = configuration_find_entry(
413 		s_configuration, write_request->entry);
414 
415 	if (qstate->config_entry == NULL) {
416 		write_response->error_code = ENOENT;
417 
418 		LOG_ERR_2("write_request", "can't find configuration"
419 		    " entry '%s'. aborting request", write_request->entry);
420 		goto fin;
421 	}
422 
423 	if (qstate->config_entry->enabled == 0) {
424 		write_response->error_code = EACCES;
425 
426 		LOG_ERR_2("write_request",
427 			"configuration entry '%s' is disabled",
428 			write_request->entry);
429 		goto fin;
430 	}
431 
432 	if (qstate->config_entry->perform_actual_lookups != 0) {
433 		write_response->error_code = EOPNOTSUPP;
434 
435 		LOG_ERR_2("write_request",
436 			"entry '%s' performs lookups by itself: "
437 			"can't write to it", write_request->entry);
438 		goto fin;
439 	}
440 
441 	configuration_lock_rdlock(s_configuration);
442 	c_entry = find_cache_entry(s_cache,
443     		qstate->config_entry->positive_cache_params.entry_name);
444 	configuration_unlock(s_configuration);
445 	if (c_entry != NULL) {
446 		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
447 		qstate->config_entry->positive_cache_entry = c_entry;
448 		write_response->error_code = cache_write(c_entry,
449 			write_request->cache_key,
450 	    		write_request->cache_key_size,
451 	    		write_request->data,
452 			write_request->data_size);
453 		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
454 
455 		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
456 		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
457 			memcpy(&qstate->timeout,
458 				&qstate->config_entry->common_query_timeout,
459 				sizeof(struct timeval));
460 
461 	} else
462 		write_response->error_code = -1;
463 
464 fin:
465 	qstate->kevent_filter = EVFILT_WRITE;
466 	qstate->kevent_watermark = sizeof(int);
467 	qstate->process_func = on_write_response_write1;
468 
469 	TRACE_OUT(on_write_request_process);
470 	return (0);
471 }
472 
473 static int
474 on_negative_write_request_process(struct query_state *qstate)
475 {
476 	struct cache_write_request	*write_request;
477 	struct cache_write_response	*write_response;
478 	cache_entry c_entry;
479 
480 	TRACE_IN(on_negative_write_request_process);
481 	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
482 	write_response = get_cache_write_response(&qstate->response);
483 	write_request = get_cache_write_request(&qstate->request);
484 
485 	qstate->config_entry = configuration_find_entry(
486 		s_configuration, write_request->entry);
487 
488 	if (qstate->config_entry == NULL) {
489 		write_response->error_code = ENOENT;
490 
491 		LOG_ERR_2("negative_write_request",
492 			"can't find configuration"
493 		   	" entry '%s'. aborting request", write_request->entry);
494 		goto fin;
495 	}
496 
497 	if (qstate->config_entry->enabled == 0) {
498 		write_response->error_code = EACCES;
499 
500 		LOG_ERR_2("negative_write_request",
501 			"configuration entry '%s' is disabled",
502 			write_request->entry);
503 		goto fin;
504 	}
505 
506 	if (qstate->config_entry->perform_actual_lookups != 0) {
507 		write_response->error_code = EOPNOTSUPP;
508 
509 		LOG_ERR_2("negative_write_request",
510 			"entry '%s' performs lookups by itself: "
511 			"can't write to it", write_request->entry);
512 		goto fin;
513 	} else {
514 #ifdef NS_NSCD_EID_CHECKING
515 		if (check_query_eids(qstate) != 0) {
516 			write_response->error_code = EPERM;
517 			goto fin;
518 		}
519 #endif
520 	}
521 
522 	configuration_lock_rdlock(s_configuration);
523 	c_entry = find_cache_entry(s_cache,
524     		qstate->config_entry->negative_cache_params.entry_name);
525 	configuration_unlock(s_configuration);
526 	if (c_entry != NULL) {
527 		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
528 		qstate->config_entry->negative_cache_entry = c_entry;
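		/*
		 * Only the 1-byte negative_data placeholder is stored for a
		 * negative entry; its presence in the negative cache is what
		 * marks the key as a failed lookup.
		 */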
529 		write_response->error_code = cache_write(c_entry,
530 			write_request->cache_key,
531 	    		write_request->cache_key_size,
532 	    		negative_data,
533 			sizeof(negative_data));
534 		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
535 
536 		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
537 		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
538 			memcpy(&qstate->timeout,
539 				&qstate->config_entry->common_query_timeout,
540 				sizeof(struct timeval));
541 	} else
542 		write_response->error_code = -1;
543 
544 fin:
545 	qstate->kevent_filter = EVFILT_WRITE;
546 	qstate->kevent_watermark = sizeof(int);
547 	qstate->process_func = on_write_response_write1;
548 
549 	TRACE_OUT(on_negative_write_request_process);
550 	return (0);
551 }
552 
553 static int
554 on_write_response_write1(struct query_state *qstate)
555 {
556 	struct cache_write_response	*write_response;
557 	ssize_t	result;
558 
559 	TRACE_IN(on_write_response_write1);
560 	write_response = get_cache_write_response(&qstate->response);
561 	result = qstate->write_func(qstate, &write_response->error_code,
562 		sizeof(int));
563 	if (result != sizeof(int)) {
564 		TRACE_OUT(on_write_response_write1);
565 		return (-1);
566 	}
567 
568 	finalize_comm_element(&qstate->request);
569 	finalize_comm_element(&qstate->response);
570 
571 	qstate->kevent_watermark = sizeof(int);
572 	qstate->kevent_filter = EVFILT_READ;
573 	qstate->process_func = on_rw_mapper;
574 
575 	TRACE_OUT(on_write_response_write1);
576 	return (0);
577 }
578 
579 /*
580  * The functions below are used to process read requests.
581  * - on_read_request_read1 and on_read_request_read2 read the request itself
582  * - on_read_request_process processes it
583  * - on_read_response_write1 and on_read_response_write2 send the response
584  */
585 static int
586 on_read_request_read1(struct query_state *qstate)
587 {
588 	struct cache_read_request *read_request;
589 	ssize_t	result;
590 
591 	TRACE_IN(on_read_request_read1);
592 	if (qstate->kevent_watermark == 0)
593 		qstate->kevent_watermark = sizeof(size_t) * 2;
594 	else {
595 		init_comm_element(&qstate->request, CET_READ_REQUEST);
596 		read_request = get_cache_read_request(&qstate->request);
597 
598 		result = qstate->read_func(qstate,
599 	    		&read_request->entry_length, sizeof(size_t));
600 		result += qstate->read_func(qstate,
601 	    		&read_request->cache_key_size, sizeof(size_t));
602 
603 		if (result != sizeof(size_t) * 2) {
604 			TRACE_OUT(on_read_request_read1);
605 			return (-1);
606 		}
607 
608 		if (BUFSIZE_INVALID(read_request->entry_length) ||
609 			BUFSIZE_INVALID(read_request->cache_key_size)) {
610 			TRACE_OUT(on_read_request_read1);
611 			return (-1);
612 		}
613 
614 		read_request->entry = (char *)malloc(
615 			read_request->entry_length + 1);
616 		assert(read_request->entry != NULL);
617 		memset(read_request->entry, 0, read_request->entry_length + 1);
618 
619 		read_request->cache_key = (char *)malloc(
620 			read_request->cache_key_size +
621 			qstate->eid_str_length);
622 		assert(read_request->cache_key != NULL);
623 		memcpy(read_request->cache_key, qstate->eid_str,
624 			qstate->eid_str_length);
625 		memset(read_request->cache_key + qstate->eid_str_length, 0,
626 			read_request->cache_key_size);
627 
628 		qstate->kevent_watermark = read_request->entry_length +
629 			read_request->cache_key_size;
630 		qstate->process_func = on_read_request_read2;
631 	}
632 
633 	TRACE_OUT(on_read_request_read1);
634 	return (0);
635 }
636 
637 static int
638 on_read_request_read2(struct query_state *qstate)
639 {
640 	struct cache_read_request	*read_request;
641 	ssize_t	result;
642 
643 	TRACE_IN(on_read_request_read2);
644 	read_request = get_cache_read_request(&qstate->request);
645 
646 	result = qstate->read_func(qstate, read_request->entry,
647 		read_request->entry_length);
648 	result += qstate->read_func(qstate,
649 		read_request->cache_key + qstate->eid_str_length,
650 		read_request->cache_key_size);
651 
652 	if (result != qstate->kevent_watermark) {
653 		TRACE_OUT(on_read_request_read2);
654 		return (-1);
655 	}
656 	read_request->cache_key_size += qstate->eid_str_length;
657 
658 	qstate->kevent_watermark = 0;
659 	qstate->process_func = on_read_request_process;
660 
661 	TRACE_OUT(on_read_request_read2);
662 	return (0);
663 }
664 
665 static int
666 on_read_request_process(struct query_state *qstate)
667 {
668 	struct cache_read_request *read_request;
669 	struct cache_read_response *read_response;
670 	cache_entry	c_entry, neg_c_entry;
671 
672 	struct agent	*lookup_agent;
673 	struct common_agent *c_agent;
674 	int res;
675 
676 	TRACE_IN(on_read_request_process);
677 	init_comm_element(&qstate->response, CET_READ_RESPONSE);
678 	read_response = get_cache_read_response(&qstate->response);
679 	read_request = get_cache_read_request(&qstate->request);
680 
681 	qstate->config_entry = configuration_find_entry(
682 		s_configuration, read_request->entry);
683 	if (qstate->config_entry == NULL) {
684 		read_response->error_code = ENOENT;
685 
686 		LOG_ERR_2("read_request",
687 			"can't find configuration "
688 	    		"entry '%s'. aborting request", read_request->entry);
689 	    	goto fin;
690 	}
691 
692 	if (qstate->config_entry->enabled == 0) {
693 		read_response->error_code = EACCES;
694 
695 		LOG_ERR_2("read_request",
696 			"configuration entry '%s' is disabled",
697 			read_request->entry);
698 		goto fin;
699 	}
700 
701 	/*
702 	 * if we perform lookups by ourselves, then we don't need to separate
703 	 * cache entries by euid and egid
704 	 */
705 	if (qstate->config_entry->perform_actual_lookups != 0)
706 		memset(read_request->cache_key, 0, qstate->eid_str_length);
707 	else {
708 #ifdef NS_NSCD_EID_CHECKING
709 		if (check_query_eids(qstate) != 0) {
710 		/* if the lookup is not self-performing, we check the client's euid/egid */
711 			read_response->error_code = EPERM;
712 			goto fin;
713 		}
714 #endif
715 	}
716 
717 	configuration_lock_rdlock(s_configuration);
718 	c_entry = find_cache_entry(s_cache,
719     		qstate->config_entry->positive_cache_params.entry_name);
720 	neg_c_entry = find_cache_entry(s_cache,
721 		qstate->config_entry->negative_cache_params.entry_name);
722 	configuration_unlock(s_configuration);
723 	if ((c_entry != NULL) && (neg_c_entry != NULL)) {
724 		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
725 		qstate->config_entry->positive_cache_entry = c_entry;
726 		read_response->error_code = cache_read(c_entry,
727 	    		read_request->cache_key,
728 	    		read_request->cache_key_size, NULL,
729 	    		&read_response->data_size);
730 
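		/*
		 * The first cache_read() call only reported the required
		 * size; allocate a buffer of that size and retry the read.
		 */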
731 		if (read_response->error_code == -2) {
732 			read_response->data = (char *)malloc(
733 		    		read_response->data_size);
734 			assert(read_response->data != NULL);
735 			read_response->error_code = cache_read(c_entry,
736 				read_request->cache_key,
737 		    		read_request->cache_key_size,
738 		    		read_response->data,
739 		    		&read_response->data_size);
740 		}
741 		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
742 
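		/*
		 * If the positive cache missed (error_code == -1), consult
		 * the negative cache; a hit there is returned to the client
		 * as success with no data.
		 */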
743 		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
744 		qstate->config_entry->negative_cache_entry = neg_c_entry;
745 		if (read_response->error_code == -1) {
746 			read_response->error_code = cache_read(neg_c_entry,
747 				read_request->cache_key,
748 				read_request->cache_key_size, NULL,
749 				&read_response->data_size);
750 
751 			if (read_response->error_code == -2) {
752 				read_response->error_code = 0;
753 				read_response->data = NULL;
754 				read_response->data_size = 0;
755 			}
756 		}
757 		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
758 
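		/*
		 * Both caches missed and this entry performs its own lookups:
		 * call the lookup agent directly and cache the result,
		 * positively or negatively.
		 */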
759 		if ((read_response->error_code == -1) &&
760 			(qstate->config_entry->perform_actual_lookups != 0)) {
761 			free(read_response->data);
762 			read_response->data = NULL;
763 			read_response->data_size = 0;
764 
765 			lookup_agent = find_agent(s_agent_table,
766 				read_request->entry, COMMON_AGENT);
767 
768 			if ((lookup_agent != NULL) &&
769 			(lookup_agent->type == COMMON_AGENT)) {
770 				c_agent = (struct common_agent *)lookup_agent;
771 				res = c_agent->lookup_func(
772 					read_request->cache_key +
773 						qstate->eid_str_length,
774 					read_request->cache_key_size -
775 						qstate->eid_str_length,
776 					&read_response->data,
777 					&read_response->data_size);
778 
779 				if (res == NS_SUCCESS) {
780 					read_response->error_code = 0;
781 					configuration_lock_entry(
782 						qstate->config_entry,
783 						CELT_POSITIVE);
784 					cache_write(c_entry,
785 						read_request->cache_key,
786 	    					read_request->cache_key_size,
787 	    					read_response->data,
788 						read_response->data_size);
789 					configuration_unlock_entry(
790 						qstate->config_entry,
791 						CELT_POSITIVE);
792 				} else if ((res == NS_NOTFOUND) ||
793 					  (res == NS_RETURN)) {
794 					configuration_lock_entry(
795 						  qstate->config_entry,
796 						  CELT_NEGATIVE);
797 					cache_write(neg_c_entry,
798 						read_request->cache_key,
799 						read_request->cache_key_size,
800 						negative_data,
801 						sizeof(negative_data));
802 					configuration_unlock_entry(
803 						  qstate->config_entry,
804 						  CELT_NEGATIVE);
805 
806 					read_response->error_code = 0;
807 					read_response->data = NULL;
808 					read_response->data_size = 0;
809 				}
810 			}
811 		}
812 
813 		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
814 		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
815 			memcpy(&qstate->timeout,
816 				&qstate->config_entry->common_query_timeout,
817 				sizeof(struct timeval));
818 	} else
819 		read_response->error_code = -1;
820 
821 fin:
822 	qstate->kevent_filter = EVFILT_WRITE;
823 	if (read_response->error_code == 0)
824 		qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
825 	else
826 		qstate->kevent_watermark = sizeof(int);
827 	qstate->process_func = on_read_response_write1;
828 
829 	TRACE_OUT(on_read_request_process);
830 	return (0);
831 }
832 
833 static int
834 on_read_response_write1(struct query_state *qstate)
835 {
836 	struct cache_read_response	*read_response;
837 	ssize_t	result;
838 
839 	TRACE_IN(on_read_response_write1);
840 	read_response = get_cache_read_response(&qstate->response);
841 
842 	result = qstate->write_func(qstate, &read_response->error_code,
843 		sizeof(int));
844 
845 	if (read_response->error_code == 0) {
846 		result += qstate->write_func(qstate, &read_response->data_size,
847 			sizeof(size_t));
848 		if (result != qstate->kevent_watermark) {
849 			TRACE_OUT(on_read_response_write1);
850 			return (-1);
851 		}
852 
853 		qstate->kevent_watermark = read_response->data_size;
854 		qstate->process_func = on_read_response_write2;
855 	} else {
856 		if (result != qstate->kevent_watermark) {
857 			TRACE_OUT(on_read_response_write1);
858 			return (-1);
859 		}
860 
861 		qstate->kevent_watermark = 0;
862 		qstate->process_func = NULL;
863 	}
864 
865 	TRACE_OUT(on_read_response_write1);
866 	return (0);
867 }
868 
869 static int
870 on_read_response_write2(struct query_state *qstate)
871 {
872 	struct cache_read_response	*read_response;
873 	ssize_t	result;
874 
875 	TRACE_IN(on_read_response_write2);
876 	read_response = get_cache_read_response(&qstate->response);
877 	if (read_response->data_size > 0) {
878 		result = qstate->write_func(qstate, read_response->data,
879 			read_response->data_size);
880 		if (result != qstate->kevent_watermark) {
881 			TRACE_OUT(on_read_response_write2);
882 			return (-1);
883 		}
884 	}
885 
886 	finalize_comm_element(&qstate->request);
887 	finalize_comm_element(&qstate->response);
888 
889 	qstate->kevent_watermark = sizeof(int);
890 	qstate->kevent_filter = EVFILT_READ;
891 	qstate->process_func = on_rw_mapper;
892 	TRACE_OUT(on_read_response_write2);
893 	return (0);
894 }
895 
896 /*
897  * The functions below are used to process transform requests.
898  * - on_transform_request_read1 and on_transform_request_read2 read the
899  *   request itself
900  * - on_transform_request_process processes it
901  * - on_transform_response_write1 sends the response
902  */
903 static int
904 on_transform_request_read1(struct query_state *qstate)
905 {
906 	struct cache_transform_request *transform_request;
907 	ssize_t	result;
908 
909 	TRACE_IN(on_transform_request_read1);
910 	if (qstate->kevent_watermark == 0)
911 		qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
912 	else {
913 		init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST);
914 		transform_request =
915 			get_cache_transform_request(&qstate->request);
916 
917 		result = qstate->read_func(qstate,
918 	    		&transform_request->entry_length, sizeof(size_t));
919 		result += qstate->read_func(qstate,
920 	    		&transform_request->transformation_type, sizeof(int));
921 
922 		if (result != sizeof(size_t) + sizeof(int)) {
923 			TRACE_OUT(on_transform_request_read1);
924 			return (-1);
925 		}
926 
927 		if ((transform_request->transformation_type != TT_USER) &&
928 		    (transform_request->transformation_type != TT_ALL)) {
929 			TRACE_OUT(on_transform_request_read1);
930 			return (-1);
931 		}
932 
933 		if (transform_request->entry_length != 0) {
934 			if (BUFSIZE_INVALID(transform_request->entry_length)) {
935 				TRACE_OUT(on_transform_request_read1);
936 				return (-1);
937 			}
938 
939 			transform_request->entry = (char *)malloc(
940 				transform_request->entry_length + 1);
941 			assert(transform_request->entry != NULL);
942 			memset(transform_request->entry, 0,
943 				transform_request->entry_length + 1);
944 
945 			qstate->process_func = on_transform_request_read2;
946 		} else
947 			qstate->process_func = on_transform_request_process;
948 
949 		qstate->kevent_watermark = transform_request->entry_length;
950 	}
951 
952 	TRACE_OUT(on_transform_request_read1);
953 	return (0);
954 }
955 
956 static int
957 on_transform_request_read2(struct query_state *qstate)
958 {
959 	struct cache_transform_request	*transform_request;
960 	ssize_t	result;
961 
962 	TRACE_IN(on_transform_request_read2);
963 	transform_request = get_cache_transform_request(&qstate->request);
964 
965 	result = qstate->read_func(qstate, transform_request->entry,
966 		transform_request->entry_length);
967 
968 	if (result != qstate->kevent_watermark) {
969 		TRACE_OUT(on_transform_request_read2);
970 		return (-1);
971 	}
972 
973 	qstate->kevent_watermark = 0;
974 	qstate->process_func = on_transform_request_process;
975 
976 	TRACE_OUT(on_transform_request_read2);
977 	return (0);
978 }
979 
980 static int
981 on_transform_request_process(struct query_state *qstate)
982 {
983 	struct cache_transform_request *transform_request;
984 	struct cache_transform_response *transform_response;
985 	struct configuration_entry *config_entry;
986 	size_t	i, size;
987 
988 	TRACE_IN(on_transform_request_process);
989 	init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE);
990 	transform_response = get_cache_transform_response(&qstate->response);
991 	transform_request = get_cache_transform_request(&qstate->request);
992 
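	/*
	 * TT_USER clears only the cache elements owned by the requesting
	 * user; TT_ALL flushes whole cache entries and is allowed only for
	 * root (euid 0).
	 */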
993 	switch (transform_request->transformation_type) {
994 	case TT_USER:
995 		if (transform_request->entry == NULL) {
996 			size = configuration_get_entries_size(s_configuration);
997 			for (i = 0; i < size; ++i) {
998 			    config_entry = configuration_get_entry(
999 				s_configuration, i);
1000 
1001 			    if (config_entry->perform_actual_lookups == 0)
1002 			    	clear_config_entry_part(config_entry,
1003 				    qstate->eid_str, qstate->eid_str_length);
1004 			}
1005 		} else {
1006 			qstate->config_entry = configuration_find_entry(
1007 				s_configuration, transform_request->entry);
1008 
1009 			if (qstate->config_entry == NULL) {
1010 				LOG_ERR_2("transform_request",
1011 					"can't find configuration"
1012 		   			" entry '%s'. aborting request",
1013 					transform_request->entry);
1014 				transform_response->error_code = -1;
1015 				goto fin;
1016 			}
1017 
1018 			if (qstate->config_entry->perform_actual_lookups != 0) {
1019 				LOG_ERR_2("transform_request",
1020 					"can't transform the cache entry %s"
1021 					", because it is used for actual lookups",
1022 					transform_request->entry);
1023 				transform_response->error_code = -1;
1024 				goto fin;
1025 			}
1026 
1027 			clear_config_entry_part(qstate->config_entry,
1028 				qstate->eid_str, qstate->eid_str_length);
1029 		}
1030 		break;
1031 	case TT_ALL:
1032 		if (qstate->euid != 0)
1033 			transform_response->error_code = -1;
1034 		else {
1035 			if (transform_request->entry == NULL) {
1036 				size = configuration_get_entries_size(
1037 					s_configuration);
1038 				for (i = 0; i < size; ++i) {
1039 				    clear_config_entry(
1040 					configuration_get_entry(
1041 						s_configuration, i));
1042 				}
1043 			} else {
1044 				qstate->config_entry = configuration_find_entry(
1045 					s_configuration,
1046 					transform_request->entry);
1047 
1048 				if (qstate->config_entry == NULL) {
1049 					LOG_ERR_2("transform_request",
1050 						"can't find configuration"
1051 		   				" entry '%s'. aborting request",
1052 						transform_request->entry);
1053 					transform_response->error_code = -1;
1054 					goto fin;
1055 				}
1056 
1057 				clear_config_entry(qstate->config_entry);
1058 			}
1059 		}
1060 		break;
1061 	default:
1062 		transform_response->error_code = -1;
1063 	}
1064 
1065 fin:
1066 	qstate->kevent_watermark = 0;
1067 	qstate->process_func = on_transform_response_write1;
1068 	TRACE_OUT(on_transform_request_process);
1069 	return (0);
1070 }
1071 
1072 static int
1073 on_transform_response_write1(struct query_state *qstate)
1074 {
1075 	struct cache_transform_response	*transform_response;
1076 	ssize_t	result;
1077 
1078 	TRACE_IN(on_transform_response_write1);
1079 	transform_response = get_cache_transform_response(&qstate->response);
1080 	result = qstate->write_func(qstate, &transform_response->error_code,
1081 		sizeof(int));
1082 	if (result != sizeof(int)) {
1083 		TRACE_OUT(on_transform_response_write1);
1084 		return (-1);
1085 	}
1086 
1087 	finalize_comm_element(&qstate->request);
1088 	finalize_comm_element(&qstate->response);
1089 
1090 	qstate->kevent_watermark = 0;
1091 	qstate->process_func = NULL;
1092 	TRACE_OUT(on_transform_response_write1);
1093 	return (0);
1094 }
1095 
1096 /*
1097  * Checks that the client's euid and egid match its uid and gid.
1098  * Returns 0 on success.
1099  */
1100 int
1101 check_query_eids(struct query_state *qstate)
1102 {
1103 
1104 	return ((qstate->uid != qstate->euid) || (qstate->gid != qstate->egid) ? -1 : 0);
1105 }
1106 
1107 /*
1108  * Uses the qstate fields to process an "alternate" read - when the buffer is
1109  * too large to be received during one socket read operation
1110  */
1111 ssize_t
1112 query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes)
1113 {
1114 	ssize_t	result;
1115 
1116 	TRACE_IN(query_io_buffer_read);
1117 	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1118 		return (-1);
1119 
1120 	if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
1121 			qstate->io_buffer_p)
1122 		result = nbytes;
1123 	else
1124 		result = qstate->io_buffer + qstate->io_buffer_size -
1125 			qstate->io_buffer_p;
1126 
1127 	memcpy(buf, qstate->io_buffer_p, result);
1128 	qstate->io_buffer_p += result;
1129 
1130 	if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
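	/*
	 * The whole buffer has been consumed: free it and switch back to
	 * direct socket I/O.
	 */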
1131 		free(qstate->io_buffer);
1132 		qstate->io_buffer = NULL;
1133 
1134 		qstate->write_func = query_socket_write;
1135 		qstate->read_func = query_socket_read;
1136 	}
1137 
1138 	TRACE_OUT(query_io_buffer_read);
1139 	return (result);
1140 }
1141 
1142 /*
1143  * Uses the qstate fields to process an "alternate" write - when the buffer is
1144  * too large to be sent during one socket write operation
1145  */
1146 ssize_t
1147 query_io_buffer_write(struct query_state *qstate, const void *buf,
1148 	size_t nbytes)
1149 {
1150 	ssize_t	result;
1151 
1152 	TRACE_IN(query_io_buffer_write);
1153 	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1154 		return (-1);
1155 
1156 	if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
1157 			qstate->io_buffer_p)
1158 		result = nbytes;
1159 	else
1160 		result = qstate->io_buffer + qstate->io_buffer_size -
1161 		qstate->io_buffer_p;
1162 
1163 	memcpy(qstate->io_buffer_p, buf, result);
1164 	qstate->io_buffer_p += result;
1165 
1166 	if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
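	/*
	 * The buffer is full: set the use_alternate_io flag, rewind the
	 * buffer pointer and restore the direct socket I/O functions.
	 */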
1167 		qstate->use_alternate_io = 1;
1168 		qstate->io_buffer_p = qstate->io_buffer;
1169 
1170 		qstate->write_func = query_socket_write;
1171 		qstate->read_func = query_socket_read;
1172 	}
1173 
1174 	TRACE_OUT(query_io_buffer_write);
1175 	return (result);
1176 }
1177 
1178 /*
1179  * The default "read" function, which reads data directly from socket
1180  * The default "read" function, which reads data directly from the socket.
1181 ssize_t
1182 query_socket_read(struct query_state *qstate, void *buf, size_t nbytes)
1183 {
1184 	ssize_t	result;
1185 
1186 	TRACE_IN(query_socket_read);
1187 	if (qstate->socket_failed != 0) {
1188 		TRACE_OUT(query_socket_read);
1189 		return (-1);
1190 	}
1191 
1192 	result = read(qstate->sockfd, buf, nbytes);
1193 	if ((result == -1) || (result < nbytes))
1194 		qstate->socket_failed = 1;
1195 
1196 	TRACE_OUT(query_socket_read);
1197 	return (result);
1198 }
1199 
1200 /*
1201  * The default "write" function, which writes data directly to socket
1202  * The default "write" function, which writes data directly to the socket.
1203 ssize_t
1204 query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes)
1205 {
1206 	ssize_t	result;
1207 
1208 	TRACE_IN(query_socket_write);
1209 	if (qstate->socket_failed != 0) {
1210 		TRACE_OUT(query_socket_write);
1211 		return (-1);
1212 	}
1213 
1214 	result = write(qstate->sockfd, buf, nbytes);
1215 	if ((result == -1) || (result < nbytes))
1216 		qstate->socket_failed = 1;
1217 
1218 	TRACE_OUT(query_socket_write);
1219 	return (result);
1220 }
1221 
1222 /*
1223  * Initializes the query_state structure by filling it with the default values.
1224  */
1225 struct query_state *
1226 init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid)
1227 {
1228 	struct query_state	*retval;
1229 
1230 	TRACE_IN(init_query_state);
1231 	retval = (struct query_state *)malloc(sizeof(struct query_state));
1232 	assert(retval != NULL);
1233 	memset(retval, 0, sizeof(struct query_state));
1234 
1235 	retval->sockfd = sockfd;
1236 	retval->kevent_filter = EVFILT_READ;
1237 	retval->kevent_watermark = kevent_watermark;
1238 
1239 	retval->euid = euid;
1240 	retval->egid = egid;
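	/*
	 * The real uid/gid become known only in on_query_startup(), once the
	 * client's credentials have been received.
	 */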
1241 	retval->uid = retval->gid = -1;
1242 
1243 	if (asprintf(&retval->eid_str, "%d_%d_", retval->euid,
1244 		retval->egid) == -1) {
1245 		free(retval);
1246 		return (NULL);
1247 	}
1248 	retval->eid_str_length = strlen(retval->eid_str);
1249 
1250 	init_comm_element(&retval->request, CET_UNDEFINED);
1251 	init_comm_element(&retval->response, CET_UNDEFINED);
1252 	retval->process_func = on_query_startup;
1253 	retval->destroy_func = on_query_destroy;
1254 
1255 	retval->write_func = query_socket_write;
1256 	retval->read_func = query_socket_read;
1257 
1258 	get_time_func(&retval->creation_time);
1259 	memcpy(&retval->timeout, &s_configuration->query_timeout,
1260 		sizeof(struct timeval));
1261 
1262 	TRACE_OUT(init_query_state);
1263 	return (retval);
1264 }
1265 
1266 void
1267 destroy_query_state(struct query_state *qstate)
1268 {
1269 
1270 	TRACE_IN(destroy_query_state);
1271 	if (qstate->eid_str != NULL)
1272 	    free(qstate->eid_str);
1273 
1274 	if (qstate->io_buffer != NULL)
1275 		free(qstate->io_buffer);
1276 
1277 	qstate->destroy_func(qstate);
1278 	free(qstate);
1279 	TRACE_OUT(destroy_query_state);
1280 }
1281