1 /*- 2 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 */ 27 28 #include <sys/cdefs.h> 29 #include <sys/types.h> 30 #include <sys/event.h> 31 #include <sys/socket.h> 32 #include <sys/time.h> 33 34 #include <assert.h> 35 #include <errno.h> 36 #include <nsswitch.h> 37 #include <stdio.h> 38 #include <stdlib.h> 39 #include <string.h> 40 #include <unistd.h> 41 42 #include "config.h" 43 #include "debug.h" 44 #include "query.h" 45 #include "log.h" 46 #include "mp_ws_query.h" 47 #include "mp_rs_query.h" 48 #include "singletons.h" 49 50 static const char negative_data[1] = { 0 }; 51 52 extern void get_time_func(struct timeval *); 53 54 static void clear_config_entry(struct configuration_entry *); 55 static void clear_config_entry_part(struct configuration_entry *, 56 const char *, size_t); 57 58 static int on_query_startup(struct query_state *); 59 static void on_query_destroy(struct query_state *); 60 61 static int on_read_request_read1(struct query_state *); 62 static int on_read_request_read2(struct query_state *); 63 static int on_read_request_process(struct query_state *); 64 static int on_read_response_write1(struct query_state *); 65 static int on_read_response_write2(struct query_state *); 66 67 static int on_rw_mapper(struct query_state *); 68 69 static int on_transform_request_read1(struct query_state *); 70 static int on_transform_request_read2(struct query_state *); 71 static int on_transform_request_process(struct query_state *); 72 static int on_transform_response_write1(struct query_state *); 73 74 static int on_write_request_read1(struct query_state *); 75 static int on_write_request_read2(struct query_state *); 76 static int on_negative_write_request_process(struct query_state *); 77 static int on_write_request_process(struct query_state *); 78 static int on_write_response_write1(struct query_state *); 79 80 /* 81 * Clears the specified configuration entry (clears the cache for positive and 82 * and negative entries) and also for all multipart entries. 83 */ 84 static void 85 clear_config_entry(struct configuration_entry *config_entry) 86 { 87 size_t i; 88 89 TRACE_IN(clear_config_entry); 90 configuration_lock_entry(config_entry, CELT_POSITIVE); 91 if (config_entry->positive_cache_entry != NULL) 92 transform_cache_entry( 93 config_entry->positive_cache_entry, 94 CTT_CLEAR); 95 configuration_unlock_entry(config_entry, CELT_POSITIVE); 96 97 configuration_lock_entry(config_entry, CELT_NEGATIVE); 98 if (config_entry->negative_cache_entry != NULL) 99 transform_cache_entry( 100 config_entry->negative_cache_entry, 101 CTT_CLEAR); 102 configuration_unlock_entry(config_entry, CELT_NEGATIVE); 103 104 configuration_lock_entry(config_entry, CELT_MULTIPART); 105 for (i = 0; i < config_entry->mp_cache_entries_size; ++i) 106 transform_cache_entry( 107 config_entry->mp_cache_entries[i], 108 CTT_CLEAR); 109 configuration_unlock_entry(config_entry, CELT_MULTIPART); 110 111 TRACE_OUT(clear_config_entry); 112 } 113 114 /* 115 * Clears the specified configuration entry by deleting only the elements, 116 * that are owned by the user with specified eid_str. 117 */ 118 static void 119 clear_config_entry_part(struct configuration_entry *config_entry, 120 const char *eid_str, size_t eid_str_length) 121 { 122 cache_entry *start, *finish, *mp_entry; 123 TRACE_IN(clear_config_entry_part); 124 configuration_lock_entry(config_entry, CELT_POSITIVE); 125 if (config_entry->positive_cache_entry != NULL) 126 transform_cache_entry_part( 127 config_entry->positive_cache_entry, 128 CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT); 129 configuration_unlock_entry(config_entry, CELT_POSITIVE); 130 131 configuration_lock_entry(config_entry, CELT_NEGATIVE); 132 if (config_entry->negative_cache_entry != NULL) 133 transform_cache_entry_part( 134 config_entry->negative_cache_entry, 135 CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT); 136 configuration_unlock_entry(config_entry, CELT_NEGATIVE); 137 138 configuration_lock_entry(config_entry, CELT_MULTIPART); 139 if (configuration_entry_find_mp_cache_entries(config_entry, 140 eid_str, &start, &finish) == 0) { 141 for (mp_entry = start; mp_entry != finish; ++mp_entry) 142 transform_cache_entry(*mp_entry, CTT_CLEAR); 143 } 144 configuration_unlock_entry(config_entry, CELT_MULTIPART); 145 146 TRACE_OUT(clear_config_entry_part); 147 } 148 149 /* 150 * This function is assigned to the query_state structue on its creation. 151 * It's main purpose is to receive credentials from the client. 152 */ 153 static int 154 on_query_startup(struct query_state *qstate) 155 { 156 union { 157 struct cmsghdr hdr; 158 char pad[CMSG_SPACE(sizeof(struct cmsgcred))]; 159 } cmsg; 160 struct msghdr mhdr; 161 struct iovec iov; 162 struct cmsgcred *cred; 163 int elem_type; 164 165 TRACE_IN(on_query_startup); 166 assert(qstate != NULL); 167 168 memset(&mhdr, 0, sizeof(mhdr)); 169 mhdr.msg_iov = &iov; 170 mhdr.msg_iovlen = 1; 171 mhdr.msg_control = &cmsg; 172 mhdr.msg_controllen = sizeof(cmsg); 173 174 memset(&iov, 0, sizeof(iov)); 175 iov.iov_base = &elem_type; 176 iov.iov_len = sizeof(elem_type); 177 178 if (recvmsg(qstate->sockfd, &mhdr, 0) == -1) { 179 TRACE_OUT(on_query_startup); 180 return (-1); 181 } 182 183 if (mhdr.msg_controllen != CMSG_SPACE(sizeof(struct cmsgcred)) || 184 cmsg.hdr.cmsg_len != CMSG_LEN(sizeof(struct cmsgcred)) || 185 cmsg.hdr.cmsg_level != SOL_SOCKET || 186 cmsg.hdr.cmsg_type != SCM_CREDS) { 187 TRACE_OUT(on_query_startup); 188 return (-1); 189 } 190 191 cred = (struct cmsgcred *)CMSG_DATA(&cmsg); 192 qstate->uid = cred->cmcred_uid; 193 qstate->gid = cred->cmcred_gid; 194 195 #if defined(NS_NSCD_EID_CHECKING) || defined(NS_STRICT_NSCD_EID_CHECKING) 196 /* 197 * This check is probably a bit redundant - per-user cache is always separated 198 * by the euid/egid pair 199 */ 200 if (check_query_eids(qstate) != 0) { 201 #ifdef NS_STRICT_NSCD_EID_CHECKING 202 TRACE_OUT(on_query_startup); 203 return (-1); 204 #else 205 if ((elem_type != CET_READ_REQUEST) && 206 (elem_type != CET_MP_READ_SESSION_REQUEST) && 207 (elem_type != CET_WRITE_REQUEST) && 208 (elem_type != CET_MP_WRITE_SESSION_REQUEST)) { 209 TRACE_OUT(on_query_startup); 210 return (-1); 211 } 212 #endif 213 } 214 #endif 215 216 switch (elem_type) { 217 case CET_WRITE_REQUEST: 218 qstate->process_func = on_write_request_read1; 219 break; 220 case CET_READ_REQUEST: 221 qstate->process_func = on_read_request_read1; 222 break; 223 case CET_TRANSFORM_REQUEST: 224 qstate->process_func = on_transform_request_read1; 225 break; 226 case CET_MP_WRITE_SESSION_REQUEST: 227 qstate->process_func = on_mp_write_session_request_read1; 228 break; 229 case CET_MP_READ_SESSION_REQUEST: 230 qstate->process_func = on_mp_read_session_request_read1; 231 break; 232 default: 233 TRACE_OUT(on_query_startup); 234 return (-1); 235 } 236 237 qstate->kevent_watermark = 0; 238 TRACE_OUT(on_query_startup); 239 return (0); 240 } 241 242 /* 243 * on_rw_mapper is used to process multiple read/write requests during 244 * one connection session. It's never called in the beginning (on query_state 245 * creation) as it does not process the multipart requests and does not 246 * receive credentials 247 */ 248 static int 249 on_rw_mapper(struct query_state *qstate) 250 { 251 ssize_t result; 252 int elem_type; 253 254 TRACE_IN(on_rw_mapper); 255 if (qstate->kevent_watermark == 0) { 256 qstate->kevent_watermark = sizeof(int); 257 } else { 258 result = qstate->read_func(qstate, &elem_type, sizeof(int)); 259 if (result != sizeof(int)) { 260 TRACE_OUT(on_rw_mapper); 261 return (-1); 262 } 263 264 switch (elem_type) { 265 case CET_WRITE_REQUEST: 266 qstate->kevent_watermark = sizeof(size_t); 267 qstate->process_func = on_write_request_read1; 268 break; 269 case CET_READ_REQUEST: 270 qstate->kevent_watermark = sizeof(size_t); 271 qstate->process_func = on_read_request_read1; 272 break; 273 default: 274 TRACE_OUT(on_rw_mapper); 275 return (-1); 276 break; 277 } 278 } 279 TRACE_OUT(on_rw_mapper); 280 return (0); 281 } 282 283 /* 284 * The default query_destroy function 285 */ 286 static void 287 on_query_destroy(struct query_state *qstate) 288 { 289 290 TRACE_IN(on_query_destroy); 291 finalize_comm_element(&qstate->response); 292 finalize_comm_element(&qstate->request); 293 TRACE_OUT(on_query_destroy); 294 } 295 296 /* 297 * The functions below are used to process write requests. 298 * - on_write_request_read1 and on_write_request_read2 read the request itself 299 * - on_write_request_process processes it (if the client requests to 300 * cache the negative result, the on_negative_write_request_process is used) 301 * - on_write_response_write1 sends the response 302 */ 303 static int 304 on_write_request_read1(struct query_state *qstate) 305 { 306 struct cache_write_request *write_request; 307 ssize_t result; 308 309 TRACE_IN(on_write_request_read1); 310 if (qstate->kevent_watermark == 0) 311 qstate->kevent_watermark = sizeof(size_t) * 3; 312 else { 313 init_comm_element(&qstate->request, CET_WRITE_REQUEST); 314 write_request = get_cache_write_request(&qstate->request); 315 316 result = qstate->read_func(qstate, &write_request->entry_length, 317 sizeof(size_t)); 318 result += qstate->read_func(qstate, 319 &write_request->cache_key_size, sizeof(size_t)); 320 result += qstate->read_func(qstate, 321 &write_request->data_size, sizeof(size_t)); 322 323 if (result != sizeof(size_t) * 3) { 324 TRACE_OUT(on_write_request_read1); 325 return (-1); 326 } 327 328 if (BUFSIZE_INVALID(write_request->entry_length) || 329 BUFSIZE_INVALID(write_request->cache_key_size) || 330 (BUFSIZE_INVALID(write_request->data_size) && 331 (write_request->data_size != 0))) { 332 TRACE_OUT(on_write_request_read1); 333 return (-1); 334 } 335 336 write_request->entry = calloc(1, 337 write_request->entry_length + 1); 338 assert(write_request->entry != NULL); 339 340 write_request->cache_key = calloc(1, 341 write_request->cache_key_size + 342 qstate->eid_str_length); 343 assert(write_request->cache_key != NULL); 344 memcpy(write_request->cache_key, qstate->eid_str, 345 qstate->eid_str_length); 346 347 if (write_request->data_size != 0) { 348 write_request->data = calloc(1, 349 write_request->data_size); 350 assert(write_request->data != NULL); 351 } 352 353 qstate->kevent_watermark = write_request->entry_length + 354 write_request->cache_key_size + 355 write_request->data_size; 356 qstate->process_func = on_write_request_read2; 357 } 358 359 TRACE_OUT(on_write_request_read1); 360 return (0); 361 } 362 363 static int 364 on_write_request_read2(struct query_state *qstate) 365 { 366 struct cache_write_request *write_request; 367 ssize_t result; 368 369 TRACE_IN(on_write_request_read2); 370 write_request = get_cache_write_request(&qstate->request); 371 372 result = qstate->read_func(qstate, write_request->entry, 373 write_request->entry_length); 374 result += qstate->read_func(qstate, write_request->cache_key + 375 qstate->eid_str_length, write_request->cache_key_size); 376 if (write_request->data_size != 0) 377 result += qstate->read_func(qstate, write_request->data, 378 write_request->data_size); 379 380 if (result != (ssize_t)qstate->kevent_watermark) { 381 TRACE_OUT(on_write_request_read2); 382 return (-1); 383 } 384 write_request->cache_key_size += qstate->eid_str_length; 385 386 qstate->kevent_watermark = 0; 387 if (write_request->data_size != 0) 388 qstate->process_func = on_write_request_process; 389 else 390 qstate->process_func = on_negative_write_request_process; 391 TRACE_OUT(on_write_request_read2); 392 return (0); 393 } 394 395 static int 396 on_write_request_process(struct query_state *qstate) 397 { 398 struct cache_write_request *write_request; 399 struct cache_write_response *write_response; 400 cache_entry c_entry; 401 402 TRACE_IN(on_write_request_process); 403 init_comm_element(&qstate->response, CET_WRITE_RESPONSE); 404 write_response = get_cache_write_response(&qstate->response); 405 write_request = get_cache_write_request(&qstate->request); 406 407 qstate->config_entry = configuration_find_entry( 408 s_configuration, write_request->entry); 409 410 if (qstate->config_entry == NULL) { 411 write_response->error_code = ENOENT; 412 413 LOG_ERR_2("write_request", "can't find configuration" 414 " entry '%s'. aborting request", write_request->entry); 415 goto fin; 416 } 417 418 if (qstate->config_entry->enabled == 0) { 419 write_response->error_code = EACCES; 420 421 LOG_ERR_2("write_request", 422 "configuration entry '%s' is disabled", 423 write_request->entry); 424 goto fin; 425 } 426 427 if (qstate->config_entry->perform_actual_lookups != 0) { 428 write_response->error_code = EOPNOTSUPP; 429 430 LOG_ERR_2("write_request", 431 "entry '%s' performs lookups by itself: " 432 "can't write to it", write_request->entry); 433 goto fin; 434 } 435 436 configuration_lock_rdlock(s_configuration); 437 c_entry = find_cache_entry(s_cache, 438 qstate->config_entry->positive_cache_params.cep.entry_name); 439 configuration_unlock(s_configuration); 440 if (c_entry != NULL) { 441 configuration_lock_entry(qstate->config_entry, CELT_POSITIVE); 442 qstate->config_entry->positive_cache_entry = c_entry; 443 write_response->error_code = cache_write(c_entry, 444 write_request->cache_key, 445 write_request->cache_key_size, 446 write_request->data, 447 write_request->data_size); 448 configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE); 449 450 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) || 451 (qstate->config_entry->common_query_timeout.tv_usec != 0)) 452 memcpy(&qstate->timeout, 453 &qstate->config_entry->common_query_timeout, 454 sizeof(struct timeval)); 455 456 } else 457 write_response->error_code = -1; 458 459 fin: 460 qstate->kevent_filter = EVFILT_WRITE; 461 qstate->kevent_watermark = sizeof(int); 462 qstate->process_func = on_write_response_write1; 463 464 TRACE_OUT(on_write_request_process); 465 return (0); 466 } 467 468 static int 469 on_negative_write_request_process(struct query_state *qstate) 470 { 471 struct cache_write_request *write_request; 472 struct cache_write_response *write_response; 473 cache_entry c_entry; 474 475 TRACE_IN(on_negative_write_request_process); 476 init_comm_element(&qstate->response, CET_WRITE_RESPONSE); 477 write_response = get_cache_write_response(&qstate->response); 478 write_request = get_cache_write_request(&qstate->request); 479 480 qstate->config_entry = configuration_find_entry ( 481 s_configuration, write_request->entry); 482 483 if (qstate->config_entry == NULL) { 484 write_response->error_code = ENOENT; 485 486 LOG_ERR_2("negative_write_request", 487 "can't find configuration" 488 " entry '%s'. aborting request", write_request->entry); 489 goto fin; 490 } 491 492 if (qstate->config_entry->enabled == 0) { 493 write_response->error_code = EACCES; 494 495 LOG_ERR_2("negative_write_request", 496 "configuration entry '%s' is disabled", 497 write_request->entry); 498 goto fin; 499 } 500 501 if (qstate->config_entry->perform_actual_lookups != 0) { 502 write_response->error_code = EOPNOTSUPP; 503 504 LOG_ERR_2("negative_write_request", 505 "entry '%s' performs lookups by itself: " 506 "can't write to it", write_request->entry); 507 goto fin; 508 } else { 509 #ifdef NS_NSCD_EID_CHECKING 510 if (check_query_eids(qstate) != 0) { 511 write_response->error_code = EPERM; 512 goto fin; 513 } 514 #endif 515 } 516 517 configuration_lock_rdlock(s_configuration); 518 c_entry = find_cache_entry(s_cache, 519 qstate->config_entry->negative_cache_params.cep.entry_name); 520 configuration_unlock(s_configuration); 521 if (c_entry != NULL) { 522 configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE); 523 qstate->config_entry->negative_cache_entry = c_entry; 524 write_response->error_code = cache_write(c_entry, 525 write_request->cache_key, 526 write_request->cache_key_size, 527 negative_data, 528 sizeof(negative_data)); 529 configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE); 530 531 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) || 532 (qstate->config_entry->common_query_timeout.tv_usec != 0)) 533 memcpy(&qstate->timeout, 534 &qstate->config_entry->common_query_timeout, 535 sizeof(struct timeval)); 536 } else 537 write_response->error_code = -1; 538 539 fin: 540 qstate->kevent_filter = EVFILT_WRITE; 541 qstate->kevent_watermark = sizeof(int); 542 qstate->process_func = on_write_response_write1; 543 544 TRACE_OUT(on_negative_write_request_process); 545 return (0); 546 } 547 548 static int 549 on_write_response_write1(struct query_state *qstate) 550 { 551 struct cache_write_response *write_response; 552 ssize_t result; 553 554 TRACE_IN(on_write_response_write1); 555 write_response = get_cache_write_response(&qstate->response); 556 result = qstate->write_func(qstate, &write_response->error_code, 557 sizeof(int)); 558 if (result != sizeof(int)) { 559 TRACE_OUT(on_write_response_write1); 560 return (-1); 561 } 562 563 finalize_comm_element(&qstate->request); 564 finalize_comm_element(&qstate->response); 565 566 qstate->kevent_watermark = sizeof(int); 567 qstate->kevent_filter = EVFILT_READ; 568 qstate->process_func = on_rw_mapper; 569 570 TRACE_OUT(on_write_response_write1); 571 return (0); 572 } 573 574 /* 575 * The functions below are used to process read requests. 576 * - on_read_request_read1 and on_read_request_read2 read the request itself 577 * - on_read_request_process processes it 578 * - on_read_response_write1 and on_read_response_write2 send the response 579 */ 580 static int 581 on_read_request_read1(struct query_state *qstate) 582 { 583 struct cache_read_request *read_request; 584 ssize_t result; 585 586 TRACE_IN(on_read_request_read1); 587 if (qstate->kevent_watermark == 0) 588 qstate->kevent_watermark = sizeof(size_t) * 2; 589 else { 590 init_comm_element(&qstate->request, CET_READ_REQUEST); 591 read_request = get_cache_read_request(&qstate->request); 592 593 result = qstate->read_func(qstate, 594 &read_request->entry_length, sizeof(size_t)); 595 result += qstate->read_func(qstate, 596 &read_request->cache_key_size, sizeof(size_t)); 597 598 if (result != sizeof(size_t) * 2) { 599 TRACE_OUT(on_read_request_read1); 600 return (-1); 601 } 602 603 if (BUFSIZE_INVALID(read_request->entry_length) || 604 BUFSIZE_INVALID(read_request->cache_key_size)) { 605 TRACE_OUT(on_read_request_read1); 606 return (-1); 607 } 608 609 read_request->entry = calloc(1, 610 read_request->entry_length + 1); 611 assert(read_request->entry != NULL); 612 613 read_request->cache_key = calloc(1, 614 read_request->cache_key_size + 615 qstate->eid_str_length); 616 assert(read_request->cache_key != NULL); 617 memcpy(read_request->cache_key, qstate->eid_str, 618 qstate->eid_str_length); 619 620 qstate->kevent_watermark = read_request->entry_length + 621 read_request->cache_key_size; 622 qstate->process_func = on_read_request_read2; 623 } 624 625 TRACE_OUT(on_read_request_read1); 626 return (0); 627 } 628 629 static int 630 on_read_request_read2(struct query_state *qstate) 631 { 632 struct cache_read_request *read_request; 633 ssize_t result; 634 635 TRACE_IN(on_read_request_read2); 636 read_request = get_cache_read_request(&qstate->request); 637 638 result = qstate->read_func(qstate, read_request->entry, 639 read_request->entry_length); 640 result += qstate->read_func(qstate, 641 read_request->cache_key + qstate->eid_str_length, 642 read_request->cache_key_size); 643 644 if (result != (ssize_t)qstate->kevent_watermark) { 645 TRACE_OUT(on_read_request_read2); 646 return (-1); 647 } 648 read_request->cache_key_size += qstate->eid_str_length; 649 650 qstate->kevent_watermark = 0; 651 qstate->process_func = on_read_request_process; 652 653 TRACE_OUT(on_read_request_read2); 654 return (0); 655 } 656 657 static int 658 on_read_request_process(struct query_state *qstate) 659 { 660 struct cache_read_request *read_request; 661 struct cache_read_response *read_response; 662 cache_entry c_entry, neg_c_entry; 663 664 struct agent *lookup_agent; 665 struct common_agent *c_agent; 666 int res; 667 668 TRACE_IN(on_read_request_process); 669 init_comm_element(&qstate->response, CET_READ_RESPONSE); 670 read_response = get_cache_read_response(&qstate->response); 671 read_request = get_cache_read_request(&qstate->request); 672 673 qstate->config_entry = configuration_find_entry( 674 s_configuration, read_request->entry); 675 if (qstate->config_entry == NULL) { 676 read_response->error_code = ENOENT; 677 678 LOG_ERR_2("read_request", 679 "can't find configuration " 680 "entry '%s'. aborting request", read_request->entry); 681 goto fin; 682 } 683 684 if (qstate->config_entry->enabled == 0) { 685 read_response->error_code = EACCES; 686 687 LOG_ERR_2("read_request", 688 "configuration entry '%s' is disabled", 689 read_request->entry); 690 goto fin; 691 } 692 693 /* 694 * if we perform lookups by ourselves, then we don't need to separate 695 * cache entries by euid and egid 696 */ 697 if (qstate->config_entry->perform_actual_lookups != 0) 698 memset(read_request->cache_key, 0, qstate->eid_str_length); 699 else { 700 #ifdef NS_NSCD_EID_CHECKING 701 if (check_query_eids(qstate) != 0) { 702 /* if the lookup is not self-performing, we check for clients euid/egid */ 703 read_response->error_code = EPERM; 704 goto fin; 705 } 706 #endif 707 } 708 709 configuration_lock_rdlock(s_configuration); 710 c_entry = find_cache_entry(s_cache, 711 qstate->config_entry->positive_cache_params.cep.entry_name); 712 neg_c_entry = find_cache_entry(s_cache, 713 qstate->config_entry->negative_cache_params.cep.entry_name); 714 configuration_unlock(s_configuration); 715 if ((c_entry != NULL) && (neg_c_entry != NULL)) { 716 configuration_lock_entry(qstate->config_entry, CELT_POSITIVE); 717 qstate->config_entry->positive_cache_entry = c_entry; 718 read_response->error_code = cache_read(c_entry, 719 read_request->cache_key, 720 read_request->cache_key_size, NULL, 721 &read_response->data_size); 722 723 if (read_response->error_code == -2) { 724 read_response->data = malloc( 725 read_response->data_size); 726 assert(read_response->data != NULL); 727 read_response->error_code = cache_read(c_entry, 728 read_request->cache_key, 729 read_request->cache_key_size, 730 read_response->data, 731 &read_response->data_size); 732 } 733 configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE); 734 735 configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE); 736 qstate->config_entry->negative_cache_entry = neg_c_entry; 737 if (read_response->error_code == -1) { 738 read_response->error_code = cache_read(neg_c_entry, 739 read_request->cache_key, 740 read_request->cache_key_size, NULL, 741 &read_response->data_size); 742 743 if (read_response->error_code == -2) { 744 read_response->data = malloc( 745 read_response->data_size); 746 assert(read_response->data != NULL); 747 read_response->error_code = cache_read(neg_c_entry, 748 read_request->cache_key, 749 read_request->cache_key_size, 750 read_response->data, 751 &read_response->data_size); 752 } 753 } 754 configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE); 755 756 if ((read_response->error_code == -1) && 757 (qstate->config_entry->perform_actual_lookups != 0)) { 758 free(read_response->data); 759 read_response->data = NULL; 760 read_response->data_size = 0; 761 762 lookup_agent = find_agent(s_agent_table, 763 read_request->entry, COMMON_AGENT); 764 765 if ((lookup_agent != NULL) && 766 (lookup_agent->type == COMMON_AGENT)) { 767 c_agent = (struct common_agent *)lookup_agent; 768 res = c_agent->lookup_func( 769 read_request->cache_key + 770 qstate->eid_str_length, 771 read_request->cache_key_size - 772 qstate->eid_str_length, 773 &read_response->data, 774 &read_response->data_size); 775 776 if (res == NS_SUCCESS) { 777 read_response->error_code = 0; 778 configuration_lock_entry( 779 qstate->config_entry, 780 CELT_POSITIVE); 781 cache_write(c_entry, 782 read_request->cache_key, 783 read_request->cache_key_size, 784 read_response->data, 785 read_response->data_size); 786 configuration_unlock_entry( 787 qstate->config_entry, 788 CELT_POSITIVE); 789 } else if ((res == NS_NOTFOUND) || 790 (res == NS_RETURN)) { 791 configuration_lock_entry( 792 qstate->config_entry, 793 CELT_NEGATIVE); 794 cache_write(neg_c_entry, 795 read_request->cache_key, 796 read_request->cache_key_size, 797 negative_data, 798 sizeof(negative_data)); 799 configuration_unlock_entry( 800 qstate->config_entry, 801 CELT_NEGATIVE); 802 803 read_response->error_code = 0; 804 read_response->data = NULL; 805 read_response->data_size = 0; 806 } 807 } 808 } 809 810 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) || 811 (qstate->config_entry->common_query_timeout.tv_usec != 0)) 812 memcpy(&qstate->timeout, 813 &qstate->config_entry->common_query_timeout, 814 sizeof(struct timeval)); 815 } else 816 read_response->error_code = -1; 817 818 fin: 819 qstate->kevent_filter = EVFILT_WRITE; 820 if (read_response->error_code == 0) 821 qstate->kevent_watermark = sizeof(int) + sizeof(size_t); 822 else 823 qstate->kevent_watermark = sizeof(int); 824 qstate->process_func = on_read_response_write1; 825 826 TRACE_OUT(on_read_request_process); 827 return (0); 828 } 829 830 static int 831 on_read_response_write1(struct query_state *qstate) 832 { 833 struct cache_read_response *read_response; 834 ssize_t result; 835 836 TRACE_IN(on_read_response_write1); 837 read_response = get_cache_read_response(&qstate->response); 838 839 result = qstate->write_func(qstate, &read_response->error_code, 840 sizeof(int)); 841 842 if (read_response->error_code == 0) { 843 result += qstate->write_func(qstate, &read_response->data_size, 844 sizeof(size_t)); 845 if (result != (ssize_t)qstate->kevent_watermark) { 846 TRACE_OUT(on_read_response_write1); 847 return (-1); 848 } 849 850 qstate->kevent_watermark = read_response->data_size; 851 qstate->process_func = on_read_response_write2; 852 } else { 853 if (result != (ssize_t)qstate->kevent_watermark) { 854 TRACE_OUT(on_read_response_write1); 855 return (-1); 856 } 857 858 qstate->kevent_watermark = 0; 859 qstate->process_func = NULL; 860 } 861 862 TRACE_OUT(on_read_response_write1); 863 return (0); 864 } 865 866 static int 867 on_read_response_write2(struct query_state *qstate) 868 { 869 struct cache_read_response *read_response; 870 ssize_t result; 871 872 TRACE_IN(on_read_response_write2); 873 read_response = get_cache_read_response(&qstate->response); 874 if (read_response->data_size > 0) { 875 result = qstate->write_func(qstate, read_response->data, 876 read_response->data_size); 877 if (result != (ssize_t)qstate->kevent_watermark) { 878 TRACE_OUT(on_read_response_write2); 879 return (-1); 880 } 881 } 882 883 finalize_comm_element(&qstate->request); 884 finalize_comm_element(&qstate->response); 885 886 qstate->kevent_watermark = sizeof(int); 887 qstate->kevent_filter = EVFILT_READ; 888 qstate->process_func = on_rw_mapper; 889 TRACE_OUT(on_read_response_write2); 890 return (0); 891 } 892 893 /* 894 * The functions below are used to process write requests. 895 * - on_transform_request_read1 and on_transform_request_read2 read the 896 * request itself 897 * - on_transform_request_process processes it 898 * - on_transform_response_write1 sends the response 899 */ 900 static int 901 on_transform_request_read1(struct query_state *qstate) 902 { 903 struct cache_transform_request *transform_request; 904 ssize_t result; 905 906 TRACE_IN(on_transform_request_read1); 907 if (qstate->kevent_watermark == 0) 908 qstate->kevent_watermark = sizeof(size_t) + sizeof(int); 909 else { 910 init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST); 911 transform_request = 912 get_cache_transform_request(&qstate->request); 913 914 result = qstate->read_func(qstate, 915 &transform_request->entry_length, sizeof(size_t)); 916 result += qstate->read_func(qstate, 917 &transform_request->transformation_type, sizeof(int)); 918 919 if (result != sizeof(size_t) + sizeof(int)) { 920 TRACE_OUT(on_transform_request_read1); 921 return (-1); 922 } 923 924 if ((transform_request->transformation_type != TT_USER) && 925 (transform_request->transformation_type != TT_ALL)) { 926 TRACE_OUT(on_transform_request_read1); 927 return (-1); 928 } 929 930 if (transform_request->entry_length != 0) { 931 if (BUFSIZE_INVALID(transform_request->entry_length)) { 932 TRACE_OUT(on_transform_request_read1); 933 return (-1); 934 } 935 936 transform_request->entry = calloc(1, 937 transform_request->entry_length + 1); 938 assert(transform_request->entry != NULL); 939 940 qstate->process_func = on_transform_request_read2; 941 } else 942 qstate->process_func = on_transform_request_process; 943 944 qstate->kevent_watermark = transform_request->entry_length; 945 } 946 947 TRACE_OUT(on_transform_request_read1); 948 return (0); 949 } 950 951 static int 952 on_transform_request_read2(struct query_state *qstate) 953 { 954 struct cache_transform_request *transform_request; 955 ssize_t result; 956 957 TRACE_IN(on_transform_request_read2); 958 transform_request = get_cache_transform_request(&qstate->request); 959 960 result = qstate->read_func(qstate, transform_request->entry, 961 transform_request->entry_length); 962 963 if (result != (ssize_t)qstate->kevent_watermark) { 964 TRACE_OUT(on_transform_request_read2); 965 return (-1); 966 } 967 968 qstate->kevent_watermark = 0; 969 qstate->process_func = on_transform_request_process; 970 971 TRACE_OUT(on_transform_request_read2); 972 return (0); 973 } 974 975 static int 976 on_transform_request_process(struct query_state *qstate) 977 { 978 struct cache_transform_request *transform_request; 979 struct cache_transform_response *transform_response; 980 struct configuration_entry *config_entry; 981 size_t i, size; 982 983 TRACE_IN(on_transform_request_process); 984 init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE); 985 transform_response = get_cache_transform_response(&qstate->response); 986 transform_request = get_cache_transform_request(&qstate->request); 987 988 switch (transform_request->transformation_type) { 989 case TT_USER: 990 if (transform_request->entry == NULL) { 991 size = configuration_get_entries_size(s_configuration); 992 for (i = 0; i < size; ++i) { 993 config_entry = configuration_get_entry( 994 s_configuration, i); 995 996 if (config_entry->perform_actual_lookups == 0) 997 clear_config_entry_part(config_entry, 998 qstate->eid_str, qstate->eid_str_length); 999 } 1000 } else { 1001 qstate->config_entry = configuration_find_entry( 1002 s_configuration, transform_request->entry); 1003 1004 if (qstate->config_entry == NULL) { 1005 LOG_ERR_2("transform_request", 1006 "can't find configuration" 1007 " entry '%s'. aborting request", 1008 transform_request->entry); 1009 transform_response->error_code = -1; 1010 goto fin; 1011 } 1012 1013 if (qstate->config_entry->perform_actual_lookups != 0) { 1014 LOG_ERR_2("transform_request", 1015 "can't transform the cache entry %s" 1016 ", because it ised for actual lookups", 1017 transform_request->entry); 1018 transform_response->error_code = -1; 1019 goto fin; 1020 } 1021 1022 clear_config_entry_part(qstate->config_entry, 1023 qstate->eid_str, qstate->eid_str_length); 1024 } 1025 break; 1026 case TT_ALL: 1027 if (qstate->euid != 0) 1028 transform_response->error_code = -1; 1029 else { 1030 if (transform_request->entry == NULL) { 1031 size = configuration_get_entries_size( 1032 s_configuration); 1033 for (i = 0; i < size; ++i) { 1034 clear_config_entry( 1035 configuration_get_entry( 1036 s_configuration, i)); 1037 } 1038 } else { 1039 qstate->config_entry = configuration_find_entry( 1040 s_configuration, 1041 transform_request->entry); 1042 1043 if (qstate->config_entry == NULL) { 1044 LOG_ERR_2("transform_request", 1045 "can't find configuration" 1046 " entry '%s'. aborting request", 1047 transform_request->entry); 1048 transform_response->error_code = -1; 1049 goto fin; 1050 } 1051 1052 clear_config_entry(qstate->config_entry); 1053 } 1054 } 1055 break; 1056 default: 1057 transform_response->error_code = -1; 1058 } 1059 1060 fin: 1061 qstate->kevent_watermark = 0; 1062 qstate->process_func = on_transform_response_write1; 1063 TRACE_OUT(on_transform_request_process); 1064 return (0); 1065 } 1066 1067 static int 1068 on_transform_response_write1(struct query_state *qstate) 1069 { 1070 struct cache_transform_response *transform_response; 1071 ssize_t result; 1072 1073 TRACE_IN(on_transform_response_write1); 1074 transform_response = get_cache_transform_response(&qstate->response); 1075 result = qstate->write_func(qstate, &transform_response->error_code, 1076 sizeof(int)); 1077 if (result != sizeof(int)) { 1078 TRACE_OUT(on_transform_response_write1); 1079 return (-1); 1080 } 1081 1082 finalize_comm_element(&qstate->request); 1083 finalize_comm_element(&qstate->response); 1084 1085 qstate->kevent_watermark = 0; 1086 qstate->process_func = NULL; 1087 TRACE_OUT(on_transform_response_write1); 1088 return (0); 1089 } 1090 1091 /* 1092 * Checks if the client's euid and egid do not differ from its uid and gid. 1093 * Returns 0 on success. 1094 */ 1095 int 1096 check_query_eids(struct query_state *qstate) 1097 { 1098 1099 return ((qstate->uid != qstate->euid) || (qstate->gid != qstate->egid) ? -1 : 0); 1100 } 1101 1102 /* 1103 * Uses the qstate fields to process an "alternate" read - when the buffer is 1104 * too large to be received during one socket read operation 1105 */ 1106 ssize_t 1107 query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes) 1108 { 1109 size_t remaining; 1110 ssize_t result; 1111 1112 TRACE_IN(query_io_buffer_read); 1113 if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL)) 1114 return (-1); 1115 1116 assert(qstate->io_buffer_p <= 1117 qstate->io_buffer + qstate->io_buffer_size); 1118 remaining = qstate->io_buffer + qstate->io_buffer_size - 1119 qstate->io_buffer_p; 1120 if (nbytes < remaining) 1121 result = nbytes; 1122 else 1123 result = remaining; 1124 1125 memcpy(buf, qstate->io_buffer_p, result); 1126 qstate->io_buffer_p += result; 1127 1128 if (remaining == 0) { 1129 free(qstate->io_buffer); 1130 qstate->io_buffer = NULL; 1131 1132 qstate->write_func = query_socket_write; 1133 qstate->read_func = query_socket_read; 1134 } 1135 1136 TRACE_OUT(query_io_buffer_read); 1137 return (result); 1138 } 1139 1140 /* 1141 * Uses the qstate fields to process an "alternate" write - when the buffer is 1142 * too large to be sent during one socket write operation 1143 */ 1144 ssize_t 1145 query_io_buffer_write(struct query_state *qstate, const void *buf, 1146 size_t nbytes) 1147 { 1148 size_t remaining; 1149 ssize_t result; 1150 1151 TRACE_IN(query_io_buffer_write); 1152 if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL)) 1153 return (-1); 1154 1155 assert(qstate->io_buffer_p <= 1156 qstate->io_buffer + qstate->io_buffer_size); 1157 remaining = qstate->io_buffer + qstate->io_buffer_size - 1158 qstate->io_buffer_p; 1159 if (nbytes < remaining) 1160 result = nbytes; 1161 else 1162 result = remaining; 1163 1164 memcpy(qstate->io_buffer_p, buf, result); 1165 qstate->io_buffer_p += result; 1166 1167 if (remaining == 0) { 1168 qstate->use_alternate_io = 1; 1169 qstate->io_buffer_p = qstate->io_buffer; 1170 1171 qstate->write_func = query_socket_write; 1172 qstate->read_func = query_socket_read; 1173 } 1174 1175 TRACE_OUT(query_io_buffer_write); 1176 return (result); 1177 } 1178 1179 /* 1180 * The default "read" function, which reads data directly from socket 1181 */ 1182 ssize_t 1183 query_socket_read(struct query_state *qstate, void *buf, size_t nbytes) 1184 { 1185 ssize_t result; 1186 1187 TRACE_IN(query_socket_read); 1188 if (qstate->socket_failed != 0) { 1189 TRACE_OUT(query_socket_read); 1190 return (-1); 1191 } 1192 1193 result = read(qstate->sockfd, buf, nbytes); 1194 if (result < 0 || (size_t)result < nbytes) 1195 qstate->socket_failed = 1; 1196 1197 TRACE_OUT(query_socket_read); 1198 return (result); 1199 } 1200 1201 /* 1202 * The default "write" function, which writes data directly to socket 1203 */ 1204 ssize_t 1205 query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes) 1206 { 1207 ssize_t result; 1208 1209 TRACE_IN(query_socket_write); 1210 if (qstate->socket_failed != 0) { 1211 TRACE_OUT(query_socket_write); 1212 return (-1); 1213 } 1214 1215 result = write(qstate->sockfd, buf, nbytes); 1216 if (result < 0 || (size_t)result < nbytes) 1217 qstate->socket_failed = 1; 1218 1219 TRACE_OUT(query_socket_write); 1220 return (result); 1221 } 1222 1223 /* 1224 * Initializes the query_state structure by filling it with the default values. 1225 */ 1226 struct query_state * 1227 init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid) 1228 { 1229 struct query_state *retval; 1230 1231 TRACE_IN(init_query_state); 1232 retval = calloc(1, sizeof(*retval)); 1233 assert(retval != NULL); 1234 1235 retval->sockfd = sockfd; 1236 retval->kevent_filter = EVFILT_READ; 1237 retval->kevent_watermark = kevent_watermark; 1238 1239 retval->euid = euid; 1240 retval->egid = egid; 1241 retval->uid = retval->gid = -1; 1242 1243 if (asprintf(&retval->eid_str, "%d_%d_", retval->euid, 1244 retval->egid) == -1) { 1245 free(retval); 1246 return (NULL); 1247 } 1248 retval->eid_str_length = strlen(retval->eid_str); 1249 1250 init_comm_element(&retval->request, CET_UNDEFINED); 1251 init_comm_element(&retval->response, CET_UNDEFINED); 1252 retval->process_func = on_query_startup; 1253 retval->destroy_func = on_query_destroy; 1254 1255 retval->write_func = query_socket_write; 1256 retval->read_func = query_socket_read; 1257 1258 get_time_func(&retval->creation_time); 1259 retval->timeout.tv_sec = s_configuration->query_timeout; 1260 retval->timeout.tv_usec = 0; 1261 1262 TRACE_OUT(init_query_state); 1263 return (retval); 1264 } 1265 1266 void 1267 destroy_query_state(struct query_state *qstate) 1268 { 1269 1270 TRACE_IN(destroy_query_state); 1271 if (qstate->eid_str != NULL) 1272 free(qstate->eid_str); 1273 1274 if (qstate->io_buffer != NULL) 1275 free(qstate->io_buffer); 1276 1277 qstate->destroy_func(qstate); 1278 free(qstate); 1279 TRACE_OUT(destroy_query_state); 1280 } 1281