/*-
 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/time.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "cachelib.h"
#include "debug.h"

#define INITIAL_ENTRIES_CAPACITY	32
#define ENTRIES_CAPACITY_STEP		32

#define STRING_SIMPLE_HASH_BODY(in_var, var, a, M)		\
	for ((var) = 0; *(in_var) != '\0'; ++(in_var))		\
		(var) = ((a) * (var) + *(in_var)) % (M)

#define STRING_SIMPLE_MP2_HASH_BODY(in_var, var, a, M)		\
	for ((var) = 0; *(in_var) != 0; ++(in_var))		\
		(var) = ((a) * (var) + *(in_var)) & (M - 1)
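
/*
 * Illustrative sketch (not part of the original source): the macros above
 * compute a simple rolling hash, var = a * var + c, over a NUL-terminated
 * string, reducing it modulo M (or masking against a power-of-two M in the
 * MP2 variant).  The identifiers p and hash and the constants used here are
 * hypothetical:
 *
 *	const char *p = "group";
 *	hashtable_index_t hash;
 *
 *	STRING_SIMPLE_HASH_BODY(p, hash, 127, 257);
 */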

static int cache_elemsize_common_continue_func(struct cache_common_entry_ *,
	struct cache_policy_item_ *);
static int cache_lifetime_common_continue_func(struct cache_common_entry_ *,
	struct cache_policy_item_ *);
static void clear_cache_entry(struct cache_entry_ *);
static void destroy_cache_entry(struct cache_entry_ *);
static void destroy_cache_mp_read_session(struct cache_mp_read_session_ *);
static void destroy_cache_mp_write_session(struct cache_mp_write_session_ *);
static int entries_bsearch_cmp_func(const void *, const void *);
static int entries_qsort_cmp_func(const void *, const void *);
static struct cache_entry_ **find_cache_entry_p(struct cache_ *,
	const char *);
static void flush_cache_entry(struct cache_entry_ *);
static void flush_cache_policy(struct cache_common_entry_ *,
	struct cache_policy_ *, struct cache_policy_ *,
	int (*)(struct cache_common_entry_ *,
	struct cache_policy_item_ *));
static int ht_items_cmp_func(const void *, const void *);
static int ht_items_fixed_size_left_cmp_func(const void *, const void *);
static hashtable_index_t ht_item_hash_func(const void *, size_t);

/*
 * Hashing and comparing routines that are used with the hash tables
 */
static int
ht_items_cmp_func(const void *p1, const void *p2)
{
	struct cache_ht_item_data_ *hp1, *hp2;
	size_t min_size;
	int result;

	hp1 = (struct cache_ht_item_data_ *)p1;
	hp2 = (struct cache_ht_item_data_ *)p2;

	assert(hp1->key != NULL);
	assert(hp2->key != NULL);

	if (hp1->key_size != hp2->key_size) {
		min_size = (hp1->key_size < hp2->key_size) ? hp1->key_size :
		    hp2->key_size;
		result = memcmp(hp1->key, hp2->key, min_size);

		if (result == 0)
			return ((hp1->key_size < hp2->key_size) ? -1 : 1);
		else
			return (result);
	} else
		return (memcmp(hp1->key, hp2->key, hp1->key_size));
}

static int
ht_items_fixed_size_left_cmp_func(const void *p1, const void *p2)
{
	struct cache_ht_item_data_ *hp1, *hp2;
	size_t min_size;
	int result;

	hp1 = (struct cache_ht_item_data_ *)p1;
	hp2 = (struct cache_ht_item_data_ *)p2;

	assert(hp1->key != NULL);
	assert(hp2->key != NULL);

	if (hp1->key_size != hp2->key_size) {
		min_size = (hp1->key_size < hp2->key_size) ? hp1->key_size :
		    hp2->key_size;
		result = memcmp(hp1->key, hp2->key, min_size);

		if (result == 0) {
			/*
			 * The shorter key is a prefix of the longer one;
			 * if the left key was fully consumed, treat the
			 * keys as equal.
			 */
			if (min_size == hp1->key_size)
				return (0);
			else
				return ((hp1->key_size < hp2->key_size) ?
				    -1 : 1);
		} else
			return (result);
	} else
		return (memcmp(hp1->key, hp2->key, hp1->key_size));
}

static hashtable_index_t
ht_item_hash_func(const void *p, size_t cache_entries_size)
{
	struct cache_ht_item_data_ *hp;
	size_t i;

	hashtable_index_t retval;

	hp = (struct cache_ht_item_data_ *)p;
	assert(hp->key != NULL);

	retval = 0;
	for (i = 0; i < hp->key_size; ++i)
		retval = (127 * retval + (unsigned char)hp->key[i]) %
		    cache_entries_size;

	return (retval);
}

HASHTABLE_GENERATE(cache_ht_, cache_ht_item_, struct cache_ht_item_data_, data,
    ht_item_hash_func, ht_items_cmp_func);
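
/*
 * Worked example (illustrative, not part of the original source): for a
 * two-byte key "ro" and cache_entries_size == 257, ht_item_hash_func()
 * computes
 *
 *	retval = (127 * 0 + 'r') % 257 = 114
 *	retval = (127 * 114 + 'o') % 257 = 14589 % 257 = 197
 *
 * so the item is placed into hash table entry 197.
 */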

/*
 * Routines to sort and search the entries by name
 */
static int
entries_bsearch_cmp_func(const void *key, const void *ent)
{

	assert(key != NULL);
	assert(ent != NULL);

	return (strcmp((char const *)key,
	    (*(struct cache_entry_ const **)ent)->name));
}

static int
entries_qsort_cmp_func(const void *e1, const void *e2)
{

	assert(e1 != NULL);
	assert(e2 != NULL);

	return (strcmp((*(struct cache_entry_ const **)e1)->name,
	    (*(struct cache_entry_ const **)e2)->name));
}

static struct cache_entry_ **
find_cache_entry_p(struct cache_ *the_cache, const char *entry_name)
{

	return ((struct cache_entry_ **)(bsearch(entry_name,
	    the_cache->entries, the_cache->entries_size,
	    sizeof(struct cache_entry_ *), entries_bsearch_cmp_func)));
}

static void
destroy_cache_mp_write_session(struct cache_mp_write_session_ *ws)
{
	struct cache_mp_data_item_ *data_item;

	TRACE_IN(destroy_cache_mp_write_session);
	assert(ws != NULL);
	while (!TAILQ_EMPTY(&ws->items)) {
		data_item = TAILQ_FIRST(&ws->items);
		TAILQ_REMOVE(&ws->items, data_item, entries);
		free(data_item->value);
		free(data_item);
	}

	free(ws);
	TRACE_OUT(destroy_cache_mp_write_session);
}

static void
destroy_cache_mp_read_session(struct cache_mp_read_session_ *rs)
{

	TRACE_IN(destroy_cache_mp_read_session);
	assert(rs != NULL);
	free(rs);
	TRACE_OUT(destroy_cache_mp_read_session);
}

static void
destroy_cache_entry(struct cache_entry_ *entry)
{
	struct cache_common_entry_ *common_entry;
	struct cache_mp_entry_ *mp_entry;
	struct cache_mp_read_session_ *rs;
	struct cache_mp_write_session_ *ws;
	struct cache_ht_item_ *ht_item;
	struct cache_ht_item_data_ *ht_item_data;

	TRACE_IN(destroy_cache_entry);
	assert(entry != NULL);

	if (entry->params->entry_type == CET_COMMON) {
		common_entry = (struct cache_common_entry_ *)entry;

		HASHTABLE_FOREACH(&(common_entry->items), ht_item) {
			HASHTABLE_ENTRY_FOREACH(ht_item, data, ht_item_data)
			{
				free(ht_item_data->key);
				free(ht_item_data->value);
			}
			HASHTABLE_ENTRY_CLEAR(ht_item, data);
		}

		HASHTABLE_DESTROY(&(common_entry->items), data);

		/* FIFO policy is always first */
		destroy_cache_fifo_policy(common_entry->policies[0]);
		switch (common_entry->common_params.policy) {
		case CPT_LRU:
			destroy_cache_lru_policy(common_entry->policies[1]);
			break;
		case CPT_LFU:
			destroy_cache_lfu_policy(common_entry->policies[1]);
			break;
		default:
			break;
		}
		free(common_entry->policies);
	} else {
		mp_entry = (struct cache_mp_entry_ *)entry;

		while (!TAILQ_EMPTY(&mp_entry->ws_head)) {
			ws = TAILQ_FIRST(&mp_entry->ws_head);
			TAILQ_REMOVE(&mp_entry->ws_head, ws, entries);
			destroy_cache_mp_write_session(ws);
		}

		while (!TAILQ_EMPTY(&mp_entry->rs_head)) {
			rs = TAILQ_FIRST(&mp_entry->rs_head);
			TAILQ_REMOVE(&mp_entry->rs_head, rs, entries);
			destroy_cache_mp_read_session(rs);
		}

		if (mp_entry->completed_write_session != NULL)
			destroy_cache_mp_write_session(
			    mp_entry->completed_write_session);

		if (mp_entry->pending_write_session != NULL)
			destroy_cache_mp_write_session(
			    mp_entry->pending_write_session);
	}

	free(entry->name);
	free(entry);
	TRACE_OUT(destroy_cache_entry);
}

static void
clear_cache_entry(struct cache_entry_ *entry)
{
	struct cache_mp_entry_ *mp_entry;
	struct cache_common_entry_ *common_entry;
	struct cache_ht_item_ *ht_item;
	struct cache_ht_item_data_ *ht_item_data;
	struct cache_policy_ *policy;
	struct cache_policy_item_ *item, *next_item;
	size_t entry_size;
	int i;

	if (entry->params->entry_type == CET_COMMON) {
		common_entry = (struct cache_common_entry_ *)entry;

		entry_size = 0;
		HASHTABLE_FOREACH(&(common_entry->items), ht_item) {
			HASHTABLE_ENTRY_FOREACH(ht_item, data, ht_item_data)
			{
				free(ht_item_data->key);
				free(ht_item_data->value);
			}
			entry_size += HASHTABLE_ENTRY_SIZE(ht_item, data);
			HASHTABLE_ENTRY_CLEAR(ht_item, data);
		}

		common_entry->items_size -= entry_size;
		for (i = 0; i < common_entry->policies_size; ++i) {
			policy = common_entry->policies[i];

			next_item = NULL;
			item = policy->get_first_item_func(policy);
			while (item != NULL) {
				next_item = policy->get_next_item_func(policy,
				    item);
				policy->remove_item_func(policy, item);
				policy->destroy_item_func(item);
				item = next_item;
			}
		}
	} else {
		mp_entry = (struct cache_mp_entry_ *)entry;

		if (mp_entry->rs_size == 0) {
			if (mp_entry->completed_write_session != NULL) {
				destroy_cache_mp_write_session(
				    mp_entry->completed_write_session);
				mp_entry->completed_write_session = NULL;
			}

			memset(&mp_entry->creation_time, 0,
			    sizeof(struct timeval));
			memset(&mp_entry->last_request_time, 0,
			    sizeof(struct timeval));
		}
	}
}

/*
 * When passed to flush_cache_policy(), ensures that all expired elements
 * are deleted.
 */
static int
cache_lifetime_common_continue_func(struct cache_common_entry_ *entry,
	struct cache_policy_item_ *item)
{

	return ((item->last_request_time.tv_sec - item->creation_time.tv_sec >
	    entry->common_params.max_lifetime.tv_sec) ? 1 : 0);
}

/*
 * When passed to flush_cache_policy(), ensures that elements are deleted
 * until the entry size drops to the satisf_elemsize limit.
 */
static int
cache_elemsize_common_continue_func(struct cache_common_entry_ *entry,
	struct cache_policy_item_ *item)
{

	return ((entry->items_size > entry->common_params.satisf_elemsize) ?
	    1 : 0);
}

/*
 * Removes elements from the cache entry while continue_func returns 1.
 */
static void
flush_cache_policy(struct cache_common_entry_ *entry,
	struct cache_policy_ *policy,
	struct cache_policy_ *connected_policy,
	int (*continue_func)(struct cache_common_entry_ *,
	struct cache_policy_item_ *))
{
	struct cache_policy_item_ *item, *next_item, *connected_item;
	struct cache_ht_item_ *ht_item;
	struct cache_ht_item_data_ *ht_item_data, ht_key;
	hashtable_index_t hash;

	assert(policy != NULL);

	next_item = NULL;
	item = policy->get_first_item_func(policy);
	while ((item != NULL) && (continue_func(entry, item) == 1)) {
		next_item = policy->get_next_item_func(policy, item);

		connected_item = item->connected_item;
		policy->remove_item_func(policy, item);

		memset(&ht_key, 0, sizeof(struct cache_ht_item_data_));
		ht_key.key = item->key;
		ht_key.key_size = item->key_size;

		hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &entry->items,
		    &ht_key);
		assert(hash >= 0);
		assert(hash < HASHTABLE_ENTRIES_COUNT(&entry->items));

		ht_item = HASHTABLE_GET_ENTRY(&(entry->items), hash);
		ht_item_data = HASHTABLE_ENTRY_FIND(cache_ht_, ht_item,
		    &ht_key);
		assert(ht_item_data != NULL);
		free(ht_item_data->key);
		free(ht_item_data->value);
		HASHTABLE_ENTRY_REMOVE(cache_ht_, ht_item, ht_item_data);
		--entry->items_size;

		policy->destroy_item_func(item);

		if (connected_item != NULL) {
			connected_policy->remove_item_func(connected_policy,
			    connected_item);
			connected_policy->destroy_item_func(connected_item);
		}

		item = next_item;
	}
}
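
/*
 * Illustrative sketch (not part of the original source): any predicate with
 * this signature can drive flush_cache_policy().  A hypothetical policy that
 * evicts items which were never read back could look like:
 *
 *	static int
 *	evict_unreferenced_continue_func(struct cache_common_entry_ *entry,
 *		struct cache_policy_item_ *item)
 *	{
 *
 *		return ((item->request_count == 0) ? 1 : 0);
 *	}
 */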

static void
flush_cache_entry(struct cache_entry_ *entry)
{
	struct cache_mp_entry_ *mp_entry;
	struct cache_common_entry_ *common_entry;
	struct cache_policy_ *policy, *connected_policy;

	connected_policy = NULL;
	if (entry->params->entry_type == CET_COMMON) {
		common_entry = (struct cache_common_entry_ *)entry;
		if ((common_entry->common_params.max_lifetime.tv_sec != 0) ||
		    (common_entry->common_params.max_lifetime.tv_usec != 0)) {

			policy = common_entry->policies[0];
			if (common_entry->policies_size > 1)
				connected_policy = common_entry->policies[1];

			flush_cache_policy(common_entry, policy,
			    connected_policy,
			    cache_lifetime_common_continue_func);
		}

		if ((common_entry->common_params.max_elemsize != 0) &&
		    common_entry->items_size >
		    common_entry->common_params.max_elemsize) {

			if (common_entry->policies_size > 1) {
				policy = common_entry->policies[1];
				connected_policy = common_entry->policies[0];
			} else {
				policy = common_entry->policies[0];
				connected_policy = NULL;
			}

			flush_cache_policy(common_entry, policy,
			    connected_policy,
			    cache_elemsize_common_continue_func);
		}
	} else {
		mp_entry = (struct cache_mp_entry_ *)entry;

		if ((mp_entry->mp_params.max_lifetime.tv_sec != 0) ||
		    (mp_entry->mp_params.max_lifetime.tv_usec != 0)) {

			/* clear the entry if its data have outlived max_lifetime */
			if (mp_entry->last_request_time.tv_sec -
			    mp_entry->creation_time.tv_sec >
			    mp_entry->mp_params.max_lifetime.tv_sec)
				clear_cache_entry(entry);
		}
	}
}

struct cache_ *
init_cache(struct cache_params const *params)
{
	struct cache_ *retval;

	TRACE_IN(init_cache);
	assert(params != NULL);

	retval = (struct cache_ *)malloc(sizeof(struct cache_));
	assert(retval != NULL);
	memset(retval, 0, sizeof(struct cache_));

	memcpy(&retval->params, params, sizeof(struct cache_params));

	retval->entries = (struct cache_entry_ **)malloc(
	    sizeof(struct cache_entry_ *) * INITIAL_ENTRIES_CAPACITY);
	assert(retval->entries != NULL);
	memset(retval->entries, 0,
	    sizeof(struct cache_entry_ *) * INITIAL_ENTRIES_CAPACITY);

	retval->entries_capacity = INITIAL_ENTRIES_CAPACITY;
	retval->entries_size = 0;

	TRACE_OUT(init_cache);
	return (retval);
}

void
destroy_cache(struct cache_ *the_cache)
{

	TRACE_IN(destroy_cache);
	assert(the_cache != NULL);

	if (the_cache->entries != NULL) {
		size_t i;

		for (i = 0; i < the_cache->entries_size; ++i)
			destroy_cache_entry(the_cache->entries[i]);

		free(the_cache->entries);
	}

	free(the_cache);
	TRACE_OUT(destroy_cache);
}
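
/*
 * Usage sketch (illustrative only): setting up a cache with one common
 * entry.  The cache_params/common_cache_entry_params fields referenced
 * below are the ones this file relies on (declared in cachelib.h), and
 * my_get_time() is a hypothetical time callback:
 *
 *	static void
 *	my_get_time(struct timeval *tv)
 *	{
 *		gettimeofday(tv, NULL);
 *	}
 *
 *	struct cache_params cp;
 *	struct common_cache_entry_params ep;
 *	struct cache_ *c;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	cp.get_time_func = my_get_time;
 *	c = init_cache(&cp);
 *
 *	memset(&ep, 0, sizeof(ep));
 *	ep.entry_type = CET_COMMON;
 *	ep.entry_name = "hosts";	(copied by register_cache_entry())
 *	ep.cache_entries_size = 256;
 *	ep.policy = CPT_LRU;
 *	register_cache_entry(c, (struct cache_entry_params *)&ep);
 *
 *	... use cache_read()/cache_write() on find_cache_entry(c, "hosts") ...
 *
 *	destroy_cache(c);
 */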

int
register_cache_entry(struct cache_ *the_cache,
	struct cache_entry_params const *params)
{
	int policies_size;
	size_t entry_name_size;
	struct cache_common_entry_ *new_common_entry;
	struct cache_mp_entry_ *new_mp_entry;

	TRACE_IN(register_cache_entry);
	assert(the_cache != NULL);

	if (find_cache_entry(the_cache, params->entry_name) != NULL) {
		TRACE_OUT(register_cache_entry);
		return (-1);
	}

	if (the_cache->entries_size == the_cache->entries_capacity) {
		struct cache_entry_ **new_entries;
		size_t new_capacity;

		new_capacity = the_cache->entries_capacity +
		    ENTRIES_CAPACITY_STEP;
		new_entries = (struct cache_entry_ **)malloc(
		    sizeof(struct cache_entry_ *) * new_capacity);
		assert(new_entries != NULL);

		memset(new_entries, 0, sizeof(struct cache_entry_ *) *
		    new_capacity);
		memcpy(new_entries, the_cache->entries,
		    sizeof(struct cache_entry_ *) *
		    the_cache->entries_size);

		free(the_cache->entries);
		the_cache->entries = new_entries;
		the_cache->entries_capacity = new_capacity;
	}

	entry_name_size = strlen(params->entry_name);
	switch (params->entry_type) {
	case CET_COMMON:
		new_common_entry = (struct cache_common_entry_ *)malloc(
		    sizeof(struct cache_common_entry_));
		assert(new_common_entry != NULL);
		memset(new_common_entry, 0,
		    sizeof(struct cache_common_entry_));

		memcpy(&new_common_entry->common_params, params,
		    sizeof(struct common_cache_entry_params));
		new_common_entry->params =
		    (struct cache_entry_params *)&new_common_entry->common_params;

		new_common_entry->common_params.entry_name = (char *)malloc(
		    entry_name_size + 1);
		assert(new_common_entry->common_params.entry_name != NULL);
		memset(new_common_entry->common_params.entry_name, 0,
		    entry_name_size + 1);
		strncpy(new_common_entry->common_params.entry_name,
		    params->entry_name, entry_name_size);
		new_common_entry->name =
		    new_common_entry->common_params.entry_name;

		HASHTABLE_INIT(&(new_common_entry->items),
		    struct cache_ht_item_data_, data,
		    new_common_entry->common_params.cache_entries_size);

		if (new_common_entry->common_params.policy == CPT_FIFO)
			policies_size = 1;
		else
			policies_size = 2;

		new_common_entry->policies = (struct cache_policy_ **)malloc(
		    sizeof(struct cache_policy_ *) * policies_size);
		assert(new_common_entry->policies != NULL);
		memset(new_common_entry->policies, 0,
		    sizeof(struct cache_policy_ *) * policies_size);

		new_common_entry->policies_size = policies_size;
		new_common_entry->policies[0] = init_cache_fifo_policy();

		if (policies_size > 1) {
			switch (new_common_entry->common_params.policy) {
			case CPT_LRU:
				new_common_entry->policies[1] =
				    init_cache_lru_policy();
				break;
			case CPT_LFU:
				new_common_entry->policies[1] =
				    init_cache_lfu_policy();
				break;
			default:
				break;
			}
		}

		new_common_entry->get_time_func =
		    the_cache->params.get_time_func;
		the_cache->entries[the_cache->entries_size++] =
		    (struct cache_entry_ *)new_common_entry;
		break;
	case CET_MULTIPART:
		new_mp_entry = (struct cache_mp_entry_ *)malloc(
		    sizeof(struct cache_mp_entry_));
		assert(new_mp_entry != NULL);
		memset(new_mp_entry, 0, sizeof(struct cache_mp_entry_));

		memcpy(&new_mp_entry->mp_params, params,
		    sizeof(struct mp_cache_entry_params));
		new_mp_entry->params =
		    (struct cache_entry_params *)&new_mp_entry->mp_params;

		new_mp_entry->mp_params.entry_name = (char *)malloc(
		    entry_name_size + 1);
		assert(new_mp_entry->mp_params.entry_name != NULL);
		memset(new_mp_entry->mp_params.entry_name, 0,
		    entry_name_size + 1);
		strncpy(new_mp_entry->mp_params.entry_name,
		    params->entry_name, entry_name_size);
		new_mp_entry->name = new_mp_entry->mp_params.entry_name;

		TAILQ_INIT(&new_mp_entry->ws_head);
		TAILQ_INIT(&new_mp_entry->rs_head);

		new_mp_entry->get_time_func = the_cache->params.get_time_func;
		the_cache->entries[the_cache->entries_size++] =
		    (struct cache_entry_ *)new_mp_entry;
		break;
	}

	qsort(the_cache->entries, the_cache->entries_size,
	    sizeof(struct cache_entry_ *), entries_qsort_cmp_func);

	TRACE_OUT(register_cache_entry);
	return (0);
}

int
unregister_cache_entry(struct cache_ *the_cache, const char *entry_name)
{
	struct cache_entry_ **del_ent;

	TRACE_IN(unregister_cache_entry);
	assert(the_cache != NULL);

	del_ent = find_cache_entry_p(the_cache, entry_name);
	if (del_ent != NULL) {
		destroy_cache_entry(*del_ent);
		--the_cache->entries_size;

		memmove(del_ent, del_ent + 1,
		    (&(the_cache->entries[the_cache->entries_size]) -
		    del_ent) * sizeof(struct cache_entry_ *));

		TRACE_OUT(unregister_cache_entry);
		return (0);
	} else {
		TRACE_OUT(unregister_cache_entry);
		return (-1);
	}
}

struct cache_entry_ *
find_cache_entry(struct cache_ *the_cache, const char *entry_name)
{
	struct cache_entry_ **result;

	TRACE_IN(find_cache_entry);
	result = find_cache_entry_p(the_cache, entry_name);

	if (result == NULL) {
		TRACE_OUT(find_cache_entry);
		return (NULL);
	} else {
		TRACE_OUT(find_cache_entry);
		return (*result);
	}
}

/*
 * Tries to read the element with the specified key from the cache. If the
 * value_size is too small, it will be set to the required size and the
 * caller will need to call cache_read() again with a value buffer that is
 * large enough.
 * Returns 0 on success, -1 on error, and -2 if the value_size is too small.
 */
int
cache_read(struct cache_entry_ *entry, const char *key, size_t key_size,
	char *value, size_t *value_size)
{
	struct cache_common_entry_ *common_entry;
	struct cache_ht_item_data_ item_data, *find_res;
	struct cache_ht_item_ *item;
	hashtable_index_t hash;
	struct cache_policy_item_ *connected_item;

	TRACE_IN(cache_read);
	assert(entry != NULL);
	assert(key != NULL);
	assert(value_size != NULL);
	assert(entry->params->entry_type == CET_COMMON);

	common_entry = (struct cache_common_entry_ *)entry;

	memset(&item_data, 0, sizeof(struct cache_ht_item_data_));
	/* can't avoid the cast here */
	item_data.key = (char *)key;
	item_data.key_size = key_size;

	hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &common_entry->items,
	    &item_data);
	assert(hash >= 0);
	assert(hash < HASHTABLE_ENTRIES_COUNT(&common_entry->items));

	item = HASHTABLE_GET_ENTRY(&(common_entry->items), hash);
	find_res = HASHTABLE_ENTRY_FIND(cache_ht_, item, &item_data);
	if (find_res == NULL) {
		TRACE_OUT(cache_read);
		return (-1);
	}

	/* drop the item if its lifetime has expired */
	if ((common_entry->common_params.max_lifetime.tv_sec != 0) ||
	    (common_entry->common_params.max_lifetime.tv_usec != 0)) {

		if (find_res->fifo_policy_item->last_request_time.tv_sec -
		    find_res->fifo_policy_item->creation_time.tv_sec >
		    common_entry->common_params.max_lifetime.tv_sec) {

			free(find_res->key);
			free(find_res->value);

			connected_item =
			    find_res->fifo_policy_item->connected_item;
			if (connected_item != NULL) {
				common_entry->policies[1]->remove_item_func(
				    common_entry->policies[1],
				    connected_item);
				common_entry->policies[1]->destroy_item_func(
				    connected_item);
			}

			common_entry->policies[0]->remove_item_func(
			    common_entry->policies[0],
			    find_res->fifo_policy_item);
			common_entry->policies[0]->destroy_item_func(
			    find_res->fifo_policy_item);

			HASHTABLE_ENTRY_REMOVE(cache_ht_, item, find_res);
			--common_entry->items_size;

			/* the expired item is gone - report a miss */
			TRACE_OUT(cache_read);
			return (-1);
		}
	}

	if ((*value_size < find_res->value_size) || (value == NULL)) {
		*value_size = find_res->value_size;
		TRACE_OUT(cache_read);
		return (-2);
	}

	*value_size = find_res->value_size;
	memcpy(value, find_res->value, find_res->value_size);

	++find_res->fifo_policy_item->request_count;
	common_entry->get_time_func(
	    &find_res->fifo_policy_item->last_request_time);
	common_entry->policies[0]->update_item_func(common_entry->policies[0],
	    find_res->fifo_policy_item);

	if (find_res->fifo_policy_item->connected_item != NULL) {
		connected_item = find_res->fifo_policy_item->connected_item;
		memcpy(&connected_item->last_request_time,
		    &find_res->fifo_policy_item->last_request_time,
		    sizeof(struct timeval));
		connected_item->request_count =
		    find_res->fifo_policy_item->request_count;

		common_entry->policies[1]->update_item_func(
		    common_entry->policies[1], connected_item);
	}

	TRACE_OUT(cache_read);
	return (0);
}

/*
 * Writes the value with the specified key into the cache entry.
 * Returns 0 on success and -1 on error.
 */
int
cache_write(struct cache_entry_ *entry, const char *key, size_t key_size,
	char const *value, size_t value_size)
{
	struct cache_common_entry_ *common_entry;
	struct cache_ht_item_data_ item_data, *find_res;
	struct cache_ht_item_ *item;
	hashtable_index_t hash;
	struct cache_policy_ *policy, *connected_policy;
	struct cache_policy_item_ *policy_item;
	struct cache_policy_item_ *connected_policy_item;

	TRACE_IN(cache_write);
	assert(entry != NULL);
	assert(key != NULL);
	assert(value != NULL);
	assert(entry->params->entry_type == CET_COMMON);

	common_entry = (struct cache_common_entry_ *)entry;

	memset(&item_data, 0, sizeof(struct cache_ht_item_data_));
	/* can't avoid the cast here */
	item_data.key = (char *)key;
	item_data.key_size = key_size;

	hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &common_entry->items,
	    &item_data);
	assert(hash >= 0);
	assert(hash < HASHTABLE_ENTRIES_COUNT(&common_entry->items));

	item = HASHTABLE_GET_ENTRY(&(common_entry->items), hash);
	find_res = HASHTABLE_ENTRY_FIND(cache_ht_, item, &item_data);
	if (find_res != NULL) {
		TRACE_OUT(cache_write);
		return (-1);
	}

	item_data.key = (char *)malloc(key_size);
	assert(item_data.key != NULL);
	memcpy(item_data.key, key, key_size);

	item_data.value = (char *)malloc(value_size);
	assert(item_data.value != NULL);
	memcpy(item_data.value, value, value_size);
	item_data.value_size = value_size;

	policy_item = common_entry->policies[0]->create_item_func();
	policy_item->key = item_data.key;
	policy_item->key_size = item_data.key_size;
	common_entry->get_time_func(&policy_item->creation_time);

	if (common_entry->policies_size > 1) {
		connected_policy_item =
		    common_entry->policies[1]->create_item_func();
		memcpy(&connected_policy_item->creation_time,
		    &policy_item->creation_time,
		    sizeof(struct timeval));
		connected_policy_item->key = policy_item->key;
		connected_policy_item->key_size = policy_item->key_size;

		connected_policy_item->connected_item = policy_item;
		policy_item->connected_item = connected_policy_item;
	}

	item_data.fifo_policy_item = policy_item;

	common_entry->policies[0]->add_item_func(common_entry->policies[0],
	    policy_item);
	if (common_entry->policies_size > 1)
		common_entry->policies[1]->add_item_func(
		    common_entry->policies[1], connected_policy_item);

	HASHTABLE_ENTRY_STORE(cache_ht_, item, &item_data);
	++common_entry->items_size;

	if ((common_entry->common_params.max_elemsize != 0) &&
	    (common_entry->items_size >
	    common_entry->common_params.max_elemsize)) {
		if (common_entry->policies_size > 1) {
			policy = common_entry->policies[1];
			connected_policy = common_entry->policies[0];
		} else {
			policy = common_entry->policies[0];
			connected_policy = NULL;
		}

		flush_cache_policy(common_entry, policy, connected_policy,
		    cache_elemsize_common_continue_func);
	}

	TRACE_OUT(cache_write);
	return (0);
}
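
/*
 * Usage sketch (illustrative only; ent, key and the buffer handling are
 * hypothetical, the calls are cache_write()/cache_read() as defined above):
 *
 *	size_t vsize = 0;
 *	char *buf;
 *
 *	cache_write(ent, key, strlen(key) + 1, data, data_size);
 *
 *	if (cache_read(ent, key, strlen(key) + 1, NULL, &vsize) == -2) {
 *		buf = malloc(vsize);
 *		assert(buf != NULL);
 *		if (cache_read(ent, key, strlen(key) + 1, buf, &vsize) == 0)
 *			... vsize bytes of buf now hold the cached value ...
 *		free(buf);
 *	}
 */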

/*
 * Initializes a write session for the specified multipart entry. The session
 * should be filled with data and then either committed with
 * close_cache_mp_write_session() or discarded with
 * abandon_cache_mp_write_session().
 * Returns NULL on error (when there are too many open write sessions for
 * the entry).
 */
struct cache_mp_write_session_ *
open_cache_mp_write_session(struct cache_entry_ *entry)
{
	struct cache_mp_entry_ *mp_entry;
	struct cache_mp_write_session_ *retval;

	TRACE_IN(open_cache_mp_write_session);
	assert(entry != NULL);
	assert(entry->params->entry_type == CET_MULTIPART);
	mp_entry = (struct cache_mp_entry_ *)entry;

	if ((mp_entry->mp_params.max_sessions > 0) &&
	    (mp_entry->ws_size == mp_entry->mp_params.max_sessions)) {
		TRACE_OUT(open_cache_mp_write_session);
		return (NULL);
	}

	retval = (struct cache_mp_write_session_ *)malloc(
	    sizeof(struct cache_mp_write_session_));
	assert(retval != NULL);
	memset(retval, 0, sizeof(struct cache_mp_write_session_));

	TAILQ_INIT(&retval->items);
	retval->parent_entry = mp_entry;

	TAILQ_INSERT_HEAD(&mp_entry->ws_head, retval, entries);
	++mp_entry->ws_size;

	TRACE_OUT(open_cache_mp_write_session);
	return (retval);
}

/*
 * Writes data to the specified session. Returns 0 on success and -1 on error
 * (when the write session size limit is exceeded).
 */
int
cache_mp_write(struct cache_mp_write_session_ *ws, char *data,
	size_t data_size)
{
	struct cache_mp_data_item_ *new_item;

	TRACE_IN(cache_mp_write);
	assert(ws != NULL);
	assert(ws->parent_entry != NULL);
	assert(ws->parent_entry->params->entry_type == CET_MULTIPART);

	if ((ws->parent_entry->mp_params.max_elemsize > 0) &&
	    (ws->parent_entry->mp_params.max_elemsize == ws->items_size)) {
		TRACE_OUT(cache_mp_write);
		return (-1);
	}

	new_item = (struct cache_mp_data_item_ *)malloc(
	    sizeof(struct cache_mp_data_item_));
	assert(new_item != NULL);
	memset(new_item, 0, sizeof(struct cache_mp_data_item_));

	new_item->value = (char *)malloc(data_size);
	assert(new_item->value != NULL);
	memcpy(new_item->value, data, data_size);
	new_item->value_size = data_size;

	TAILQ_INSERT_TAIL(&ws->items, new_item, entries);
	++ws->items_size;

	TRACE_OUT(cache_mp_write);
	return (0);
}

/*
 * Abandons the write session and frees all the connected resources.
 */
void
abandon_cache_mp_write_session(struct cache_mp_write_session_ *ws)
{

	TRACE_IN(abandon_cache_mp_write_session);
	assert(ws != NULL);
	assert(ws->parent_entry != NULL);
	assert(ws->parent_entry->params->entry_type == CET_MULTIPART);

	TAILQ_REMOVE(&ws->parent_entry->ws_head, ws, entries);
	--ws->parent_entry->ws_size;

	destroy_cache_mp_write_session(ws);
	TRACE_OUT(abandon_cache_mp_write_session);
}

/*
 * Commits the session to the entry for which it was created.
 */
void
close_cache_mp_write_session(struct cache_mp_write_session_ *ws)
{

	TRACE_IN(close_cache_mp_write_session);
	assert(ws != NULL);
	assert(ws->parent_entry != NULL);
	assert(ws->parent_entry->params->entry_type == CET_MULTIPART);

	TAILQ_REMOVE(&ws->parent_entry->ws_head, ws, entries);
	--ws->parent_entry->ws_size;

	if (ws->parent_entry->completed_write_session == NULL) {
		/*
		 * If there is no completed session yet, this one becomes it.
		 */
		ws->parent_entry->get_time_func(
		    &ws->parent_entry->creation_time);
		ws->parent_entry->completed_write_session = ws;
	} else {
		/*
		 * If there is a completed session, save this session as the
		 * pending one. Any previously pending session is destroyed.
		 */
		if (ws->parent_entry->pending_write_session != NULL)
			destroy_cache_mp_write_session(
			    ws->parent_entry->pending_write_session);

		ws->parent_entry->pending_write_session = ws;
	}
	TRACE_OUT(close_cache_mp_write_session);
}
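
/*
 * Usage sketch (illustrative only; mp_ent and the chunk variables are
 * hypothetical):
 *
 *	struct cache_mp_write_session_ *ws;
 *
 *	ws = open_cache_mp_write_session(mp_ent);
 *	if (ws != NULL) {
 *		if (cache_mp_write(ws, chunk1, chunk1_size) != 0 ||
 *		    cache_mp_write(ws, chunk2, chunk2_size) != 0)
 *			abandon_cache_mp_write_session(ws);
 *		else
 *			close_cache_mp_write_session(ws);
 *	}
 */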

/*
 * Opens a read session for the specified entry. Returns NULL on error (when
 * there are no data in the entry or the data are obsolete).
 */
struct cache_mp_read_session_ *
open_cache_mp_read_session(struct cache_entry_ *entry)
{
	struct cache_mp_entry_ *mp_entry;
	struct cache_mp_read_session_ *retval;

	TRACE_IN(open_cache_mp_read_session);
	assert(entry != NULL);
	assert(entry->params->entry_type == CET_MULTIPART);
	mp_entry = (struct cache_mp_entry_ *)entry;

	if (mp_entry->completed_write_session == NULL) {
		TRACE_OUT(open_cache_mp_read_session);
		return (NULL);
	}

	if ((mp_entry->mp_params.max_lifetime.tv_sec != 0) ||
	    (mp_entry->mp_params.max_lifetime.tv_usec != 0)) {
		/* flush the entry if the completed data have expired */
		if (mp_entry->last_request_time.tv_sec -
		    mp_entry->creation_time.tv_sec >
		    mp_entry->mp_params.max_lifetime.tv_sec) {
			flush_cache_entry(entry);
			TRACE_OUT(open_cache_mp_read_session);
			return (NULL);
		}
	}

	retval = (struct cache_mp_read_session_ *)malloc(
	    sizeof(struct cache_mp_read_session_));
	assert(retval != NULL);
	memset(retval, 0, sizeof(struct cache_mp_read_session_));

	retval->parent_entry = mp_entry;
	retval->current_item = TAILQ_FIRST(
	    &mp_entry->completed_write_session->items);

	TAILQ_INSERT_HEAD(&mp_entry->rs_head, retval, entries);
	++mp_entry->rs_size;

	mp_entry->get_time_func(&mp_entry->last_request_time);
	TRACE_OUT(open_cache_mp_read_session);
	return (retval);
}

/*
 * Reads data from the read session, step by step.
 * Returns 0 on success, -1 on error (when there are no more data), and -2 if
 * the data_size is too small. In the latter case, data_size is filled with
 * the proper value.
 */
int
cache_mp_read(struct cache_mp_read_session_ *rs, char *data, size_t *data_size)
{

	TRACE_IN(cache_mp_read);
	assert(rs != NULL);

	if (rs->current_item == NULL) {
		TRACE_OUT(cache_mp_read);
		return (-1);
	}

	if (rs->current_item->value_size > *data_size) {
		*data_size = rs->current_item->value_size;
		if (data == NULL) {
			TRACE_OUT(cache_mp_read);
			return (0);
		}

		TRACE_OUT(cache_mp_read);
		return (-2);
	}

	*data_size = rs->current_item->value_size;
	memcpy(data, rs->current_item->value, rs->current_item->value_size);
	rs->current_item = TAILQ_NEXT(rs->current_item, entries);

	TRACE_OUT(cache_mp_read);
	return (0);
}
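
/*
 * Usage sketch (illustrative only; mp_ent and the buffer size are
 * hypothetical):
 *
 *	struct cache_mp_read_session_ *rs;
 *	char buf[1024];
 *	size_t bsize;
 *	int res;
 *
 *	rs = open_cache_mp_read_session(mp_ent);
 *	if (rs != NULL) {
 *		for (;;) {
 *			bsize = sizeof(buf);
 *			res = cache_mp_read(rs, buf, &bsize);
 *			if (res != 0)
 *				break;	(-1: no more data, -2: buf too small)
 *			... process bsize bytes of buf ...
 *		}
 *		close_cache_mp_read_session(rs);
 *	}
 */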

/*
 * Closes the read session. If there are no more read sessions and there is
 * a pending write session, the pending session is committed and the old
 * completed_write_session is destroyed.
 */
void
close_cache_mp_read_session(struct cache_mp_read_session_ *rs)
{

	TRACE_IN(close_cache_mp_read_session);
	assert(rs != NULL);
	assert(rs->parent_entry != NULL);

	TAILQ_REMOVE(&rs->parent_entry->rs_head, rs, entries);
	--rs->parent_entry->rs_size;

	if ((rs->parent_entry->rs_size == 0) &&
	    (rs->parent_entry->pending_write_session != NULL)) {
		destroy_cache_mp_write_session(
		    rs->parent_entry->completed_write_session);
		rs->parent_entry->completed_write_session =
		    rs->parent_entry->pending_write_session;
		rs->parent_entry->pending_write_session = NULL;
	}

	destroy_cache_mp_read_session(rs);
	TRACE_OUT(close_cache_mp_read_session);
}
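
/*
 * Illustrative sketch (not part of the original source): the transformation
 * interface below is driven with the CTT_* constants, e.g.
 *
 *	transform_cache_entry(ent, CTT_FLUSH);	(drop expired/excess items)
 *	transform_cache_entry(ent, CTT_CLEAR);	(drop all items)
 *
 * and transform_cache_entry_part(ent, CTT_CLEAR, ...) removes only the items
 * whose keys start with the given key_part.
 */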

int
transform_cache_entry(struct cache_entry_ *entry,
	enum cache_transformation_t transformation)
{

	TRACE_IN(transform_cache_entry);
	switch (transformation) {
	case CTT_CLEAR:
		clear_cache_entry(entry);
		TRACE_OUT(transform_cache_entry);
		return (0);
	case CTT_FLUSH:
		flush_cache_entry(entry);
		TRACE_OUT(transform_cache_entry);
		return (0);
	default:
		TRACE_OUT(transform_cache_entry);
		return (-1);
	}
}

int
transform_cache_entry_part(struct cache_entry_ *entry,
	enum cache_transformation_t transformation, const char *key_part,
	size_t key_part_size, enum part_position_t part_position)
{
	struct cache_common_entry_ *common_entry;
	struct cache_ht_item_ *ht_item;
	struct cache_ht_item_data_ *ht_item_data, ht_key;
	struct cache_policy_item_ *item, *connected_item;

	TRACE_IN(transform_cache_entry_part);
	if (entry->params->entry_type != CET_COMMON) {
		TRACE_OUT(transform_cache_entry_part);
		return (-1);
	}

	if (transformation != CTT_CLEAR) {
		TRACE_OUT(transform_cache_entry_part);
		return (-1);
	}

	memset(&ht_key, 0, sizeof(struct cache_ht_item_data_));
	ht_key.key = (char *)key_part;	/* can't avoid casting here */
	ht_key.key_size = key_part_size;

	common_entry = (struct cache_common_entry_ *)entry;
	HASHTABLE_FOREACH(&(common_entry->items), ht_item) {
		do {
			ht_item_data = HASHTABLE_ENTRY_FIND_SPECIAL(cache_ht_,
			    ht_item, &ht_key,
			    ht_items_fixed_size_left_cmp_func);

			if (ht_item_data != NULL) {
				item = ht_item_data->fifo_policy_item;
				connected_item = item->connected_item;

				common_entry->policies[0]->remove_item_func(
				    common_entry->policies[0], item);

				free(ht_item_data->key);
				free(ht_item_data->value);
				HASHTABLE_ENTRY_REMOVE(cache_ht_, ht_item,
				    ht_item_data);
				--common_entry->items_size;

				common_entry->policies[0]->destroy_item_func(
				    item);
				if (common_entry->policies_size == 2) {
					common_entry->policies[1]->
					    remove_item_func(
					    common_entry->policies[1],
					    connected_item);
					common_entry->policies[1]->
					    destroy_item_func(
					    connected_item);
				}
			}
		} while (ht_item_data != NULL);
	}

	TRACE_OUT(transform_cache_entry_part);
	return (0);
}