#include <sys/zfs_events.h>
#include <sys/zev_checksums.h>
#include <sys/fs/zev.h>
#include <sys/zfs_znode.h>
#include <sys/sha1.h>
#include <sys/avl.h>
#include <sys/sysmacros.h>

typedef struct zev_sig_cache_chksums_t {
	/* start of key */
	uint64_t offset_l1;
	/* end of key */
	avl_node_t avl_node;
	uint8_t sigs[ZEV_L1_SIZE/ZEV_L0_SIZE][SHA1_DIGEST_LENGTH];
} zev_sig_cache_chksums_t;

typedef struct zev_sig_cache_file_t {
	/* start of key */
	uint64_t guid;
	uint64_t ino;
	uint64_t gen;
	/* end of key */
	uint32_t refcnt;
	struct zev_sig_cache_file_t *lru_prev;
	struct zev_sig_cache_file_t *lru_next;
	avl_node_t avl_node;
	avl_tree_t chksums;
} zev_sig_cache_file_t;

typedef struct zev_sig_cache_t {
	kmutex_t mutex;
	uint64_t cache_size;
	uint64_t max_cache_size;
	uint64_t hits;
	uint64_t misses;
	struct zev_sig_cache_file_t *lru_head;
	struct zev_sig_cache_file_t *lru_tail;
	avl_tree_t files;
} zev_sig_cache_t;

extern offset_t zfs_read_chunk_size;	/* tunable from zfs_vnops.c */

/* l0 signature of an all-zero block; such blocks are never reported */
static uint8_t all_zero_sig[SHA1_DIGEST_LENGTH] = {
	0x1c, 0xea, 0xf7, 0x3d, 0xf4, 0x0e, 0x53, 0x1d, 0xf3, 0xbf,
	0xb2, 0x6b, 0x4f, 0xb7, 0xcd, 0x95, 0xfb, 0x7b, 0xff, 0x1d
};

/* sentinel value: "this l0 signature has not been computed yet" */
static uint8_t unknown_sig[SHA1_DIGEST_LENGTH] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

static zev_sig_cache_t zev_sig_cache;
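
/*
 * Overview: two-level signature scheme.
 *
 * File data is hashed in ZEV_L0_SIZE units ("l0 blocks"); an l0
 * signature is the plain SHA1 of the block's contents.  Each
 * ZEV_L1_SIZE span ("l1 block", one megabyte) additionally gets an
 * aggregate signature: the byte-wise sum (mod 2^160) of its l0
 * signatures, each salted with its block number first.  All-zero l0
 * blocks are left out of both the aggregate and the result set.
 *
 * Computed l0 signatures are cached per file.  Files are keyed by
 * (dataset guid, inode, generation) in an AVL tree and strung on a
 * doubly-linked LRU list; once cache_size exceeds max_cache_size,
 * unreferenced files are expired from the LRU tail.  An l0 slot that
 * still holds unknown_sig counts as a cache miss.
 */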

static int
zev_cache_file_cmp(const void *entry_a, const void *entry_b)
{
	const zev_sig_cache_file_t *a = entry_a;
	const zev_sig_cache_file_t *b = entry_b;

	if (a->guid < b->guid)
		return -1;
	if (a->guid > b->guid)
		return 1;
	if (a->ino < b->ino)
		return -1;
	if (a->ino > b->ino)
		return 1;
	if (a->gen < b->gen)
		return -1;
	if (a->gen > b->gen)
		return 1;
	return 0;
}

static int
zev_chksum_cache_cmp(const void *entry_a, const void *entry_b)
{
	const zev_sig_cache_chksums_t *a = entry_a;
	const zev_sig_cache_chksums_t *b = entry_b;

	if (a->offset_l1 < b->offset_l1)
		return -1;
	if (a->offset_l1 > b->offset_l1)
		return 1;
	return 0;
}

/* must be called with zev_sig_cache.mutex held */
static void
zev_chksum_cache_file_free(zev_sig_cache_file_t *file)
{
	zev_sig_cache_chksums_t *cs;
	void *c = NULL; /* cookie */

	/* remove from lru list */
	if (!file->lru_prev) {
		zev_sig_cache.lru_head = file->lru_next;
	} else {
		file->lru_prev->lru_next = file->lru_next;
	}
	if (!file->lru_next) {
		zev_sig_cache.lru_tail = file->lru_prev;
	} else {
		file->lru_next->lru_prev = file->lru_prev;
	}
	/* free resources */
	avl_remove(&zev_sig_cache.files, file);
	while ((cs = avl_destroy_nodes(&file->chksums, &c)) != NULL) {
		zev_sig_cache.cache_size -= sizeof(*cs);
		zev_free(cs, sizeof(*cs));
	}
	avl_destroy(&file->chksums);
	zev_free(file, sizeof(*file));
	zev_sig_cache.cache_size -= sizeof(*file);
}

void
zev_chksum_init(void)
{
	memset(&zev_sig_cache, 0, sizeof(zev_sig_cache));
	mutex_init(&zev_sig_cache.mutex, NULL, MUTEX_DRIVER, NULL);
	avl_create(&zev_sig_cache.files, zev_cache_file_cmp,
	    sizeof(zev_sig_cache_file_t),
	    offsetof(zev_sig_cache_file_t, avl_node));
	zev_sig_cache.max_cache_size = ZEV_CHKSUM_DEFAULT_CACHE_SIZE;
}

void
zev_chksum_fini(void)
{
	zev_sig_cache_file_t *file;

	/* free all cached files before tearing down the mutex they use */
	while ((file = avl_first(&zev_sig_cache.files)) != NULL)
		zev_chksum_cache_file_free(file);
	avl_destroy(&zev_sig_cache.files);
	mutex_destroy(&zev_sig_cache.mutex);
}

static zev_sig_cache_file_t *
zev_chksum_cache_file_get_and_hold(znode_t *zp)
{
	zev_sig_cache_file_t find_file;
	zev_sig_cache_file_t *file;
	avl_index_t where;

	find_file.guid = zp->z_zfsvfs->z_os->os_dsl_dataset->ds_phys->ds_guid;
	find_file.ino = zp->z_id;
	find_file.gen = zp->z_gen;

	mutex_enter(&zev_sig_cache.mutex);
	file = avl_find(&zev_sig_cache.files, &find_file, &where);
	if (!file) {
		file = zev_alloc(sizeof(*file));
		file->guid =
		    zp->z_zfsvfs->z_os->os_dsl_dataset->ds_phys->ds_guid;
		file->ino = zp->z_id;
		file->gen = zp->z_gen;
		file->refcnt = 0;
		avl_create(&file->chksums, zev_chksum_cache_cmp,
		    sizeof(zev_sig_cache_chksums_t),
		    offsetof(zev_sig_cache_chksums_t, avl_node));
		file->lru_prev = NULL;
		file->lru_next = zev_sig_cache.lru_head;
		if (zev_sig_cache.lru_head)
			zev_sig_cache.lru_head->lru_prev = file;
		if (!zev_sig_cache.lru_tail)
			zev_sig_cache.lru_tail = file;
		zev_sig_cache.lru_head = file;
		avl_insert(&zev_sig_cache.files, file, where);
		zev_sig_cache.cache_size += sizeof(*file);
	}
	file->refcnt++;
	mutex_exit(&zev_sig_cache.mutex);
	return file;
}

static void
zev_chksum_cache_file_release(zev_sig_cache_file_t *file)
{
	mutex_enter(&zev_sig_cache.mutex);

	/* We don't invalidate/free/destroy *file. Cache expiry does that */
	file->refcnt--;

	/* move file to front of lru list */
	if (file->lru_prev) {
		/* not the head yet -> move to the front */
		if (!file->lru_next)
			zev_sig_cache.lru_tail = file->lru_prev;
		else
			file->lru_next->lru_prev = file->lru_prev;
		file->lru_prev->lru_next = file->lru_next;
		zev_sig_cache.lru_head->lru_prev = file;
		file->lru_next = zev_sig_cache.lru_head;
		file->lru_prev = NULL;
		zev_sig_cache.lru_head = file;
	}

	mutex_exit(&zev_sig_cache.mutex);
}

static zev_sig_cache_chksums_t *
zev_chksum_cache_get_lv1_entry(zev_sig_cache_file_t *file, uint64_t off_l1)
{
	zev_sig_cache_chksums_t find_chksum;
	zev_sig_cache_chksums_t *cs;
	avl_index_t where;

	find_chksum.offset_l1 = off_l1;
	cs = avl_find(&file->chksums, &find_chksum, &where);
	if (!cs) {
		/* zev_zalloc() leaves every sig slot at unknown_sig */
		cs = zev_zalloc(sizeof(*cs));
		cs->offset_l1 = off_l1;
		avl_insert(&file->chksums, cs, where);
		zev_sig_cache.cache_size += sizeof(*cs);
	}
	return cs;
}

void
zev_chksum_stats(uint64_t *c_size, uint64_t *c_hits, uint64_t *c_misses)
{
	mutex_enter(&zev_sig_cache.mutex);
	*c_size = zev_sig_cache.cache_size;
	*c_hits = zev_sig_cache.hits;
	*c_misses = zev_sig_cache.misses;
	mutex_exit(&zev_sig_cache.mutex);
}
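
/*
 * Worked example for the l1 range arithmetic used below (illustrative
 * values only; ZEV_L1_SIZE is assumed to be 1 MiB = 0x100000 here):
 *
 *	off = 0x123456, len = 0x1000
 *	off_l1 = P2ALIGN(0x123456, 0x100000)           = 0x100000
 *	len_l1 = P2ROUNDUP(0x1000 + 0x23456, 0x100000) = 0x100000
 *
 * i.e. the loops visit every megabyte-aligned l1 span that overlaps
 * [off, off + len).
 */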

static void
zev_chksum_cache_invalidate(zev_sig_cache_file_t *file,
    znode_t *zp,
    zev_chksum_mode_t mode,
    uint64_t off,
    uint64_t len)
{
	zev_sig_cache_chksums_t find_chksum;
	zev_sig_cache_chksums_t *cs;
	int idx;
	uint64_t off_l1;
	uint64_t len_l1;
	uint64_t pos_l0;
	uint64_t pos_l1;

	mutex_enter(&zev_sig_cache.mutex);

	/* start of this megabyte */
	off_l1 = P2ALIGN(off, ZEV_L1_SIZE);

	if (len == 0) {
		/* truncate() to EOF */
		len_l1 = ZEV_L1_SIZE;
	} else {
		/* full megabytes */
		len_l1 = len + (off - off_l1);
		len_l1 = P2ROUNDUP(len_l1, ZEV_L1_SIZE);
	}

	for (pos_l1 = off_l1; pos_l1 < (off_l1+len_l1); pos_l1 += ZEV_L1_SIZE) {

		find_chksum.offset_l1 = pos_l1;
		cs = avl_find(&file->chksums, &find_chksum, NULL);
		if (!cs)
			continue;

		for (pos_l0 = MAX(pos_l1, P2ALIGN(off, ZEV_L0_SIZE));
		    pos_l0 < (pos_l1 + ZEV_L1_SIZE);
		    pos_l0 += ZEV_L0_SIZE) {

			if ((len > 0) && (pos_l0 >= (off + len)))
				break;

			idx = (pos_l0 % ZEV_L1_SIZE) / ZEV_L0_SIZE;
			memcpy(cs->sigs[idx], unknown_sig, SHA1_DIGEST_LENGTH);
		}
	}

	if (len == 0) {
		/* truncate() to EOF -> invalidate all l1 sigs beyond EOF */
		while ((cs = avl_last(&file->chksums)) != NULL) {
			if (cs->offset_l1 < zp->z_size)
				break;
			avl_remove(&file->chksums, cs);
			zev_sig_cache.cache_size -= sizeof(*cs);
			zev_free(cs, sizeof(*cs));
		}
	}

	mutex_exit(&zev_sig_cache.mutex);
}

static int
zev_chksum_cache_get(uint8_t *dst,
    zev_sig_cache_file_t *file,
    zev_sig_cache_chksums_t *cs,
    uint64_t off_l0)
{
	int idx;

	mutex_enter(&zev_sig_cache.mutex);

	idx = (off_l0 % ZEV_L1_SIZE) / ZEV_L0_SIZE;
	if (!memcmp(cs->sigs[idx], unknown_sig, SHA1_DIGEST_LENGTH)) {
		zev_sig_cache.misses++;
		mutex_exit(&zev_sig_cache.mutex);
		return ENOENT;
	}
	memcpy(dst, cs->sigs[idx], SHA1_DIGEST_LENGTH);
	zev_sig_cache.hits++;

	mutex_exit(&zev_sig_cache.mutex);
	return 0;
}

static void
zev_chksum_cache_put(uint8_t *sig,
    zev_sig_cache_file_t *file,
    zev_sig_cache_chksums_t *cs,
    uint64_t off_l0)
{
	zev_sig_cache_file_t *f;
	zev_sig_cache_file_t *prev;
	int idx;

	mutex_enter(&zev_sig_cache.mutex);

	if (zev_sig_cache.max_cache_size == 0) {
		/* cache disabled */
		mutex_exit(&zev_sig_cache.mutex);
		return;
	}

	/* expire entries until there's room in the cache */
	f = zev_sig_cache.lru_tail;
	while (f &&
	    (zev_sig_cache.cache_size > zev_sig_cache.max_cache_size)) {
		prev = f->lru_prev;	/* f may be freed below */
		if (f->refcnt == 0)
			zev_chksum_cache_file_free(f);
		f = prev;
	}

	idx = (off_l0 % ZEV_L1_SIZE) / ZEV_L0_SIZE;
	memcpy(cs->sigs[idx], sig, SHA1_DIGEST_LENGTH);

	mutex_exit(&zev_sig_cache.mutex);
	return;
}
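
/*
 * Typical call sequence for the cache helpers above (sketch; the real
 * sequence lives in zev_get_checksums() below):
 *
 *	file = zev_chksum_cache_file_get_and_hold(zp);
 *	cs = zev_chksum_cache_get_lv1_entry(file, off_l1);
 *	if (zev_chksum_cache_get(sig, file, cs, off_l0) != 0) {
 *		... read the l0 block and compute its signature ...
 *		zev_chksum_cache_put(sig, file, cs, off_l0);
 *	}
 *	zev_chksum_cache_file_release(file);
 */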

/* verbatim from zfs_vnops.c (unfortunately it's declared static there) */
static int
mappedread(vnode_t *vp, int nbytes, uio_t *uio)
{
	znode_t *zp = VTOZ(vp);
	objset_t *os = zp->z_zfsvfs->z_os;
	int64_t start, off;
	int len = nbytes;
	int error = 0;

	start = uio->uio_loffset;
	off = start & PAGEOFFSET;
	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
		page_t *pp;
		uint64_t bytes = MIN(PAGESIZE - off, len);

		if (pp = page_lookup(vp, start, SE_SHARED)) {
			caddr_t va;

			va = zfs_map_page(pp, S_READ);
			error = uiomove(va + off, bytes, UIO_READ, uio);
			zfs_unmap_page(pp, va);
			page_unlock(pp);
		} else {
			error = dmu_read_uio(os, zp->z_id, uio, bytes);
		}
		len -= bytes;
		off = 0;
		if (error)
			break;
	}
	return (error);
}

/*
 * Reads up to 'len' bytes at 'off' into 'buf'.  Returns the number of
 * bytes read, or a negative errno value on failure.
 */
static int
zev_safe_read(znode_t *zp, char *buf, uint64_t off, uint64_t len)
{
	uio_t uio;
	struct iovec iov;
	ssize_t n;
	ssize_t nbytes;
	int error = 0;
	vnode_t *vp = ZTOV(zp);
	objset_t *os = zp->z_zfsvfs->z_os;

	/* set up uio */

	iov.iov_base = buf;
	iov.iov_len = ZEV_L0_SIZE;

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_segflg = (short)UIO_SYSSPACE;
	uio.uio_llimit = RLIM64_INFINITY;
	uio.uio_fmode = FREAD;
	uio.uio_extflg = UIO_COPY_DEFAULT;

	uio.uio_loffset = off;
	uio.uio_resid = len;

again:
	if (uio.uio_loffset >= zp->z_size)
		return -EINVAL;

	/* don't read past EOF */
	n = MIN(uio.uio_resid, zp->z_size - uio.uio_loffset);

	/* this block was essentially copied from zfs_read() in zfs_vnops.c */
	while (n > 0) {
		nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio.uio_loffset, zfs_read_chunk_size));

		if (vn_has_cached_data(vp)) {
			error = mappedread(vp, nbytes, &uio);
		} else {
			error = dmu_read_uio(os, zp->z_id, &uio, nbytes);
		}
		if (error) {
			if (error == EINTR)
				goto again;
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}

		n -= nbytes;
	}

	if (error)
		return -error;
	return len - uio.uio_resid;
}

static void
zev_l0_sig(uint8_t *sig, char *buf)
{
	SHA1_CTX ctx;

	SHA1Init(&ctx);
	SHA1Update(&ctx, buf, ZEV_L0_SIZE);
	SHA1Final(sig, &ctx);
	return;
}

/* salt an l0 signature with its block number within the l1 span */
static void
zev_l0_blocksig(uint8_t *blk_sig, uint8_t *l0_sig, uint8_t block_no)
{
	SHA1_CTX ctx;

	SHA1Init(&ctx);
	SHA1Update(&ctx, l0_sig, SHA1_DIGEST_LENGTH);
	SHA1Update(&ctx, &block_no, sizeof(block_no));
	SHA1Final(blk_sig, &ctx);
	return;
}

/* add sig_l0 to sig_l1: byte-wise big-endian addition, mod 2^160 */
static void
zev_l1_add(uint8_t *sig_l1, uint8_t *sig_l0)
{
	int i;
	int s;
	int carry = 0;

	for (i = SHA1_DIGEST_LENGTH - 1; i >= 0; --i) {
		s = sig_l1[i] + sig_l0[i] + carry;
		carry = s > 255 ? 1 : 0;
		sig_l1[i] = s & 0xff;
	}
}

static void
zev_get_result_buffer(zev_sig_t **buffer,
    uint64_t *buffer_len,
    znode_t *zp,
    uint64_t off,
    uint64_t len,
    zev_chksum_mode_t mode)
{
	uint64_t blk_start;
	uint64_t blk_end;
	uint64_t l0_blocks;
	uint64_t l1_blocks;
	uint64_t sigs;
	int buflen;

	/* calculate result set size: how many checksums will we provide? */

	ASSERT(len > 0 || (mode == zev_truncate && len == 0));

	if (len == 0) {
		/* truncate */
		l0_blocks = ((off % ZEV_L0_SIZE) == 0) ? 0 : 1;
		l1_blocks = ((off % ZEV_L1_SIZE) == 0) ? 0 : 1;
	} else {
		/* how many lv1 checksums do we update? */
		blk_start = off / ZEV_L1_SIZE;
		blk_end = (off + len - 1) / ZEV_L1_SIZE;
		l1_blocks = blk_end - blk_start + 1;
		/* how many lv0 checksums do we update? */
		blk_start = off / ZEV_L0_SIZE;
		blk_end = (off + len - 1) / ZEV_L0_SIZE;
		l0_blocks = blk_end - blk_start + 1;
	}

	sigs = l1_blocks + l0_blocks;
	if (sigs == 0) {
		*buffer = NULL;
		*buffer_len = 0;
		return;
	}

	buflen = sigs * sizeof(zev_sig_t);
	*buffer_len = buflen;
	*buffer = zev_alloc(buflen);
}
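
/*
 * Sizing example for zev_get_result_buffer() (illustrative values only;
 * here ZEV_L0_SIZE is assumed to be 4 KiB and ZEV_L1_SIZE 1 MiB):
 *
 *	off = 1044480 (1 MiB - 4 KiB), len = 12288 (12 KiB)
 *
 *	lv1: blk_start = 1044480 / 1 MiB = 0
 *	     blk_end   = 1056767 / 1 MiB = 1    -> l1_blocks = 2
 *	lv0: blk_start = 1044480 / 4 KiB = 255
 *	     blk_end   = 1056767 / 4 KiB = 257  -> l0_blocks = 3
 *
 * The write straddles an l1 boundary, so room for 5 zev_sig_t entries
 * (2 lv1 + 3 lv0) is allocated.
 */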

static void
zev_append_sig(zev_sig_t *s, int level, uint64_t off, uint8_t *sig)
{
	s->level = level;
	s->block_offset = off;
	memcpy(s->value, sig, SHA1_DIGEST_LENGTH);
}

/*
 * Calculate all l0 and l1 checksums that are affected by the given range.
 *
 * This function assumes that the ranges it needs to read are already
 * range-locked.
 */
int
zev_get_checksums(zev_sig_t **result,
    uint64_t *result_buf_len,
    uint64_t *signature_cnt,
    znode_t *zp,
    uint64_t off,
    uint64_t len,
    zev_chksum_mode_t mode)
{
	uint64_t off_l1;
	uint64_t len_l1;
	uint64_t pos_l1;
	uint64_t pos_l0;
	char *buf;
	int64_t ret;
	uint8_t sig_l0[SHA1_DIGEST_LENGTH];
	uint8_t blk_sig_l0[SHA1_DIGEST_LENGTH];
	uint8_t sig_l1[SHA1_DIGEST_LENGTH];
	uint8_t l0_block_no;
	zev_sig_t *sig;
	int non_empty_l0_blocks;
	zev_sig_cache_file_t *file;
	zev_sig_cache_chksums_t *cs;

	/*
	 * Note: for write events, the callback is called via
	 * zfs_write() -> zfs_log_write() -> zev_znode_write_cb()
	 *
	 * The transaction is not committed, yet.
	 *
	 * A write() syscall might be split into smaller chunks by zfs_write()
	 *
	 * zfs_write() has a range lock when this is called. (zfs_vnops.c:925)
	 * In zev mode, the range lock will encompass all data we need
	 * to calculate our checksums.
	 *
	 * The same is true for truncates with non-zero length. ("punch hole")
	 */

	ASSERT(len > 0 || (mode == zev_truncate && len == 0));
	*signature_cnt = 0;

	/* start of this megabyte */
	off_l1 = P2ALIGN(off, ZEV_L1_SIZE);
	/* full megabytes */
	if (len == 0) {
		/* truncate(): we'll look at the last lv1 block, only. */
		len_l1 = ZEV_L1_SIZE;
	} else {
		len_l1 = len + (off - off_l1);
		len_l1 = P2ROUNDUP(len_l1, ZEV_L1_SIZE);
	}

	file = zev_chksum_cache_file_get_and_hold(zp);
	zev_chksum_cache_invalidate(file, zp, mode, off, len);
	buf = zev_alloc(ZEV_L0_SIZE);

	zev_get_result_buffer(result, result_buf_len, zp, off, len, mode);
	if (*result == NULL) {
		/* we're done */
		zev_free(buf, ZEV_L0_SIZE);
		zev_chksum_cache_file_release(file);
		return 0;
	}
	sig = *result;
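
	/*
	 * Walk every l1 span that overlaps the changed range.  Within
	 * each span, fetch or (re)compute the l0 signature of every
	 * block up to EOF, fold the non-zero ones into the running l1
	 * signature, and emit the l0 signatures that fall inside
	 * [off, off + len).
	 */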

	for (pos_l1 = off_l1; pos_l1 < (off_l1+len_l1); pos_l1 += ZEV_L1_SIZE) {

		if (pos_l1 > zp->z_size) {
			cmn_err(CE_WARN, "zev_get_checksums: off+len beyond "
			    "EOF. Unexpected behaviour; please fix!");
			break;
		}

		/*
		 * Since we have a reference to 'file', 'cs' can't be expired.
		 * Since our ranges are range-locked, other threads won't
		 * touch our checksum entries. (not even read them)
		 * Hence, we don't need to hold() or release() 'cs'.
		 */
		cs = zev_chksum_cache_get_lv1_entry(file, pos_l1);

		l0_block_no = 0;
		non_empty_l0_blocks = 0;
		bzero(sig_l1, sizeof(sig_l1));
		for (pos_l0 = pos_l1;
		    pos_l0 < (pos_l1 + ZEV_L1_SIZE);
		    pos_l0 += ZEV_L0_SIZE) {

			if (pos_l0 >= zp->z_size)
				break; /* EOF */

			if (zev_chksum_cache_get(sig_l0, file, cs,
			    pos_l0) != 0) {
				/* signature is not cached, yet. */
				ret = zev_safe_read(zp, buf,
				    pos_l0, ZEV_L0_SIZE);
				if (ret < 0) {
					zev_free(*result, *result_buf_len);
					zev_free(buf, ZEV_L0_SIZE);
					zev_chksum_cache_file_release(file);
					return ret;
				}
				/* pad buffer with zeros if necessary */
				if (ret < ZEV_L0_SIZE)
					bzero(buf + ret, ZEV_L0_SIZE - ret);

				/* calculate signature */
				zev_l0_sig(sig_l0, buf);

				zev_chksum_cache_put(sig_l0, file, cs, pos_l0);
			}

			if (!memcmp(sig_l0, all_zero_sig,
			    SHA1_DIGEST_LENGTH)) {
				/* all-zero l0 block. omit signature. */
				l0_block_no++;
				continue;
			}
			non_empty_l0_blocks++;
			zev_l0_blocksig(blk_sig_l0, sig_l0, l0_block_no);
			zev_l1_add(sig_l1, blk_sig_l0);

			if (((pos_l0 + ZEV_L0_SIZE - 1) >= off) &&
			    (pos_l0 < (off + len))) {
				zev_append_sig(sig++, 0, pos_l0, sig_l0);
			}

			l0_block_no++;
		}

		if (non_empty_l0_blocks && (zp->z_size > ZEV_L0_SIZE))
			zev_append_sig(sig++, 1, pos_l1, sig_l1);
	}

	*signature_cnt = ((char *)sig - (char *)*result) / sizeof(zev_sig_t);

	zev_free(buf, ZEV_L0_SIZE);
	zev_chksum_cache_file_release(file);
	return 0;
}
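
#if 0
/*
 * Illustrative caller (not part of this driver): how a log callback
 * might obtain and consume the signature set for a write.  The mode
 * value 'zev_write' and the event-record step are assumptions; only
 * zev_get_checksums(), zev_free() and zev_sig_t are taken from above.
 * The caller must hold the range lock covering [off, off + len).
 */
static int
zev_example_write_sigs(znode_t *zp, uint64_t off, uint64_t len)
{
	zev_sig_t *sigs = NULL;
	uint64_t sig_buf_len = 0;
	uint64_t sig_cnt = 0;
	int error;

	error = zev_get_checksums(&sigs, &sig_buf_len, &sig_cnt,
	    zp, off, len, zev_write);
	if (error)
		return (error);

	/* ... copy 'sig_cnt' signatures from 'sigs' into the event ... */

	if (sigs)
		zev_free(sigs, sig_buf_len);
	return (0);
}
#endif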