/*
 * /titanic_52/usr/src/uts/common/fs/zev/zev_checksums.c
 * (revision b9710123bf6fc26197428ae089042fb387a55032)
 */
#include <sys/zfs_events.h>
#include <sys/zev_checksums.h>
#include <sys/fs/zev.h>
#include <sys/zfs_znode.h>
#include <sys/sha1.h>
#include <sys/avl.h>
#include <sys/sysmacros.h>
#include <sys/zfs_rlock.h>

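/*
 * zev computes two levels of SHA-1 signatures over file data:
 *
 *   level 0 ("l0"): one signature per ZEV_L0_SIZE block of data
 *   level 1 ("l1"): one aggregate signature per ZEV_L1_SIZE range
 *                   (one megabyte, per the "megabyte" comments below)
 *
 * l0 signatures are kept in a global, mutex-protected cache: an AVL
 * tree of per-file nodes, each holding an AVL tree of per-l1-range
 * signature arrays (the structs below).  Files sit on an LRU list
 * and are expired once the cache grows beyond max_cache_size.
 */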
typedef struct zev_sig_cache_chksums_t {
	/* start of key */
	uint64_t			offset_l1;
	/* end of key */
	avl_node_t			avl_node;
	uint8_t		sigs[ZEV_L1_SIZE/ZEV_L0_SIZE][SHA1_DIGEST_LENGTH];
} zev_sig_cache_chksums_t;

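/*
 * One cache node per file, keyed by (dataset guid, inode, generation).
 * A node is linked into both the AVL tree and the LRU list; refcnt
 * keeps it alive while a checksum operation is using it.
 */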
typedef struct zev_sig_cache_file_t {
	/* start of key */
	uint64_t			guid;
	uint64_t			ino;
	uint64_t			gen;
	/* end of key */
	uint32_t			refcnt;
	struct zev_sig_cache_file_t 	*lru_prev;
	struct zev_sig_cache_file_t 	*lru_next;
	avl_node_t			avl_node;
	avl_tree_t			chksums;
} zev_sig_cache_file_t;

typedef struct zev_sig_cache_t {
	kmutex_t			mutex;
	uint64_t			cache_size;
	uint64_t			max_cache_size;
	uint64_t			hits;
	uint64_t			misses;
	struct zev_sig_cache_file_t	*lru_head;
	struct zev_sig_cache_file_t	*lru_tail;
	avl_tree_t			files;
} zev_sig_cache_t;

extern offset_t zfs_read_chunk_size;	/* tunable from zfs_vnops.c */

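/*
 * Signature of an all-zero l0 block -- presumably the SHA-1 of
 * ZEV_L0_SIZE zero bytes.  Blocks matching this signature contain
 * only zeros and are omitted from the result set.
 */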
static uint8_t all_zero_sig[SHA1_DIGEST_LENGTH] = {
	0x1c, 0xea, 0xf7, 0x3d, 0xf4, 0x0e, 0x53, 0x1d, 0xf3, 0xbf,
	0xb2, 0x6b, 0x4f, 0xb7, 0xcd, 0x95, 0xfb, 0x7b, 0xff, 0x1d
};

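/* an all-zero slot in cs->sigs[] means "no signature cached, yet" */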
static uint8_t unknown_sig[SHA1_DIGEST_LENGTH] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

static zev_sig_cache_t	zev_sig_cache;

static int
zev_cache_file_cmp(const void *entry_a, const void *entry_b)
{
	const zev_sig_cache_file_t *a = entry_a;
	const zev_sig_cache_file_t *b = entry_b;

	if (a->guid < b->guid)
		return -1;
	if (a->guid > b->guid)
		return 1;
	if (a->ino < b->ino)
		return -1;
	if (a->ino > b->ino)
		return 1;
	if (a->gen < b->gen)
		return -1;
	if (a->gen > b->gen)
		return 1;
	return 0;
}

static int
zev_chksum_cache_cmp(const void *entry_a, const void *entry_b)
{
	const zev_sig_cache_chksums_t *a = entry_a;
	const zev_sig_cache_chksums_t *b = entry_b;

	if (a->offset_l1 < b->offset_l1)
		return -1;
	if (a->offset_l1 > b->offset_l1)
		return 1;
	return 0;
}

/* must be called with zev_sig_cache.mutex held */
static void
zev_chksum_cache_file_free(zev_sig_cache_file_t *file)
{
	zev_sig_cache_chksums_t *cs;
	void *c = NULL; /* cookie */

	/* remove from lru list */
	if (!file->lru_prev) {
		zev_sig_cache.lru_head = file->lru_next;
	} else {
		file->lru_prev->lru_next = file->lru_next;
	}
	if (!file->lru_next) {
		zev_sig_cache.lru_tail = file->lru_prev;
	} else {
		file->lru_next->lru_prev = file->lru_prev;
	}
	/* free resources */
	avl_remove(&zev_sig_cache.files, file);
	while ((cs = avl_destroy_nodes(&file->chksums, &c)) != NULL) {
		zev_sig_cache.cache_size -= sizeof(*cs);
		zev_free(cs, sizeof(*cs));
	}
	avl_destroy(&file->chksums);
	zev_free(file, sizeof(*file));
	zev_sig_cache.cache_size -= sizeof(*file);
}

void
zev_chksum_init(void)
{
	memset(&zev_sig_cache, 0, sizeof(zev_sig_cache));
	mutex_init(&zev_sig_cache.mutex, NULL, MUTEX_DRIVER, NULL);
	avl_create(&zev_sig_cache.files, zev_cache_file_cmp,
	           sizeof(zev_sig_cache_file_t),
	           offsetof(zev_sig_cache_file_t, avl_node));
	zev_sig_cache.max_cache_size = ZEV_CHKSUM_DEFAULT_CACHE_SIZE;
}

void
zev_chksum_fini(void)
{
	zev_sig_cache_file_t *file;

	/* assumes no checksum operations are in flight anymore */
	while ((file = avl_first(&zev_sig_cache.files)) != NULL)
		zev_chksum_cache_file_free(file);
	avl_destroy(&zev_sig_cache.files);
	mutex_destroy(&zev_sig_cache.mutex);
}

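/*
 * Look up the cache node for this znode, creating it if necessary,
 * and take a hold (refcnt) on it.  Newly created nodes are inserted
 * at the head of the LRU list.
 */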
static zev_sig_cache_file_t *
zev_chksum_cache_file_get_and_hold(znode_t *zp)
{
	zev_sig_cache_file_t find_file;
	zev_sig_cache_file_t *file;
	avl_index_t where;

	find_file.guid = zp->z_zfsvfs->z_os->os_dsl_dataset->ds_phys->ds_guid;
	find_file.ino = zp->z_id;
	find_file.gen = zp->z_gen;

	mutex_enter(&zev_sig_cache.mutex);
	file = avl_find(&zev_sig_cache.files, &find_file, &where);
	if (!file) {
		file = zev_alloc(sizeof(*file));
		file->guid =
		    zp->z_zfsvfs->z_os->os_dsl_dataset->ds_phys->ds_guid;
		file->ino = zp->z_id;
		file->gen = zp->z_gen;
		file->refcnt = 0;
		avl_create(&file->chksums, zev_chksum_cache_cmp,
		           sizeof(zev_sig_cache_chksums_t),
		           offsetof(zev_sig_cache_chksums_t, avl_node));
		file->lru_prev = NULL;
		file->lru_next = zev_sig_cache.lru_head;
		if (zev_sig_cache.lru_head)
			zev_sig_cache.lru_head->lru_prev = file;
		if (!zev_sig_cache.lru_tail)
			zev_sig_cache.lru_tail = file;
		zev_sig_cache.lru_head = file;
		avl_insert(&zev_sig_cache.files, file, where);
		zev_sig_cache.cache_size += sizeof(*file);
	}
	file->refcnt++;
	mutex_exit(&zev_sig_cache.mutex);
	return file;
}

static void
zev_chksum_cache_file_release(zev_sig_cache_file_t *file)
{
	mutex_enter(&zev_sig_cache.mutex);

	/* We don't invalidate/free/destroy *file.  Cache expiry does that. */
	file->refcnt--;

	/* Move the file to the front of the LRU list */
	if (file->lru_prev) {
		/* not at the head yet -> move to the front */
		file->lru_prev->lru_next = file->lru_next;
		if (file->lru_next)
			file->lru_next->lru_prev = file->lru_prev;
		else
			zev_sig_cache.lru_tail = file->lru_prev;
		zev_sig_cache.lru_head->lru_prev = file;
		file->lru_next = zev_sig_cache.lru_head;
		file->lru_prev = NULL;
		zev_sig_cache.lru_head = file;
	}

	mutex_exit(&zev_sig_cache.mutex);
}

static zev_sig_cache_chksums_t *
zev_chksum_cache_get_lv1_entry(zev_sig_cache_file_t *file, uint64_t off_l1)
{
	zev_sig_cache_chksums_t find_chksum;
	zev_sig_cache_chksums_t *cs;
	avl_index_t where;

	find_chksum.offset_l1 = off_l1;
	cs = avl_find(&file->chksums, &find_chksum, &where);
	if (!cs) {
		cs = zev_zalloc(sizeof(*cs));
		cs->offset_l1 = off_l1;
		avl_insert(&file->chksums, cs, where);
		zev_sig_cache.cache_size += sizeof(*cs);
	}
	return cs;
}

void
zev_chksum_stats(uint64_t *c_size, uint64_t *c_hits, uint64_t *c_misses)
{
	mutex_enter(&zev_sig_cache.mutex);
	*c_size = zev_sig_cache.cache_size;
	*c_hits = zev_sig_cache.hits;
	*c_misses = zev_sig_cache.misses;
	mutex_exit(&zev_sig_cache.mutex);
}

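/*
 * Mark all cached l0 signatures overlapping [off, off+len) as unknown.
 * len == 0 means truncate()-to-EOF: the l1 range containing the new
 * EOF is invalidated and all l1 entries at or beyond the new end of
 * file are dropped entirely.
 */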
static void
zev_chksum_cache_invalidate(zev_sig_cache_file_t *file,
                            znode_t *zp,
                            zev_chksum_mode_t mode,
                            uint64_t off,
                            uint64_t len)
{
	zev_sig_cache_chksums_t find_chksum;
	zev_sig_cache_chksums_t *cs;
	int idx;
	uint64_t off_l1;
	uint64_t len_l1;
	uint64_t pos_l0;
	uint64_t pos_l1;

	mutex_enter(&zev_sig_cache.mutex);

	/* start of this megabyte */
	off_l1 = P2ALIGN(off, ZEV_L1_SIZE);

	if (len == 0) {
		/* truncate() to EOF */
		len_l1 = ZEV_L1_SIZE;
	} else {
		/* full megabytes */
		len_l1 = len + (off - off_l1);
		len_l1 = P2ROUNDUP(len_l1, ZEV_L1_SIZE);
	}

	for (pos_l1 = off_l1; pos_l1 < (off_l1+len_l1); pos_l1 += ZEV_L1_SIZE) {

		find_chksum.offset_l1 = pos_l1;
		cs = avl_find(&file->chksums, &find_chksum, NULL);
		if (!cs)
			continue;

		for (pos_l0 = MAX(pos_l1, P2ALIGN(off, ZEV_L0_SIZE));
		     pos_l0 < (pos_l1 + ZEV_L1_SIZE);
		     pos_l0 += ZEV_L0_SIZE) {

			/* stop once the block starts beyond the range */
			if ((len > 0) && (pos_l0 >= (off + len)))
				break;

			idx = (pos_l0 % ZEV_L1_SIZE) / ZEV_L0_SIZE;
			memcpy(cs->sigs[idx], unknown_sig, SHA1_DIGEST_LENGTH);
		}
	}

	if (len == 0) {
		/* truncate() to EOF -> invalidate all l1 sigs beyond EOF */
		while ((cs = avl_last(&file->chksums)) != NULL) {
			if (cs->offset_l1 < zp->z_size)
				break;
			avl_remove(&file->chksums, cs);
			zev_sig_cache.cache_size -= sizeof(*cs);
			zev_free(cs, sizeof(*cs));
		}
	}

	mutex_exit(&zev_sig_cache.mutex);
}

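/*
 * Copy a cached l0 signature into dst.  Returns 0 on a cache hit and
 * ENOENT if no signature is cached for this block.
 */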
static int
zev_chksum_cache_get(uint8_t *dst,
                     zev_sig_cache_file_t *file,
                     zev_sig_cache_chksums_t *cs,
                     uint64_t off_l0)
{
	int idx;

	mutex_enter(&zev_sig_cache.mutex);

	idx = (off_l0 % ZEV_L1_SIZE) / ZEV_L0_SIZE;
	if (!memcmp(cs->sigs[idx], unknown_sig, SHA1_DIGEST_LENGTH)) {
		zev_sig_cache.misses++;
		mutex_exit(&zev_sig_cache.mutex);
		return ENOENT;
	}
	memcpy(dst, cs->sigs[idx], SHA1_DIGEST_LENGTH);
	zev_sig_cache.hits++;

	mutex_exit(&zev_sig_cache.mutex);
	return 0;
}

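/*
 * Store an l0 signature in the cache.  While the cache exceeds
 * max_cache_size, unreferenced files are expired from the LRU tail.
 */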
static void
zev_chksum_cache_put(uint8_t *sig,
                     zev_sig_cache_file_t *file,
                     zev_sig_cache_chksums_t *cs,
                     uint64_t off_l0)
{
	zev_sig_cache_file_t *f;
	zev_sig_cache_file_t *f_prev;
	int idx;

	mutex_enter(&zev_sig_cache.mutex);

	if (zev_sig_cache.max_cache_size == 0) {
		/* cache disabled */
		mutex_exit(&zev_sig_cache.mutex);
		return;
	}

	/* expire entries until there's room in the cache */
	for (f = zev_sig_cache.lru_tail;
	     f && (zev_sig_cache.cache_size > zev_sig_cache.max_cache_size);
	     f = f_prev) {
		/* fetch the predecessor first: freeing f invalidates it */
		f_prev = f->lru_prev;
		if (f->refcnt == 0)
			zev_chksum_cache_file_free(f);
	}

	idx = (off_l0 % ZEV_L1_SIZE) / ZEV_L0_SIZE;
	memcpy(cs->sigs[idx], sig, SHA1_DIGEST_LENGTH);

	mutex_exit(&zev_sig_cache.mutex);
}

/* verbatim from zfs_vnops.c (unfortunately it's declared static there) */
static int
mappedread(vnode_t *vp, int nbytes, uio_t *uio)
{
	znode_t *zp = VTOZ(vp);
	objset_t *os = zp->z_zfsvfs->z_os;
	int64_t	start, off;
	int len = nbytes;
	int error = 0;

	start = uio->uio_loffset;
	off = start & PAGEOFFSET;
	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
		page_t *pp;
		uint64_t bytes = MIN(PAGESIZE - off, len);

		if (pp = page_lookup(vp, start, SE_SHARED)) {
			caddr_t va;

			va = zfs_map_page(pp, S_READ);
			error = uiomove(va + off, bytes, UIO_READ, uio);
			zfs_unmap_page(pp, va);
			page_unlock(pp);
		} else {
			error = dmu_read_uio(os, zp->z_id, uio, bytes);
		}
		len -= bytes;
		off = 0;
		if (error)
			break;
	}
	return (error);
}

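/*
 * Read up to 'len' bytes at offset 'off', going through the page
 * cache when the file has cached pages, like zfs_read() does.
 * Returns the number of bytes read, or a negative errno on failure
 * (callers test for ret < 0).
 */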
static int
zev_safe_read(znode_t *zp, char *buf, uint64_t off, uint64_t len)
{
	uio_t		uio;
	struct iovec	iov;
	ssize_t		n;
	ssize_t		nbytes;
	int		error = 0;
	vnode_t		*vp = ZTOV(zp);
	objset_t	*os = zp->z_zfsvfs->z_os;

	/* set up uio */

	iov.iov_base = buf;
	iov.iov_len = ZEV_L0_SIZE;

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_segflg = (short)UIO_SYSSPACE;
	uio.uio_llimit = RLIM64_INFINITY;
	uio.uio_fmode = FREAD;
	uio.uio_extflg = UIO_COPY_DEFAULT;

	uio.uio_loffset = off;
	uio.uio_resid = len;

again:
	if (uio.uio_loffset >= zp->z_size)
		return -EINVAL;	/* callers expect a negative errno */

	/* don't read past EOF */
	n = MIN(uio.uio_resid, zp->z_size - uio.uio_loffset);

	/* this block was essentially copied from zfs_read() in zfs_vnops.c */
	while (n > 0) {
		nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio.uio_loffset, zfs_read_chunk_size));

		if (vn_has_cached_data(vp)) {
			error = mappedread(vp, nbytes, &uio);
		} else {
			error = dmu_read_uio(os, zp->z_id, &uio, nbytes);
		}
		if (error) {
			if (error == EINTR)
				goto again;
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}

		n -= nbytes;
	}

	if (error)
		return -error;	/* negative errno, see zev_get_checksums() */
	return len - uio.uio_resid;
}

static void
zev_l0_sig(uint8_t *sig, char *buf)
{
	SHA1_CTX	ctx;

	SHA1Init(&ctx);
	SHA1Update(&ctx, buf, ZEV_L0_SIZE);
	SHA1Final(sig, &ctx);
}

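/*
 * Mix the block number into the block's signature, so that identical
 * blocks at different positions contribute differently to the l1 sum.
 */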
static void
zev_l0_blocksig(uint8_t *blk_sig, uint8_t *l0_sig, uint8_t block_no)
{
	SHA1_CTX	ctx;

	SHA1Init(&ctx);
	SHA1Update(&ctx, l0_sig, SHA1_DIGEST_LENGTH);
	SHA1Update(&ctx, &block_no, sizeof(block_no));
	SHA1Final(blk_sig, &ctx);
}

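/*
 * Add sig_l0 into the running l1 signature.  Both digests are treated
 * as 160-bit big-endian integers; the sum wraps modulo 2^160 (a carry
 * out of the most significant byte is discarded).  Addition commutes,
 * but block order still matters because zev_l0_blocksig() already
 * bound the block number into each summand.  Two-byte analogy:
 * 0x01ff + 0x0001 = 0x0200, with the carry rippling upwards.
 */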
static void
zev_l1_add(uint8_t *sig_l1, uint8_t *sig_l0)
{
	int	i;
	int	s;
	int	carry = 0;

	for (i = SHA1_DIGEST_LENGTH - 1; i >= 0; --i) {
		s = sig_l1[i] + sig_l0[i] + carry;
		carry = s > 255 ? 1 : 0;
		sig_l1[i] = s & 0xff;
	}
}

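/*
 * Size and allocate the result buffer: one zev_sig_t per l0 block and
 * per l1 range touched by [off, off+len).  Returns ENOSPC if that
 * would exceed max_buffer_len; *buffer == NULL with a return value of
 * 0 means there is nothing to checksum.
 */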
static int
zev_get_result_buffer(zev_sig_t **buffer,
                      uint64_t *buffer_len,
                      uint64_t max_buffer_len,
                      znode_t *zp,
                      uint64_t off,
                      uint64_t len,
                      zev_chksum_mode_t mode)
{
	uint64_t	blk_start;
	uint64_t	blk_end;
	uint64_t	l0_blocks;
	uint64_t	l1_blocks;
	uint64_t	sigs;
	uint64_t	buflen;

	/* calculate result set size: how many checksums will we provide? */

	ASSERT(len > 0 || (mode == zev_truncate && len == 0));

	if (len == 0) {
		/* truncate */
		l0_blocks = ((off % ZEV_L0_SIZE) == 0) ? 0 : 1;
		l1_blocks = ((off % ZEV_L1_SIZE) == 0) ? 0 : 1;
	} else {
		/* how many lv1 checksums do we update? */
		blk_start = off / ZEV_L1_SIZE;
		blk_end = (off + len - 1) / ZEV_L1_SIZE;
		l1_blocks = blk_end - blk_start + 1;
		/* how many lv0 checksums do we update? */
		blk_start = off / ZEV_L0_SIZE;
		blk_end = (off + len - 1) / ZEV_L0_SIZE;
		l0_blocks = blk_end - blk_start + 1;
	}

	sigs = l1_blocks + l0_blocks;
	if (sigs == 0) {
		*buffer = NULL;
		*buffer_len = 0;
		return 0;
	}

	buflen = sigs * sizeof(zev_sig_t);
	if (max_buffer_len && (buflen > max_buffer_len)) {
		*buffer = NULL;
		*buffer_len = 0;
		return ENOSPC;
	}
	*buffer_len = buflen;
	*buffer = zev_alloc(buflen);
	return 0;
}

static void
zev_append_sig(zev_sig_t *s, int level, uint64_t off, uint8_t *sig)
{
	s->level = level;
	s->block_offset = off;
	memcpy(s->value, sig, SHA1_DIGEST_LENGTH);
}

/*
 * Calculate all l0 and l1 checksums that are affected by the given range.
 *
 * This function assumes that the ranges it needs to read are already
 * range-locked.
 */
int
zev_get_checksums(zev_sig_t **result,
                  uint64_t *result_buf_len,
                  uint64_t *signature_cnt,
                  uint64_t max_result_len,
                  znode_t *zp,
                  uint64_t off,
                  uint64_t len,
                  zev_chksum_mode_t mode)
{
	uint64_t	off_l1;
	uint64_t	len_l1;
	uint64_t	pos_l1;
	uint64_t	pos_l0;
	char		*buf;
	int64_t		ret;
	uint8_t		sig_l0[SHA1_DIGEST_LENGTH];
	uint8_t		blk_sig_l0[SHA1_DIGEST_LENGTH];
	uint8_t		sig_l1[SHA1_DIGEST_LENGTH];
	uint8_t		l0_block_no;
	zev_sig_t	*sig;
	int		non_empty_l0_blocks;
	zev_sig_cache_file_t *file;
	zev_sig_cache_chksums_t *cs;

	/*
	 * Note: for write events, the callback is called via
	 *    zfs_write() -> zfs_log_write() -> zev_znode_write_cb()
	 *
	 * The transaction is not committed, yet.
	 *
	 * A write() syscall might be split into smaller chunks by zfs_write()
	 *
	 * zfs_write() has a range lock when this is called. (zfs_vnops.c:925)
	 * In zev mode, the range lock will encompass all data we need
	 * to calculate our checksums.
	 *
	 * The same is true for truncates with non-zero length. ("punch hole")
	 */

	ASSERT(len > 0 || (mode == zev_truncate && len == 0));
	*signature_cnt = 0;

	/* start of this megabyte */
	off_l1 = P2ALIGN(off, ZEV_L1_SIZE);
	/* full megabytes */
	if (len == 0) {
		/* truncate(): we'll look at the last lv1 block, only. */
		len_l1 = ZEV_L1_SIZE;
	} else {
		len_l1 = len + (off - off_l1);
		len_l1 = P2ROUNDUP(len_l1, ZEV_L1_SIZE);
	}

	file = zev_chksum_cache_file_get_and_hold(zp);
	zev_chksum_cache_invalidate(file, zp, mode, off, len);
	buf = zev_alloc(ZEV_L0_SIZE);

	ret = zev_get_result_buffer(result, result_buf_len, max_result_len,
	                            zp, off, len, mode);
	if (ret) {
		zev_free(buf, ZEV_L0_SIZE);
		zev_chksum_cache_file_release(file);
		return ret;
	}
	if (*result == NULL) {
		/* we're done */
		zev_free(buf, ZEV_L0_SIZE);
		zev_chksum_cache_file_release(file);
		return 0;
	}
	sig = *result;

	for (pos_l1 = off_l1; pos_l1 < (off_l1+len_l1); pos_l1 += ZEV_L1_SIZE) {

		if (pos_l1 > zp->z_size) {
			cmn_err(CE_WARN, "zev_get_checksums: off+len beyond "
			        "EOF.  Unexpected behaviour; please fix!");
			break;
		}

		/*
		 * Since we hold a reference to 'file', 'cs' can't expire.
		 * Since our ranges are range-locked, other threads won't
		 * touch our checksum entries. (not even read them)
		 * Hence, we don't need to hold() or release() 'cs'.
		 */
		cs = zev_chksum_cache_get_lv1_entry(file, pos_l1);

		l0_block_no = 0;
		non_empty_l0_blocks = 0;
		bzero(sig_l1, sizeof(sig_l1));
		for (pos_l0 = pos_l1;
		     pos_l0 < (pos_l1 + ZEV_L1_SIZE);
		     pos_l0 += ZEV_L0_SIZE) {

			if (pos_l0 >= zp->z_size)
				break;	/* EOF */

			if (zev_chksum_cache_get(sig_l0, file, cs,
			                         pos_l0) != 0) {

				/* signature is not cached, yet. */
				ret = zev_safe_read(zp, buf,
				                    pos_l0, ZEV_L0_SIZE);
				if (ret < 0) {
					zev_free(*result, *result_buf_len);
					zev_free(buf, ZEV_L0_SIZE);
					zev_chksum_cache_file_release(file);
					/* zev_safe_read() returned -errno */
					return -ret;
				}
				/* pad buffer with zeros if necessary */
				if (ret < ZEV_L0_SIZE)
					bzero(buf + ret, ZEV_L0_SIZE - ret);

				/* calculate signature */
				zev_l0_sig(sig_l0, buf);

				zev_chksum_cache_put(sig_l0, file, cs, pos_l0);
			}

			if (!memcmp(sig_l0, all_zero_sig, SHA1_DIGEST_LENGTH)) {
				/* all-zero l0 block.  omit signature. */
				l0_block_no++;
				continue;
			}
			non_empty_l0_blocks++;
			zev_l0_blocksig(blk_sig_l0, sig_l0, l0_block_no);
			zev_l1_add(sig_l1, blk_sig_l0);

			/* report the l0 sig if the block overlaps the range */
			if (((pos_l0 + ZEV_L0_SIZE - 1) >= off) &&
			    (pos_l0 < (off + len))) {
				zev_append_sig(sig++, 0, pos_l0, sig_l0);
			}

			l0_block_no++;
		}

		if (non_empty_l0_blocks && (zp->z_size > ZEV_L0_SIZE))
			zev_append_sig(sig++, 1, pos_l1, sig_l1);
	}

	*signature_cnt = ((char *)sig - (char *)*result) / sizeof(zev_sig_t);

	zev_free(buf, ZEV_L0_SIZE);
	zev_chksum_cache_file_release(file);
	return 0;
}

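/*
 * Handler for the "get signatures" ioctl.  Userland passes a
 * zev_ioctl_get_signatures_t header immediately followed by
 * zev_bufsize bytes of buffer space; the signatures are copied out
 * right behind the header.  A minimal userland sketch -- the command
 * constant ZEV_IOC_GET_FILE_SIGNATURES is an assumption here, see
 * sys/fs/zev.h for the real name:
 *
 *	zev_ioctl_get_signatures_t *gs;
 *
 *	gs = malloc(sizeof(*gs) + bufsize);
 *	gs->zev_fd = file_fd;          // fd of the file to checksum
 *	gs->zev_offset = 0;            // must lie within the file
 *	gs->zev_len = length;          // must be non-zero
 *	gs->zev_bufsize = bufsize;     // buffer space behind the header
 *	if (ioctl(zev_device_fd, ZEV_IOC_GET_FILE_SIGNATURES, gs) == 0) {
 *		zev_sig_t *sig = (zev_sig_t *)(gs + 1);
 *		// gs->zev_signature_cnt entries of sig[] are valid
 *	}
 */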
int
zev_ioc_get_signatures(intptr_t arg, int mode)
{
	zev_ioctl_get_signatures_t gs;
	file_t *fp;
	int ret = 0;
	znode_t *zp;
	zev_sig_t *sig_buf = NULL;
	uint64_t sig_buf_len;
	uint64_t sig_cnt = 0;
	uint64_t sig_len;
	char *dst;
	int range_locked = 0;
	rl_t *rl = NULL;
	ssize_t	lock_off;
	ssize_t lock_len;

	if (ddi_copyin((void *)arg, &gs, sizeof(gs), mode) != 0)
		return EFAULT;
	fp = getf(gs.zev_fd);
	if (fp == NULL)
		return EBADF;
	if (fp->f_vnode->v_vfsp->vfs_fstype != zfsfstype) {
		ret = EINVAL;
		goto out;
	}
	if (fp->f_vnode->v_type != VREG) {
		ret = EINVAL;
		goto out;
	}
	zp = VTOZ(fp->f_vnode);
	if (gs.zev_offset >= zp->z_size) {
		ret = EINVAL;
		goto out;
	}
	/* zev_get_checksums() expects a non-zero length for writes */
	if (gs.zev_len == 0) {
		ret = EINVAL;
		goto out;
	}

	/* range lock data */
	lock_off = P2ALIGN(gs.zev_offset, ZEV_L1_SIZE);
	lock_len = gs.zev_len + (gs.zev_offset - lock_off);
	lock_len = P2ROUNDUP(lock_len, ZEV_L1_SIZE);
	rl = zfs_range_lock(zp, lock_off, lock_len, RL_READER);
	range_locked = 1;

	/* get checksums */
	ret = zev_get_checksums(&sig_buf, &sig_buf_len, &sig_cnt,
	                        gs.zev_bufsize,
	                        zp, gs.zev_offset, gs.zev_len, zev_write);
	if (ret)
		goto out;

	/* copy to userland */
	sig_len = sig_cnt * sizeof(zev_sig_t);
	gs.zev_signature_cnt = sig_cnt;
	if (ddi_copyout(&gs, (void *)arg, sizeof(gs), mode) != 0) {
		ret = EFAULT;
		goto out;
	}
	if (sig_cnt && sig_buf) {
		dst = (char *)arg + sizeof(gs);
		if (ddi_copyout(sig_buf, (void *)dst, sig_len, mode) != 0) {
			ret = EFAULT;
			goto out;
		}
	}
out:
	if (sig_buf)
		zev_free(sig_buf, sig_buf_len);
	if (range_locked)
		zfs_range_unlock(rl);
	releasef(gs.zev_fd);
	return ret;
}