/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ddi.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/cmn_err.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_filio.h>
#include <sys/fs/ufs_log.h>


#ifdef	DEBUG

/*
 * DEBUG ROUTINES
 *	THESE ROUTINES ARE ONLY USED WHEN ASSERTS ARE ENABLED
 */

static	kmutex_t	toptracelock;
static	int		toptraceindex;
int			toptracemax	= 1024;	/* global so it can be set */
struct toptrace {
	enum delta_type	dtyp;
	kthread_t	*thread;
	dev_t		dev;
	long		arg2;
	long		arg3;
	long long	arg1;
} *toptrace;

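/*
 * top_trace() records transaction events (begin, end and individual
 * deltas) in a circular, lazily allocated buffer of toptracemax
 * entries.  The entry following the newest record is stamped with -1
 * sentinel values (except just after a wrap) so the current position
 * can be spotted when examining the buffer from a debugger.
 */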
static void
top_trace(enum delta_type dtyp, dev_t dev, long long arg1, long arg2, long arg3)
{
	if (toptrace == NULL) {
		toptraceindex = 0;
		toptrace = kmem_zalloc((size_t)
		    (sizeof (struct toptrace) * toptracemax), KM_SLEEP);
	}
	mutex_enter(&toptracelock);
	toptrace[toptraceindex].dtyp = dtyp;
	toptrace[toptraceindex].thread = curthread;
	toptrace[toptraceindex].dev = dev;
	toptrace[toptraceindex].arg1 = arg1;
	toptrace[toptraceindex].arg2 = arg2;
	toptrace[toptraceindex].arg3 = arg3;
	if (++toptraceindex == toptracemax)
		toptraceindex = 0;
	else {
		toptrace[toptraceindex].dtyp = (enum delta_type)-1;
		toptrace[toptraceindex].thread = (kthread_t *)-1;
		toptrace[toptraceindex].dev = (dev_t)-1;
		toptrace[toptraceindex].arg1 = -1;
		toptrace[toptraceindex].arg2 = -1;
	}

	mutex_exit(&toptracelock);
}

/*
 * add a range into the metadata map
 */
void
top_mataadd(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb)
{
	ml_unit_t	*ul	= ufsvfsp->vfs_log;

	ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
	deltamap_add(ul->un_matamap, mof, nb, 0, 0, 0, NULL);
}

/*
 * delete a range from the metadata map
 */
void
top_matadel(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb)
{
	ml_unit_t	*ul	= ufsvfsp->vfs_log;

	ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
	ASSERT(!matamap_overlap(ul->un_deltamap, mof, nb));
	deltamap_del(ul->un_matamap, mof, nb);
}

/*
 * clear the entries from the metadata map
 */
void
top_mataclr(ufsvfs_t *ufsvfsp)
{
	ml_unit_t	*ul	= ufsvfsp->vfs_log;

	ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
	map_free_entries(ul->un_matamap);
	map_free_entries(ul->un_deltamap);
}

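/*
 * Record the start of a transaction: optionally trace it, then stash
 * the transaction id and the caller's estimated size in this thread's
 * thread-specific data (topkey) so top_end_debug() and
 * top_delta_debug() can check the deltas actually issued against what
 * was reserved.
 */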
int
top_begin_debug(ml_unit_t *ul, top_t topid, ulong_t size)
{
	threadtrans_t *tp;

	if (ul->un_debug & MT_TRACE)
		top_trace(DT_BOT, ul->un_dev,
		    (long long)topid, (long)size, (long)0);

	ASSERT(curthread->t_flag & T_DONTBLOCK);

	tp = tsd_get(topkey);
	if (tp == NULL) {
		tp = kmem_zalloc(sizeof (threadtrans_t), KM_SLEEP);
		(void) tsd_set(topkey, tp);
	}
	tp->topid  = topid;
	tp->esize  = size;
	tp->rsize  = 0;
	tp->dev    = ul->un_dev;
	return (1);
}

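/*
 * Record the end of a transaction: check that it matches the begin
 * recorded in thread-specific data (same device, id and estimated
 * size, and, with MT_SIZE, that the real size never exceeded the
 * estimate), then fold the per-transaction sizes into the per-device
 * topstats counters.
 */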
int
top_end_debug(ml_unit_t *ul, mt_map_t *mtm, top_t topid, ulong_t size)
{
	threadtrans_t *tp;

	ASSERT(curthread->t_flag & T_DONTBLOCK);

	ASSERT((tp = (threadtrans_t *)tsd_get(topkey)) != NULL);

	ASSERT((tp->dev == ul->un_dev) && (tp->topid == topid) &&
	    (tp->esize == size));

	ASSERT(((ul->un_debug & MT_SIZE) == 0) || (tp->rsize <= tp->esize));

	mtm->mtm_tops->mtm_top_num[topid]++;
	mtm->mtm_tops->mtm_top_size_etot[topid] += tp->esize;
	mtm->mtm_tops->mtm_top_size_rtot[topid] += tp->rsize;

	if (tp->rsize > mtm->mtm_tops->mtm_top_size_max[topid])
		mtm->mtm_tops->mtm_top_size_max[topid] = tp->rsize;
	if (mtm->mtm_tops->mtm_top_size_min[topid] == 0)
			mtm->mtm_tops->mtm_top_size_min[topid] =
			    tp->rsize;
	else
		if (tp->rsize < mtm->mtm_tops->mtm_top_size_min[topid])
			mtm->mtm_tops->mtm_top_size_min[topid] =
			    tp->rsize;

	if (ul->un_debug & MT_TRACE)
		top_trace(DT_EOT, ul->un_dev, (long long)topid,
		    (long)tp->rsize, (long)0);

	return (1);
}

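/*
 * Per-delta checking: every delta must lie entirely within the
 * metadata map (i.e. it really describes UFS metadata), and when
 * MT_TRANSACT is enabled the delta's contribution to the running
 * transaction size is accumulated in the thread's threadtrans_t for
 * the size check performed in top_end_debug().
 */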
int
top_delta_debug(
	ml_unit_t *ul,
	offset_t mof,
	off_t nb,
	delta_t dtyp)
{
	struct threadtrans	*tp;

	ASSERT(curthread->t_flag & T_DONTBLOCK);

	/*
	 * check for delta contained fully within matamap
	 */
	ASSERT((ul->un_matamap == NULL) ||
	    matamap_within(ul->un_matamap, mof, nb));

	/*
	 * maintain transaction info
	 */
	if (ul->un_debug & MT_TRANSACT)
		ul->un_logmap->mtm_tops->mtm_delta_num[dtyp]++;

	/*
	 * check transaction stuff
	 */
	if (ul->un_debug & MT_TRANSACT) {
		tp = (struct threadtrans *)tsd_get(topkey);
		ASSERT(tp);
		switch (dtyp) {
		case DT_CANCEL:
		case DT_ABZERO:
			if (!matamap_within(ul->un_deltamap, mof, nb))
				tp->rsize += sizeof (struct delta);
			break;
		default:
			if (!matamap_within(ul->un_deltamap, mof, nb))
				tp->rsize += nb + sizeof (struct delta);
			break;
		}
	} else
		return (1);

	if (ul->un_debug & MT_TRACE)
		top_trace(dtyp, ul->un_dev, mof, (long)nb, (long)0);

	return (1);
}

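/*
 * Debug hook that forces a roll of this log: committed deltas are
 * written to their final locations in the file system.
 */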
int
top_roll_debug(ml_unit_t *ul)
{
	logmap_roll_dev(ul);
	return (1);
}

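/*
 * One-time initialization of the DEBUG support (the trace buffer lock).
 */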
int
top_init_debug(void)
{
	mutex_init(&toptracelock, NULL, MUTEX_DEFAULT, NULL);
	return (1);
}

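/*
 * Transaction statistics are kept per log device on a singly linked
 * list anchored at topstats_anchor, so the same counters can be picked
 * up again when a device's log map is re-created (see map_get_debug()
 * and map_put_debug()).
 */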
struct topstats_link {
	struct topstats_link	*ts_next;
	dev_t			ts_dev;
	struct topstats		ts_stats;
};
struct topstats_link *topstats_anchor = NULL;

/*
 * DEBUG ROUTINES
 *	from debug portion of *_map.c
 */
/*
 * scan test support
 */
int
logmap_logscan_debug(mt_map_t *mtm, mapentry_t *age)
{
	mapentry_t	*me;
	ml_unit_t	*ul;
	off_t		head, trimroll, lof;

	/*
	 * remember location of youngest rolled delta
	 */
	mutex_enter(&mtm->mtm_mutex);
	ul = mtm->mtm_ul;
	head = ul->un_head_lof;
	trimroll = mtm->mtm_trimrlof;
	for (me = age; me; me = me->me_agenext) {
		lof = me->me_lof;
		if (trimroll == 0)
			trimroll = lof;
		if (lof >= head) {
			if (trimroll >= head && trimroll <= lof)
				trimroll = lof;
		} else {
			if (trimroll <= lof || trimroll >= head)
				trimroll = lof;
		}
	}
	mtm->mtm_trimrlof = trimroll;
	mutex_exit(&mtm->mtm_mutex);
	return (1);
}

/*
 * scan test support
 */
int
logmap_logscan_commit_debug(off_t lof, mt_map_t *mtm)
{
	off_t	oldtrimc, newtrimc, trimroll;

	trimroll = mtm->mtm_trimrlof;
	oldtrimc = mtm->mtm_trimclof;
	newtrimc = mtm->mtm_trimclof = dbtob(btod(lof));

	/*
	 * can't trim prior to transaction w/rolled delta
	 */
	if (trimroll)
		if (newtrimc >= oldtrimc) {
			if (trimroll <= newtrimc && trimroll >= oldtrimc)
				mtm->mtm_trimalof = newtrimc;
		} else {
			if (trimroll >= oldtrimc || trimroll <= newtrimc)
				mtm->mtm_trimalof = newtrimc;
		}
	return (1);
}

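/*
 * scan test support: for DT_AB and DT_INODE deltas, set mtm_trimalof
 * to the most recently seen commit offset (mtm_trimclof).
 */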
int
logmap_logscan_add_debug(struct delta *dp, mt_map_t *mtm)
{
	if ((dp->d_typ == DT_AB) || (dp->d_typ == DT_INODE))
		mtm->mtm_trimalof = mtm->mtm_trimclof;
	return (1);
}

/*
 * log-read after log-write
 */
int
map_check_ldl_write(ml_unit_t *ul, caddr_t va, offset_t vamof, mapentry_t *me)
{
	caddr_t		bufp;

	ASSERT(me->me_nb);
	ASSERT((me->me_flags & ME_AGE) == 0);

	/* Alloc a buf */
	bufp = kmem_alloc(me->me_nb, KM_SLEEP);

	/* Do the read */
	me->me_agenext = NULL;
	if (ldl_read(ul, bufp, me->me_mof, me->me_nb, me) == 0) {
		ASSERT(bcmp(bufp, va + (me->me_mof - vamof), me->me_nb) == 0);
	}

	kmem_free(bufp, me->me_nb);
	return (1);
}

/*
 * Cleanup a map struct
 */
int
map_put_debug(mt_map_t *mtm)
{
	struct topstats_link	*tsl, **ptsl;

	if (mtm->mtm_tops == NULL)
		return (1);

	/* Don't free this, cause the next snarf will want it */
	if ((lufs_debug & MT_TRANSACT) != 0)
		return (1);

	ptsl = &topstats_anchor;
	tsl = topstats_anchor;
	while (tsl) {
		if (mtm->mtm_tops == &tsl->ts_stats) {
			mtm->mtm_tops = NULL;
			*ptsl = tsl->ts_next;
			kmem_free(tsl, sizeof (*tsl));
			return (1);
		}
		ptsl = &tsl->ts_next;
		tsl = tsl->ts_next;
	}

	return (1);
}

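/*
 * Attach transaction statistics to a log map: reuse the counters
 * already recorded for this device if present, otherwise allocate a
 * new topstats_link and put it on the global list.
 */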
int
map_get_debug(ml_unit_t *ul, mt_map_t *mtm)
{
	struct topstats_link	*tsl;

	if ((ul->un_debug & MT_TRANSACT) == 0)
		return (1);

	if (mtm->mtm_type != logmaptype)
		return (1);

	tsl = topstats_anchor;
	while (tsl) {
		if (tsl->ts_dev == ul->un_dev) {
			mtm->mtm_tops = &(tsl->ts_stats);
			return (1);
		}
		tsl = tsl->ts_next;
	}

	tsl = kmem_zalloc(sizeof (*tsl), KM_SLEEP);
	tsl->ts_dev = ul->un_dev;
	tsl->ts_next = topstats_anchor;
	topstats_anchor = tsl;
	mtm->mtm_tops = &tsl->ts_stats;
	return (1);
}

/*
 * check a map's list
 */
int
map_check_linkage(mt_map_t *mtm)
{
	int		i;
	int		hashed;
	int		nexted;
	int		preved;
	int		ncancel;
	mapentry_t	*me;
	off_t		olof;
	off_t		firstlof;
	int		wrapped;

	mutex_enter(&mtm->mtm_mutex);

	ASSERT(mtm->mtm_nme >= 0);

	/*
	 * verify the entries on the hash
	 */
	hashed = 0;
	for (i = 0; i < mtm->mtm_nhash; ++i) {
		for (me = *(mtm->mtm_hash+i); me; me = me->me_hash) {
			++hashed;
			ASSERT(me->me_flags & ME_HASH);
			ASSERT((me->me_flags & ME_LIST) == 0);
		}
	}
	ASSERT(hashed >= mtm->mtm_nme);
	/*
	 * verify the doubly linked list of all entries
	 */
	nexted = 0;
	for (me = mtm->mtm_next; me != (mapentry_t *)mtm; me = me->me_next)
		nexted++;
	preved = 0;
	for (me = mtm->mtm_prev; me != (mapentry_t *)mtm; me = me->me_prev)
		preved++;
	ASSERT(nexted == preved);
	ASSERT(nexted == hashed);

	/*
	 * verify the cancel list
	 */
	ncancel = 0;
	for (me = mtm->mtm_cancel; me; me = me->me_cancel) {
		++ncancel;
		ASSERT(me->me_flags & ME_CANCEL);
	}
	/*
	 * verify the logmap's log offsets
	 */
	if (mtm->mtm_type == logmaptype) {
		olof = mtm->mtm_next->me_lof;
		firstlof = olof;
		wrapped = 0;
		/*
		 * Skip any mapentries with me_lof == 0 and
		 * me_dt == DT_CANCEL; these exist only to mark user
		 * block deletions as unavailable for allocation within
		 * the same moby transaction, in case we crash before it
		 * is committed.  The checks below do not apply to them.
		 */
		for (me = mtm->mtm_next->me_next;
		    me != (mapentry_t *)mtm;
		    me = me->me_next) {

			if (me->me_lof == 0 && me->me_dt == DT_CANCEL)
				continue;
			if (firstlof == 0) {
				olof = me->me_lof;
				firstlof = olof;
				if (me->me_next != (mapentry_t *)mtm)
					me = me->me_next;
				continue;
			}
			ASSERT(me->me_lof != olof);

			if (wrapped) {
				ASSERT(me->me_lof > olof);
				ASSERT(me->me_lof < firstlof);
				olof = me->me_lof;
				continue;
			}
			if (me->me_lof < olof) {
				ASSERT(me->me_lof < firstlof);
				wrapped = 1;
				olof = me->me_lof;
				continue;
			}
			ASSERT(me->me_lof > firstlof);
			ASSERT(me->me_lof < mtm->mtm_ul->un_eol_lof);
			olof = me->me_lof;
		}
	}

	mutex_exit(&mtm->mtm_mutex);
	return (1);
}

/*
 * check for overlap
 */
int
matamap_overlap(mt_map_t *mtm, offset_t mof, off_t nb)
{
	off_t		hnb;
	mapentry_t	*me;
	mapentry_t	**mep;

	for (hnb = 0; nb; nb -= hnb, mof += hnb) {

		hnb = MAPBLOCKSIZE - (mof & MAPBLOCKOFF);
		if (hnb > nb)
			hnb = nb;
		/*
		 * search for dup entry
		 */
		mep = MAP_HASH(mof, mtm);
		mutex_enter(&mtm->mtm_mutex);
		for (me = *mep; me; me = me->me_hash)
			if (DATAoverlapME(mof, hnb, me))
				break;
		mutex_exit(&mtm->mtm_mutex);

		/*
		 * overlap detected
		 */
		if (me)
			return (1);
	}
	return (0);
}
/*
 * check for within
 */
int
matamap_within(mt_map_t *mtm, offset_t mof, off_t nb)
{
	off_t		hnb;
	mapentry_t	*me;
	mapentry_t	**mep;
	int		scans	= 0;
	int		withins	= 0;

	for (hnb = 0; nb && scans == withins; nb -= hnb, mof += hnb) {
		scans++;

		hnb = MAPBLOCKSIZE - (mof & MAPBLOCKOFF);
		if (hnb > nb)
			hnb = nb;
		/*
		 * search for within entry
		 */
		mep = MAP_HASH(mof, mtm);
		mutex_enter(&mtm->mtm_mutex);
		for (me = *mep; me; me = me->me_hash)
			if (DATAwithinME(mof, hnb, me)) {
				withins++;
				break;
			}
		mutex_exit(&mtm->mtm_mutex);
	}
	return (scans == withins);
}

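/*
 * Debug hook for moving the log head: if the remembered "youngest
 * rolled delta" offset (mtm_trimrlof) no longer lies within the live
 * portion of the log (between head and tail, allowing for wrap),
 * forget it.
 */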
int
ldl_sethead_debug(ml_unit_t *ul)
{
	mt_map_t	*mtm	= ul->un_logmap;
	off_t		trimr	= mtm->mtm_trimrlof;
	off_t		head	= ul->un_head_lof;
	off_t		tail	= ul->un_tail_lof;

	if (head <= tail) {
		if (trimr < head || trimr >= tail)
			mtm->mtm_trimrlof = 0;
	} else {
		if (trimr >= tail && trimr < head)
			mtm->mtm_trimrlof = 0;
	}
	return (1);
}

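/*
 * Seed a newly initialized on-disk log unit with the current global
 * debug flags.
 */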
int
lufs_initialize_debug(ml_odunit_t *ud)
{
	ud->od_debug = lufs_debug;
	return (1);
}

#endif	/* DEBUG */

/*
 * lufs_debug controls the debug level for TSufs, and is only used
 * for a debug kernel. It's referenced by ufs_ioctl() and so is
 * not under #ifdef DEBUG compilation.
 */
uint_t lufs_debug;