/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ddi.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/cmn_err.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_filio.h>
#include <sys/fs/ufs_log.h>


#ifdef	DEBUG

/*
 * DEBUG ROUTINES
 *	THESE ROUTINES ARE ONLY USED WHEN ASSERTS ARE ENABLED
 */

static	kmutex_t	toptracelock;
static	int		toptraceindex;
int			toptracemax	= 1024;	/* global so it can be set */
struct toptrace {
	enum delta_type	dtyp;
	kthread_t	*thread;
	dev_t		dev;
	long		arg2;
	long		arg3;
	long long	arg1;
} *toptrace;

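/*
 * record one transaction event in a lazily allocated, circular trace
 * buffer; when the index does not wrap, the next slot is stamped with
 * -1 values so the newest entry can be located from a debugger
 */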
static void
top_trace(enum delta_type dtyp, dev_t dev, long long arg1, long arg2, long arg3)
{
	if (toptrace == NULL) {
		toptraceindex = 0;
		toptrace = kmem_zalloc((size_t)
		    (sizeof (struct toptrace) * toptracemax), KM_SLEEP);
	}
	mutex_enter(&toptracelock);
	toptrace[toptraceindex].dtyp = dtyp;
	toptrace[toptraceindex].thread = curthread;
	toptrace[toptraceindex].dev = dev;
	toptrace[toptraceindex].arg1 = arg1;
	toptrace[toptraceindex].arg2 = arg2;
	toptrace[toptraceindex].arg3 = arg3;
	if (++toptraceindex == toptracemax)
		toptraceindex = 0;
	else {
		toptrace[toptraceindex].dtyp = (enum delta_type)-1;
		toptrace[toptraceindex].thread = (kthread_t *)-1;
		toptrace[toptraceindex].dev = (dev_t)-1;
		toptrace[toptraceindex].arg1 = -1;
		toptrace[toptraceindex].arg2 = -1;
	}

	mutex_exit(&toptracelock);
}

/*
 * add a range into the metadata map
 */
void
top_mataadd(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb)
{
	ml_unit_t	*ul	= ufsvfsp->vfs_log;

	ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
	deltamap_add(ul->un_matamap, mof, nb, 0, 0, 0, NULL);
}

/*
 * delete a range from the metadata map
 */
void
top_matadel(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb)
{
	ml_unit_t	*ul	= ufsvfsp->vfs_log;

	ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
	ASSERT(!matamap_overlap(ul->un_deltamap, mof, nb));
	deltamap_del(ul->un_matamap, mof, nb);
}

/*
 * clear the entries from the metadata map
 */
void
top_mataclr(ufsvfs_t *ufsvfsp)
{
	ml_unit_t	*ul	= ufsvfsp->vfs_log;

	ASSERT(ufsvfsp->vfs_dev == ul->un_dev);
	map_free_entries(ul->un_matamap);
	map_free_entries(ul->un_deltamap);
}

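/*
 * note the start of a transaction: remember the transaction id,
 * estimated size, and device in this thread's TSD so top_end_debug()
 * and top_delta_debug() can check against them
 */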
int
top_begin_debug(ml_unit_t *ul, top_t topid, ulong_t size)
{
	threadtrans_t *tp;

	if (ul->un_debug & MT_TRACE)
		top_trace(DT_BOT, ul->un_dev,
		    (long long)topid, (long)size, (long)0);

	ASSERT(curthread->t_flag & T_DONTBLOCK);

	tp = tsd_get(topkey);
	if (tp == NULL) {
		tp = kmem_zalloc(sizeof (threadtrans_t), KM_SLEEP);
		(void) tsd_set(topkey, tp);
	}
	tp->topid  = topid;
	tp->esize  = size;
	tp->rsize  = 0;
	tp->dev    = ul->un_dev;
	return (1);
}

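/*
 * note the end of a transaction: verify it matches what was recorded
 * at top_begin_debug() time and fold the estimated and real sizes
 * into the per-device statistics (totals, max, and min)
 */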
int
top_end_debug(ml_unit_t *ul, mt_map_t *mtm, top_t topid, ulong_t size)
{
	threadtrans_t *tp;

	ASSERT(curthread->t_flag & T_DONTBLOCK);

	ASSERT((tp = (threadtrans_t *)tsd_get(topkey)) != NULL);

	ASSERT((tp->dev == ul->un_dev) && (tp->topid == topid) &&
	    (tp->esize == size));

	ASSERT(((ul->un_debug & MT_SIZE) == 0) || (tp->rsize <= tp->esize));

	mtm->mtm_tops->mtm_top_num[topid]++;
	mtm->mtm_tops->mtm_top_size_etot[topid] += tp->esize;
	mtm->mtm_tops->mtm_top_size_rtot[topid] += tp->rsize;

	if (tp->rsize > mtm->mtm_tops->mtm_top_size_max[topid])
		mtm->mtm_tops->mtm_top_size_max[topid] = tp->rsize;
	if (mtm->mtm_tops->mtm_top_size_min[topid] == 0)
		mtm->mtm_tops->mtm_top_size_min[topid] = tp->rsize;
	else if (tp->rsize < mtm->mtm_tops->mtm_top_size_min[topid])
		mtm->mtm_tops->mtm_top_size_min[topid] = tp->rsize;

	if (ul->un_debug & MT_TRACE)
		top_trace(DT_EOT, ul->un_dev, (long long)topid,
		    (long)tp->rsize, (long)0);

	return (1);
}

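/*
 * note one delta within a transaction: check that it falls within the
 * metadata map, count it per delta type, and add its log space to the
 * running real size of this thread's transaction
 */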
int
top_delta_debug(
	ml_unit_t *ul,
	offset_t mof,
	off_t nb,
	delta_t dtyp)
{
	struct threadtrans	*tp;

	ASSERT(curthread->t_flag & T_DONTBLOCK);

	/*
	 * check for delta contained fully within matamap
	 */
	ASSERT((ul->un_matamap == NULL) ||
	    matamap_within(ul->un_matamap, mof, nb));

	/*
	 * maintain transaction info
	 */
	if (ul->un_debug & MT_TRANSACT)
		ul->un_logmap->mtm_tops->mtm_delta_num[dtyp]++;

	/*
	 * check transaction stuff
	 */
	if (ul->un_debug & MT_TRANSACT) {
		tp = (struct threadtrans *)tsd_get(topkey);
		ASSERT(tp);
		switch (dtyp) {
		case DT_CANCEL:
		case DT_ABZERO:
			if (!matamap_within(ul->un_deltamap, mof, nb))
				tp->rsize += sizeof (struct delta);
			break;
		default:
			if (!matamap_within(ul->un_deltamap, mof, nb))
				tp->rsize += nb + sizeof (struct delta);
			break;
		}
	} else
		return (1);

	if (ul->un_debug & MT_TRACE)
		top_trace(dtyp, ul->un_dev, mof, (long)nb, (long)0);

	return (1);
}

int
top_roll_debug(ml_unit_t *ul)
{
	logmap_roll_dev(ul);
	return (1);
}

int
top_init_debug(void)
{
	mutex_init(&toptracelock, NULL, MUTEX_DEFAULT, NULL);
	return (1);
}

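/*
 * per-device transaction statistics; entries stay on a global list so
 * the numbers accumulate across mounts while MT_TRANSACT debugging is
 * enabled (see map_get_debug()/map_put_debug())
 */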
struct topstats_link {
	struct topstats_link	*ts_next;
	dev_t			ts_dev;
	struct topstats		ts_stats;
};
struct topstats_link *topstats_anchor = NULL;

/*
 * DEBUG ROUTINES
 *	from debug portion of *_map.c
 */
/*
 * scan test support
 */
int
logmap_logscan_debug(mt_map_t *mtm, mapentry_t *age)
{
	mapentry_t	*me;
	ml_unit_t	*ul;
	off_t		head, trimroll, lof;

	/*
	 * remember location of youngest rolled delta
	 */
	mutex_enter(&mtm->mtm_mutex);
	ul = mtm->mtm_ul;
	head = ul->un_head_lof;
	trimroll = mtm->mtm_trimrlof;
	for (me = age; me; me = me->me_agenext) {
		lof = me->me_lof;
		if (trimroll == 0)
			trimroll = lof;
		if (lof >= head) {
			if (trimroll >= head && trimroll <= lof)
				trimroll = lof;
		} else {
			if (trimroll <= lof || trimroll >= head)
				trimroll = lof;
		}
	}
	mtm->mtm_trimrlof = trimroll;
	mutex_exit(&mtm->mtm_mutex);
	return (1);
}

/*
 * scan test support
 */
int
logmap_logscan_commit_debug(off_t lof, mt_map_t *mtm)
{
	off_t	oldtrimc, newtrimc, trimroll;

	trimroll = mtm->mtm_trimrlof;
	oldtrimc = mtm->mtm_trimclof;
	newtrimc = mtm->mtm_trimclof = dbtob(btod(lof));

	/*
	 * can't trim prior to transaction w/rolled delta
	 */
	if (trimroll) {
		if (newtrimc >= oldtrimc) {
			if (trimroll <= newtrimc && trimroll >= oldtrimc)
				mtm->mtm_trimalof = newtrimc;
		} else {
			if (trimroll >= oldtrimc || trimroll <= newtrimc)
				mtm->mtm_trimalof = newtrimc;
		}
	}
	return (1);
}

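/*
 * scan test support: an allocation-block or inode delta moves
 * mtm_trimalof up to the most recently committed offset
 */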
int
logmap_logscan_add_debug(struct delta *dp, mt_map_t *mtm)
{
	if ((dp->d_typ == DT_AB) || (dp->d_typ == DT_INODE))
		mtm->mtm_trimalof = mtm->mtm_trimclof;
	return (1);
}

/*
 * log-read after log-write
 */
int
map_check_ldl_write(ml_unit_t *ul, caddr_t va, offset_t vamof, mapentry_t *me)
{
	caddr_t		bufp;

	ASSERT(me->me_nb);
	ASSERT((me->me_flags & ME_AGE) == 0);

	/* Alloc a buf */
	bufp = kmem_alloc(me->me_nb, KM_SLEEP);

	/* Do the read */
	me->me_agenext = NULL;
	if (ldl_read(ul, bufp, me->me_mof, me->me_nb, me) == 0) {
		ASSERT(bcmp(bufp, va + (me->me_mof - vamof), me->me_nb) == 0);
	}

	kmem_free(bufp, me->me_nb);
	return (1);
}

/*
 * Cleanup a map struct
 */
int
map_put_debug(mt_map_t *mtm)
{
	struct topstats_link	*tsl, **ptsl;

	if (mtm->mtm_tops == NULL)
		return (1);

	/* Don't free this, because the next snarf will want it */
	if ((lufs_debug & MT_TRANSACT) != 0)
		return (1);

	ptsl = &topstats_anchor;
	tsl = topstats_anchor;
	while (tsl) {
		if (mtm->mtm_tops == &tsl->ts_stats) {
			mtm->mtm_tops = NULL;
			*ptsl = tsl->ts_next;
			kmem_free(tsl, sizeof (*tsl));
			return (1);
		}
		ptsl = &tsl->ts_next;
		tsl = tsl->ts_next;
	}

	return (1);
}

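/*
 * attach transaction statistics to a log map: reuse the topstats entry
 * already recorded for this device, or allocate a new one and link it
 * onto the global list
 */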
int
map_get_debug(ml_unit_t *ul, mt_map_t *mtm)
{
	struct topstats_link	*tsl;

	if ((ul->un_debug & MT_TRANSACT) == 0)
		return (1);

	if (mtm->mtm_type != logmaptype)
		return (1);

	tsl = topstats_anchor;
	while (tsl) {
		if (tsl->ts_dev == ul->un_dev) {
			mtm->mtm_tops = &(tsl->ts_stats);
			return (1);
		}
		tsl = tsl->ts_next;
	}

	tsl = kmem_zalloc(sizeof (*tsl), KM_SLEEP);
	tsl->ts_dev = ul->un_dev;
	tsl->ts_next = topstats_anchor;
	topstats_anchor = tsl;
	mtm->mtm_tops = &tsl->ts_stats;
	return (1);
}

/*
 * check a map's list
 */
int
map_check_linkage(mt_map_t *mtm)
{
	int		i;
	int		hashed;
	int		nexted;
	int		preved;
	int		ncancel;
	mapentry_t	*me;
	off_t		olof;
	off_t		firstlof;
	int		wrapped;

	mutex_enter(&mtm->mtm_mutex);

	ASSERT(mtm->mtm_nme >= 0);

	/*
	 * verify the entries on the hash
	 */
	hashed = 0;
	for (i = 0; i < mtm->mtm_nhash; ++i) {
		for (me = *(mtm->mtm_hash+i); me; me = me->me_hash) {
			++hashed;
			ASSERT(me->me_flags & ME_HASH);
			ASSERT((me->me_flags & ME_LIST) == 0);
		}
	}
	ASSERT(hashed >= mtm->mtm_nme);
	/*
	 * verify the doubly linked list of all entries
	 */
	nexted = 0;
	for (me = mtm->mtm_next; me != (mapentry_t *)mtm; me = me->me_next)
		nexted++;
	preved = 0;
	for (me = mtm->mtm_prev; me != (mapentry_t *)mtm; me = me->me_prev)
		preved++;
	ASSERT(nexted == preved);
	ASSERT(nexted == hashed);

	/*
	 * verify the cancel list
	 */
	ncancel = 0;
	for (me = mtm->mtm_cancel; me; me = me->me_cancel) {
		++ncancel;
		ASSERT(me->me_flags & ME_CANCEL);
	}
	/*
	 * verify the logmap's log offsets
	 */
	if (mtm->mtm_type == logmaptype) {
		olof = mtm->mtm_next->me_lof;
		firstlof = olof;
		wrapped = 0;
		/*
		 * Make sure to skip any mapentries whose me_lof is 0
		 * and me_dt is DT_CANCEL; these mapentries exist only
		 * to mark user blocks deleted in the current moby
		 * transaction as unavailable for reallocation, in case
		 * we crash before the transaction is committed.  Skip
		 * these entries in the checks below as they are not
		 * applicable.
		 */
		for (me = mtm->mtm_next->me_next;
		    me != (mapentry_t *)mtm;
		    me = me->me_next) {

			if (me->me_lof == 0 && me->me_dt == DT_CANCEL)
				continue;
			if (firstlof == 0) {
				olof = me->me_lof;
				firstlof = olof;
				if (me->me_next != (mapentry_t *)mtm)
					me = me->me_next;
				continue;
			}
			ASSERT(me->me_lof != olof);

			if (wrapped) {
				ASSERT(me->me_lof > olof);
				ASSERT(me->me_lof < firstlof);
				olof = me->me_lof;
				continue;
			}
			if (me->me_lof < olof) {
				ASSERT(me->me_lof < firstlof);
				wrapped = 1;
				olof = me->me_lof;
				continue;
			}
			ASSERT(me->me_lof > firstlof);
			ASSERT(me->me_lof < mtm->mtm_ul->un_eol_lof);
			olof = me->me_lof;
		}
	}

	mutex_exit(&mtm->mtm_mutex);
	return (1);
}

/*
 * check for overlap
 */
int
matamap_overlap(mt_map_t *mtm, offset_t mof, off_t nb)
{
	off_t		hnb;
	mapentry_t	*me;
	mapentry_t	**mep;

	for (hnb = 0; nb; nb -= hnb, mof += hnb) {

		hnb = MAPBLOCKSIZE - (mof & MAPBLOCKOFF);
		if (hnb > nb)
			hnb = nb;
		/*
		 * search for dup entry
		 */
		mep = MAP_HASH(mof, mtm);
		mutex_enter(&mtm->mtm_mutex);
		for (me = *mep; me; me = me->me_hash)
			if (DATAoverlapME(mof, hnb, me))
				break;
		mutex_exit(&mtm->mtm_mutex);

		/*
		 * overlap detected
		 */
		if (me)
			return (1);
	}
	return (0);
}

/*
 * check that a range lies entirely within entries already in the map
 */
int
matamap_within(mt_map_t *mtm, offset_t mof, off_t nb)
{
	off_t		hnb;
	mapentry_t	*me;
	mapentry_t	**mep;
	int		scans	= 0;
	int		withins	= 0;

	for (hnb = 0; nb && scans == withins; nb -= hnb, mof += hnb) {
		scans++;

		hnb = MAPBLOCKSIZE - (mof & MAPBLOCKOFF);
		if (hnb > nb)
			hnb = nb;
		/*
		 * search for within entry
		 */
		mep = MAP_HASH(mof, mtm);
		mutex_enter(&mtm->mtm_mutex);
		for (me = *mep; me; me = me->me_hash)
			if (DATAwithinME(mof, hnb, me)) {
				withins++;
				break;
			}
		mutex_exit(&mtm->mtm_mutex);
	}
	return (scans == withins);
}

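/*
 * the log head is being reset: if the remembered youngest-rolled-delta
 * offset no longer falls within the live portion of the log (head to
 * tail, possibly wrapped), forget it
 */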
int
ldl_sethead_debug(ml_unit_t *ul)
{
	mt_map_t	*mtm	= ul->un_logmap;
	off_t		trimr	= mtm->mtm_trimrlof;
	off_t		head	= ul->un_head_lof;
	off_t		tail	= ul->un_tail_lof;

	if (head <= tail) {
		if (trimr < head || trimr >= tail)
			mtm->mtm_trimrlof = 0;
	} else {
		if (trimr >= tail && trimr < head)
			mtm->mtm_trimrlof = 0;
	}
	return (1);
}

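/*
 * seed a freshly initialized on-disk log unit with the current global
 * debug level
 */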
int
lufs_initialize_debug(ml_odunit_t *ud)
{
	ud->od_debug = lufs_debug;
	return (1);
}

#endif	/* DEBUG */

/*
 * lufs_debug controls the debug level for TSufs, and is only used
 * for a debug kernel. It's referenced by ufs_ioctl() and so is
 * not under #ifdef DEBUG compilation.
 */
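/*
 * For illustration only: on a DEBUG kernel the level can be preset at
 * boot from /etc/system (assuming the lufs_debug symbol is reachable
 * through the ufs module), for example:
 *
 *	set ufs:lufs_debug = 0x1
 *
 * The bit value above is arbitrary; use the MT_* debug bits tested by
 * the routines in this file.  The variable can also be patched at run
 * time with a kernel debugger.
 */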
uint_t lufs_debug;