xref: /linux/fs/xfs/xfs_trans_ail.c (revision 47902f3611b392209e2a412bf7ec02dca95e666d)
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"

STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *);
STATIC xfs_log_item_t * xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);

#ifdef DEBUG
STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *);
#else
#define	xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * This is called by the log manager code to determine the LSN
 * of the tail of the log.  This is exactly the LSN of the first
 * item in the AIL.  If the AIL is empty, then this function
 * returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the
 * lsn of the first item in the AIL.
 */
xfs_lsn_t
xfs_trans_ail_tail(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	lsn;
	xfs_log_item_t	*lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_min(ailp);
	if (lip == NULL) {
		lsn = (xfs_lsn_t)0;
	} else {
		lsn = lip->li_lsn;
	}
	spin_unlock(&ailp->xa_lock);

	return lsn;
}
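
/*
 * Background note: an xfs_lsn_t packs a log cycle number into the high
 * 32 bits and a log block number into the low 32 bits, which is why
 * LSNs are ordered with XFS_LSN_CMP() rather than by raw integer
 * comparison.  Below is a minimal sketch of consuming the tail LSN;
 * the helper name and its logic are illustrative assumptions, not
 * code that exists elsewhere in XFS.
 */
#if 0	/* illustrative sketch only, not compiled */
STATIC int
example_tail_advanced(
	struct xfs_ail	*ailp,
	xfs_lsn_t	old_tail)
{
	xfs_lsn_t	tail = xfs_trans_ail_tail(ailp);

	if (tail == 0)		/* AIL empty: nothing pins the log tail */
		return 1;
	return XFS_LSN_CMP(tail, old_tail) > 0;
}
#endif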

/*
 * xfs_trans_push_ail
 *
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously in a separate thread, so we return the tail
 * of the log right now instead of the tail after the push.  This means we will
 * either continue right away, or we will sleep waiting on the async thread to
 * do its work.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called.  We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_trans_ail_push(
	struct xfs_ail	*ailp,
	xfs_lsn_t	threshold_lsn)
{
	xfs_log_item_t	*lip;

	lip = xfs_ail_min(ailp);
	if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
		if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0)
			xfsaild_wakeup(ailp, threshold_lsn);
	}
}

/*
 * AIL traversal cursor initialisation.
 *
 * The cursor keeps track of where our current traversal is up
 * to by tracking the next item in the list for us.  However, for
 * this to be safe, removing an object from the AIL needs to invalidate
 * any cursor that points to it.  Hence the traversal cursor needs to
 * be linked to the struct xfs_ail so that deletion can search all the
 * active cursors for invalidation.
 *
 * We don't link the push cursor because it is embedded in the struct
 * xfs_ail and hence easily findable.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	if (cur == &ailp->xa_cursors)
		return;

	cur->next = ailp->xa_cursors.next;
	ailp->xa_cursors.next = cur;
}

/*
 * Set the cursor to the next item, because when we look
 * up the cursor the current item may have been freed.
 */
STATIC void
xfs_trans_ail_cursor_set(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	*lip)
{
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
}

/*
 * Get the next item in the traversal and advance the cursor.
 * If the cursor was invalidated (indicated by the low bit of
 * the item pointer being set), restart the traversal.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((__psint_t)lip & 1)
		lip = xfs_ail_min(ailp);
	xfs_trans_ail_cursor_set(ailp, cur, lip);
	return lip;
}

/*
 * Now that the traversal is complete, we need to remove the cursor
 * from the list of traversing cursors.  Avoid removing the embedded
 * push cursor, but use the fact it is always present to make the
 * list deletion simple.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*done)
{
	struct xfs_ail_cursor	*prev = NULL;
	struct xfs_ail_cursor	*cur;

	done->item = NULL;
	if (done == &ailp->xa_cursors)
		return;
	prev = &ailp->xa_cursors;
	for (cur = prev->next; cur; prev = cur, cur = prev->next) {
		if (cur == done) {
			prev->next = cur->next;
			break;
		}
	}
	ASSERT(cur);
}

/*
 * Invalidate any cursor that is pointing to this item.  This is
 * called when an item is removed from the AIL.  Any cursor pointing
 * to this object is now invalid and the traversal needs to be
 * terminated so it doesn't reference a freed object.  We set the
 * low bit of the cursor item pointer so we can distinguish between
 * an invalidation and the end of the list when getting the next item
 * from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	/* need to search all cursors */
	for (cur = &ailp->xa_cursors; cur; cur = cur->next) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((__psint_t)cur->item | 1);
	}
}
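
/*
 * A minimal sketch of the tagged-pointer trick used above.  Log items
 * are heap-allocated structures and therefore at least word aligned,
 * so bit zero of a valid item pointer is always clear and can safely
 * carry the "invalidated" marker that xfs_trans_ail_cursor_next()
 * tests for.  The local variable below is illustrative only.
 */
#if 0	/* illustrative sketch only, not compiled */
	struct xfs_log_item	*item = cur->item;

	/* mark: set the low bit to flag the cursor as invalidated */
	cur->item = (struct xfs_log_item *)((__psint_t)item | 1);

	/* test: a set low bit means the traversal must restart */
	if ((__psint_t)cur->item & 1)
		item = xfs_ail_min(ailp);
#endif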

/*
 * Return the first item in the AIL with an lsn greater than or equal
 * to the given lsn, and initialise the cursor so that subsequent calls
 * to xfs_trans_ail_cursor_next() continue the traversal from there.
 * If lsn is zero, start from the head of the AIL.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	xfs_trans_ail_cursor_init(ailp, cur);
	lip = xfs_ail_min(ailp);
	if (lsn == 0)
		goto out;

	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	lip = NULL;
out:
	xfs_trans_ail_cursor_set(ailp, cur, lip);
	return lip;
}
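
/*
 * For reference, a sketch of the traversal pattern the cursor routines
 * are designed for; xfsaild_push() below follows this shape.  The loop
 * body is elided, and the locking shown is the required protocol.
 */
#if 0	/* illustrative sketch only, not compiled */
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, cur, lsn);
	while (lip != NULL) {
		/* ... operate on lip, possibly cycling xa_lock ... */
		lip = xfs_trans_ail_cursor_next(ailp, cur);
	}
	xfs_trans_ail_cursor_done(ailp, cur);
	spin_unlock(&ailp->xa_lock);
#endif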

/*
 * xfsaild_push does the work of pushing on the AIL.  Returning a timeout of
 * zero indicates that the caller should sleep until woken.
 */
long
xfsaild_push(
	struct xfs_ail	*ailp,
	xfs_lsn_t	*last_lsn)
{
	long		tout = 0;
	xfs_lsn_t	last_pushed_lsn = *last_lsn;
	xfs_lsn_t	target = ailp->xa_target;
	xfs_lsn_t	lsn;
	xfs_log_item_t	*lip;
	int		flush_log, count, stuck;
	xfs_mount_t	*mp = ailp->xa_mount;
	struct xfs_ail_cursor	*cur = &ailp->xa_cursors;
	int		push_xfsbufd = 0;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_init(ailp, cur);
	lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn);
	if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
		/*
		 * AIL is empty or our push has reached the end.
		 */
		xfs_trans_ail_cursor_done(ailp, cur);
		spin_unlock(&ailp->xa_lock);
		*last_lsn = 0;
		return tout;
	}

	XFS_STATS_INC(xs_push_ail);

	/*
	 * While the item we are looking at is below the given threshold
	 * try to flush it out.  We'd like not to stop until we've at least
	 * tried to push on everything in the AIL with an LSN less than
	 * the given threshold.
	 *
	 * However, we will stop after a certain number of pushes and wait
	 * for a reduced timeout to fire before pushing further.  This
	 * prevents us from spinning when we can't do anything or there is
	 * lots of contention on the AIL lists.
	 */
	lsn = lip->li_lsn;
	flush_log = stuck = count = 0;
	while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {
		int	lock_result;
		/*
		 * If we can lock the item without sleeping, unlock the AIL
		 * lock and flush the item.  Then re-grab the AIL lock so we
		 * can look for the next item on the AIL.  List changes are
		 * handled by the AIL lookup functions internally.
		 *
		 * If we can't lock the item, either its holder will flush it
		 * or it is already being flushed or it is being relogged.  In
		 * any of these cases it is being taken care of and we can just
		 * skip to the next item in the list.
		 */
		lock_result = IOP_TRYLOCK(lip);
		spin_unlock(&ailp->xa_lock);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(xs_push_ail_success);
			IOP_PUSH(lip);
			last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PUSHBUF:
			XFS_STATS_INC(xs_push_ail_pushbuf);
			IOP_PUSHBUF(lip);
			last_pushed_lsn = lsn;
			push_xfsbufd = 1;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(xs_push_ail_pinned);
			stuck++;
			flush_log = 1;
			break;

		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(xs_push_ail_locked);
			last_pushed_lsn = lsn;
			stuck++;
			break;

		default:
			ASSERT(0);
			break;
		}

		spin_lock(&ailp->xa_lock);
		/* should we bother continuing? */
		if (XFS_FORCED_SHUTDOWN(mp))
			break;
		ASSERT(mp->m_log);

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done.  i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}
	xfs_trans_ail_cursor_done(ailp, cur);
	spin_unlock(&ailp->xa_lock);

	if (flush_log) {
		/*
		 * If something we need to push out was pinned, then
		 * push out the log so it will become unpinned and
		 * move forward in the AIL.
		 */
		XFS_STATS_INC(xs_push_ail_flush);
		xfs_log_force(mp, 0);
	}

	if (push_xfsbufd) {
		/* we've got delayed write buffers to flush */
		wake_up_process(mp->m_ddev_targp->bt_task);
	}

	if (!count) {
		/* We're past our target or empty, so idle */
		last_pushed_lsn = 0;
	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
		/*
		 * We reached the target so wait a bit longer for I/O to
		 * complete and remove pushed items from the AIL before we
		 * start the next scan from the start of the AIL.
		 */
		tout = 50;
		last_pushed_lsn = 0;
	} else if ((stuck * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we
		 * are stuck due to operations in progress.  "Stuck" in this
		 * case is defined as >90% of the items we tried to push
		 * were stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * continuing from where we were.
		 */
		tout = 20;
	} else {
		/* more to do, but wait a short while before continuing */
		tout = 10;
	}
	*last_lsn = last_pushed_lsn;
	return tout;
}
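
/*
 * For context, a hedged sketch of how the xfsaild thread (implemented
 * outside this file) is expected to consume the returned timeout: a
 * tout of zero means sleep until xfsaild_wakeup() fires, while a
 * non-zero tout is a delay in milliseconds before the next push.  The
 * real thread loop may differ in detail.
 */
#if 0	/* illustrative sketch only, not compiled */
	long		tout = 0;
	xfs_lsn_t	last_pushed_lsn = 0;

	while (!kthread_should_stop()) {
		if (tout)
			schedule_timeout_interruptible(
					msecs_to_jiffies(tout));
		/* when tout is zero, block here until woken instead */
		tout = xfsaild_push(ailp, &last_pushed_lsn);
	}
#endif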


/*
 * This is to be called when an item is unlocked that may have
 * been in the AIL.  It will wake up the first member of the AIL
 * wait list if this item's unlocking might allow it to progress.
 * If the item is in the AIL, then we need to get the AIL lock
 * while doing our checking so we don't race with someone going
 * to sleep waiting for this event in xfs_trans_push_ail().
 */
void
xfs_trans_unlocked_item(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*min_lip;

	/*
	 * If we're forcibly shutting down, we may have
	 * unlocked log items arbitrarily.  The last thing
	 * we want to do is to move the tail of the log
	 * over some potentially valid data.
	 */
	if (!(lip->li_flags & XFS_LI_IN_AIL) ||
	    XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
		return;
	}

	/*
	 * This is the one case where we can call into xfs_ail_min()
	 * without holding the AIL lock because we only care about the
	 * case where we are at the tail of the AIL.  If the object isn't
	 * at the tail, it doesn't matter what result we get back.  This
	 * is slightly racy because since we were just unlocked, we could
	 * go to sleep between the call to xfs_ail_min and the call to
	 * xfs_log_move_tail, have someone else lock us, commit us to disk,
	 * move us out of the tail of the AIL, and then we wake up.  However,
	 * the call to xfs_log_move_tail() doesn't do anything if there's
	 * not enough free space to wake people up so we're safe calling it.
	 */
	min_lip = xfs_ail_min(ailp);

	if (min_lip == lip)
		xfs_log_move_tail(ailp->xa_mount, 1);
}	/* xfs_trans_unlocked_item */


/*
 * Update the position of the item in the AIL with the new
 * lsn.  If it is not yet in the AIL, add it.  Otherwise, move
 * it to its new position by removing it and re-adding it.
 *
 * Wake up anyone with an lsn less than the item's lsn.  If the item
 * we move in the AIL is the minimum one, update the tail lsn in the
 * log manager.
 *
 * This function must be called with the AIL lock held.  The lock
 * is dropped before returning.
 */
void
xfs_trans_ail_update(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip,
	xfs_lsn_t	lsn) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*dlip = NULL;
	xfs_log_item_t		*mlip;	/* ptr to minimum lip */
	xfs_lsn_t		tail_lsn;

	mlip = xfs_ail_min(ailp);

	if (lip->li_flags & XFS_LI_IN_AIL) {
		dlip = xfs_ail_delete(ailp, lip);
		ASSERT(dlip == lip);
		xfs_trans_ail_cursor_clear(ailp, dlip);
	} else {
		lip->li_flags |= XFS_LI_IN_AIL;
	}

	lip->li_lsn = lsn;
	xfs_ail_insert(ailp, lip);

	if (mlip == dlip) {
		mlip = xfs_ail_min(ailp);
		/*
		 * It is not safe to access mlip after the AIL lock is
		 * dropped, so we must get a copy of li_lsn before we do
		 * so.  This is especially important on 32-bit platforms
		 * where accessing and updating 64-bit values like li_lsn
		 * is not atomic.
		 */
		tail_lsn = mlip->li_lsn;
		spin_unlock(&ailp->xa_lock);
		xfs_log_move_tail(ailp->xa_mount, tail_lsn);
	} else {
		spin_unlock(&ailp->xa_lock);
	}
}	/* xfs_trans_ail_update */
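
/*
 * Note the locking contract above: the caller takes xa_lock and
 * xfs_trans_ail_update() releases it, as the __releases() annotation
 * records.  A minimal caller sketch; commit_lsn is illustrative.
 */
#if 0	/* illustrative sketch only, not compiled */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_update(ailp, lip, commit_lsn);	/* drops xa_lock */
	/* lip->li_lsn must not be read here without retaking xa_lock */
#endif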

/*
 * Delete the given item from the AIL.  It must already be in
 * the AIL.
 *
 * Wake up anyone with an lsn less than the item's lsn.  If the item
 * we delete from the AIL is the minimum one, update the tail lsn in
 * the log manager.
 *
 * Clear the IN_AIL flag from the item and reset its lsn to 0.
 *
 * This function must be called with the AIL lock held.  The lock
 * is dropped before returning.
 */
void
xfs_trans_ail_delete(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*dlip;
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;

	if (lip->li_flags & XFS_LI_IN_AIL) {
		mlip = xfs_ail_min(ailp);
		dlip = xfs_ail_delete(ailp, lip);
		ASSERT(dlip == lip);
		xfs_trans_ail_cursor_clear(ailp, dlip);

		lip->li_flags &= ~XFS_LI_IN_AIL;
		lip->li_lsn = 0;

		if (mlip == dlip) {
			mlip = xfs_ail_min(ailp);
			/*
			 * It is not safe to access mlip after the AIL lock
			 * is dropped, so we must get a copy of li_lsn
			 * before we do so.  This is especially important
			 * on 32-bit platforms where accessing and updating
			 * 64-bit values like li_lsn is not atomic.
			 */
			tail_lsn = mlip ? mlip->li_lsn : 0;
			spin_unlock(&ailp->xa_lock);
			xfs_log_move_tail(ailp->xa_mount, tail_lsn);
		} else {
			spin_unlock(&ailp->xa_lock);
		}
	} else {
		/*
		 * If the file system is not being shutdown, we are in
		 * serious trouble if we get to this stage.
		 */
		struct xfs_mount	*mp = ailp->xa_mount;

		spin_unlock(&ailp->xa_lock);
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
		"%s: attempting to delete a log item that is not in the AIL",
					__func__);
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		}
	}
}

/*
 * The active item list (AIL) is a doubly linked list of log
 * items sorted by ascending lsn.  The base of the list is the
 * list head embedded in the struct xfs_ail.  We almost always
 * insert at the end of the list, so on inserts we search from
 * the end of the list to find where the new item belongs.
 */

/*
 * Initialize the AIL and start the AIL push daemon.
 */
int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;
	int		error;

	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
	if (!ailp)
		return ENOMEM;

	ailp->xa_mount = mp;
	INIT_LIST_HEAD(&ailp->xa_ail);
	spin_lock_init(&ailp->xa_lock);
	error = xfsaild_start(ailp);
	if (error)
		goto out_free_ailp;
	mp->m_ail = ailp;
	return 0;

out_free_ailp:
	kmem_free(ailp);
	return error;
}
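
/*
 * Illustrative mount-path pairing for the functions above, assuming
 * the usual XFS convention of positive errno return values.  The
 * label name in the caller is an assumption for the sketch.
 */
#if 0	/* illustrative sketch only, not compiled */
	error = xfs_trans_ail_init(mp);
	if (error)
		goto out_fail;		/* error is positive, e.g. ENOMEM */
	/* ... mount proceeds; the matching teardown path calls ... */
	xfs_trans_ail_destroy(mp);
#endif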

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	xfsaild_stop(ailp);
	kmem_free(ailp);
}

/*
 * Insert the given log item into the AIL.
 * We almost always insert at the end of the list, so on inserts
 * we search from the end of the list to find where the
 * new item belongs.
 */
STATIC void
xfs_ail_insert(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
/* ARGSUSED */
{
	xfs_log_item_t	*next_lip;

	/*
	 * If the list is empty, just insert the item.
	 */
	if (list_empty(&ailp->xa_ail)) {
		list_add(&lip->li_ail, &ailp->xa_ail);
		return;
	}

	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)
			break;
	}

	ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));

	list_add(&lip->li_ail, &next_lip->li_ail);

	xfs_ail_check(ailp, lip);
	return;
}
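
/*
 * Why search backwards: transactions commit in roughly ascending LSN
 * order, so the reverse scan above almost always terminates at the
 * list tail, making the common-case insert O(1).  A sketch, where
 * commit_lsn is an illustrative value at least as large as every LSN
 * already in the AIL:
 */
#if 0	/* illustrative sketch only, not compiled */
	lip->li_lsn = commit_lsn;	/* >= everything currently queued */
	xfs_ail_insert(ailp, lip);	/* one comparison, then list_add */
#endif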

/*
 * Delete the given item from the AIL.  Return a pointer to the item.
 */
/*ARGSUSED*/
STATIC xfs_log_item_t *
xfs_ail_delete(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
/* ARGSUSED */
{
	xfs_ail_check(ailp, lip);

	list_del(&lip->li_ail);

	return lip;
}

/*
 * Return a pointer to the first item in the AIL.
 * If the AIL is empty, then return NULL.
 */
STATIC xfs_log_item_t *
xfs_ail_min(
	struct xfs_ail	*ailp)
/* ARGSUSED */
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the item which follows
 * the given item in the AIL.  If the given item
 * is the last item in the list, then return NULL.
 */
STATIC xfs_log_item_t *
xfs_ail_next(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
/* ARGSUSED */
{
	if (lip->li_ail.next == &ailp->xa_ail)
		return NULL;

	return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*prev_lip;

	if (list_empty(&ailp->xa_ail))
		return;

	/*
	 * Check that the next and previous entries are valid.
	 */
	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);

#ifdef XFS_TRANS_DEBUG
	/*
	 * Walk the list checking lsn ordering, and that every entry has the
	 * XFS_LI_IN_AIL flag set.  This is really expensive, so only do it
	 * when specifically debugging the transaction subsystem.
	 */
	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (&prev_lip->li_ail != &ailp->xa_ail)
			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
		prev_lip = lip;
	}
#endif /* XFS_TRANS_DEBUG */
}
#endif /* DEBUG */
754