xref: /linux/fs/jfs/jfs_txnmgr.c (revision 13abf8130139c2ccd4962a7e5a8902be5e6cb5a7)
/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 *      jfs_txnmgr.c: transaction manager
 *
 * notes:
 * transaction starts with txBegin() and ends with txCommit()
 * or txAbort().
 *
 * tlock is acquired at the time of update;
 * (obviates a scan at commit time for xtree and dtree)
 * tlock and mp point to each other;
 * (no hashlist for mp -> tlock).
 *
 * special cases:
 * tlock on in-memory inode:
 * in-place tlock in the in-memory inode itself;
 * converted to page lock by iWrite() at commit time.
 *
 * tlock during write()/mmap() under anonymous transaction (tid = 0):
 * transferred (?) to transaction at commit time.
 *
 * use the page itself to update allocation maps
 * (obviates intermediate replication of allocation/deallocation data)
 * hold on to mp+lock thru update of maps
 */
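
/*
 * Rough sketch of the life cycle described above (illustrative only:
 * error handling is elided, and in practice the tlock is usually taken
 * deep inside the xtree/dtree code rather than directly by the caller):
 *
 *	tid = txBegin(ip->i_sb, 0);
 *	tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
 *	// ... modify the metapage under the tlock ...
 *	rc = txCommit(tid, 1, &ip, 0);	// or txAbort(tid, 1) on error
 *	txEnd(tid);
 */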

#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dinode.h"
#include "jfs_imap.h"
#include "jfs_dmap.h"
#include "jfs_superblock.h"
#include "jfs_debug.h"

/*
 *      transaction management structures
 */
static struct {
	int freetid;		/* index of a free tid structure */
	int freelock;		/* index of first free lock word */
	wait_queue_head_t freewait;	/* eventlist of free tblock */
	wait_queue_head_t freelockwait;	/* eventlist of free tlock */
	wait_queue_head_t lowlockwait;	/* eventlist of ample tlocks */
	int tlocksInUse;	/* Number of tlocks in use */
	spinlock_t LazyLock;	/* synchronize sync_queue & unlock_queue */
/*	struct tblock *sync_queue; * Transactions waiting for data sync */
	struct list_head unlock_queue;	/* Txns waiting to be released */
	struct list_head anon_list;	/* inodes having anonymous txns */
	struct list_head anon_list2;	/* inodes having anonymous txns
					   that couldn't be sync'ed */
} TxAnchor;

int jfs_tlocks_low;		/* Indicates low number of available tlocks */

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint txBegin;
	uint txBegin_barrier;
	uint txBegin_lockslow;
	uint txBegin_freetid;
	uint txBeginAnon;
	uint txBeginAnon_barrier;
	uint txBeginAnon_lockslow;
	uint txLockAlloc;
	uint txLockAlloc_freelock;
} TxStat;
#endif

static int nTxBlock = -1;	/* number of transaction blocks */
module_param(nTxBlock, int, 0);
MODULE_PARM_DESC(nTxBlock,
		 "Number of transaction blocks (max:65536)");

static int nTxLock = -1;	/* number of transaction locks */
module_param(nTxLock, int, 0);
MODULE_PARM_DESC(nTxLock,
		 "Number of transaction locks (max:65536)");
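
/*
 * Both knobs may be overridden at module load time, e.g. (assuming a
 * modular build):  modprobe jfs nTxBlock=8192 nTxLock=32768
 * Leaving either at -1 lets txInit() below pick a default based on
 * memory size.
 */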

struct tblock *TxBlock;	        /* transaction block table */
static int TxLockLWM;		/* Low water mark for number of txLocks used */
static int TxLockHWM;		/* High water mark for number of txLocks used */
static int TxLockVHWM;		/* Very High water mark */
struct tlock *TxLock;           /* transaction lock table */

/*
 *      transaction management lock
 */
static DEFINE_SPINLOCK(jfsTxnLock);

#define TXN_LOCK()              spin_lock(&jfsTxnLock)
#define TXN_UNLOCK()            spin_unlock(&jfsTxnLock)

#define LAZY_LOCK_INIT()	spin_lock_init(&TxAnchor.LazyLock);
#define LAZY_LOCK(flags)	spin_lock_irqsave(&TxAnchor.LazyLock, flags)
#define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)

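/*
 * jfs_sync_thread_wait is woken from txLockAlloc() below when tlocks run
 * low; jfs_commit_thread_wait and jfs_commit_thread_waking are presumably
 * serviced by the lazy commit thread (see the lazy commit code later in
 * this file).
 */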
DECLARE_WAIT_QUEUE_HEAD(jfs_sync_thread_wait);
DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
static int jfs_commit_thread_waking;

/*
 * Retry logic exists outside these macros to protect from spurious wakeups.
 */
static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(event, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	TXN_UNLOCK();
	schedule();
	current->state = TASK_RUNNING;
	remove_wait_queue(event, &wait);
}

#define TXN_SLEEP(event)\
{\
	TXN_SLEEP_DROP_LOCK(event);\
	TXN_LOCK();\
}
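
/*
 * Typical caller pattern: retest the condition in a loop, since the sleep
 * may end on a spurious wakeup, e.g. (from txLockAlloc() below):
 *
 *	while (!(lid = TxAnchor.freelock))
 *		TXN_SLEEP(&TxAnchor.freelockwait);
 */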

#define TXN_WAKEUP(event) wake_up_all(event)

/*
 *      statistics
 */
static struct {
	tid_t maxtid;		/* 4: biggest tid ever used */
	lid_t maxlid;		/* 4: biggest lid ever used */
	int ntid;		/* 4: # of transactions performed */
	int nlid;		/* 4: # of tlocks acquired */
	int waitlock;		/* 4: # of tlock wait */
} stattx;

/*
 * forward references
 */
static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck, struct commit * cd);
static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck);
static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck);
static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck);
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
		struct tblock * tblk);
static void txForce(struct tblock * tblk);
static int txLog(struct jfs_log * log, struct tblock * tblk,
		struct commit * cd);
static void txUpdateMap(struct tblock * tblk);
static void txRelease(struct tblock * tblk);
static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	   struct tlock * tlck);
static void LogSyncRelease(struct metapage * mp);

/*
 *              transaction block/lock management
 *              ---------------------------------
 */

/*
 * Get a transaction lock from the free list.  If the number in use is
 * greater than the high water mark, wake up the sync daemon.  This should
 * free some anonymous transaction locks.  (TXN_LOCK must be held.)
 */
static lid_t txLockAlloc(void)
{
	lid_t lid;

	INCREMENT(TxStat.txLockAlloc);
	if (!TxAnchor.freelock) {
		INCREMENT(TxStat.txLockAlloc_freelock);
	}

	while (!(lid = TxAnchor.freelock))
		TXN_SLEEP(&TxAnchor.freelockwait);
	TxAnchor.freelock = TxLock[lid].next;
	HIGHWATERMARK(stattx.maxlid, lid);
	if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) {
		jfs_info("txLockAlloc tlocks low");
		jfs_tlocks_low = 1;
		wake_up(&jfs_sync_thread_wait);
	}

	return lid;
}

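/*
 * NAME:        txLockFree()
 *
 * FUNCTION:    return a transaction lock to the free list, waking anyone
 *              waiting for a free tlock or for the in-use count to fall
 *              back below the low water mark.  (TXN_LOCK must be held,
 *              as in txLockAlloc.)
 */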
static void txLockFree(lid_t lid)
{
	TxLock[lid].tid = 0;
	TxLock[lid].next = TxAnchor.freelock;
	TxAnchor.freelock = lid;
	TxAnchor.tlocksInUse--;
	if (jfs_tlocks_low && (TxAnchor.tlocksInUse < TxLockLWM)) {
		jfs_info("txLockFree jfs_tlocks_low no more");
		jfs_tlocks_low = 0;
		TXN_WAKEUP(&TxAnchor.lowlockwait);
	}
	TXN_WAKEUP(&TxAnchor.freelockwait);
}

/*
 * NAME:        txInit()
 *
 * FUNCTION:    initialize transaction management structures
 *
 * RETURN:      0 on success; -ENOMEM if the tables cannot be allocated
 *
 * serialization: single thread at jfs_init()
 */
int txInit(void)
{
	int k, size;
	struct sysinfo si;

	/* Set defaults for nTxLock and nTxBlock if unset */

	if (nTxLock == -1) {
		if (nTxBlock == -1) {
			/* Base default on memory size */
			si_meminfo(&si);
			if (si.totalram > (256 * 1024)) /* 1 GB */
				nTxLock = 64 * 1024;
			else
				nTxLock = si.totalram >> 2;
		} else if (nTxBlock > (8 * 1024))
			nTxLock = 64 * 1024;
		else
			nTxLock = nTxBlock << 3;
	}
	if (nTxBlock == -1)
		nTxBlock = nTxLock >> 3;
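
	/*
	 * Worked example (assuming 4K pages): on a 512 MB machine,
	 * si.totalram is 131072 pages, so the defaults come out to
	 * nTxLock = 131072 >> 2 = 32768 and nTxBlock = 32768 >> 3 = 4096;
	 * anything over 1 GB gets the full nTxLock = 64 * 1024.
	 */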

	/* Verify tunable parameters */
	if (nTxBlock < 16)
		nTxBlock = 16;	/* No one should set it this low */
	if (nTxBlock > 65536)
		nTxBlock = 65536;
	if (nTxLock < 256)
		nTxLock = 256;	/* No one should set it this low */
	if (nTxLock > 65536)
		nTxLock = 65536;

	printk(KERN_INFO "JFS: nTxBlock = %d, nTxLock = %d\n",
	       nTxBlock, nTxLock);
	/*
	 * initialize transaction block (tblock) table
	 *
	 * transaction id (tid) = tblock index
	 * tid = 0 is reserved.
	 */
	TxLockLWM = (nTxLock * 4) / 10;
	TxLockHWM = (nTxLock * 7) / 10;
	TxLockVHWM = (nTxLock * 8) / 10;
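	/*
	 * With nTxLock = 32768, for instance, this gives TxLockLWM = 13107,
	 * TxLockHWM = 22937 and TxLockVHWM = 26214.
	 */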

	size = sizeof(struct tblock) * nTxBlock;
	TxBlock = (struct tblock *) vmalloc(size);
	if (TxBlock == NULL)
		return -ENOMEM;

	for (k = 1; k < nTxBlock - 1; k++) {
		TxBlock[k].next = k + 1;
		init_waitqueue_head(&TxBlock[k].gcwait);
		init_waitqueue_head(&TxBlock[k].waitor);
	}
	TxBlock[k].next = 0;
	init_waitqueue_head(&TxBlock[k].gcwait);
	init_waitqueue_head(&TxBlock[k].waitor);

	TxAnchor.freetid = 1;
	init_waitqueue_head(&TxAnchor.freewait);

	stattx.maxtid = 1;	/* statistics */

	/*
	 * initialize transaction lock (tlock) table
	 *
	 * transaction lock id = tlock index
	 * tlock id = 0 is reserved.
	 */
	size = sizeof(struct tlock) * nTxLock;
	TxLock = (struct tlock *) vmalloc(size);
	if (TxLock == NULL) {
		vfree(TxBlock);
		return -ENOMEM;
	}

	/* initialize tlock table */
	for (k = 1; k < nTxLock - 1; k++)
		TxLock[k].next = k + 1;
	TxLock[k].next = 0;
	init_waitqueue_head(&TxAnchor.freelockwait);
	init_waitqueue_head(&TxAnchor.lowlockwait);

	TxAnchor.freelock = 1;
	TxAnchor.tlocksInUse = 0;
	INIT_LIST_HEAD(&TxAnchor.anon_list);
	INIT_LIST_HEAD(&TxAnchor.anon_list2);

	LAZY_LOCK_INIT();
	INIT_LIST_HEAD(&TxAnchor.unlock_queue);

	stattx.maxlid = 1;	/* statistics */

	return 0;
}

/*
 * NAME:        txExit()
 *
 * FUNCTION:    clean up when module is unloaded
 */
void txExit(void)
{
	vfree(TxLock);
	TxLock = NULL;
	vfree(TxBlock);
	TxBlock = NULL;
}

/*
 * NAME:        txBegin()
 *
 * FUNCTION:    start a transaction.
 *
 * PARAMETER:   sb	- superblock
 *              flag	- force for nested tx;
 *
 * RETURN:	tid	- transaction id
 *
 * note: the force flag allows a tx to be started for a nested tx,
 * to prevent deadlock on the logsync barrier;
 */
tid_t txBegin(struct super_block *sb, int flag)
{
	tid_t t;
	struct tblock *tblk;
	struct jfs_log *log;

	jfs_info("txBegin: flag = 0x%x", flag);
	log = JFS_SBI(sb)->log;

	TXN_LOCK();

	INCREMENT(TxStat.txBegin);

      retry:
	if (!(flag & COMMIT_FORCE)) {
		/*
		 * synchronize with logsync barrier
		 */
		if (test_bit(log_SYNCBARRIER, &log->flag) ||
		    test_bit(log_QUIESCE, &log->flag)) {
			INCREMENT(TxStat.txBegin_barrier);
			TXN_SLEEP(&log->syncwait);
			goto retry;
		}
	}
	if (flag == 0) {
		/*
		 * Don't begin transaction if we're getting starved for tlocks
		 * unless COMMIT_FORCE or COMMIT_INODE (which may ultimately
		 * free tlocks)
		 */
		if (TxAnchor.tlocksInUse > TxLockVHWM) {
			INCREMENT(TxStat.txBegin_lockslow);
			TXN_SLEEP(&TxAnchor.lowlockwait);
			goto retry;
		}
	}

	/*
	 * allocate transaction id/block
	 */
	if ((t = TxAnchor.freetid) == 0) {
		jfs_info("txBegin: waiting for free tid");
		INCREMENT(TxStat.txBegin_freetid);
		TXN_SLEEP(&TxAnchor.freewait);
		goto retry;
	}

	tblk = tid_to_tblock(t);

	if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
		/* Don't let a non-forced transaction take the last tblk */
		jfs_info("txBegin: waiting for free tid");
		INCREMENT(TxStat.txBegin_freetid);
		TXN_SLEEP(&TxAnchor.freewait);
		goto retry;
	}

	TxAnchor.freetid = tblk->next;

	/*
	 * initialize transaction
	 */

	/*
	 * We can't zero the whole thing or we screw up another thread being
	 * awakened after sleeping on tblk->waitor
	 *
	 * memset(tblk, 0, sizeof(struct tblock));
	 */
	tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;

	tblk->sb = sb;
	++log->logtid;
	tblk->logtid = log->logtid;

	++log->active;

	HIGHWATERMARK(stattx.maxtid, t);	/* statistics */
	INCREMENT(stattx.ntid);	/* statistics */

	TXN_UNLOCK();

	jfs_info("txBegin: returning tid = %d", t);

	return t;
}

/*
 * NAME:        txBeginAnon()
 *
 * FUNCTION:    start an anonymous transaction.
 *		Blocks during a logsync barrier or when available tlocks
 *		are low, to prevent anonymous tlocks from depleting the
 *		supply.
 *
 * PARAMETER:   sb	- superblock
 *
 * RETURN:	none
 */
void txBeginAnon(struct super_block *sb)
{
	struct jfs_log *log;

	log = JFS_SBI(sb)->log;

	TXN_LOCK();
	INCREMENT(TxStat.txBeginAnon);

      retry:
	/*
	 * synchronize with logsync barrier
	 */
	if (test_bit(log_SYNCBARRIER, &log->flag) ||
	    test_bit(log_QUIESCE, &log->flag)) {
		INCREMENT(TxStat.txBeginAnon_barrier);
		TXN_SLEEP(&log->syncwait);
		goto retry;
	}

	/*
	 * Don't begin transaction if we're getting starved for tlocks
	 */
	if (TxAnchor.tlocksInUse > TxLockVHWM) {
		INCREMENT(TxStat.txBeginAnon_lockslow);
		TXN_SLEEP(&TxAnchor.lowlockwait);
		goto retry;
	}
	TXN_UNLOCK();
}
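
/*
 * Note that txBeginAnon() returns nothing and has no matching "end": it
 * only throttles the caller, who then takes tlocks under the anonymous
 * tid 0 (e.g. from the write()/mmap() paths mentioned at the top of this
 * file).
 */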

/*
 *      txEnd()
 *
 * function: free specified transaction block.
 *
 *      logsync barrier processing: when the last active transaction
 *      ends while a logsync barrier is in effect, force a log sync
 *      point and wake up the transactions waiting on the barrier.
 *
 * serialization: TXN_LOCK is held for most of the work.
 */
void txEnd(tid_t tid)
{
	struct tblock *tblk = tid_to_tblock(tid);
	struct jfs_log *log;

	jfs_info("txEnd: tid = %d", tid);
	TXN_LOCK();

	/*
	 * wakeup transactions waiting on the page locked
	 * by the current transaction
	 */
	TXN_WAKEUP(&tblk->waitor);

	log = JFS_SBI(tblk->sb)->log;

	/*
	 * Lazy commit thread can't free this guy until we mark it UNLOCKED,
	 * otherwise, we would be left with a transaction that may have been
	 * reused.
	 *
	 * Lazy commit thread will turn off tblkGC_LAZY before calling this
	 * routine.
	 */
	if (tblk->flag & tblkGC_LAZY) {
		jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
		TXN_UNLOCK();

		spin_lock_irq(&log->gclock);	// LOGGC_LOCK
		tblk->flag |= tblkGC_UNLOCKED;
		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
		return;
	}

	jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);

	assert(tblk->next == 0);

	/*
	 * insert tblock back on freelist
	 */
	tblk->next = TxAnchor.freetid;
	TxAnchor.freetid = tid;

	/*
	 * mark the tblock not active
	 */
	if (--log->active == 0) {
		clear_bit(log_FLUSH, &log->flag);

		/*
		 * synchronize with logsync barrier
		 */
		if (test_bit(log_SYNCBARRIER, &log->flag)) {
			TXN_UNLOCK();

			/* write dirty metadata & forward log syncpt */
			jfs_syncpt(log, 1);

			jfs_info("log barrier off: 0x%x", log->lsn);

			/* enable new transactions start */
			clear_bit(log_SYNCBARRIER, &log->flag);

			/* wakeup all waitors for logsync barrier */
			TXN_WAKEUP(&log->syncwait);

			goto wakeup;
		}
	}

	TXN_UNLOCK();
wakeup:
	/*
	 * wakeup all waitors for a free tblock
	 */
	TXN_WAKEUP(&TxAnchor.freewait);
}

/*
 *      txLock()
 *
 * function: acquire a transaction lock on the specified <mp>
 *
 * parameter:   tid	- transaction id
 *              ip	- inode of the page
 *              mp	- metapage to lock
 *              type	- lock type (tlckXTREE, tlckDTREE, ...)
 *
 * return:      the transaction lock, or NULL if the caller had to wait
 *              for a lock held by another transaction (the page was
 *              released and the operation must be retried)
 *
 * serialization:
 */
struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
		     int type)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	int dir_xtree = 0;
	lid_t lid;
	tid_t xtid;
	struct tlock *tlck;
	struct xtlock *xtlck;
	struct linelock *linelock;
	xtpage_t *p;
	struct tblock *tblk;

	TXN_LOCK();

	if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
	    !(mp->xflag & COMMIT_PAGE)) {
		/*
		 * Directory inode is special.  It can have both an xtree tlock
		 * and a dtree tlock associated with it.
		 */
		dir_xtree = 1;
		lid = jfs_ip->xtlid;
	} else
		lid = mp->lid;

	/* is page not locked by a transaction ? */
	if (lid == 0)
		goto allocateLock;

	jfs_info("txLock: tid:%d ip:0x%p mp:0x%p lid:%d", tid, ip, mp, lid);

	/* is page locked by the requester transaction ? */
	tlck = lid_to_tlock(lid);
	if ((xtid = tlck->tid) == tid) {
		TXN_UNLOCK();
		goto grantLock;
	}

	/*
	 * is page locked by anonymous transaction/lock ?
	 *
	 * (page update without transaction (i.e., file write) is
	 * locked under anonymous transaction tid = 0:
	 * anonymous tlocks maintained on anonymous tlock list of
	 * the inode of the page and available to all anonymous
	 * transactions until txCommit() time at which point
	 * they are transferred to the transaction tlock list of
	 * the committing transaction of the inode)
	 */
	if (xtid == 0) {
		tlck->tid = tid;
		TXN_UNLOCK();
		tblk = tid_to_tblock(tid);
		/*
		 * The order of the tlocks in the transaction is important
		 * (during truncate, child xtree pages must be freed before
		 * parent's tlocks change the working map).
		 * Take tlock off anonymous list and add to tail of
		 * transaction list
		 *
		 * Note:  We really need to get rid of the tid & lid and
		 * use list_head's.  This code is getting UGLY!
		 */
		if (jfs_ip->atlhead == lid) {
			if (jfs_ip->atltail == lid) {
				/* only anonymous txn.
				 * Remove from anon_list
				 */
				TXN_LOCK();
				list_del_init(&jfs_ip->anon_inode_list);
				TXN_UNLOCK();
			}
			jfs_ip->atlhead = tlck->next;
		} else {
			lid_t last;
			for (last = jfs_ip->atlhead;
			     lid_to_tlock(last)->next != lid;
			     last = lid_to_tlock(last)->next) {
				assert(last);
			}
			lid_to_tlock(last)->next = tlck->next;
			if (jfs_ip->atltail == lid)
				jfs_ip->atltail = last;
		}

		/* insert the tlock at tail of transaction tlock list */

		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;

		goto grantLock;
	}

	goto waitLock;

	/*
	 * allocate a tlock
	 */
      allocateLock:
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	/*
	 * initialize tlock
	 */
	tlck->tid = tid;

	TXN_UNLOCK();

	/* mark tlock for meta-data page */
	if (mp->xflag & COMMIT_PAGE) {

		tlck->flag = tlckPAGELOCK;

		/* mark the page dirty and nohomeok */
		metapage_nohomeok(mp);

		jfs_info("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p",
			 mp, mp->nohomeok, tid, tlck);

		/* if anonymous transaction, and buffer is on the group
		 * commit synclist, mark inode to show this.  This will
		 * prevent the buffer from being marked nohomeok for too
		 * long a time.
		 */
		if ((tid == 0) && mp->lsn)
			set_cflag(COMMIT_Synclist, ip);
	}
	/* mark tlock for in-memory inode */
	else
		tlck->flag = tlckINODELOCK;

	tlck->type = 0;

	/* bind the tlock and the page */
	tlck->ip = ip;
	tlck->mp = mp;
	if (dir_xtree)
		jfs_ip->xtlid = lid;
	else
		mp->lid = lid;

	/*
	 * enqueue transaction lock to transaction/inode
	 */
	/* insert the tlock at tail of transaction tlock list */
	if (tid) {
		tblk = tid_to_tblock(tid);
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;
	}
	/* anonymous transaction:
	 * insert the tlock at head of inode anonymous tlock list
	 */
	else {
		tlck->next = jfs_ip->atlhead;
		jfs_ip->atlhead = lid;
		if (tlck->next == 0) {
			/* This inode's first anonymous transaction */
			jfs_ip->atltail = lid;
			TXN_LOCK();
			list_add_tail(&jfs_ip->anon_inode_list,
				      &TxAnchor.anon_list);
			TXN_UNLOCK();
		}
	}

	/* initialize type dependent area for linelock */
	linelock = (struct linelock *) & tlck->lock;
	linelock->next = 0;
	linelock->flag = tlckLINELOCK;
	linelock->maxcnt = TLOCKSHORT;
	linelock->index = 0;

	switch (type & tlckTYPE) {
	case tlckDTREE:
		linelock->l2linesize = L2DTSLOTSIZE;
		break;

	case tlckXTREE:
		linelock->l2linesize = L2XTSLOTSIZE;

		xtlck = (struct xtlock *) linelock;
		xtlck->header.offset = 0;
		xtlck->header.length = 2;

		if (type & tlckNEW) {
			xtlck->lwm.offset = XTENTRYSTART;
		} else {
			if (mp->xflag & COMMIT_PAGE)
				p = (xtpage_t *) mp->data;
			else
				p = &jfs_ip->i_xtroot;
			xtlck->lwm.offset =
			    le16_to_cpu(p->header.nextindex);
		}
		xtlck->lwm.length = 0;	/* ! */
		xtlck->twm.offset = 0;
		xtlck->hwm.offset = 0;

		xtlck->index = 2;
		break;

	case tlckINODE:
		linelock->l2linesize = L2INODESLOTSIZE;
		break;

	case tlckDATA:
		linelock->l2linesize = L2DATASLOTSIZE;
		break;

	default:
		jfs_err("UFO tlock:0x%p", tlck);
	}

	/*
	 * update tlock vector
	 */
      grantLock:
	tlck->type |= type;

	return tlck;

	/*
	 * page is being locked by another transaction:
	 */
      waitLock:
	/* Only locks on ipimap or ipaimap should reach here */
	/* assert(jfs_ip->fileset == AGGREGATE_I); */
	if (jfs_ip->fileset != AGGREGATE_I) {
		jfs_err("txLock: trying to lock locked page!");
		dump_mem("ip", ip, sizeof(struct inode));
		dump_mem("mp", mp, sizeof(struct metapage));
		dump_mem("Locker's tblk", tid_to_tblock(tid),
			 sizeof(struct tblock));
		dump_mem("Tlock", tlck, sizeof(struct tlock));
		BUG();
	}
	INCREMENT(stattx.waitlock);	/* statistics */
	TXN_UNLOCK();
	release_metapage(mp);
	TXN_LOCK();
	xtid = tlck->tid;	/* reacquire after dropping TXN_LOCK */

	jfs_info("txLock: in waitLock, tid = %d, xtid = %d, lid = %d",
		 tid, xtid, lid);

	/* Recheck everything since dropping TXN_LOCK */
	if (xtid && (tlck->mp == mp) && (mp->lid == lid))
		TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
	else
		TXN_UNLOCK();
	jfs_info("txLock: awakened     tid = %d, lid = %d", tid, lid);

	return NULL;
}
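
/*
 * A NULL return from txLock() means the caller slept waiting on a page
 * held by another transaction; the metapage reference was released before
 * sleeping, so the caller must re-acquire the page and retry the
 * operation.
 */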

/*
 * NAME:        txRelease()
 *
 * FUNCTION:    Release buffers associated with transaction locks, but don't
 *		mark homeok yet.  This allows other transactions to modify
 *		buffers, but won't let them go to disk until the commit
 *		record actually gets written.
 *
 * PARAMETER:
 *              tblk    - transaction block whose buffers are released
 *
 * RETURN:      none
 */
static void txRelease(struct tblock * tblk)
{
	struct metapage *mp;
	lid_t lid;
	struct tlock *tlck;

	TXN_LOCK();

	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);
		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);
			mp->lid = 0;
		}
	}

	/*
	 * wakeup transactions waiting on a page locked
	 * by the current transaction
	 */
	TXN_WAKEUP(&tblk->waitor);

	TXN_UNLOCK();
}

/*
 * NAME:        txUnlock()
 *
 * FUNCTION:    Initiates pageout of pages modified by tid in journalled
 *              objects and frees their lockwords.
 */
static void txUnlock(struct tblock * tblk)
{
	struct tlock *tlck;
	struct linelock *linelock;
	lid_t lid, next, llid, k;
	struct metapage *mp;
	struct jfs_log *log;
	int difft, diffp;
	unsigned long flags;

	jfs_info("txUnlock: tblk = 0x%p", tblk);
	log = JFS_SBI(tblk->sb)->log;

	/*
	 * mark page under tlock homeok (its log has been written):
	 */
	for (lid = tblk->next; lid; lid = next) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;

		jfs_info("unlocking lid = %d, tlck = 0x%p", lid, tlck);

		/* unbind page from tlock */
		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);

			/* hold buffer
			 */
			hold_metapage(mp);

			assert(mp->nohomeok > 0);
			_metapage_homeok(mp);

			/* inherit younger/larger clsn */
			LOGSYNC_LOCK(log, flags);
			if (mp->clsn) {
				logdiff(difft, tblk->clsn, log);
				logdiff(diffp, mp->clsn, log);
				if (difft > diffp)
					mp->clsn = tblk->clsn;
			} else
				mp->clsn = tblk->clsn;
			LOGSYNC_UNLOCK(log, flags);

			assert(!(tlck->flag & tlckFREEPAGE));

			put_metapage(mp);
		}

		/* insert tlock, and linelock(s) of the tlock if any,
		 * at head of freelist
		 */
		TXN_LOCK();

		llid = ((struct linelock *) & tlck->lock)->next;
		while (llid) {
			linelock = (struct linelock *) lid_to_tlock(llid);
			k = linelock->next;
			txLockFree(llid);
			llid = k;
		}
		txLockFree(lid);

		TXN_UNLOCK();
	}
	tblk->next = tblk->last = 0;

	/*
	 * remove tblock from logsynclist
	 * (allocation map pages inherited the lsn of tblk and
	 * have been inserted in the logsync list at txUpdateMap())
	 */
	if (tblk->lsn) {
		LOGSYNC_LOCK(log, flags);
		log->count--;
		list_del(&tblk->synclist);
		LOGSYNC_UNLOCK(log, flags);
	}
}

/*
 *      txMaplock()
 *
 * function: allocate a transaction lock for freed page/entry;
 *      for freed page, maplock is used as xtlock/dtlock type;
 */
struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	lid_t lid;
	struct tblock *tblk;
	struct tlock *tlck;
	struct maplock *maplock;

	TXN_LOCK();

	/*
	 * allocate a tlock
	 */
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	/*
	 * initialize tlock
	 */
	tlck->tid = tid;

	/* bind the tlock and the object */
	tlck->flag = tlckINODELOCK;
	tlck->ip = ip;
	tlck->mp = NULL;

	tlck->type = type;

	/*
	 * enqueue transaction lock to transaction/inode
	 */
	/* insert the tlock at tail of transaction tlock list */
	if (tid) {
		tblk = tid_to_tblock(tid);
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;
	}
	/* anonymous transaction:
	 * insert the tlock at head of inode anonymous tlock list
	 */
	else {
		tlck->next = jfs_ip->atlhead;
		jfs_ip->atlhead = lid;
		if (tlck->next == 0) {
			/* This inode's first anonymous transaction */
			jfs_ip->atltail = lid;
			list_add_tail(&jfs_ip->anon_inode_list,
				      &TxAnchor.anon_list);
		}
	}

	TXN_UNLOCK();

	/* initialize type dependent area for maplock */
	maplock = (struct maplock *) & tlck->lock;
	maplock->next = 0;
	maplock->maxcnt = 0;
	maplock->index = 0;

	return tlck;
}

/*
 *      txLinelock()
 *
 * function: allocate a transaction lock for log vector list
 */
struct linelock *txLinelock(struct linelock * tlock)
{
	lid_t lid;
	struct tlock *tlck;
	struct linelock *linelock;

	TXN_LOCK();

	/* allocate a TxLock structure */
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	TXN_UNLOCK();

	/* initialize linelock */
	linelock = (struct linelock *) tlck;
	linelock->next = 0;
	linelock->flag = tlckLINELOCK;
	linelock->maxcnt = TLOCKLONG;
	linelock->index = 0;

	/* append linelock after tlock */
	linelock->next = tlock->next;
	tlock->next = lid;

	return linelock;
}
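
/*
 * txLinelock() is used when a tlock's in-line linelock fills up (index
 * reaches maxcnt): the overflow linelock is chained off the original
 * through its next field, and txUnlock() above frees the whole chain.
 */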

/*
 *              transaction commit management
 *              -----------------------------
 */

/*
 * NAME:        txCommit()
 *
 * FUNCTION:    commit the changes to the objects specified in
 *              clist.  For journalled segments only the
 *              changes of the caller are committed, i.e. by tid.
 *              for non-journalled segments the data are flushed to
 *              disk and then the change to the disk inode and indirect
 *              blocks committed (so blocks newly allocated to the
 *              segment will be made a part of the segment atomically).
 *
 *              all of the segments specified in clist must be in
 *              one file system. no more than 6 segments are needed
 *              to handle all unix svcs.
 *
 *              if the i_nlink field (i.e. disk inode link count)
 *              is zero, and the type of inode is a regular file or
 *              directory, or symbolic link, the inode is truncated
 *              to zero length. the truncation is committed but the
 *              VM resources are unaffected until it is closed (see
 *              iput and iclose).
 *
 * PARAMETER:   tid	- transaction identifier
 *              nip	- number of inodes to commit
 *              iplist	- list of inodes to commit
 *              flag	- commit flags (COMMIT_FORCE, COMMIT_SYNC, ...)
 *
 * RETURN:      0 on success; errno on failure (e.g. -EROFS on a
 *              read-only file system)
 *
 * serialization:
 *              on entry the inode lock on each segment is assumed
 *              to be held.
 *
 * i/o error:
 */
int txCommit(tid_t tid,		/* transaction identifier */
	     int nip,		/* number of inodes to commit */
	     struct inode **iplist,	/* list of inodes to commit */
	     int flag)
{
	int rc = 0;
	struct commit cd;
	struct jfs_log *log;
	struct tblock *tblk;
	struct lrd *lrd;
	int lsn;
	struct inode *ip;
	struct jfs_inode_info *jfs_ip;
	int k, n;
	ino_t top;
	struct super_block *sb;

	jfs_info("txCommit, tid = %d, flag = %d", tid, flag);
	/* is read-only file system ? */
	if (isReadOnly(iplist[0])) {
		rc = -EROFS;
		goto TheEnd;
	}

	sb = cd.sb = iplist[0]->i_sb;
	cd.tid = tid;

	if (tid == 0)
		tid = txBegin(sb, 0);
	tblk = tid_to_tblock(tid);

	/*
	 * initialize commit structure
	 */
	log = JFS_SBI(sb)->log;
	cd.log = log;

	/* initialize log record descriptor in commit */
	lrd = &cd.lrd;
	lrd->logtid = cpu_to_le32(tblk->logtid);
	lrd->backchain = 0;

	tblk->xflag |= flag;

	if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
		tblk->xflag |= COMMIT_LAZY;
	/*
	 *      prepare non-journaled objects for commit
	 *
	 * flush data pages of non-journaled file
	 * to prevent the file from getting non-initialized disk blocks
	 * in case of crash.
	 * (new blocks - )
	 */
	cd.iplist = iplist;
	cd.nip = nip;

	/*
	 *      acquire transaction lock on (on-disk) inodes
	 *
	 * update on-disk inode from in-memory inode
	 * acquiring transaction locks for AFTER records
	 * on the on-disk inode of file object
	 *
	 * sort the inodes array by inode number in descending order
	 * to prevent deadlock when acquiring transaction lock
	 * of on-disk inodes on multiple on-disk inode pages by
	 * multiple concurrent transactions
	 */
	for (k = 0; k < cd.nip; k++) {
		top = (cd.iplist[k])->i_ino;
		for (n = k + 1; n < cd.nip; n++) {
			ip = cd.iplist[n];
			if (ip->i_ino > top) {
				top = ip->i_ino;
				cd.iplist[n] = cd.iplist[k];
				cd.iplist[k] = ip;
			}
		}

		ip = cd.iplist[k];
		jfs_ip = JFS_IP(ip);

		/*
		 * BUGBUG - This code has temporarily been removed.  The
		 * intent is to ensure that any file data is written before
		 * the metadata is committed to the journal.  This prevents
		 * uninitialized data from appearing in a file after the
		 * journal has been replayed.  (The uninitialized data
		 * could be sensitive data removed by another user.)
		 *
		 * The problem now is that we are holding the IWRITELOCK
		 * on the inode, and calling filemap_fdatawrite on an
		 * unmapped page will cause a deadlock in jfs_get_block.
		 *
		 * The long term solution is to pare down the use of
		 * IWRITELOCK.  We are currently holding it too long.
		 * We could also be smarter about which data pages need
		 * to be written before the transaction is committed and
		 * when we don't need to worry about it at all.
		 *
		 * if ((!S_ISDIR(ip->i_mode))
		 *    && (tblk->flag & COMMIT_DELETE) == 0) {
		 *	filemap_fdatawrite(ip->i_mapping);
		 *	filemap_fdatawait(ip->i_mapping);
		 * }
		 */

		/*
		 * Mark inode as not dirty.  It will still be on the dirty
		 * inode list, but we'll know not to commit it again unless
		 * it gets marked dirty again
		 */
		clear_cflag(COMMIT_Dirty, ip);

		/* inherit anonymous tlock(s) of inode */
		if (jfs_ip->atlhead) {
			lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
			tblk->next = jfs_ip->atlhead;
			if (!tblk->last)
				tblk->last = jfs_ip->atltail;
			jfs_ip->atlhead = jfs_ip->atltail = 0;
			TXN_LOCK();
			list_del_init(&jfs_ip->anon_inode_list);
			TXN_UNLOCK();
		}

		/*
		 * acquire transaction lock on on-disk inode page
		 * (become first tlock of the tblk's tlock list)
		 */
		if (((rc = diWrite(tid, ip))))
			goto out;
	}

	/*
	 *      write log records from transaction locks
	 *
	 * txUpdateMap() resets XAD_NEW in XAD.
	 */
	if ((rc = txLog(log, tblk, &cd)))
		goto TheEnd;

	/*
	 * Ensure that inode isn't reused before
	 * lazy commit thread finishes processing
	 */
	if (tblk->xflag & COMMIT_DELETE) {
		atomic_inc(&tblk->u.ip->i_count);
		/*
		 * Avoid a rare deadlock
		 *
		 * If the inode is locked, we may be blocked in
		 * jfs_commit_inode.  If so, we don't want the
		 * lazy_commit thread doing the last iput() on the inode
		 * since that may block on the locked inode.  Instead,
		 * commit the transaction synchronously, so the last iput
		 * will be done by the calling thread (or later)
		 */
		if (tblk->u.ip->i_state & I_LOCK)
			tblk->xflag &= ~COMMIT_LAZY;
	}

	ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
	       ((tblk->u.ip->i_nlink == 0) &&
		!test_cflag(COMMIT_Nolink, tblk->u.ip)));

	/*
	 *      write COMMIT log record
	 */
	lrd->type = cpu_to_le16(LOG_COMMIT);
	lrd->length = 0;
	lsn = lmLog(log, tblk, lrd, NULL);

	lmGroupCommit(log, tblk);

	/*
	 *      - transaction is now committed -
	 */

	/*
	 * force pages in careful update
	 * (imap addressing structure update)
	 */
	if (flag & COMMIT_FORCE)
		txForce(tblk);

	/*
	 *      update allocation map.
	 *
	 * update inode allocation map and inode:
	 * free pager lock on memory object of inode if any.
	 * update  block allocation map.
	 *
	 * txUpdateMap() resets XAD_NEW in XAD.
	 */
	if (tblk->xflag & COMMIT_FORCE)
		txUpdateMap(tblk);

	/*
	 *      free transaction locks and pageout/free pages
	 */
	txRelease(tblk);

	if ((tblk->flag & tblkGC_LAZY) == 0)
		txUnlock(tblk);


	/*
	 *      reset in-memory object state
	 */
	for (k = 0; k < cd.nip; k++) {
		ip = cd.iplist[k];
		jfs_ip = JFS_IP(ip);

		/*
		 * reset in-memory inode state
		 */
		jfs_ip->bxflag = 0;
		jfs_ip->blid = 0;
	}

      out:
	if (rc != 0)
		txAbort(tid, 1);

      TheEnd:
	jfs_info("txCommit: tid = %d, returning %d", tid, rc);
	return rc;
}

/*
 * NAME:        txLog()
 *
 * FUNCTION:    Writes AFTER log records for all lines modified
 *              by tid for segments specified by inodes in comdata.
 *              Code assumes only WRITELOCKS are recorded in lockwords.
 *
 * PARAMETERS:  log	- log to write to
 *              tblk	- transaction block
 *              cd	- commit descriptor holding the log record
 *
 * RETURN :     always 0 at this revision (errors from the per-type
 *              log routines are not propagated)
 */
static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
{
	int rc = 0;
	struct inode *ip;
	lid_t lid;
	struct tlock *tlck;
	struct lrd *lrd = &cd->lrd;

	/*
	 * write log record(s) for each tlock of transaction,
	 */
	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);

		tlck->flag |= tlckLOG;

		/* initialize lrd common */
		ip = tlck->ip;
		lrd->aggregate = cpu_to_le32(JFS_SBI(ip->i_sb)->aggregate);
		lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
		lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);

		/* write log record of page from the tlock */
		switch (tlck->type & tlckTYPE) {
		case tlckXTREE:
			xtLog(log, tblk, lrd, tlck);
			break;

		case tlckDTREE:
			dtLog(log, tblk, lrd, tlck);
			break;

		case tlckINODE:
			diLog(log, tblk, lrd, tlck, cd);
			break;

		case tlckMAP:
			mapLog(log, tblk, lrd, tlck);
			break;

		case tlckDATA:
			dataLog(log, tblk, lrd, tlck);
			break;

		default:
			jfs_err("UFO tlock:0x%p", tlck);
		}
	}

	return rc;
}

/*
 *      diLog()
 *
 * function:    log inode tlock and format maplock to update bmap;
 */
static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	  struct tlock * tlck, struct commit * cd)
{
	int rc = 0;
	struct metapage *mp;
	pxd_t *pxd;
	struct pxd_lock *pxdlock;

	mp = tlck->mp;

	/* initialize as REDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	/*
	 *      inode after image
	 */
	if (tlck->type & tlckENTRY) {
		/* log after-image for logredo(): */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	} else if (tlck->type & tlckFREE) {
		/*
		 *      free inode extent
		 *
		 * (pages of the freed inode extent have been invalidated and
		 * a maplock for free of the extent has been formatted at
		 * txLock() time);
		 *
		 * the tlock had been acquired on the inode allocation map page
		 * (iag) that specifies the freed extent, even though the map
		 * page is not itself logged, to prevent pageout of the map
		 * page before the log;
		 */

		/* log LOG_NOREDOINOEXT of the freed inode extent for
		 * logredo() to start NoRedoPage filters, and to update
		 * imap and bmap for free of the extent;
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
		/*
		 * For the LOG_NOREDOINOEXT record, we need to pass the
		 * IAG number and inode extent index (within that IAG)
		 * from which the extent is being released.  These have
		 * been passed to us in iplist[1] and iplist[2].
		 */
		lrd->log.noredoinoext.iagnum =
		    cpu_to_le32((u32) (size_t) cd->iplist[1]);
		lrd->log.noredoinoext.inoext_idx =
		    cpu_to_le32((u32) (size_t) cd->iplist[2]);

		pxdlock = (struct pxd_lock *) & tlck->lock;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	} else
		jfs_err("diLog: UFO type tlck:0x%p", tlck);
#ifdef  _JFS_WIP
	/*
	 *      alloc/free external EA extent
	 *
	 * a maplock for txUpdateMap() to update bPWMAP for alloc/free
	 * of the extent has been formatted at txLock() time;
	 */
	else {
		assert(tlck->type & tlckEA);

		/* log LOG_UPDATEMAP for logredo() to update bmap for
		 * alloc of new (and free of old) external EA extent;
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		nlock = pxdlock->index;
		for (i = 0; i < nlock; i++, pxdlock++) {
			if (pxdlock->flag & mlckALLOCPXD)
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_ALLOCPXD);
			else
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
		}

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;
	}
#endif				/* _JFS_WIP */

	return rc;
}

/*
 *      dataLog()
 *
 * function:    log data tlock
 */
static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	    struct tlock * tlck)
{
	struct metapage *mp;
	pxd_t *pxd;

	mp = tlck->mp;

	/* initialize as REDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	/* log after-image for logredo(): */
	lrd->type = cpu_to_le16(LOG_REDOPAGE);

	if (jfs_dirtable_inline(tlck->ip)) {
		/*
		 * The table has been truncated, we must have deleted
		 * the last entry, so don't bother logging this
		 */
		mp->lid = 0;
		grab_metapage(mp);
		metapage_homeok(mp);
		discard_metapage(mp);
		tlck->mp = NULL;
		return 0;
	}

	PXDaddress(pxd, mp->index);
	PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);

	lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

	/* mark page as homeward bound */
	tlck->flag |= tlckWRITEPAGE;

	return 0;
}

/*
 *      dtLog()
 *
 * function:    log dtree tlock and format maplock to update bmap;
 */
static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	   struct tlock * tlck)
{
	struct metapage *mp;
	struct pxd_lock *pxdlock;
	pxd_t *pxd;

	mp = tlck->mp;

	/* initialize as REDOPAGE/NOREDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	if (tlck->type & tlckBTROOT)
		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);

	/*
	 *      page extension via relocation: entry insertion;
	 *      page extension in-place: entry insertion;
	 *      new right page from page split, reinitialized in-line
	 *      root from root page split: entry insertion;
	 */
	if (tlck->type & (tlckNEW | tlckEXTEND)) {
		/* log after-image of the new page for logredo():
		 * mark log (LOG_NEW) for logredo() to initialize
		 * freelist and update bmap for alloc of the new page;
		 */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		if (tlck->type & tlckEXTEND)
			lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
		else
			lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bPMAP for
		 * alloc of the new page;
		 */
		if (tlck->type & tlckBTROOT)
			return;
		tlck->flag |= tlckUPDATEMAP;
		pxdlock = (struct pxd_lock *) & tlck->lock;
		pxdlock->flag = mlckALLOCPXD;
		pxdlock->pxd = *pxd;

		pxdlock->index = 1;

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
		return;
	}

	/*
	 *      entry insertion/deletion,
	 *      sibling page link update (old right page before split);
	 */
	if (tlck->type & (tlckENTRY | tlckRELINK)) {
		/* log after-image for logredo(): */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
		return;
	}

	/*
	 *      page deletion: page has been invalidated
	 *      page relocation: source extent
	 *
	 *      a maplock for free of the page has been formatted
	 *      at txLock() time);
	 */
	if (tlck->type & (tlckFREE | tlckRELOCATE)) {
		/* log LOG_NOREDOPAGE of the deleted page for logredo()
		 * to start NoRedoPage filter and to update bmap for free
		 * of the deleted page
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* a maplock for txUpdateMap() for free of the page
		 * has been formatted at txLock() time;
		 */
		tlck->flag |= tlckUPDATEMAP;
	}
	return;
}

/*
 *      xtLog()
 *
 * function:    log xtree tlock and format maplock to update bmap;
 */
static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	   struct tlock * tlck)
{
	struct inode *ip;
	struct metapage *mp;
	xtpage_t *p;
	struct xtlock *xtlck;
	struct maplock *maplock;
	struct xdlistlock *xadlock;
	struct pxd_lock *pxdlock;
	pxd_t *page_pxd;
	int next, lwm, hwm;

	ip = tlck->ip;
	mp = tlck->mp;

	/* initialize as REDOPAGE/NOREDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);

	page_pxd = &lrd->log.redopage.pxd;

	if (tlck->type & tlckBTROOT) {
		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
		p = &JFS_IP(ip)->i_xtroot;
		if (S_ISDIR(ip->i_mode))
			lrd->log.redopage.type |=
			    cpu_to_le16(LOG_DIR_XTREE);
	} else
		p = (xtpage_t *) mp->data;
	next = le16_to_cpu(p->header.nextindex);

	xtlck = (struct xtlock *) & tlck->lock;

	maplock = (struct maplock *) & tlck->lock;
	xadlock = (struct xdlistlock *) maplock;

	/*
	 *      entry insertion/extension;
	 *      sibling page link update (old right page before split);
	 */
	if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
		/* log after-image for logredo():
		 * logredo() will update bmap for alloc of new/extended
		 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
		 * after-image of XADlist;
		 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
		 * applying the after-image to the meta-data page.
		 */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		PXDaddress(page_pxd, mp->index);
		PXDlength(page_pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bPMAP
		 * for alloc of new/extended extents of XAD[lwm:next)
		 * from the page itself;
		 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
		 */
		lwm = xtlck->lwm.offset;
		if (lwm == 0)
			lwm = XTPAGEMAXSLOT;

		if (lwm == next)
			goto out;
		if (lwm > next) {
			jfs_err("xtLog: lwm > next\n");
			goto out;
		}
		tlck->flag |= tlckUPDATEMAP;
		xadlock->flag = mlckALLOCXADLIST;
		xadlock->count = next - lwm;
		if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
			int i;
			pxd_t *pxd;
			/*
			 * Lazy commit may allow xtree to be modified before
			 * txUpdateMap runs.  Copy xad into linelock to
			 * preserve correct data.
			 *
			 * We can fit twice as many pxd's as xads in the lock
			 */
			xadlock->flag = mlckALLOCPXDLIST;
			pxd = xadlock->xdlist = &xtlck->pxdlock;
			for (i = 0; i < xadlock->count; i++) {
				PXDaddress(pxd, addressXAD(&p->xad[lwm + i]));
				PXDlength(pxd, lengthXAD(&p->xad[lwm + i]));
				p->xad[lwm + i].flag &=
				    ~(XAD_NEW | XAD_EXTENDED);
				pxd++;
			}
		} else {
			/*
			 * xdlist will point into the inode's xtree, ensure
			 * that transaction is not committed lazily.
			 */
			xadlock->flag = mlckALLOCXADLIST;
			xadlock->xdlist = &p->xad[lwm];
			tblk->xflag &= ~COMMIT_LAZY;
		}
		jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d "
			 "count:%d", tlck->ip, mp, tlck, lwm, xadlock->count);

		maplock->index = 1;

	      out:
		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;

		return;
	}

	/*
	 *      page deletion: file deletion/truncation (ref. xtTruncate())
	 *
	 * (page will be invalidated after log is written and bmap
	 * is updated from the page);
	 */
	if (tlck->type & tlckFREE) {
		/* LOG_NOREDOPAGE log for NoRedoPage filter:
		 * if page free from file delete, NoRedoFile filter from
		 * inode image of zero link count will subsume NoRedoPage
		 * filters for each page;
		 * if page free from file truncation, write NoRedoPage
		 * filter;
		 *
		 * update of block allocation map for the page itself:
		 * if page free from deletion and truncation, LOG_UPDATEMAP
		 * log for the page itself is generated from processing
		 * its parent page xad entries;
		 */
		/* if page free from file truncation, log LOG_NOREDOPAGE
		 * of the deleted page for logredo() to start NoRedoPage
		 * filter for the page;
		 */
		if (tblk->xflag & COMMIT_TRUNCATE) {
			/* write NOREDOPAGE for the page */
			lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
			PXDaddress(page_pxd, mp->index);
			PXDlength(page_pxd,
				  mp->logical_size >> tblk->sb->
				  s_blocksize_bits);
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));

			if (tlck->type & tlckBTROOT) {
				/* Empty xtree must be logged */
				lrd->type = cpu_to_le16(LOG_REDOPAGE);
				lrd->backchain =
				    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
			}
		}

		/* init LOG_UPDATEMAP of the freed extents
		 * XAD[XTENTRYSTART:hwm) from the deleted page itself
		 * for logredo() to update bmap;
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
		xtlck = (struct xtlock *) & tlck->lock;
		hwm = xtlck->hwm.offset;
		lrd->log.updatemap.nxd =
		    cpu_to_le16(hwm - XTENTRYSTART + 1);
		/* reformat linelock for lmLog() */
		xtlck->header.offset = XTENTRYSTART;
		xtlck->header.length = hwm - XTENTRYSTART + 1;
		xtlck->index = 1;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bmap
		 * to free extents of XAD[XTENTRYSTART:hwm) from the
		 * deleted page itself;
		 */
		tlck->flag |= tlckUPDATEMAP;
		xadlock->count = hwm - XTENTRYSTART + 1;
		if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
			int i;
			pxd_t *pxd;
			/*
			 * Lazy commit may allow xtree to be modified before
			 * txUpdateMap runs.  Copy xad into linelock to
			 * preserve correct data.
			 *
			 * We can fit twice as many pxd's as xads in the lock
			 */
			xadlock->flag = mlckFREEPXDLIST;
			pxd = xadlock->xdlist = &xtlck->pxdlock;
			for (i = 0; i < xadlock->count; i++) {
				PXDaddress(pxd,
					addressXAD(&p->xad[XTENTRYSTART + i]));
				PXDlength(pxd,
					lengthXAD(&p->xad[XTENTRYSTART + i]));
				pxd++;
			}
		} else {
			/*
			 * xdlist will point into the inode's xtree, ensure
			 * that transaction is not committed lazily.
			 */
			xadlock->flag = mlckFREEXADLIST;
			xadlock->xdlist = &p->xad[XTENTRYSTART];
			tblk->xflag &= ~COMMIT_LAZY;
		}
		jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2",
			 tlck->ip, mp, xadlock->count);

		maplock->index = 1;

		/* mark page as invalid */
		if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
		    && !(tlck->type & tlckBTROOT))
			tlck->flag |= tlckFREEPAGE;
		/*
		   else (tblk->xflag & COMMIT_PMAP)
		   ? release the page;
		 */
		return;
	}

	/*
	 *      page/entry truncation: file truncation (ref. xtTruncate())
	 *
	 *     |----------+------+------+---------------|
	 *                |      |      |
	 *                |      |     hwm - hwm before truncation
	 *                |     next - truncation point
	 *               lwm - lwm before truncation
	 * header ?
	 */
	if (tlck->type & tlckTRUNCATE) {
		pxd_t pxd;	/* truncated extent of xad */
		int twm;

		/*
		 * For truncation the entire linelock may be used, so it would
		 * be difficult to store xad list in linelock itself.
		 * Therefore, we'll just force transaction to be committed
		 * synchronously, so that xtree pages won't be changed before
		 * txUpdateMap runs.
		 */
		tblk->xflag &= ~COMMIT_LAZY;
		lwm = xtlck->lwm.offset;
		if (lwm == 0)
			lwm = XTPAGEMAXSLOT;
		hwm = xtlck->hwm.offset;
		twm = xtlck->twm.offset;

		/*
		 *      write log records
		 */
		/* log after-image for logredo():
		 *
		 * logredo() will update bmap for alloc of new/extended
		 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
		 * after-image of XADlist;
		 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
		 * applying the after-image to the meta-data page.
		 */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		PXDaddress(page_pxd, mp->index);
		PXDlength(page_pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/*
		 * truncate entry XAD[twm == next - 1]:
		 */
		if (twm == next - 1) {
			/* init LOG_UPDATEMAP for logredo() to update bmap for
			 * free of truncated delta extent of the truncated
			 * entry XAD[next - 1]:
			 * (xtlck->pxdlock = truncated delta extent);
			 */
			pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
			/* assert(pxdlock->type & tlckTRUNCATE); */
			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
			lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			pxd = pxdlock->pxd;	/* save to format maplock */
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
		}

		/*
		 * free entries XAD[next:hwm]:
		 */
		if (hwm >= next) {
			/* init LOG_UPDATEMAP of the freed extents
			 * XAD[next:hwm] from the deleted page itself
			 * for logredo() to update bmap;
			 */
			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
			lrd->log.updatemap.type =
			    cpu_to_le16(LOG_FREEXADLIST);
			xtlck = (struct xtlock *) & tlck->lock;
			hwm = xtlck->hwm.offset;
			lrd->log.updatemap.nxd =
			    cpu_to_le16(hwm - next + 1);
			/* reformat linelock for lmLog() */
			xtlck->header.offset = next;
			xtlck->header.length = hwm - next + 1;
			xtlck->index = 1;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
		}

		/*
		 *      format maplock(s) for txUpdateMap() to update bmap
		 */
		maplock->index = 0;

		/*
		 * allocate entries XAD[lwm:next):
		 */
		if (lwm < next) {
			/* format a maplock for txUpdateMap() to update bPMAP
			 * for alloc of new/extended extents of XAD[lwm:next)
			 * from the page itself;
			 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
			 */
			tlck->flag |= tlckUPDATEMAP;
			xadlock->flag = mlckALLOCXADLIST;
			xadlock->count = next - lwm;
			xadlock->xdlist = &p->xad[lwm];

			jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d "
				 "lwm:%d next:%d",
				 tlck->ip, mp, xadlock->count, lwm, next);
			maplock->index++;
			xadlock++;
		}

		/*
		 * truncate entry XAD[twm == next - 1]:
		 */
		if (twm == next - 1) {
			struct pxd_lock *pxdlock;

			/* format a maplock for txUpdateMap() to update bmap
			 * to free truncated delta extent of the truncated
			 * entry XAD[next - 1];
			 * (xtlck->pxdlock = truncated delta extent);
			 */
			tlck->flag |= tlckUPDATEMAP;
			pxdlock = (struct pxd_lock *) xadlock;
			pxdlock->flag = mlckFREEPXD;
2034 			pxdlock->count = 1;
2035 			pxdlock->pxd = pxd;
2036 
2037 			jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d "
2038 				 "hwm:%d", ip, mp, pxdlock->count, hwm);
2039 			maplock->index++;
2040 			xadlock++;
2041 		}
2042 
2043 		/*
2044 		 * free entries XAD[next:hwm]:
2045 		 */
2046 		if (hwm >= next) {
2047 			/* format a maplock for txUpdateMap() to update bmap
2048 			 * to free extents of XAD[next:hwm] from the deleted
2049 			 * page itself;
2050 			 */
2051 			tlck->flag |= tlckUPDATEMAP;
2052 			xadlock->flag = mlckFREEXADLIST;
2053 			xadlock->count = hwm - next + 1;
2054 			xadlock->xdlist = &p->xad[next];
2055 
2056 			jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d "
2057 				 "next:%d hwm:%d",
2058 				 tlck->ip, mp, xadlock->count, next, hwm);
2059 			maplock->index++;
2060 		}
2061 
2062 		/* mark page as homeward bound */
2063 		tlck->flag |= tlckWRITEPAGE;
2064 	}
2065 	return;
2066 }
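
/*
 * The truncation case above derives up to three map updates from four
 * slot indices.  A standalone sketch of that bookkeeping (illustrative
 * only; the indices below are made-up values, and the inclusive /
 * exclusive conventions follow the counts computed above):
 */
#if 0
#include <stdio.h>

static void show_ranges(int lwm, int twm, int next, int hwm)
{
	if (lwm < next)		/* alloc of new/extended entries */
		printf("alloc XAD[%d:%d): count %d\n",
		       lwm, next, next - lwm);
	if (twm == next - 1)	/* truncated delta extent */
		printf("free delta of XAD[%d]: count 1\n", twm);
	if (hwm >= next)	/* entries freed outright */
		printf("free XAD[%d:%d]: count %d\n",
		       next, hwm, hwm - next + 1);
}

int main(void)
{
	show_ranges(3, 6, 7, 9);	/* alloc [3:7), delta at 6, free [7:9] */
	return 0;
}
#endif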
2067 
2068 /*
2069  *      mapLog()
2070  *
2071  * function:    log from maplock of freed data extents;
2072  */
2073 void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
2074 	    struct tlock * tlck)
2075 {
2076 	struct pxd_lock *pxdlock;
2077 	int i, nlock;
2078 	pxd_t *pxd;
2079 
2080 	/*
2081 	 *      page relocation: free the source page extent
2082 	 *
2083 	 * a maplock for txUpdateMap() for free of the page
2084 	 * has been formatted at txLock() time saving the src
2085 	 * relocated page address;
2086 	 */
2087 	if (tlck->type & tlckRELOCATE) {
2088 		/* log LOG_NOREDOPAGE of the old relocated page
2089 		 * for logredo() to start NoRedoPage filter;
2090 		 */
2091 		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
2092 		pxdlock = (struct pxd_lock *) & tlck->lock;
2093 		pxd = &lrd->log.redopage.pxd;
2094 		*pxd = pxdlock->pxd;
2095 		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2096 
2097 		/* (N.B. currently, logredo() does NOT update bmap
2098 		 * for free of the page itself for (LOG_XTREE|LOG_NOREDOPAGE);
2099 		 * if page free from relocation, LOG_UPDATEMAP log is
2100 		 * specifically generated now for logredo()
2101 		 * to update bmap for free of src relocated page;
2102 		 * (new flag LOG_RELOCATE may be introduced which will
2103 		 * inform logredo() to start NORedoPage filter and also
2104 		 * update block allocation map at the same time, thus
2105 		 * avoiding an extra log write);
2106 		 */
2107 		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
2108 		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
2109 		lrd->log.updatemap.nxd = cpu_to_le16(1);
2110 		lrd->log.updatemap.pxd = pxdlock->pxd;
2111 		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2112 
2113 		/* a maplock for txUpdateMap() for free of the page
2114 		 * has been formatted at txLock() time;
2115 		 */
2116 		tlck->flag |= tlckUPDATEMAP;
2117 		return;
2118 	}
2119 	/*
2120 	 * Otherwise it's not a relocate request
2121 	 */
2124 	else {
2125 		/* log LOG_UPDATEMAP for logredo() to update bmap for
2126 		 * free of truncated/relocated delta extent of the data;
2127 		 * e.g.: external EA extent, relocated/truncated extent
2128 		 * from xtTailgate();
2129 		 */
2130 		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
2131 		pxdlock = (struct pxd_lock *) & tlck->lock;
2132 		nlock = pxdlock->index;
2133 		for (i = 0; i < nlock; i++, pxdlock++) {
2134 			if (pxdlock->flag & mlckALLOCPXD)
2135 				lrd->log.updatemap.type =
2136 				    cpu_to_le16(LOG_ALLOCPXD);
2137 			else
2138 				lrd->log.updatemap.type =
2139 				    cpu_to_le16(LOG_FREEPXD);
2140 			lrd->log.updatemap.nxd = cpu_to_le16(1);
2141 			lrd->log.updatemap.pxd = pxdlock->pxd;
2142 			lrd->backchain =
2143 			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2144 			jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
2145 				 (ulong) addressPXD(&pxdlock->pxd),
2146 				 lengthPXD(&pxdlock->pxd));
2147 		}
2148 
2149 		/* update bmap */
2150 		tlck->flag |= tlckUPDATEMAP;
2151 	}
2152 }
2153 
2154 /*
2155  *      txEA()
2156  *
2157  * function:    acquire maplock for EA/ACL extents or
2158  *              set COMMIT_INLINE flag;
2159  */
2160 void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
2161 {
2162 	struct tlock *tlck = NULL;
2163 	struct pxd_lock *maplock = NULL, *pxdlock = NULL;
2164 
2165 	/*
2166 	 * format maplock for alloc of new EA extent
2167 	 */
2168 	if (newea) {
2169 		/* Since the newea could be a completely zeroed entry, we need to
2170 		 * check for the two flags which indicate we should actually
2171 		 * commit new EA data
2172 		 */
2173 		if (newea->flag & DXD_EXTENT) {
2174 			tlck = txMaplock(tid, ip, tlckMAP);
2175 			maplock = (struct pxd_lock *) & tlck->lock;
2176 			pxdlock = (struct pxd_lock *) maplock;
2177 			pxdlock->flag = mlckALLOCPXD;
2178 			PXDaddress(&pxdlock->pxd, addressDXD(newea));
2179 			PXDlength(&pxdlock->pxd, lengthDXD(newea));
2180 			pxdlock++;
2181 			maplock->index = 1;
2182 		} else if (newea->flag & DXD_INLINE) {
2183 			tlck = NULL;
2184 
2185 			set_cflag(COMMIT_Inlineea, ip);
2186 		}
2187 	}
2188 
2189 	/*
2190 	 * format maplock for free of old EA extent
2191 	 */
2192 	if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
2193 		if (tlck == NULL) {
2194 			tlck = txMaplock(tid, ip, tlckMAP);
2195 			maplock = (struct pxd_lock *) & tlck->lock;
2196 			pxdlock = (struct pxd_lock *) maplock;
2197 			maplock->index = 0;
2198 		}
2199 		pxdlock->flag = mlckFREEPXD;
2200 		PXDaddress(&pxdlock->pxd, addressDXD(oldea));
2201 		PXDlength(&pxdlock->pxd, lengthDXD(oldea));
2202 		maplock->index++;
2203 	}
2204 }
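
/*
 * A standalone sketch of the lock-formatting decisions above
 * (illustrative only; the DXD flag values here are placeholders, not
 * taken from jfs_types.h):
 */
#if 0
#include <stdio.h>

#define DXD_EXTENT 0x01		/* placeholder flag values */
#define DXD_INLINE 0x02

static void ea_locks(int oldflag, int newflag, int nolink)
{
	int index = 0;

	if (newflag & DXD_EXTENT)
		printf("maplock[%d]: mlckALLOCPXD (new EA extent)\n", index++);
	else if (newflag & DXD_INLINE)
		printf("set_cflag(COMMIT_Inlineea)\n");
	if (!nolink && (oldflag & DXD_EXTENT))
		printf("maplock[%d]: mlckFREEPXD (old EA extent)\n", index++);
}

int main(void)
{
	ea_locks(DXD_EXTENT, DXD_INLINE, 0);	/* out-of-line EA -> inline */
	return 0;
}
#endif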
2205 
2206 /*
2207  *      txForce()
2208  *
2209  * function: synchronously write pages locked by transaction
2210  *              after txLog() but before txUpdateMap();
2211  */
2212 void txForce(struct tblock * tblk)
2213 {
2214 	struct tlock *tlck;
2215 	lid_t lid, next;
2216 	struct metapage *mp;
2217 
2218 	/*
2219 	 * reverse the order of transaction tlocks into the
2220 	 * careful update order of address index pages
2221 	 * (right to left, bottom up)
2222 	 */
2223 	tlck = lid_to_tlock(tblk->next);
2224 	lid = tlck->next;
2225 	tlck->next = 0;
2226 	while (lid) {
2227 		tlck = lid_to_tlock(lid);
2228 		next = tlck->next;
2229 		tlck->next = tblk->next;
2230 		tblk->next = lid;
2231 		lid = next;
2232 	}
2233 
2234 	/*
2235 	 * synchronously write the page, and
2236 	 * hold the page for txUpdateMap();
2237 	 */
2238 	for (lid = tblk->next; lid; lid = next) {
2239 		tlck = lid_to_tlock(lid);
2240 		next = tlck->next;
2241 
2242 		if ((mp = tlck->mp) != NULL &&
2243 		    (tlck->type & tlckBTROOT) == 0) {
2244 			assert(mp->xflag & COMMIT_PAGE);
2245 
2246 			if (tlck->flag & tlckWRITEPAGE) {
2247 				tlck->flag &= ~tlckWRITEPAGE;
2248 
2249 				/* do not release page to freelist */
2250 				force_metapage(mp);
2251 #if 0
2252 				/*
2253 				 * The "right" thing to do here is to
2254 				 * synchronously write the metadata.
2255 				 * With the current implementation this
2256 				 * is hard since write_metapage requires
2257 				 * us to kunmap & remap the page.  If we
2258 				 * have tlocks pointing into the metadata
2259 				 * pages, we don't want to do this.  I think
2260 				 * we can get by with synchronously writing
2261 				 * the pages when they are released.
2262 				 */
2263 				assert(mp->nohomeok);
2264 				set_bit(META_dirty, &mp->flag);
2265 				set_bit(META_sync, &mp->flag);
2266 #endif
2267 			}
2268 		}
2269 	}
2270 }
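
/*
 * The loop at the top of txForce() is an in-place reversal of a singly
 * linked list whose links are lids (indices into the tlock table)
 * rather than pointers.  The same idiom over a plain array of
 * next-indices, with 0 as the terminator (standalone sketch):
 */
#if 0
#include <stdio.h>

static unsigned short nxt[8] = { 0, 2, 3, 4, 0 };	/* list: 1 2 3 4 */

int main(void)
{
	unsigned short head = 1, lid, next;

	lid = nxt[head];	/* detach everything after the head */
	nxt[head] = 0;
	while (lid) {		/* push each node onto the front */
		next = nxt[lid];
		nxt[lid] = head;
		head = lid;
		lid = next;
	}
	for (lid = head; lid; lid = nxt[lid])
		printf("%u ", lid);	/* prints: 4 3 2 1 */
	printf("\n");
	return 0;
}
#endif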
2271 
2272 /*
2273  *      txUpdateMap()
2274  *
2275  * function:    update persistent allocation map (and working map
2276  *              if appropriate);
2277  *
2278  * parameter:
2279  */
2280 static void txUpdateMap(struct tblock * tblk)
2281 {
2282 	struct inode *ip;
2283 	struct inode *ipimap;
2284 	lid_t lid;
2285 	struct tlock *tlck;
2286 	struct maplock *maplock;
2287 	struct pxd_lock pxdlock;
2288 	int maptype;
2289 	int k, nlock;
2290 	struct metapage *mp = NULL;
2291 
2292 	ipimap = JFS_SBI(tblk->sb)->ipimap;
2293 
2294 	maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;
2295 
2297 	/*
2298 	 *      update block allocation map
2299 	 *
2300 	 * update allocation state in pmap (and wmap) and
2301 	 * update lsn of the pmap page;
2302 	 */
2303 	/*
2304 	 * scan each tlock/page of transaction for block allocation/free:
2305 	 *
2306 	 * for each tlock/page of transaction, update map.
2307 	 *  ? are there tlocks for pmap and pwmap at the same time ?
2308 	 */
2309 	for (lid = tblk->next; lid; lid = tlck->next) {
2310 		tlck = lid_to_tlock(lid);
2311 
2312 		if ((tlck->flag & tlckUPDATEMAP) == 0)
2313 			continue;
2314 
2315 		if (tlck->flag & tlckFREEPAGE) {
2316 			/*
2317 			 * Another thread may attempt to reuse freed space
2318 			 * immediately, so we want to get rid of the metapage
2319 			 * before anyone else has a chance to get it.
2320 			 * Lock metapage, update maps, then invalidate
2321 			 * the metapage.
2322 			 */
2323 			mp = tlck->mp;
2324 			ASSERT(mp->xflag & COMMIT_PAGE);
2325 			grab_metapage(mp);
2326 		}
2327 
2328 		/*
2329 		 * extent list:
2330 		 * . in-line PXD list:
2331 		 * . out-of-line XAD list:
2332 		 */
2333 		maplock = (struct maplock *) & tlck->lock;
2334 		nlock = maplock->index;
2335 
2336 		for (k = 0; k < nlock; k++, maplock++) {
2337 			/*
2338 			 * allocate blocks in persistent map:
2339 			 *
2340 			 * blocks have been allocated from wmap at alloc time;
2341 			 */
2342 			if (maplock->flag & mlckALLOC) {
2343 				txAllocPMap(ipimap, maplock, tblk);
2344 			}
2345 			/*
2346 			 * free blocks in persistent and working map:
2347 			 * blocks will be freed in pmap and then in wmap;
2348 			 *
2349 			 * ? tblock specifies the PMAP/PWMAP based upon
2350 			 * transaction
2351 			 *
2352 			 * free blocks in persistent map:
2353 			 * blocks will be freed from wmap at last reference
2354 			 * release of the object for regular files;
2355 			 *
2356 			 * Always free blocks from both persistent & working
2357 			 * maps for directories
2358 			 */
2359 			else {	/* (maplock->flag & mlckFREE) */
2360 
2361 				if (S_ISDIR(tlck->ip->i_mode))
2362 					txFreeMap(ipimap, maplock,
2363 						  tblk, COMMIT_PWMAP);
2364 				else
2365 					txFreeMap(ipimap, maplock,
2366 						  tblk, maptype);
2367 			}
2368 		}
2369 		if (tlck->flag & tlckFREEPAGE) {
2370 			if (!(tblk->flag & tblkGC_LAZY)) {
2371 				/* This is equivalent to txRelease */
2372 				ASSERT(mp->lid == lid);
2373 				tlck->mp->lid = 0;
2374 			}
2375 			assert(mp->nohomeok == 1);
2376 			metapage_homeok(mp);
2377 			discard_metapage(mp);
2378 			tlck->mp = NULL;
2379 		}
2380 	}
2381 	/*
2382 	 *      update inode allocation map
2383 	 *
2384 	 * update allocation state in pmap and
2385 	 * update lsn of the pmap page;
2386 	 * update in-memory inode flag/state
2387 	 *
2388 	 * unlock mapper/write lock
2389 	 */
2390 	if (tblk->xflag & COMMIT_CREATE) {
2391 		diUpdatePMap(ipimap, tblk->ino, FALSE, tblk);
2392 		ipimap->i_state |= I_DIRTY;
2393 		/* update persistent block allocation map
2394 		 * for the allocation of inode extent;
2395 		 */
2396 		pxdlock.flag = mlckALLOCPXD;
2397 		pxdlock.pxd = tblk->u.ixpxd;
2398 		pxdlock.index = 1;
2399 		txAllocPMap(ipimap, (struct maplock *) & pxdlock, tblk);
2400 	} else if (tblk->xflag & COMMIT_DELETE) {
2401 		ip = tblk->u.ip;
2402 		diUpdatePMap(ipimap, ip->i_ino, TRUE, tblk);
2403 		ipimap->i_state |= I_DIRTY;
2404 		iput(ip);
2405 	}
2406 }
2407 
2408 /*
2409  *      txAllocPMap()
2410  *
2411  * function: allocate from persistent map;
2412  *
2413  * parameter:
2414  *      ipbmap  -
2415  *      maplock -
2416  *              xad list:
2417  *              pxd:
2418  *
2419  *      maptype -
2420  *              allocate from persistent map;
2421  *              free from persistent map;
2422  *              (e.g., tmp file - free from working map at release
2423  *               of last reference);
2424  *              free from persistent and working map;
2425  *
2426  *      lsn     - log sequence number;
2427  */
2428 static void txAllocPMap(struct inode *ip, struct maplock * maplock,
2429 			struct tblock * tblk)
2430 {
2431 	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2432 	struct xdlistlock *xadlistlock;
2433 	xad_t *xad;
2434 	s64 xaddr;
2435 	int xlen;
2436 	struct pxd_lock *pxdlock;
2437 	struct xdlistlock *pxdlistlock;
2438 	pxd_t *pxd;
2439 	int n;
2440 
2441 	/*
2442 	 * allocate from persistent map;
2443 	 */
2444 	if (maplock->flag & mlckALLOCXADLIST) {
2445 		xadlistlock = (struct xdlistlock *) maplock;
2446 		xad = xadlistlock->xdlist;
2447 		for (n = 0; n < xadlistlock->count; n++, xad++) {
2448 			if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
2449 				xaddr = addressXAD(xad);
2450 				xlen = lengthXAD(xad);
2451 				dbUpdatePMap(ipbmap, FALSE, xaddr,
2452 					     (s64) xlen, tblk);
2453 				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
2454 				jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
2455 					 (ulong) xaddr, xlen);
2456 			}
2457 		}
2458 	} else if (maplock->flag & mlckALLOCPXD) {
2459 		pxdlock = (struct pxd_lock *) maplock;
2460 		xaddr = addressPXD(&pxdlock->pxd);
2461 		xlen = lengthPXD(&pxdlock->pxd);
2462 		dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen, tblk);
2463 		jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
2464 	} else {		/* (maplock->flag & mlckALLOCPXDLIST) */
2465 
2466 		pxdlistlock = (struct xdlistlock *) maplock;
2467 		pxd = pxdlistlock->xdlist;
2468 		for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2469 			xaddr = addressPXD(pxd);
2470 			xlen = lengthPXD(pxd);
2471 			dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen,
2472 				     tblk);
2473 			jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
2474 				 (ulong) xaddr, xlen);
2475 		}
2476 	}
2477 }
2478 
2479 /*
2480  *      txFreeMap()
2481  *
2482  * function:    free from persistent and/or working map;
2483  *
2484  * todo: optimization
2485  */
2486 void txFreeMap(struct inode *ip,
2487 	       struct maplock * maplock, struct tblock * tblk, int maptype)
2488 {
2489 	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2490 	struct xdlistlock *xadlistlock;
2491 	xad_t *xad;
2492 	s64 xaddr;
2493 	int xlen;
2494 	struct pxd_lock *pxdlock;
2495 	struct xdlistlock *pxdlistlock;
2496 	pxd_t *pxd;
2497 	int n;
2498 
2499 	jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
2500 		 tblk, maplock, maptype);
2501 
2502 	/*
2503 	 * free from persistent map;
2504 	 */
2505 	if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
2506 		if (maplock->flag & mlckFREEXADLIST) {
2507 			xadlistlock = (struct xdlistlock *) maplock;
2508 			xad = xadlistlock->xdlist;
2509 			for (n = 0; n < xadlistlock->count; n++, xad++) {
2510 				if (!(xad->flag & XAD_NEW)) {
2511 					xaddr = addressXAD(xad);
2512 					xlen = lengthXAD(xad);
2513 					dbUpdatePMap(ipbmap, TRUE, xaddr,
2514 						     (s64) xlen, tblk);
2515 					jfs_info("freePMap: xaddr:0x%lx "
2516 						 "xlen:%d",
2517 						 (ulong) xaddr, xlen);
2518 				}
2519 			}
2520 		} else if (maplock->flag & mlckFREEPXD) {
2521 			pxdlock = (struct pxd_lock *) maplock;
2522 			xaddr = addressPXD(&pxdlock->pxd);
2523 			xlen = lengthPXD(&pxdlock->pxd);
2524 			dbUpdatePMap(ipbmap, TRUE, xaddr, (s64) xlen,
2525 				     tblk);
2526 			jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2527 				 (ulong) xaddr, xlen);
2528 		} else {	/* (maplock->flag & mlckFREEPXDLIST) */
2529 
2530 			pxdlistlock = (struct xdlistlock *) maplock;
2531 			pxd = pxdlistlock->xdlist;
2532 			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2533 				xaddr = addressPXD(pxd);
2534 				xlen = lengthPXD(pxd);
2535 				dbUpdatePMap(ipbmap, TRUE, xaddr,
2536 					     (s64) xlen, tblk);
2537 				jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2538 					 (ulong) xaddr, xlen);
2539 			}
2540 		}
2541 	}
2542 
2543 	/*
2544 	 * free from working map;
2545 	 */
2546 	if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
2547 		if (maplock->flag & mlckFREEXADLIST) {
2548 			xadlistlock = (struct xdlistlock *) maplock;
2549 			xad = xadlistlock->xdlist;
2550 			for (n = 0; n < xadlistlock->count; n++, xad++) {
2551 				xaddr = addressXAD(xad);
2552 				xlen = lengthXAD(xad);
2553 				dbFree(ip, xaddr, (s64) xlen);
2554 				xad->flag = 0;
2555 				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2556 					 (ulong) xaddr, xlen);
2557 			}
2558 		} else if (maplock->flag & mlckFREEPXD) {
2559 			pxdlock = (struct pxd_lock *) maplock;
2560 			xaddr = addressPXD(&pxdlock->pxd);
2561 			xlen = lengthPXD(&pxdlock->pxd);
2562 			dbFree(ip, xaddr, (s64) xlen);
2563 			jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2564 				 (ulong) xaddr, xlen);
2565 		} else {	/* (maplock->flag & mlckFREEPXDLIST) */
2566 
2567 			pxdlistlock = (struct xdlistlock *) maplock;
2568 			pxd = pxdlistlock->xdlist;
2569 			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2570 				xaddr = addressPXD(pxd);
2571 				xlen = lengthPXD(pxd);
2572 				dbFree(ip, xaddr, (s64) xlen);
2573 				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2574 					 (ulong) xaddr, xlen);
2575 			}
2576 		}
2577 	}
2578 }
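
/*
 * The two if-blocks above key off maptype; a standalone sketch of
 * which maps each commit type touches (the enum values here are
 * placeholders, not the real COMMIT_* flag values):
 */
#if 0
#include <stdio.h>

enum { COMMIT_PMAP = 1, COMMIT_WMAP = 2, COMMIT_PWMAP = 3 };

int main(void)
{
	int types[] = { COMMIT_PMAP, COMMIT_WMAP, COMMIT_PWMAP }, i;

	for (i = 0; i < 3; i++)
		printf("maptype %d: pmap=%d wmap=%d\n", types[i],
		       types[i] == COMMIT_PMAP || types[i] == COMMIT_PWMAP,
		       types[i] == COMMIT_PWMAP || types[i] == COMMIT_WMAP);
	return 0;
}
#endif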
2579 
2580 /*
2581  *      txFreelock()
2582  *
2583  * function:    remove tlock from inode anonymous locklist
2584  */
2585 void txFreelock(struct inode *ip)
2586 {
2587 	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
2588 	struct tlock *xtlck, *tlck;
2589 	lid_t xlid = 0, lid;
2590 
2591 	if (!jfs_ip->atlhead)
2592 		return;
2593 
2594 	TXN_LOCK();
2595 	xtlck = (struct tlock *) &jfs_ip->atlhead;
2596 
2597 	while ((lid = xtlck->next) != 0) {
2598 		tlck = lid_to_tlock(lid);
2599 		if (tlck->flag & tlckFREELOCK) {
2600 			xtlck->next = tlck->next;
2601 			txLockFree(lid);
2602 		} else {
2603 			xtlck = tlck;
2604 			xlid = lid;
2605 		}
2606 	}
2607 
2608 	if (jfs_ip->atlhead)
2609 		jfs_ip->atltail = xlid;
2610 	else {
2611 		jfs_ip->atltail = 0;
2612 		/*
2613 		 * If inode was on anon_list, remove it
2614 		 */
2615 		list_del_init(&jfs_ip->anon_inode_list);
2616 	}
2617 	TXN_UNLOCK();
2618 }
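
/*
 * txFreelock() walks the anonymous list through a pseudo tlock overlaid
 * on the list head, so unlinking the first node and unlinking a middle
 * node share one code path.  The overlay is safe only because 'next' is
 * the first member of struct tlock.  A standalone sketch of the idiom:
 */
#if 0
#include <stdio.h>

struct node {
	unsigned short next;	/* must remain the first member */
	unsigned short free;
};

static struct node tbl[6] = {
	{ 0, 0 }, { 2, 0 }, { 3, 1 }, { 4, 0 }, { 5, 1 }, { 0, 0 }
};
static unsigned short head = 1;	/* list: 1 2 3 4 5 */

int main(void)
{
	struct node *prev = (struct node *)&head;	/* pseudo head node */
	unsigned short lid;

	while ((lid = prev->next) != 0) {
		if (tbl[lid].free)
			prev->next = tbl[lid].next;	/* unlink node */
		else
			prev = &tbl[lid];
	}
	for (lid = head; lid; lid = tbl[lid].next)
		printf("%u ", lid);	/* prints: 1 3 5 */
	printf("\n");
	return 0;
}
#endif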
2619 
2620 /*
2621  *      txAbort()
2622  *
2623  * function: abort tx before commit;
2624  *
2625  * frees line-locks and segment locks for all
2626  * segments in comdata structure.
2627  * Optionally sets state of file-system to FM_DIRTY in super-block.
2628  * The log age of in-memory page frames held by the caller
2629  * is reset to 0 (to avoid logwrap).
2630  */
2631 void txAbort(tid_t tid, int dirty)
2632 {
2633 	lid_t lid, next;
2634 	struct metapage *mp;
2635 	struct tblock *tblk = tid_to_tblock(tid);
2636 	struct tlock *tlck;
2637 
2638 	/*
2639 	 * free tlocks of the transaction
2640 	 */
2641 	for (lid = tblk->next; lid; lid = next) {
2642 		tlck = lid_to_tlock(lid);
2643 		next = tlck->next;
2644 		mp = tlck->mp;
2645 		JFS_IP(tlck->ip)->xtlid = 0;
2646 
2647 		if (mp) {
2648 			mp->lid = 0;
2649 
2650 			/*
2651 			 * reset lsn of page to avoid logwrap:
2652 			 *
2653 			 * (page may have been previously committed by another
2654 			 * transaction(s) but has not been paged, i.e.,
2655 			 * it may be on logsync list even though it has not
2656 			 * been logged for the current tx.)
2657 			 */
2658 			if (mp->xflag & COMMIT_PAGE && mp->lsn)
2659 				LogSyncRelease(mp);
2660 		}
2661 		/* insert tlock at head of freelist */
2662 		TXN_LOCK();
2663 		txLockFree(lid);
2664 		TXN_UNLOCK();
2665 	}
2666 
2667 	/* caller will free the transaction block */
2668 
2669 	tblk->next = tblk->last = 0;
2670 
2671 	/*
2672 	 * mark filesystem dirty
2673 	 */
2674 	if (dirty)
2675 		jfs_error(tblk->sb, "txAbort");
2676 
2677 	return;
2678 }
2679 
2680 /*
2681  *      txLazyCommit(void)
2682  *
2683  *	All transactions except those changing ipimap (COMMIT_FORCE) are
2684  *	processed by this routine.  This ensures that the inode and block
2685  *	allocation maps are updated in order.  For synchronous transactions,
2686  *	let the user thread finish processing after txUpdateMap() is called.
2687  */
2688 static void txLazyCommit(struct tblock * tblk)
2689 {
2690 	struct jfs_log *log;
2691 
2692 	while (((tblk->flag & tblkGC_READY) == 0) &&
2693 	       ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
2694 		/* We must have gotten ahead of the user thread
2695 		 */
2696 		jfs_info("jfs_lazycommit: tblk 0x%p not unlocked", tblk);
2697 		yield();
2698 	}
2699 
2700 	jfs_info("txLazyCommit: processing tblk 0x%p", tblk);
2701 
2702 	txUpdateMap(tblk);
2703 
2704 	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
2705 
2706 	spin_lock_irq(&log->gclock);	// LOGGC_LOCK
2707 
2708 	tblk->flag |= tblkGC_COMMITTED;
2709 
2710 	if (tblk->flag & tblkGC_READY)
2711 		log->gcrtc--;
2712 
2713 	wake_up_all(&tblk->gcwait);	// LOGGC_WAKEUP
2714 
2715 	/*
2716 	 * Can't release log->gclock until we've tested tblk->flag
2717 	 */
2718 	if (tblk->flag & tblkGC_LAZY) {
2719 		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
2720 		txUnlock(tblk);
2721 		tblk->flag &= ~tblkGC_LAZY;
2722 		txEnd(tblk - TxBlock);	/* Convert back to tid */
2723 	} else
2724 		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
2725 
2726 	jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
2727 }
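
/*
 * 'txEnd(tblk - TxBlock)' above converts a tblock pointer back to its
 * tid by plain pointer arithmetic: the tid is just the index of the
 * tblock within the TxBlock table.  A standalone sketch:
 */
#if 0
#include <stdio.h>

struct tb { int flag; };
static struct tb table[16];

int main(void)
{
	struct tb *tblk = &table[5];	/* like tid_to_tblock(5) */

	printf("tid = %ld\n", (long)(tblk - table));	/* prints: tid = 5 */
	return 0;
}
#endif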
2728 
2729 /*
2730  *      jfs_lazycommit(void)
2731  *
2732  *	To be run as a kernel daemon.  lbmIODone may be called in interrupt
2733  *	context, or where blocking is not wanted, so committed transactions
2734  *	are queued and this routine processes them from the unlock queue.
2735  */
2736 int jfs_lazycommit(void *arg)
2737 {
2738 	int WorkDone;
2739 	struct tblock *tblk;
2740 	unsigned long flags;
2741 	struct jfs_sb_info *sbi;
2742 
2743 	daemonize("jfsCommit");
2744 
2745 	complete(&jfsIOwait);
2746 
2747 	do {
2748 		LAZY_LOCK(flags);
2749 		jfs_commit_thread_waking = 0;	/* OK to wake another thread */
2750 		while (!list_empty(&TxAnchor.unlock_queue)) {
2751 			WorkDone = 0;
2752 			list_for_each_entry(tblk, &TxAnchor.unlock_queue,
2753 					    cqueue) {
2754 
2755 				sbi = JFS_SBI(tblk->sb);
2756 				/*
2757 				 * For each volume, the transactions must be
2758 				 * handled in order.  If another commit thread
2759 				 * is handling a tblk for this superblock,
2760 				 * skip it
2761 				 */
2762 				if (sbi->commit_state & IN_LAZYCOMMIT)
2763 					continue;
2764 
2765 				sbi->commit_state |= IN_LAZYCOMMIT;
2766 				WorkDone = 1;
2767 
2768 				/*
2769 				 * Remove transaction from queue
2770 				 */
2771 				list_del(&tblk->cqueue);
2772 
2773 				LAZY_UNLOCK(flags);
2774 				txLazyCommit(tblk);
2775 				LAZY_LOCK(flags);
2776 
2777 				sbi->commit_state &= ~IN_LAZYCOMMIT;
2778 				/*
2779 				 * Don't continue in the for loop.  (We can't
2780 				 * anyway, it's unsafe!)  We want to go back to
2781 				 * the beginning of the list.
2782 				 */
2783 				break;
2784 			}
2785 
2786 			/* If there was nothing to do, don't continue */
2787 			if (!WorkDone)
2788 				break;
2789 		}
2790 		/* In case a wakeup came while all threads were active */
2791 		jfs_commit_thread_waking = 0;
2792 
2793 		if (freezing(current)) {
2794 			LAZY_UNLOCK(flags);
2795 			refrigerator();
2796 		} else {
2797 			DECLARE_WAITQUEUE(wq, current);
2798 
2799 			add_wait_queue(&jfs_commit_thread_wait, &wq);
2800 			set_current_state(TASK_INTERRUPTIBLE);
2801 			LAZY_UNLOCK(flags);
2802 			schedule();
2803 			current->state = TASK_RUNNING;
2804 			remove_wait_queue(&jfs_commit_thread_wait, &wq);
2805 		}
2806 	} while (!jfs_stop_threads);
2807 
2808 	if (!list_empty(&TxAnchor.unlock_queue))
2809 		jfs_err("jfs_lazycommit being killed w/pending transactions!");
2810 	else
2811 		jfs_info("jfs_lazycommit being killed");
2812 	complete_and_exit(&jfsIOwait, 0);
2813 }
2814 
2815 void txLazyUnlock(struct tblock * tblk)
2816 {
2817 	unsigned long flags;
2818 
2819 	LAZY_LOCK(flags);
2820 
2821 	list_add_tail(&tblk->cqueue, &TxAnchor.unlock_queue);
2822 	/*
2823 	 * Don't wake up a commit thread if there is already one servicing
2824 	 * this superblock, or if the last one we woke up hasn't started yet.
2825 	 */
2826 	if (!(JFS_SBI(tblk->sb)->commit_state & IN_LAZYCOMMIT) &&
2827 	    !jfs_commit_thread_waking) {
2828 		jfs_commit_thread_waking = 1;
2829 		wake_up(&jfs_commit_thread_wait);
2830 	}
2831 	LAZY_UNLOCK(flags);
2832 }
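
/*
 * The test above throttles wakeups: a single flag, cleared by the woken
 * commit thread, keeps at most one wakeup outstanding no matter how
 * many transactions are queued.  A standalone sketch of that counting
 * behaviour:
 */
#if 0
#include <stdio.h>

static int busy;	/* stands in for IN_LAZYCOMMIT */
static int waking;	/* stands in for jfs_commit_thread_waking */
static int wakeups;

static void queue_tblk(void)
{
	if (!busy && !waking) {
		waking = 1;
		wakeups++;	/* would be wake_up(...) */
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		queue_tblk();		/* five queued transactions... */
	printf("wakeups = %d\n", wakeups);	/* ...one wakeup */
	return 0;
}
#endif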
2833 
2834 static void LogSyncRelease(struct metapage * mp)
2835 {
2836 	struct jfs_log *log = mp->log;
2837 
2838 	assert(mp->nohomeok);
2839 	assert(log);
2840 	metapage_homeok(mp);
2841 }
2842 
2843 /*
2844  *	txQuiesce
2845  *
2846  *	Block all new transactions and push anonymous transactions to
2847  *	completion
2848  *
2849  *	This does almost the same thing as jfs_sync below.  We don't
2850  *	worry about deadlocking when jfs_tlocks_low is set, since we would
2851  *	expect jfs_sync to get us out of that jam.
2852  */
2853 void txQuiesce(struct super_block *sb)
2854 {
2855 	struct inode *ip;
2856 	struct jfs_inode_info *jfs_ip;
2857 	struct jfs_log *log = JFS_SBI(sb)->log;
2858 	tid_t tid;
2859 
2860 	set_bit(log_QUIESCE, &log->flag);
2861 
2862 	TXN_LOCK();
2863 restart:
2864 	while (!list_empty(&TxAnchor.anon_list)) {
2865 		jfs_ip = list_entry(TxAnchor.anon_list.next,
2866 				    struct jfs_inode_info,
2867 				    anon_inode_list);
2868 		ip = &jfs_ip->vfs_inode;
2869 
2870 		/*
2871 		 * inode will be removed from anonymous list
2872 		 * when it is committed
2873 		 */
2874 		TXN_UNLOCK();
2875 		tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
2876 		down(&jfs_ip->commit_sem);
2877 		txCommit(tid, 1, &ip, 0);
2878 		txEnd(tid);
2879 		up(&jfs_ip->commit_sem);
2880 		/*
2881 		 * Just to be safe.  I don't know how
2882 		 * long we can run without blocking
2883 		 */
2884 		cond_resched();
2885 		TXN_LOCK();
2886 	}
2887 
2888 	/*
2889 	 * If jfs_sync is running in parallel, there could be some inodes
2890 	 * on anon_list2.  Let's check.
2891 	 */
2892 	if (!list_empty(&TxAnchor.anon_list2)) {
2893 		list_splice(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2894 		INIT_LIST_HEAD(&TxAnchor.anon_list2);
2895 		goto restart;
2896 	}
2897 	TXN_UNLOCK();
2898 
2899 	/*
2900 	 * We may need to kick off the group commit
2901 	 */
2902 	jfs_flush_journal(log, 0);
2903 }
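
/*
 * A hypothetical caller, to show the intended pairing (sketch only;
 * do_offline_work() is a made-up stand-in for e.g. a resize or freeze):
 */
#if 0
static int with_quiesced_fs(struct super_block *sb)
{
	int rc;

	txQuiesce(sb);		/* block new txns, drain anonymous ones */
	rc = do_offline_work(sb);	/* hypothetical */
	txResume(sb);		/* allow transactions to start again */
	return rc;
}
#endif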
2904 
2905 /*
2906  * txResume()
2907  *
2908  * Allows transactions to start again following txQuiesce
2909  */
2910 void txResume(struct super_block *sb)
2911 {
2912 	struct jfs_log *log = JFS_SBI(sb)->log;
2913 
2914 	clear_bit(log_QUIESCE, &log->flag);
2915 	TXN_WAKEUP(&log->syncwait);
2916 }
2917 
2918 /*
2919  *      jfs_sync(void)
2920  *
2921  *	To be run as a kernel daemon.  This is awakened when tlocks run low.
2922  *	We write any inodes that have anonymous tlocks so they will become
2923  *	available.
2924  */
2925 int jfs_sync(void *arg)
2926 {
2927 	struct inode *ip;
2928 	struct jfs_inode_info *jfs_ip;
2929 	int rc;
2930 	tid_t tid;
2931 
2932 	daemonize("jfsSync");
2933 
2934 	complete(&jfsIOwait);
2935 
2936 	do {
2937 		/*
2938 		 * write each inode on the anonymous inode list
2939 		 */
2940 		TXN_LOCK();
2941 		while (jfs_tlocks_low && !list_empty(&TxAnchor.anon_list)) {
2942 			jfs_ip = list_entry(TxAnchor.anon_list.next,
2943 					    struct jfs_inode_info,
2944 					    anon_inode_list);
2945 			ip = &jfs_ip->vfs_inode;
2946 
2947 			if (! igrab(ip)) {
2948 				/*
2949 				 * Inode is being freed
2950 				 */
2951 				list_del_init(&jfs_ip->anon_inode_list);
2952 			} else if (! down_trylock(&jfs_ip->commit_sem)) {
2953 				/*
2954 				 * inode will be removed from anonymous list
2955 				 * when it is committed
2956 				 */
2957 				TXN_UNLOCK();
2958 				tid = txBegin(ip->i_sb, COMMIT_INODE);
2959 				rc = txCommit(tid, 1, &ip, 0);
2960 				txEnd(tid);
2961 				up(&jfs_ip->commit_sem);
2962 
2963 				iput(ip);
2964 				/*
2965 				 * Just to be safe.  I don't know how
2966 				 * long we can run without blocking
2967 				 */
2968 				cond_resched();
2969 				TXN_LOCK();
2970 			} else {
2971 				/* We can't get the commit semaphore.  It may
2972 			 * be held by a thread waiting for tlocks
2973 				 * so let's not block here.  Save it to
2974 				 * put back on the anon_list.
2975 				 */
2976 
2977 				/* Take off anon_list */
2978 				list_del(&jfs_ip->anon_inode_list);
2979 
2980 				/* Put on anon_list2 */
2981 				list_add(&jfs_ip->anon_inode_list,
2982 					 &TxAnchor.anon_list2);
2983 
2984 				TXN_UNLOCK();
2985 				iput(ip);
2986 				TXN_LOCK();
2987 			}
2988 		}
2989 		/* Add anon_list2 back to anon_list */
2990 		list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2991 
2992 		if (freezing(current)) {
2993 			TXN_UNLOCK();
2994 			refrigerator();
2995 		} else {
2996 			DECLARE_WAITQUEUE(wq, current);
2997 
2998 			add_wait_queue(&jfs_sync_thread_wait, &wq);
2999 			set_current_state(TASK_INTERRUPTIBLE);
3000 			TXN_UNLOCK();
3001 			schedule();
3002 			current->state = TASK_RUNNING;
3003 			remove_wait_queue(&jfs_sync_thread_wait, &wq);
3004 		}
3005 	} while (!jfs_stop_threads);
3006 
3007 	jfs_info("jfs_sync being killed");
3008 	complete_and_exit(&jfsIOwait, 0);
3009 }
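
/*
 * The anon_list/anon_list2 dance above is "park what you cannot
 * process, splice it back when the scan ends", which guarantees the
 * scan terminates.  A standalone sketch of the same shape:
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	int list[5] = { 1, 2, 3, 4, 5 }, n = 5;	/* anon_list */
	int side[5], s = 0;			/* anon_list2 */

	while (n) {
		int x = list[0];	/* take the head */

		memmove(list, list + 1, --n * sizeof(int));
		if (x % 2)
			printf("committed %d\n", x);
		else
			side[s++] = x;	/* could not lock; park it */
	}
	memcpy(list, side, s * sizeof(int));	/* list_splice_init */
	n = s;
	printf("%d parked\n", n);	/* prints: 2 parked */
	return 0;
}
#endif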
3010 
3011 #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
3012 int jfs_txanchor_read(char *buffer, char **start, off_t offset, int length,
3013 		      int *eof, void *data)
3014 {
3015 	int len = 0;
3016 	off_t begin;
3017 	char *freewait;
3018 	char *freelockwait;
3019 	char *lowlockwait;
3020 
3021 	freewait =
3022 	    waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
3023 	freelockwait =
3024 	    waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
3025 	lowlockwait =
3026 	    waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";
3027 
3028 	len += sprintf(buffer,
3029 		       "JFS TxAnchor\n"
3030 		       "============\n"
3031 		       "freetid = %d\n"
3032 		       "freewait = %s\n"
3033 		       "freelock = %d\n"
3034 		       "freelockwait = %s\n"
3035 		       "lowlockwait = %s\n"
3036 		       "tlocksInUse = %d\n"
3037 		       "jfs_tlocks_low = %d\n"
3038 		       "unlock_queue is %sempty\n",
3039 		       TxAnchor.freetid,
3040 		       freewait,
3041 		       TxAnchor.freelock,
3042 		       freelockwait,
3043 		       lowlockwait,
3044 		       TxAnchor.tlocksInUse,
3045 		       jfs_tlocks_low,
3046 		       list_empty(&TxAnchor.unlock_queue) ? "" : "not ");
3047 
3048 	begin = offset;
3049 	*start = buffer + begin;
3050 	len -= begin;
3051 
3052 	if (len > length)
3053 		len = length;
3054 	else
3055 		*eof = 1;
3056 
3057 	if (len < 0)
3058 		len = 0;
3059 
3060 	return len;
3061 }
3062 #endif
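
/*
 * The tail of both proc handlers implements the classic one-shot
 * read_proc windowing: hand back at most 'length' bytes starting at
 * 'offset' and raise *eof once the buffer is exhausted.  The arithmetic
 * in isolation (standalone sketch):
 */
#if 0
#include <stdio.h>

static int window(int len, int offset, int length, int *eof)
{
	len -= offset;		/* *start = buffer + offset */
	if (len > length)
		len = length;
	else
		*eof = 1;
	if (len < 0)
		len = 0;
	return len;
}

int main(void)
{
	int eof = 0, n;

	n = window(100, 0, 60, &eof);
	printf("%d eof=%d\n", n, eof);	/* 60 eof=0 */
	n = window(100, 60, 60, &eof);
	printf("%d eof=%d\n", n, eof);	/* 40 eof=1 */
	return 0;
}
#endif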
3063 
3064 #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
3065 int jfs_txstats_read(char *buffer, char **start, off_t offset, int length,
3066 		     int *eof, void *data)
3067 {
3068 	int len = 0;
3069 	off_t begin;
3070 
3071 	len += sprintf(buffer,
3072 		       "JFS TxStats\n"
3073 		       "===========\n"
3074 		       "calls to txBegin = %d\n"
3075 		       "txBegin blocked by sync barrier = %d\n"
3076 		       "txBegin blocked by tlocks low = %d\n"
3077 		       "txBegin blocked by no free tid = %d\n"
3078 		       "calls to txBeginAnon = %d\n"
3079 		       "txBeginAnon blocked by sync barrier = %d\n"
3080 		       "txBeginAnon blocked by tlocks low = %d\n"
3081 		       "calls to txLockAlloc = %d\n"
3082 		       "txLockAlloc blocked by no free lock = %d\n",
3083 		       TxStat.txBegin,
3084 		       TxStat.txBegin_barrier,
3085 		       TxStat.txBegin_lockslow,
3086 		       TxStat.txBegin_freetid,
3087 		       TxStat.txBeginAnon,
3088 		       TxStat.txBeginAnon_barrier,
3089 		       TxStat.txBeginAnon_lockslow,
3090 		       TxStat.txLockAlloc,
3091 		       TxStat.txLockAlloc_freelock);
3092 
3093 	begin = offset;
3094 	*start = buffer + begin;
3095 	len -= begin;
3096 
3097 	if (len > length)
3098 		len = length;
3099 	else
3100 		*eof = 1;
3101 
3102 	if (len < 0)
3103 		len = 0;
3104 
3105 	return len;
3106 }
3107 #endif
3108