Lines Matching +full:sync +full:- +full:write

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) International Business Machines Corp., 2000-2004
4 * Portions Copyright (C) Christoph Hellwig, 2001-2002
24 * transactions which wrote COMMIT records in the same in-memory
33 * . a per-log lock serializes log writes.
38 * careful-write (ping-pong) of last logpage to recover from crash
40 * detection of split (out-of-order) write of physical sectors
45 * lsn - 64-bit monotonically increasing integer vs
46 * 32-bit lspn and page eor.
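
The design notes above distinguish the logical lsn from the on-disk lspn and page eor. In the lmWriteRecord() lines further down (source line 489), the lsn handed back to callers is simply the current log page number shifted by L2LOGPSIZE plus the end-of-record offset within that page. A minimal standalone sketch of that packing, assuming the usual 4 KiB log page and plain ints rather than the kernel's own types:

#include <stdio.h>

#define L2LOGPSIZE 12                    /* log2 of the 4 KiB log page size (assumed) */
#define LOGPSIZE   (1 << L2LOGPSIZE)

/* compose an lsn from a log page number and a byte offset within it */
static int lsn_make(int page, int eor)
{
        return (page << L2LOGPSIZE) + eor;
}

/* split an lsn back into its page number and in-page offset */
static void lsn_split(int lsn, int *page, int *eor)
{
        *page = lsn >> L2LOGPSIZE;
        *eor  = lsn & (LOGPSIZE - 1);
}

int main(void)
{
        int page, eor;
        int lsn = lsn_make(5, 0x1c0);    /* hypothetical page 5, offset 0x1c0 */

        lsn_split(lsn, &page, &eor);
        printf("lsn 0x%x -> page %d, eor 0x%x\n", lsn, page, eor);
        return 0;
}
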
78 * log read/write serialization (per log)
80 #define LOG_LOCK_INIT(log) mutex_init(&(log)->loglock)
81 #define LOG_LOCK(log) mutex_lock(&((log)->loglock))
82 #define LOG_UNLOCK(log) mutex_unlock(&((log)->loglock))
89 #define LOGGC_LOCK_INIT(log) spin_lock_init(&(log)->gclock)
90 #define LOGGC_LOCK(log) spin_lock_irq(&(log)->gclock)
91 #define LOGGC_UNLOCK(log) spin_unlock_irq(&(log)->gclock)
92 #define LOGGC_WAKEUP(tblk) wake_up_all(&(tblk)->gcwait)
95 * log sync serialization (per log)
131 #define lbmWRITE 0x0002 /* enqueue at tail of write queue;
134 #define lbmRELEASE 0x0004 /* remove from write queue
140 * when removed from write queue;
148 #define lbmGC 0x0080 /* lbmIODone to perform post-GC processing
206 list_for_each_entry(sbi, &log->sb_list, log_list) { in write_special_inodes()
207 writer(sbi->ipbmap->i_mapping); in write_special_inodes()
208 writer(sbi->ipimap->i_mapping); in write_special_inodes()
209 writer(sbi->direct_inode->i_mapping); in write_special_inodes()
216 * FUNCTION: write a log record;
220 * RETURN: lsn - offset to the next log record to write (end-of-log);
221 * -1 - error;
238 /* log by (out-of-transaction) JFS ? */ in lmLog()
244 tlck->type & tlckBTROOT || (mp = tlck->mp) == NULL) in lmLog()
250 lsn = log->lsn; in lmLog()
255 * initialize page lsn if first log write of the page in lmLog()
257 if (mp->lsn == 0) { in lmLog()
258 mp->log = log; in lmLog()
259 mp->lsn = lsn; in lmLog()
260 log->count++; in lmLog()
263 list_add_tail(&mp->synclist, &log->synclist); in lmLog()
273 * B+-tree index of extent descriptors for block in lmLog()
284 if (tblk->lsn == 0) { in lmLog()
286 tblk->lsn = mp->lsn; in lmLog()
287 log->count++; in lmLog()
290 list_add(&tblk->synclist, &mp->synclist); in lmLog()
297 logdiff(diffp, mp->lsn, log); in lmLog()
298 logdiff(difft, tblk->lsn, log); in lmLog()
301 tblk->lsn = mp->lsn; in lmLog()
304 list_move(&tblk->synclist, &mp->synclist); in lmLog()
311 * write the log record in lmLog()
320 if (diffp >= log->nextsync) in lmLog()
323 /* update end-of-log lsn */ in lmLog()
324 log->lsn = lsn; in lmLog()
328 /* return end-of-log address */ in lmLog()
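
lmLog() above drives its sync decisions through logdiff(): the distance of mp->lsn and tblk->lsn from the last sync point is compared against log->nextsync, and against each other to keep tblk->lsn pointing at the oldest page the transaction dirtied. The macro itself is not among the matched lines, so the sketch below only models the wrap-around distance those comparisons assume, treating the log as a circular file of logsize bytes:

/*
 * Cyclic distance of an lsn past the sync point, wrapping over the
 * usable log size.  Simplified model of what the logdiff() calls in
 * lmLog()/lmLogSync() rely on; not the kernel macro itself.
 */
static int log_distance(int lsn, int syncpt, int logsize)
{
        int diff = lsn - syncpt;

        if (diff < 0)                   /* lsn wrapped past the end of the log */
                diff += logsize;
        return diff;
}
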
337 * PARAMETER: cd - commit descriptor
339 * RETURN: end-of-log address
347 int lsn = 0; /* end-of-log address */ in lmWriteRecord()
351 int dstoffset; /* end-of-log offset in log page */ in lmWriteRecord()
353 caddr_t p; /* src meta-data page */ in lmWriteRecord()
366 /* retrieve destination log page to write */ in lmWriteRecord()
367 bp = (struct lbuf *) log->bp; in lmWriteRecord()
368 lp = (struct logpage *) bp->l_ldata; in lmWriteRecord()
369 dstoffset = log->eor; in lmWriteRecord()
371 /* any log data to write ? */ in lmWriteRecord()
378 /* retrieve source meta-data page to log */ in lmWriteRecord()
379 if (tlck->flag & tlckPAGELOCK) { in lmWriteRecord()
380 p = (caddr_t) (tlck->mp->data); in lmWriteRecord()
381 linelock = (struct linelock *) & tlck->lock; in lmWriteRecord()
383 /* retrieve source in-memory inode to log */ in lmWriteRecord()
384 else if (tlck->flag & tlckINODELOCK) { in lmWriteRecord()
385 if (tlck->type & tlckDTREE) in lmWriteRecord()
386 p = (caddr_t) &JFS_IP(tlck->ip)->i_dtroot; in lmWriteRecord()
388 p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot; in lmWriteRecord()
389 linelock = (struct linelock *) & tlck->lock; in lmWriteRecord()
395 l2linesize = linelock->l2linesize; in lmWriteRecord()
398 ASSERT(linelock->index <= linelock->maxcnt); in lmWriteRecord()
400 lv = linelock->lv; in lmWriteRecord()
401 for (i = 0; i < linelock->index; i++, lv++) { in lmWriteRecord()
402 if (lv->length == 0) in lmWriteRecord()
406 if (dstoffset >= LOGPSIZE - LOGPTLRSIZE) { in lmWriteRecord()
410 bp = log->bp; in lmWriteRecord()
411 lp = (struct logpage *) bp->l_ldata; in lmWriteRecord()
418 src = (u8 *) p + (lv->offset << l2linesize); in lmWriteRecord()
419 srclen = lv->length << l2linesize; in lmWriteRecord()
422 freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset; in lmWriteRecord()
429 if (dstoffset < LOGPSIZE - LOGPTLRSIZE) in lmWriteRecord()
435 bp = (struct lbuf *) log->bp; in lmWriteRecord()
436 lp = (struct logpage *) bp->l_ldata; in lmWriteRecord()
439 srclen -= nbytes; in lmWriteRecord()
448 lvd->offset = cpu_to_le16(lv->offset); in lmWriteRecord()
449 lvd->length = cpu_to_le16(lv->length); in lmWriteRecord()
452 lv->offset, lv->length); in lmWriteRecord()
455 if ((i = linelock->next)) { in lmWriteRecord()
464 lrd->length = cpu_to_le16(len); in lmWriteRecord()
470 freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset; in lmWriteRecord()
476 srclen -= nbytes; in lmWriteRecord()
487 log->eor = dstoffset; in lmWriteRecord()
488 bp->l_eor = dstoffset; in lmWriteRecord()
489 lsn = (log->page << L2LOGPSIZE) + dstoffset; in lmWriteRecord()
491 if (lrd->type & cpu_to_le16(LOG_COMMIT)) { in lmWriteRecord()
492 tblk->clsn = lsn; in lmWriteRecord()
493 jfs_info("wr: tclsn:0x%x, beor:0x%x", tblk->clsn, in lmWriteRecord()
494 bp->l_eor); in lmWriteRecord()
501 * enqueue tblock of non-trivial/synchronous COMMIT in lmWriteRecord()
509 tblk->flag = tblkGC_QUEUE; in lmWriteRecord()
510 tblk->bp = log->bp; in lmWriteRecord()
511 tblk->pn = log->page; in lmWriteRecord()
512 tblk->eor = log->eor; in lmWriteRecord()
515 list_add_tail(&tblk->cqueue, &log->cqueue); in lmWriteRecord()
521 le16_to_cpu(lrd->type), log->bp, log->page, dstoffset); in lmWriteRecord()
524 if (dstoffset < LOGPSIZE - LOGPTLRSIZE) in lmWriteRecord()
531 bp = (struct lbuf *) log->bp; in lmWriteRecord()
532 lp = (struct logpage *) bp->l_ldata; in lmWriteRecord()
544 * FUNCTION: write current page and allocate next page.
562 pn = log->page; in lmNextPage()
563 bp = log->bp; in lmNextPage()
564 lp = (struct logpage *) bp->l_ldata; in lmNextPage()
565 lspn = le32_to_cpu(lp->h.page); in lmNextPage()
570 * write or queue the full page at the tail of write queue in lmNextPage()
573 if (list_empty(&log->cqueue)) in lmNextPage()
576 tblk = list_entry(log->cqueue.prev, struct tblock, cqueue); in lmNextPage()
588 if (tblk && tblk->pn == pn) { in lmNextPage()
589 /* mark tblk for end-of-page */ in lmNextPage()
590 tblk->flag |= tblkGC_EOP; in lmNextPage()
592 if (log->cflag & logGC_PAGEOUT) { in lmNextPage()
593 /* if page is not already on write queue, in lmNextPage()
599 if (bp->l_wqnext == NULL) in lmNextPage()
605 log->cflag |= logGC_PAGEOUT; in lmNextPage()
610 * init write or mark it to be redriven (lbmWRITE) in lmNextPage()
614 bp->l_ceor = bp->l_eor; in lmNextPage()
615 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); in lmNextPage()
626 log->page = (pn == log->size - 1) ? 2 : pn + 1; in lmNextPage()
627 log->eor = LOGPHDRSIZE; /* ? valid page empty/full at logRedo() */ in lmNextPage()
630 nextbp = lbmAllocate(log, log->page); in lmNextPage()
631 nextbp->l_eor = log->eor; in lmNextPage()
632 log->bp = nextbp; in lmNextPage()
635 lp = (struct logpage *) nextbp->l_ldata; in lmNextPage()
636 lp->h.page = lp->t.page = cpu_to_le32(lspn + 1); in lmNextPage()
637 lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE); in lmNextPage()
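
The ternary at source line 626 is the whole wrap rule for the data area: page 0 is reserved and page 1 holds the log superblock (see the lmLogFormat() layout at the end of this listing), so the page after the last one is page 2 again; the freshly allocated page then starts with eor = LOGPHDRSIZE and carries lspn + 1 in both header and trailer (lines 627, 636-637). A standalone restatement of the wrap, with simplified parameters:

/*
 * Next physical log page after pn in a log of `size` pages; pages 0
 * (reserved) and 1 (log superblock) are never data pages, so the data
 * area wraps from the last page straight back to page 2.
 */
static int next_log_page(int pn, int size)
{
        return (pn == size - 1) ? 2 : pn + 1;
}
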
648 * page number - redrive pageout of the page at the head of
665 if (tblk->flag & tblkGC_COMMITTED) { in lmGroupCommit()
666 if (tblk->flag & tblkGC_ERROR) in lmGroupCommit()
667 rc = -EIO; in lmGroupCommit()
672 jfs_info("lmGroup Commit: tblk = 0x%p, gcrtc = %d", tblk, log->gcrtc); in lmGroupCommit()
674 if (tblk->xflag & COMMIT_LAZY) in lmGroupCommit()
675 tblk->flag |= tblkGC_LAZY; in lmGroupCommit()
677 if ((!(log->cflag & logGC_PAGEOUT)) && (!list_empty(&log->cqueue)) && in lmGroupCommit()
678 (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag) in lmGroupCommit()
685 log->cflag |= logGC_PAGEOUT; in lmGroupCommit()
690 if (tblk->xflag & COMMIT_LAZY) { in lmGroupCommit()
700 if (tblk->flag & tblkGC_COMMITTED) { in lmGroupCommit()
701 if (tblk->flag & tblkGC_ERROR) in lmGroupCommit()
702 rc = -EIO; in lmGroupCommit()
710 log->gcrtc++; in lmGroupCommit()
711 tblk->flag |= tblkGC_READY; in lmGroupCommit()
713 __SLEEP_COND(tblk->gcwait, (tblk->flag & tblkGC_COMMITTED), in lmGroupCommit()
717 if (tblk->flag & tblkGC_ERROR) in lmGroupCommit()
718 rc = -EIO; in lmGroupCommit()
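
In the lmGroupCommit() lines above, a committer only becomes the group-commit leader (sets logGC_PAGEOUT and starts pageout) when no pageout is already in flight, the commit queue is non-empty, and the commit is either synchronous or a flush has been requested; the real condition at source lines 677-678 continues past the matched lines, so the sketch below models only the clauses shown. Everyone else queues, bumps log->gcrtc, and sleeps on tblk->gcwait until tblkGC_COMMITTED is set.

/*
 * Simplified stand-in for the leader test made under LOGGC_LOCK in
 * lmGroupCommit(); field names mirror the flags in the listing, and the
 * struct is an illustration, not the kernel's jfs_log/tblock layout.
 */
struct gc_decision {
        int pageout_in_progress;        /* log->cflag & logGC_PAGEOUT */
        int cqueue_empty;               /* list_empty(&log->cqueue) */
        int commit_is_lazy;             /* tblk->xflag & COMMIT_LAZY */
        int flush_requested;            /* test_bit(log_FLUSH, &log->flag) */
};

static int should_drive_pageout(const struct gc_decision *d)
{
        return !d->pageout_in_progress &&
               !d->cqueue_empty &&
               (!d->commit_is_lazy || d->flush_requested);
}
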
727 * FUNCTION: group commit write
728 * initiate write of log page, building a group of all transactions
752 gcpn = list_entry(log->cqueue.next, struct tblock, cqueue)->pn; in lmGCwrite()
754 list_for_each_entry(tblk, &log->cqueue, cqueue) { in lmGCwrite()
755 if (tblk->pn != gcpn) in lmGCwrite()
760 /* state transition: (QUEUE, READY) -> COMMIT */ in lmGCwrite()
761 tblk->flag |= tblkGC_COMMIT; in lmGCwrite()
768 bp = (struct lbuf *) tblk->bp; in lmGCwrite()
769 lp = (struct logpage *) bp->l_ldata; in lmGCwrite()
771 if (tblk->flag & tblkGC_EOP) { in lmGCwrite()
773 tblk->flag &= ~tblkGC_EOP; in lmGCwrite()
774 tblk->flag |= tblkGC_FREE; in lmGCwrite()
775 bp->l_ceor = bp->l_eor; in lmGCwrite()
776 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); in lmGCwrite()
783 bp->l_ceor = tblk->eor; /* ? bp->l_ceor = bp->l_eor; */ in lmGCwrite()
784 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); in lmGCwrite()
793 * FUNCTION: group commit post-processing
805 struct jfs_log *log = bp->l_log; in lmPostGC()
810 spin_lock_irqsave(&log->gclock, flags); in lmPostGC()
817 list_for_each_entry_safe(tblk, temp, &log->cqueue, cqueue) { in lmPostGC()
818 if (!(tblk->flag & tblkGC_COMMIT)) in lmPostGC()
822 * and made it to disk - it is committed. in lmPostGC()
825 if (bp->l_flag & lbmERROR) in lmPostGC()
826 tblk->flag |= tblkGC_ERROR; in lmPostGC()
829 list_del(&tblk->cqueue); in lmPostGC()
830 tblk->flag &= ~tblkGC_QUEUE; in lmPostGC()
832 if (tblk == log->flush_tblk) { in lmPostGC()
834 clear_bit(log_FLUSH, &log->flag); in lmPostGC()
835 log->flush_tblk = NULL; in lmPostGC()
839 tblk->flag); in lmPostGC()
841 if (!(tblk->xflag & COMMIT_FORCE)) in lmPostGC()
847 /* state transition: COMMIT -> COMMITTED */ in lmPostGC()
848 tblk->flag |= tblkGC_COMMITTED; in lmPostGC()
850 if (tblk->flag & tblkGC_READY) in lmPostGC()
851 log->gcrtc--; in lmPostGC()
859 if (tblk->flag & tblkGC_FREE) in lmPostGC()
864 else if (tblk->flag & tblkGC_EOP) { in lmPostGC()
866 lp = (struct logpage *) bp->l_ldata; in lmPostGC()
867 bp->l_ceor = bp->l_eor; in lmPostGC()
868 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); in lmPostGC()
883 if ((!list_empty(&log->cqueue)) && in lmPostGC()
884 ((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) || in lmPostGC()
885 test_bit(log_FLUSH, &log->flag) || jfs_tlocks_low)) in lmPostGC()
897 log->cflag &= ~logGC_PAGEOUT; in lmPostGC()
900 spin_unlock_irqrestore(&log->gclock, flags); in lmPostGC()
907 * FUNCTION: write log SYNCPT record for specified log
908 * if new sync address is available
909 * (normally the case if sync() is executed by background
914 * PARAMETERS: log - log structure
915 * hard_sync - 1 to force all metadata to be written
926 int delta; /* additional delta to write normally */ in lmLogSync()
927 int more; /* additional write granted */ in lmLogSync()
942 /* if last sync is same as last syncpt, in lmLogSync()
943 * invoke sync point forward processing to update sync. in lmLogSync()
946 if (log->sync == log->syncpt) { in lmLogSync()
948 if (list_empty(&log->synclist)) in lmLogSync()
949 log->sync = log->lsn; in lmLogSync()
951 lp = list_entry(log->synclist.next, in lmLogSync()
953 log->sync = lp->lsn; in lmLogSync()
959 /* if sync is different from last syncpt, in lmLogSync()
960 * write a SYNCPT record with syncpt = sync. in lmLogSync()
961 * reset syncpt = sync in lmLogSync()
963 if (log->sync != log->syncpt) { in lmLogSync()
968 lrd.log.syncpt.sync = cpu_to_le32(log->sync); in lmLogSync()
971 log->syncpt = log->sync; in lmLogSync()
973 lsn = log->lsn; in lmLogSync()
978 logsize = log->logsize; in lmLogSync()
981 free = logsize - written; in lmLogSync()
989 * option 1 - panic ? No! in lmLogSync()
990 * option 2 - shutdown file systems in lmLogSync()
992 * option 3 - extend log ? in lmLogSync()
993 * option 4 - second chance in lmLogSync()
1002 /* log->state = LOGWRAP; */ in lmLogSync()
1004 /* reset sync point computation */ in lmLogSync()
1005 log->syncpt = log->sync = lsn; in lmLogSync()
1006 log->nextsync = delta; in lmLogSync()
1009 log->nextsync = written + more; in lmLogSync()
1011 /* if number of bytes written from last sync point is more in lmLogSync()
1016 if (!test_bit(log_SYNCBARRIER, &log->flag) && in lmLogSync()
1017 (written > LOGSYNC_BARRIER(logsize)) && log->active) { in lmLogSync()
1018 set_bit(log_SYNCBARRIER, &log->flag); in lmLogSync()
1020 log->syncpt); in lmLogSync()
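
The tail of lmLogSync() above is bookkeeping for the circular log: written is the wrap-around distance from the last sync point to the current lsn, free is what remains before the log wraps onto itself, and log->nextsync is pushed out by an additional grant (the exact formula for `more` falls outside the matched lines). If written crosses LOGSYNC_BARRIER() while file systems are still active, log_SYNCBARRIER is set to hold off new activity. A self-contained sketch of that accounting, with the delta fraction as an assumption only:

/*
 * Space accounting sketched from the lmLogSync() lines above.  The real
 * LOGSYNC_DELTA()/LOGSYNC_BARRIER() macros live in jfs_logmgr.h; the
 * fraction below is an illustrative placeholder, not their definition.
 */
static void plan_next_sync(int lsn, int syncpt, int logsize,
                           int *written, int *free_bytes, int *nextsync)
{
        int delta = logsize / 8;                /* assumed sync delta */

        *written = lsn - syncpt;                /* bytes logged since last syncpt */
        if (*written < 0)                       /* ... with wrap-around */
                *written += logsize;

        *free_bytes = logsize - *written;       /* room left before wrapping */
        *nextsync = *written + delta;           /* trigger point for the next sync */
}
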
1033 * FUNCTION: write log SYNCPT record for specified log
1035 * PARAMETERS: log - log structure
1036 * hard_sync - set to 1 to force metadata to be written
1040 if (!test_bit(log_QUIESCE, &log->flag)) in jfs_syncpt()
1051 * PARAMETER: ipmnt - file system mount inode
1052 * iplog - log inode (out)
1065 if (sbi->flag & JFS_NOINTEGRITY) in lmLogOpen()
1068 if (sbi->mntflag & JFS_INLINELOG) in lmLogOpen()
1073 if (file_bdev(log->bdev_file)->bd_dev == sbi->logdev) { in lmLogOpen()
1074 if (!uuid_equal(&log->uuid, &sbi->loguuid)) { in lmLogOpen()
1077 return -EINVAL; in lmLogOpen()
1092 return -ENOMEM; in lmLogOpen()
1094 INIT_LIST_HEAD(&log->sb_list); in lmLogOpen()
1095 init_waitqueue_head(&log->syncwait); in lmLogOpen()
1100 * file systems to log may have n-to-1 relationship; in lmLogOpen()
1103 bdev_file = bdev_file_open_by_dev(sbi->logdev, in lmLogOpen()
1110 log->bdev_file = bdev_file; in lmLogOpen()
1111 uuid_copy(&log->uuid, &sbi->loguuid); in lmLogOpen()
1119 list_add(&log->journal_list, &jfs_external_logs); in lmLogOpen()
1129 list_add(&sbi->log_list, &log->sb_list); in lmLogOpen()
1130 sbi->log = log; in lmLogOpen()
1140 list_del(&log->journal_list); in lmLogOpen()
1160 return -ENOMEM; in open_inline_log()
1161 INIT_LIST_HEAD(&log->sb_list); in open_inline_log()
1162 init_waitqueue_head(&log->syncwait); in open_inline_log()
1164 set_bit(log_INLINELOG, &log->flag); in open_inline_log()
1165 log->bdev_file = sb->s_bdev_file; in open_inline_log()
1166 log->base = addressPXD(&JFS_SBI(sb)->logpxd); in open_inline_log()
1167 log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >> in open_inline_log()
1168 (L2LOGPSIZE - sb->s_blocksize_bits); in open_inline_log()
1169 log->l2bsize = sb->s_blocksize_bits; in open_inline_log()
1170 ASSERT(L2LOGPSIZE >= sb->s_blocksize_bits); in open_inline_log()
1181 list_add(&JFS_SBI(sb)->log_list, &log->sb_list); in open_inline_log()
1182 JFS_SBI(sb)->log = log; in open_inline_log()
1196 return -ENOMEM; in open_dummy_log()
1198 INIT_LIST_HEAD(&dummy_log->sb_list); in open_dummy_log()
1199 init_waitqueue_head(&dummy_log->syncwait); in open_dummy_log()
1200 dummy_log->no_integrity = 1; in open_dummy_log()
1202 dummy_log->size = 1024; in open_dummy_log()
1213 list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list); in open_dummy_log()
1214 JFS_SBI(sb)->log = dummy_log; in open_dummy_log()
1229 * write SYNCPT log record.
1231 * PARAMETER: log - log structure
1233 * RETURN: 0 - if ok
1234 * -EINVAL - bad log magic number or superblock dirty
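
lmLogInit() below rejects a log whose superblock has the wrong magic, or whose state is not LOGREDONE (recovery never completed), and for an external log also checks that size and uuid match before adopting logsuper->size, l2bsize and end. A trimmed sketch of the first two checks, with a stand-in struct instead of the real struct logsuper and the on-disk endianness conversions left out:

#include <errno.h>

/* stand-in for the handful of struct logsuper fields the checks touch */
struct logsuper_lite {
        unsigned int magic;             /* expected: LOGMAGIC */
        unsigned int state;             /* expected: LOGREDONE when clean */
        unsigned int size;              /* log size, in log pages */
        unsigned int end;               /* byte address of the log tail */
};

static int check_log_super(const struct logsuper_lite *ls,
                           unsigned int logmagic, unsigned int logredone)
{
        if (ls->magic != logmagic)      /* not a JFS log at all */
                return -EINVAL;
        if (ls->state != logredone)     /* dirty log: run log recovery first */
                return -EINVAL;
        return 0;
}
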
1254 /* allocate/initialize the log write serialization lock */ in lmLogInit()
1259 INIT_LIST_HEAD(&log->synclist); in lmLogInit()
1261 INIT_LIST_HEAD(&log->cqueue); in lmLogInit()
1262 log->flush_tblk = NULL; in lmLogInit()
1264 log->count = 0; in lmLogInit()
1272 if (!test_bit(log_INLINELOG, &log->flag)) in lmLogInit()
1273 log->l2bsize = L2LOGPSIZE; in lmLogInit()
1276 if (log->no_integrity) { in lmLogInit()
1279 * to actually do the I/O, the write is not done, and the in lmLogInit()
1283 log->bp = bp; in lmLogInit()
1284 bp->l_pn = bp->l_eor = 0; in lmLogInit()
1292 logsuper = (struct logsuper *) bpsuper->l_ldata; in lmLogInit()
1294 if (logsuper->magic != cpu_to_le32(LOGMAGIC)) { in lmLogInit()
1296 rc = -EINVAL; in lmLogInit()
1301 if (logsuper->state != cpu_to_le32(LOGREDONE)) { in lmLogInit()
1303 rc = -EINVAL; in lmLogInit()
1308 if (test_bit(log_INLINELOG,&log->flag)) { in lmLogInit()
1309 if (log->size != le32_to_cpu(logsuper->size)) { in lmLogInit()
1310 rc = -EINVAL; in lmLogInit()
1314 log, (unsigned long long)log->base, log->size); in lmLogInit()
1316 if (!uuid_equal(&logsuper->uuid, &log->uuid)) { in lmLogInit()
1318 rc = -EINVAL; in lmLogInit()
1321 log->size = le32_to_cpu(logsuper->size); in lmLogInit()
1322 log->l2bsize = le32_to_cpu(logsuper->l2bsize); in lmLogInit()
1324 log, (unsigned long long)log->base, log->size); in lmLogInit()
1327 log->page = le32_to_cpu(logsuper->end) / LOGPSIZE; in lmLogInit()
1328 log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page); in lmLogInit()
1331 * initialize for log append write mode in lmLogInit()
1333 /* establish current/end-of-log page/buffer */ in lmLogInit()
1334 if ((rc = lbmRead(log, log->page, &bp))) in lmLogInit()
1337 lp = (struct logpage *) bp->l_ldata; in lmLogInit()
1340 le32_to_cpu(logsuper->end), log->page, log->eor, in lmLogInit()
1341 le16_to_cpu(lp->h.eor)); in lmLogInit()
1343 log->bp = bp; in lmLogInit()
1344 bp->l_pn = log->page; in lmLogInit()
1345 bp->l_eor = log->eor; in lmLogInit()
1348 if (log->eor >= LOGPSIZE - LOGPTLRSIZE) in lmLogInit()
1355 * write the first SYNCPT record with syncpoint = 0 in lmLogInit()
1357 * remove current page from lbm write queue at end of pageout in lmLogInit()
1358 * (to write log superblock update), but do not release to in lmLogInit()
1365 lrd.log.syncpt.sync = 0; in lmLogInit()
1367 bp = log->bp; in lmLogInit()
1368 bp->l_ceor = bp->l_eor; in lmLogInit()
1369 lp = (struct logpage *) bp->l_ldata; in lmLogInit()
1370 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); in lmLogInit()
1376 * update/write superblock in lmLogInit()
1378 logsuper->state = cpu_to_le32(LOGMOUNT); in lmLogInit()
1379 log->serial = le32_to_cpu(logsuper->serial) + 1; in lmLogInit()
1380 logsuper->serial = cpu_to_le32(log->serial); in lmLogInit()
1387 log->logsize = (log->size - 2) << L2LOGPSIZE; in lmLogInit()
1388 log->lsn = lsn; in lmLogInit()
1389 log->syncpt = lsn; in lmLogInit()
1390 log->sync = log->syncpt; in lmLogInit()
1391 log->nextsync = LOGSYNC_DELTA(log->logsize); in lmLogInit()
1393 jfs_info("lmLogInit: lsn:0x%x syncpt:0x%x sync:0x%x", in lmLogInit()
1394 log->lsn, log->syncpt, log->sync); in lmLogInit()
1399 log->clsn = lsn; in lmLogInit()
1407 log->wqueue = NULL; in lmLogInit()
1408 bp->l_wqnext = NULL; in lmLogInit()
1428 * PARAMETER: sb - superblock
1437 struct jfs_log *log = sbi->log; in lmLogClose()
1445 list_del(&sbi->log_list); in lmLogClose()
1447 sbi->log = NULL; in lmLogClose()
1453 sync_blockdev(sb->s_bdev); in lmLogClose()
1455 if (test_bit(log_INLINELOG, &log->flag)) { in lmLogClose()
1457 * in-line log in host file system in lmLogClose()
1464 if (!log->no_integrity) in lmLogClose()
1467 if (!list_empty(&log->sb_list)) in lmLogClose()
1474 * buffers in memory, and reuse if another no-integrity mount in lmLogClose()
1477 if (log->no_integrity) in lmLogClose()
1483 list_del(&log->journal_list); in lmLogClose()
1484 bdev_file = log->bdev_file; in lmLogClose()
1501 * FUNCTION: initiate write of any outstanding transactions to the journal
1513 /* jfs_write_inode may call us during read-only mount */ in jfs_flush_journal()
1521 if (!list_empty(&log->cqueue)) { in jfs_flush_journal()
1526 target = list_entry(log->cqueue.prev, struct tblock, cqueue); in jfs_flush_journal()
1528 if (test_bit(log_FLUSH, &log->flag)) { in jfs_flush_journal()
1535 if (log->flush_tblk) in jfs_flush_journal()
1536 log->flush_tblk = target; in jfs_flush_journal()
1539 log->flush_tblk = target; in jfs_flush_journal()
1540 set_bit(log_FLUSH, &log->flag); in jfs_flush_journal()
1545 if (!(log->cflag & logGC_PAGEOUT)) { in jfs_flush_journal()
1546 log->cflag |= logGC_PAGEOUT; in jfs_flush_journal()
1551 if ((wait > 1) || test_bit(log_SYNCBARRIER, &log->flag)) { in jfs_flush_journal()
1553 set_bit(log_FLUSH, &log->flag); in jfs_flush_journal()
1554 log->flush_tblk = NULL; in jfs_flush_journal()
1557 if (wait && target && !(target->flag & tblkGC_COMMITTED)) { in jfs_flush_journal()
1560 add_wait_queue(&target->gcwait, &__wait); in jfs_flush_journal()
1565 remove_wait_queue(&target->gcwait, &__wait); in jfs_flush_journal()
1578 if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) { in jfs_flush_journal()
1582 if (list_empty(&log->cqueue) && in jfs_flush_journal()
1583 list_empty(&log->synclist)) in jfs_flush_journal()
1587 assert(list_empty(&log->cqueue)); in jfs_flush_journal()
1590 if (!list_empty(&log->synclist)) { in jfs_flush_journal()
1594 list_for_each_entry(lp, &log->synclist, synclist) { in jfs_flush_journal()
1595 if (lp->xflag & COMMIT_PAGE) { in jfs_flush_journal()
1602 sizeof(long), mp->folio, in jfs_flush_journal()
1611 WARN_ON(!list_empty(&log->synclist)); in jfs_flush_journal()
1613 clear_bit(log_FLUSH, &log->flag); in jfs_flush_journal()
1621 * write log syncpt record.
1624 * PARAMETER: log - log inode
1626 * RETURN: 0 - success
1645 * write the last SYNCPT record with syncpoint = 0 in lmLogShutdown()
1652 lrd.log.syncpt.sync = 0; in lmLogShutdown()
1655 bp = log->bp; in lmLogShutdown()
1656 lp = (struct logpage *) bp->l_ldata; in lmLogShutdown()
1657 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); in lmLogShutdown()
1658 lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0); in lmLogShutdown()
1659 lbmIOWait(log->bp, lbmFREE); in lmLogShutdown()
1660 log->bp = NULL; in lmLogShutdown()
1670 logsuper = (struct logsuper *) bpsuper->l_ldata; in lmLogShutdown()
1671 logsuper->state = cpu_to_le32(LOGREDONE); in lmLogShutdown()
1672 logsuper->end = cpu_to_le32(lsn); in lmLogShutdown()
1677 lsn, log->page, log->eor); in lmLogShutdown()
1698 * PARAMETER: log - pointer to log's inode. in lmLogFileSystem()
1699 * fsdev - kdev_t of filesystem.
1700 * serial - pointer to returned log serial number
1701 * activate - insert/remove device from active list.
1703 * RETURN: 0 - success
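
lmLogFileSystem() below keeps a table of the file systems currently using the log inside the log superblock: activation claims the first empty (null-uuid) slot and remembers its index, deactivation blanks the matching slot, and the updated superblock is then written synchronously, bypassing the write queue. A userspace sketch of the two scans; MAX_ACTIVE and the raw byte uuids are stand-ins for the real logsuper->active[] array of uuid_t:

#include <string.h>
#include <errno.h>

#define MAX_ACTIVE 128                  /* assumed table size */
#define UUID_LEN   16

static const unsigned char null_uuid[UUID_LEN];

/* register a file system: claim the first empty slot, return its index */
static int fs_activate(unsigned char active[][UUID_LEN], const unsigned char *uuid)
{
        int i;

        for (i = 0; i < MAX_ACTIVE; i++)
                if (!memcmp(active[i], null_uuid, UUID_LEN)) {
                        memcpy(active[i], uuid, UUID_LEN);
                        return i;       /* caller remembers its slot */
                }
        return -EMFILE;                 /* table full */
}

/* deregister a file system: blank the slot holding this uuid */
static int fs_deactivate(unsigned char active[][UUID_LEN], const unsigned char *uuid)
{
        int i;

        for (i = 0; i < MAX_ACTIVE; i++)
                if (!memcmp(active[i], uuid, UUID_LEN)) {
                        memset(active[i], 0, UUID_LEN);
                        return 0;
                }
        return -EIO;                    /* uuid was never registered */
}
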
1713 uuid_t *uuid = &sbi->uuid; in lmLogFileSystem()
1721 logsuper = (struct logsuper *) bpsuper->l_ldata; in lmLogFileSystem()
1724 if (uuid_is_null(&logsuper->active[i].uuid)) { in lmLogFileSystem()
1725 uuid_copy(&logsuper->active[i].uuid, uuid); in lmLogFileSystem()
1726 sbi->aggregate = i; in lmLogFileSystem()
1732 return -EMFILE; /* Is there a better rc? */ in lmLogFileSystem()
1736 if (uuid_equal(&logsuper->active[i].uuid, uuid)) { in lmLogFileSystem()
1737 uuid_copy(&logsuper->active[i].uuid, in lmLogFileSystem()
1744 return -EIO; in lmLogFileSystem()
1750 * synchronous write log superblock: in lmLogFileSystem()
1752 * write sidestream bypassing write queue: in lmLogFileSystem()
1768 * ------------------------
1772 * per log write queue:
1773 * log pageout occurs in serial order by fifo write queue and
1775 * a circular singly-linked list
1776 * (log->wqueue points to the tail, and buffers are linked via
1777 * bp->l_wqnext field), and
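
lbmWrite() and lbmIODone() further down manipulate this queue exactly as described: the tail pointer is log->wqueue, the head is tail->l_wqnext, insertion happens at the tail, and completed full-page buffers are unlinked from the head. A standalone model of that circular, singly-linked queue with a stand-in buffer type:

/*
 * Circular singly-linked write queue: *tailp is the tail (NULL when the
 * queue is empty) and tail->l_wqnext is the head.  Simplified model of
 * the insertion in lbmWrite() and the removal in lbmIODone().
 */
struct lbuf_lite {
        struct lbuf_lite *l_wqnext;     /* next buffer; head follows tail */
};

static void wq_insert_tail(struct lbuf_lite **tailp, struct lbuf_lite *bp)
{
        struct lbuf_lite *tail = *tailp;

        if (tail == NULL)               /* empty queue: bp is head and tail */
                bp->l_wqnext = bp;
        else {                          /* link bp in after the old tail */
                bp->l_wqnext = tail->l_wqnext;
                tail->l_wqnext = bp;
        }
        *tailp = bp;                    /* bp becomes the new tail */
}

static struct lbuf_lite *wq_remove_head(struct lbuf_lite **tailp)
{
        struct lbuf_lite *tail = *tailp, *head;

        if (tail == NULL)
                return NULL;
        head = tail->l_wqnext;
        if (head == tail)               /* single element: queue becomes empty */
                *tailp = NULL;
        else
                tail->l_wqnext = head->l_wqnext;
        head->l_wqnext = NULL;
        return head;
}

Keeping only the tail pointer makes both tail insertion and head access O(1) without needing a doubly-linked list.
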
1794 log->bp = NULL; in lbmLogInit()
1796 /* initialize log device write queue */ in lbmLogInit()
1797 log->wqueue = NULL; in lbmLogInit()
1808 init_waitqueue_head(&log->free_wait); in lbmLogInit()
1810 log->lbuf_free = NULL; in lbmLogInit()
1829 lbuf->l_offset = offset; in lbmLogInit()
1830 lbuf->l_ldata = buffer + offset; in lbmLogInit()
1831 lbuf->l_page = page; in lbmLogInit()
1832 lbuf->l_log = log; in lbmLogInit()
1833 init_waitqueue_head(&lbuf->l_ioevent); in lbmLogInit()
1835 lbuf->l_freelist = log->lbuf_free; in lbmLogInit()
1836 log->lbuf_free = lbuf; in lbmLogInit()
1845 return -ENOMEM; in lbmLogInit()
1860 lbuf = log->lbuf_free; in lbmLogShutdown()
1862 struct lbuf *next = lbuf->l_freelist; in lbmLogShutdown()
1863 __free_page(lbuf->l_page); in lbmLogShutdown()
1884 LCACHE_SLEEP_COND(log->free_wait, (bp = log->lbuf_free), flags); in lbmAllocate()
1885 log->lbuf_free = bp->l_freelist; in lbmAllocate()
1888 bp->l_flag = 0; in lbmAllocate()
1890 bp->l_wqnext = NULL; in lbmAllocate()
1891 bp->l_freelist = NULL; in lbmAllocate()
1893 bp->l_pn = pn; in lbmAllocate()
1894 bp->l_blkno = log->base + (pn << (L2LOGPSIZE - log->l2bsize)); in lbmAllocate()
1895 bp->l_ceor = 0; in lbmAllocate()
1919 struct jfs_log *log = bp->l_log; in lbmfree()
1921 assert(bp->l_wqnext == NULL); in lbmfree()
1926 bp->l_freelist = log->lbuf_free; in lbmfree()
1927 log->lbuf_free = bp; in lbmfree()
1929 wake_up(&log->free_wait); in lbmfree()
1940 * bp - log buffer
1950 bp->l_redrive_next = log_redrive_list; in lbmRedrive()
1972 bp->l_flag |= lbmREAD; in lbmRead()
1974 bio = bio_alloc(file_bdev(log->bdev_file), 1, REQ_OP_READ, GFP_NOFS); in lbmRead()
1975 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); in lbmRead()
1976 __bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); in lbmRead()
1977 BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); in lbmRead()
1979 bio->bi_end_io = lbmIODone; in lbmRead()
1980 bio->bi_private = bp; in lbmRead()
1982 if (log->no_integrity) { in lbmRead()
1983 bio->bi_iter.bi_size = 0; in lbmRead()
1989 wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD)); in lbmRead()
1999 * partial-page pageout and redriven by explicit initiation of
2000 * pageout by caller until full-page pageout is completed and
2005 * queue is released at the completion of its full-page pageout.
2016 jfs_info("lbmWrite: bp:0x%p flag:0x%x pn:0x%x", bp, flag, bp->l_pn); in lbmWrite()
2019 bp->l_blkno = in lbmWrite()
2020 log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)); in lbmWrite()
2027 bp->l_flag = flag; in lbmWrite()
2030 * insert bp at tail of write queue associated with log in lbmWrite()
2035 tail = log->wqueue; in lbmWrite()
2037 /* is buffer not already on write queue ? */ in lbmWrite()
2038 if (bp->l_wqnext == NULL) { in lbmWrite()
2041 log->wqueue = bp; in lbmWrite()
2042 bp->l_wqnext = bp; in lbmWrite()
2044 log->wqueue = bp; in lbmWrite()
2045 bp->l_wqnext = tail->l_wqnext; in lbmWrite()
2046 tail->l_wqnext = bp; in lbmWrite()
2052 /* is buffer at head of wqueue and for write ? */ in lbmWrite()
2053 if ((bp != tail->l_wqnext) || !(flag & lbmWRITE)) { in lbmWrite()
2075 * initiate pageout bypassing write queue for sidestream
2076 * (e.g., log superblock) write;
2081 bp, flag, bp->l_pn); in lbmDirectWrite()
2086 bp->l_flag = flag | lbmDIRECT; in lbmDirectWrite()
2089 bp->l_blkno = in lbmDirectWrite()
2090 log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)); in lbmDirectWrite()
2111 struct jfs_log *log = bp->l_log; in lbmStartIO()
2116 if (!log->no_integrity) in lbmStartIO()
2117 bdev = file_bdev(log->bdev_file); in lbmStartIO()
2121 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); in lbmStartIO()
2122 __bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); in lbmStartIO()
2123 BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); in lbmStartIO()
2125 bio->bi_end_io = lbmIODone; in lbmStartIO()
2126 bio->bi_private = bp; in lbmStartIO()
2129 if (log->no_integrity) { in lbmStartIO()
2130 bio->bi_iter.bi_size = 0; in lbmStartIO()
2147 jfs_info("lbmIOWait1: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag); in lbmIOWait()
2151 LCACHE_SLEEP_COND(bp->l_ioevent, (bp->l_flag & lbmDONE), flags); in lbmIOWait()
2153 rc = (bp->l_flag & lbmERROR) ? -EIO : 0; in lbmIOWait()
2160 jfs_info("lbmIOWait2: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag); in lbmIOWait()
2171 struct lbuf *bp = bio->bi_private; in lbmIODone()
2179 jfs_info("lbmIODone: bp:0x%p flag:0x%x", bp, bp->l_flag); in lbmIODone()
2183 bp->l_flag |= lbmDONE; in lbmIODone()
2185 if (bio->bi_status) { in lbmIODone()
2186 bp->l_flag |= lbmERROR; in lbmIODone()
2196 if (bp->l_flag & lbmREAD) { in lbmIODone()
2197 bp->l_flag &= ~lbmREAD; in lbmIODone()
2202 LCACHE_WAKEUP(&bp->l_ioevent); in lbmIODone()
2210 * the bp at the head of write queue has completed pageout. in lbmIODone()
2212 * if single-commit/full-page pageout, remove the current buffer in lbmIODone()
2215 * otherwise, the partial-page pageout buffer stays at in lbmIODone()
2217 * by lmGroupCommit() until full-page pageout is completed. in lbmIODone()
2219 bp->l_flag &= ~lbmWRITE; in lbmIODone()
2223 log = bp->l_log; in lbmIODone()
2224 log->clsn = (bp->l_pn << L2LOGPSIZE) + bp->l_ceor; in lbmIODone()
2226 if (bp->l_flag & lbmDIRECT) { in lbmIODone()
2227 LCACHE_WAKEUP(&bp->l_ioevent); in lbmIODone()
2232 tail = log->wqueue; in lbmIODone()
2236 /* remove head buffer of full-page pageout in lbmIODone()
2237 * from log device write queue in lbmIODone()
2239 if (bp->l_flag & lbmRELEASE) { in lbmIODone()
2240 log->wqueue = NULL; in lbmIODone()
2241 bp->l_wqnext = NULL; in lbmIODone()
2246 /* remove head buffer of full-page pageout in lbmIODone()
2247 * from log device write queue in lbmIODone()
2249 if (bp->l_flag & lbmRELEASE) { in lbmIODone()
2250 nextbp = tail->l_wqnext = bp->l_wqnext; in lbmIODone()
2251 bp->l_wqnext = NULL; in lbmIODone()
2254 * redrive pageout of next page at head of write queue: in lbmIODone()
2263 if (nextbp->l_flag & lbmWRITE) { in lbmIODone()
2276 * buffer has not necessarily been removed from write queue in lbmIODone()
2277 * (e.g., synchronous write of partial-page with COMMIT): in lbmIODone()
2280 if (bp->l_flag & lbmSYNC) { in lbmIODone()
2284 LCACHE_WAKEUP(&bp->l_ioevent); in lbmIODone()
2290 else if (bp->l_flag & lbmGC) { in lbmIODone()
2298 * buffer must have been removed from write queue: in lbmIODone()
2302 assert(bp->l_flag & lbmRELEASE); in lbmIODone()
2303 assert(bp->l_flag & lbmFREE); in lbmIODone()
2317 log_redrive_list = bp->l_redrive_next; in jfsIOWait()
2318 bp->l_redrive_next = NULL; in jfsIOWait()
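
The redrive path is a simple hand-off: lbmRedrive() (source line 1950) pushes a buffer onto a global singly-linked list through l_redrive_next and, in the full source, wakes the jfsIO thread, which pops entries here in jfsIOWait() and restarts their I/O. A sketch of that push/pop, with the spinlock and thread wakeup omitted and a stand-in buffer type:

/*
 * Minimal model of the log_redrive_list hand-off; the real code guards
 * these operations with a spinlock and wakes the jfsIO thread after the
 * push, both omitted here.
 */
struct redrive_buf {
        struct redrive_buf *l_redrive_next;
};

static struct redrive_buf *redrive_list;        /* stand-in for log_redrive_list */

static void redrive_push(struct redrive_buf *bp)
{
        bp->l_redrive_next = redrive_list;      /* link in at the head */
        redrive_list = bp;
}

static struct redrive_buf *redrive_pop(void)
{
        struct redrive_buf *bp = redrive_list;

        if (bp) {
                redrive_list = bp->l_redrive_next;
                bp->l_redrive_next = NULL;
        }
        return bp;
}
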
2344 * log - volume log
2345 * logAddress - start address of log space in FS block
2346 * logSize - length of log space in FS block;
2348 * RETURN: 0 - success
2349 * -EIO - i/o error
2356 int rc = -EIO; in lmLogFormat()
2368 sbi = list_entry(log->sb_list.next, struct jfs_sb_info, log_list); in lmLogFormat()
2373 npages = logSize >> sbi->l2nbperpage; in lmLogFormat()
2378 * page 0 - reserved; in lmLogFormat()
2379 * page 1 - log superblock; in lmLogFormat()
2380 * page 2 - log data page: A SYNC log record is written in lmLogFormat()
2382 * pages 3-N - log data page: set to empty log data pages; in lmLogFormat()
2387 logsuper = (struct logsuper *) bp->l_ldata; in lmLogFormat()
2389 logsuper->magic = cpu_to_le32(LOGMAGIC); in lmLogFormat()
2390 logsuper->version = cpu_to_le32(LOGVERSION); in lmLogFormat()
2391 logsuper->state = cpu_to_le32(LOGREDONE); in lmLogFormat()
2392 logsuper->flag = cpu_to_le32(sbi->mntflag); /* ? */ in lmLogFormat()
2393 logsuper->size = cpu_to_le32(npages); in lmLogFormat()
2394 logsuper->bsize = cpu_to_le32(sbi->bsize); in lmLogFormat()
2395 logsuper->l2bsize = cpu_to_le32(sbi->l2bsize); in lmLogFormat()
2396 logsuper->end = cpu_to_le32(2 * LOGPSIZE + LOGPHDRSIZE + LOGRDSIZE); in lmLogFormat()
2398 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; in lmLogFormat()
2399 bp->l_blkno = logAddress + sbi->nbperpage; in lmLogFormat()
2405 * init pages 2 to npages-1 as log data pages: in lmLogFormat()
2409 * pn: 0 1 2 3 n-1 in lmLogFormat()
2410 * +-----+-----+=====+=====+===.....===+=====+ in lmLogFormat()
2411 * lspn: N-1 0 1 N-2 in lmLogFormat()
2412 * <--- N page circular file ----> in lmLogFormat()
2414 * the N (= npages-2) data pages of the log is maintained as in lmLogFormat()
2424 * the lspn starting from 0, ... (N-2) in lmLogFormat()
2426 lp = (struct logpage *) bp->l_ldata; in lmLogFormat()
2428 * initialize 1st log page to be written: lspn = N - 1; in lmLogFormat()
2429 * a SYNCPT log record is written to this page in lmLogFormat()
2431 lp->h.page = lp->t.page = cpu_to_le32(npages - 3); in lmLogFormat()
2432 lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE); in lmLogFormat()
2434 lrd_ptr = (struct lrd *) &lp->data; in lmLogFormat()
2435 lrd_ptr->logtid = 0; in lmLogFormat()
2436 lrd_ptr->backchain = 0; in lmLogFormat()
2437 lrd_ptr->type = cpu_to_le16(LOG_SYNCPT); in lmLogFormat()
2438 lrd_ptr->length = 0; in lmLogFormat()
2439 lrd_ptr->log.syncpt.sync = 0; in lmLogFormat()
2441 bp->l_blkno += sbi->nbperpage; in lmLogFormat()
2442 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; in lmLogFormat()
2448 * initialize succeeding log pages: lpsn = 0, 1, ..., (N-2) in lmLogFormat()
2450 for (lspn = 0; lspn < npages - 3; lspn++) { in lmLogFormat()
2451 lp->h.page = lp->t.page = cpu_to_le32(lspn); in lmLogFormat()
2452 lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE); in lmLogFormat()
2454 bp->l_blkno += sbi->nbperpage; in lmLogFormat()
2455 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; in lmLogFormat()
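
Putting the lmLogFormat() lines together: with npages = logSize >> l2nbperpage, page 0 is reserved, page 1 holds the log superblock, and the N = npages - 2 data pages form the circular file shown in the diagram above. Page 2, which receives the SYNCPT record at format time, is stamped with the highest sequence number (lspn = N - 1), and pages 3 through npages - 1 get lspn 0 through N - 2. A small sketch of that initial mapping:

/*
 * Initial log sequence page number written to each physical page by
 * lmLogFormat(); matches the pn/lspn diagram around source line 2409.
 */
static int initial_lspn(int pn, int npages)
{
        int ndata = npages - 2;         /* data pages occupy pn 2 .. npages-1 */

        if (pn < 2 || pn >= npages)
                return -1;              /* page 0 reserved, page 1 superblock */
        return (pn == 2) ? ndata - 1 : pn - 3;
}
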