10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0 21da177e4SLinus Torvalds /* 37b718769SNathan Scott * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. 47b718769SNathan Scott * All Rights Reserved. 51da177e4SLinus Torvalds */ 61da177e4SLinus Torvalds #ifndef __XFS_LOG_PRIV_H__ 71da177e4SLinus Torvalds #define __XFS_LOG_PRIV_H__ 81da177e4SLinus Torvalds 91da177e4SLinus Torvalds struct xfs_buf; 10ad223e60SMark Tinguely struct xlog; 11a844f451SNathan Scott struct xlog_ticket; 121da177e4SLinus Torvalds struct xfs_mount; 131da177e4SLinus Torvalds 141da177e4SLinus Torvalds /* 151da177e4SLinus Torvalds * get client id from packed copy. 161da177e4SLinus Torvalds * 171da177e4SLinus Torvalds * this hack is here because the xlog_pack code copies four bytes 181da177e4SLinus Torvalds * of xlog_op_header containing the fields oh_clientid, oh_flags 191da177e4SLinus Torvalds * and oh_res2 into the packed copy. 201da177e4SLinus Torvalds * 211da177e4SLinus Torvalds * later on this four byte chunk is treated as an int and the 221da177e4SLinus Torvalds * client id is pulled out. 231da177e4SLinus Torvalds * 241da177e4SLinus Torvalds * this has endian issues, of course. 251da177e4SLinus Torvalds */ 26b53e675dSChristoph Hellwig static inline uint xlog_get_client_id(__be32 i) 2703bea6feSChristoph Hellwig { 28b53e675dSChristoph Hellwig return be32_to_cpu(i) >> 24; 2903bea6feSChristoph Hellwig } 301da177e4SLinus Torvalds 311da177e4SLinus Torvalds /* 321da177e4SLinus Torvalds * In core log state 331da177e4SLinus Torvalds */ 341858bb0bSChristoph Hellwig enum xlog_iclog_state { 351858bb0bSChristoph Hellwig XLOG_STATE_ACTIVE, /* Current IC log being written to */ 361858bb0bSChristoph Hellwig XLOG_STATE_WANT_SYNC, /* Want to sync this iclog; no more writes */ 371858bb0bSChristoph Hellwig XLOG_STATE_SYNCING, /* This IC log is syncing */ 381858bb0bSChristoph Hellwig XLOG_STATE_DONE_SYNC, /* Done syncing to disk */ 391858bb0bSChristoph Hellwig XLOG_STATE_CALLBACK, /* Callback functions now */ 401858bb0bSChristoph Hellwig XLOG_STATE_DIRTY, /* Dirty IC log, not ready for ACTIVE status */ 411858bb0bSChristoph Hellwig }; 421da177e4SLinus Torvalds 43956f6daaSDave Chinner #define XLOG_STATE_STRINGS \ 44956f6daaSDave Chinner { XLOG_STATE_ACTIVE, "XLOG_STATE_ACTIVE" }, \ 45956f6daaSDave Chinner { XLOG_STATE_WANT_SYNC, "XLOG_STATE_WANT_SYNC" }, \ 46956f6daaSDave Chinner { XLOG_STATE_SYNCING, "XLOG_STATE_SYNCING" }, \ 47956f6daaSDave Chinner { XLOG_STATE_DONE_SYNC, "XLOG_STATE_DONE_SYNC" }, \ 48956f6daaSDave Chinner { XLOG_STATE_CALLBACK, "XLOG_STATE_CALLBACK" }, \ 495112e206SDave Chinner { XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" } 50956f6daaSDave Chinner 51b2ae3a9eSDave Chinner /* 52b2ae3a9eSDave Chinner * In core log flags 53b2ae3a9eSDave Chinner */ 54c60d13eaSDave Chinner #define XLOG_ICL_NEED_FLUSH (1u << 0) /* iclog needs REQ_PREFLUSH */ 55c60d13eaSDave Chinner #define XLOG_ICL_NEED_FUA (1u << 1) /* iclog needs REQ_FUA */ 56b2ae3a9eSDave Chinner 57b2ae3a9eSDave Chinner #define XLOG_ICL_STRINGS \ 58b2ae3a9eSDave Chinner { XLOG_ICL_NEED_FLUSH, "XLOG_ICL_NEED_FLUSH" }, \ 59b2ae3a9eSDave Chinner { XLOG_ICL_NEED_FUA, "XLOG_ICL_NEED_FUA" } 60b2ae3a9eSDave Chinner 61956f6daaSDave Chinner 621da177e4SLinus Torvalds /* 6370e42f2dSDave Chinner * Log ticket flags 641da177e4SLinus Torvalds */ 65c60d13eaSDave Chinner #define XLOG_TIC_PERM_RESERV (1u << 0) /* permanent reservation */ 660b1b213fSChristoph Hellwig 670b1b213fSChristoph Hellwig #define XLOG_TIC_FLAGS \ 6810547941SDave Chinner { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" } 690b1b213fSChristoph Hellwig 701da177e4SLinus Torvalds /* 711da177e4SLinus Torvalds * Below are states for covering allocation transactions. 721da177e4SLinus Torvalds * By covering, we mean changing the h_tail_lsn in the last on-disk 731da177e4SLinus Torvalds * log write such that no allocation transactions will be re-done during 741da177e4SLinus Torvalds * recovery after a system crash. Recovery starts at the last on-disk 751da177e4SLinus Torvalds * log write. 761da177e4SLinus Torvalds * 771da177e4SLinus Torvalds * These states are used to insert dummy log entries to cover 781da177e4SLinus Torvalds * space allocation transactions which can undo non-transactional changes 791da177e4SLinus Torvalds * after a crash. Writes to a file with space 801da177e4SLinus Torvalds * already allocated do not result in any transactions. Allocations 811da177e4SLinus Torvalds * might include space beyond the EOF. So if we just push the EOF a 821da177e4SLinus Torvalds * little, the last transaction for the file could contain the wrong 831da177e4SLinus Torvalds * size. If there is no file system activity, after an allocation 841da177e4SLinus Torvalds * transaction, and the system crashes, the allocation transaction 851da177e4SLinus Torvalds * will get replayed and the file will be truncated. This could 861da177e4SLinus Torvalds * be hours/days/... after the allocation occurred. 871da177e4SLinus Torvalds * 881da177e4SLinus Torvalds * The fix for this is to do two dummy transactions when the 891da177e4SLinus Torvalds * system is idle. We need two dummy transaction because the h_tail_lsn 901da177e4SLinus Torvalds * in the log record header needs to point beyond the last possible 911da177e4SLinus Torvalds * non-dummy transaction. The first dummy changes the h_tail_lsn to 921da177e4SLinus Torvalds * the first transaction before the dummy. The second dummy causes 931da177e4SLinus Torvalds * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn. 941da177e4SLinus Torvalds * 951da177e4SLinus Torvalds * These dummy transactions get committed when everything 961da177e4SLinus Torvalds * is idle (after there has been some activity). 971da177e4SLinus Torvalds * 981da177e4SLinus Torvalds * There are 5 states used to control this. 991da177e4SLinus Torvalds * 1001da177e4SLinus Torvalds * IDLE -- no logging has been done on the file system or 1011da177e4SLinus Torvalds * we are done covering previous transactions. 1021da177e4SLinus Torvalds * NEED -- logging has occurred and we need a dummy transaction 1031da177e4SLinus Torvalds * when the log becomes idle. 1041da177e4SLinus Torvalds * DONE -- we were in the NEED state and have committed a dummy 1051da177e4SLinus Torvalds * transaction. 1061da177e4SLinus Torvalds * NEED2 -- we detected that a dummy transaction has gone to the 1071da177e4SLinus Torvalds * on disk log with no other transactions. 1081da177e4SLinus Torvalds * DONE2 -- we committed a dummy transaction when in the NEED2 state. 1091da177e4SLinus Torvalds * 1101da177e4SLinus Torvalds * There are two places where we switch states: 1111da177e4SLinus Torvalds * 1121da177e4SLinus Torvalds * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2. 1131da177e4SLinus Torvalds * We commit the dummy transaction and switch to DONE or DONE2, 1141da177e4SLinus Torvalds * respectively. In all other states, we don't do anything. 1151da177e4SLinus Torvalds * 1161da177e4SLinus Torvalds * 2.) When we finish writing the on-disk log (xlog_state_clean_log). 1171da177e4SLinus Torvalds * 1181da177e4SLinus Torvalds * No matter what state we are in, if this isn't the dummy 1191da177e4SLinus Torvalds * transaction going out, the next state is NEED. 1201da177e4SLinus Torvalds * So, if we aren't in the DONE or DONE2 states, the next state 1211da177e4SLinus Torvalds * is NEED. We can't be finishing a write of the dummy record 1221da177e4SLinus Torvalds * unless it was committed and the state switched to DONE or DONE2. 1231da177e4SLinus Torvalds * 1241da177e4SLinus Torvalds * If we are in the DONE state and this was a write of the 1251da177e4SLinus Torvalds * dummy transaction, we move to NEED2. 1261da177e4SLinus Torvalds * 1271da177e4SLinus Torvalds * If we are in the DONE2 state and this was a write of the 1281da177e4SLinus Torvalds * dummy transaction, we move to IDLE. 1291da177e4SLinus Torvalds * 1301da177e4SLinus Torvalds * 1311da177e4SLinus Torvalds * Writing only one dummy transaction can get appended to 1321da177e4SLinus Torvalds * one file space allocation. When this happens, the log recovery 1331da177e4SLinus Torvalds * code replays the space allocation and a file could be truncated. 1341da177e4SLinus Torvalds * This is why we have the NEED2 and DONE2 states before going idle. 1351da177e4SLinus Torvalds */ 1361da177e4SLinus Torvalds 1371da177e4SLinus Torvalds #define XLOG_STATE_COVER_IDLE 0 1381da177e4SLinus Torvalds #define XLOG_STATE_COVER_NEED 1 1391da177e4SLinus Torvalds #define XLOG_STATE_COVER_DONE 2 1401da177e4SLinus Torvalds #define XLOG_STATE_COVER_NEED2 3 1411da177e4SLinus Torvalds #define XLOG_STATE_COVER_DONE2 4 1421da177e4SLinus Torvalds 1431da177e4SLinus Torvalds #define XLOG_COVER_OPS 5 1441da177e4SLinus Torvalds 1451da177e4SLinus Torvalds typedef struct xlog_ticket { 14610547941SDave Chinner struct list_head t_queue; /* reserve/write queue */ 14714a7235fSChristoph Hellwig struct task_struct *t_task; /* task that owns this ticket */ 1481da177e4SLinus Torvalds xlog_tid_t t_tid; /* transaction identifier : 4 */ 149cc09c0dcSDave Chinner atomic_t t_ref; /* ticket reference count : 4 */ 1501da177e4SLinus Torvalds int t_curr_res; /* current reservation in bytes : 4 */ 1511da177e4SLinus Torvalds int t_unit_res; /* unit reservation in bytes : 4 */ 1527e9c6396STim Shimmin char t_ocnt; /* original count : 1 */ 1537e9c6396STim Shimmin char t_cnt; /* current count : 1 */ 154c60d13eaSDave Chinner uint8_t t_flags; /* properties of reservation : 1 */ 1551da177e4SLinus Torvalds } xlog_ticket_t; 1567e9c6396STim Shimmin 1571da177e4SLinus Torvalds /* 1581da177e4SLinus Torvalds * - A log record header is 512 bytes. There is plenty of room to grow the 1591da177e4SLinus Torvalds * xlog_rec_header_t into the reserved space. 1601da177e4SLinus Torvalds * - ic_data follows, so a write to disk can start at the beginning of 1611da177e4SLinus Torvalds * the iclog. 16212017fafSDavid Chinner * - ic_forcewait is used to implement synchronous forcing of the iclog to disk. 1631da177e4SLinus Torvalds * - ic_next is the pointer to the next iclog in the ring. 1641da177e4SLinus Torvalds * - ic_log is a pointer back to the global log structure. 16579b54d9bSChristoph Hellwig * - ic_size is the full size of the log buffer, minus the cycle headers. 1661da177e4SLinus Torvalds * - ic_offset is the current number of bytes written to in this iclog. 1671da177e4SLinus Torvalds * - ic_refcnt is bumped when someone is writing to the log. 1681da177e4SLinus Torvalds * - ic_state is the state of the iclog. 169114d23aaSDavid Chinner * 170114d23aaSDavid Chinner * Because of cacheline contention on large machines, we need to separate 171114d23aaSDavid Chinner * various resources onto different cachelines. To start with, make the 172114d23aaSDavid Chinner * structure cacheline aligned. The following fields can be contended on 173114d23aaSDavid Chinner * by independent processes: 174114d23aaSDavid Chinner * 17589ae379dSChristoph Hellwig * - ic_callbacks 176114d23aaSDavid Chinner * - ic_refcnt 177114d23aaSDavid Chinner * - fields protected by the global l_icloglock 178114d23aaSDavid Chinner * 179114d23aaSDavid Chinner * so we need to ensure that these fields are located in separate cachelines. 180114d23aaSDavid Chinner * We'll put all the read-only and l_icloglock fields in the first cacheline, 181114d23aaSDavid Chinner * and move everything else out to subsequent cachelines. 1821da177e4SLinus Torvalds */ 183b28708d6SChristoph Hellwig typedef struct xlog_in_core { 184eb40a875SDave Chinner wait_queue_head_t ic_force_wait; 185eb40a875SDave Chinner wait_queue_head_t ic_write_wait; 1861da177e4SLinus Torvalds struct xlog_in_core *ic_next; 1871da177e4SLinus Torvalds struct xlog_in_core *ic_prev; 188ad223e60SMark Tinguely struct xlog *ic_log; 18979b54d9bSChristoph Hellwig u32 ic_size; 19079b54d9bSChristoph Hellwig u32 ic_offset; 1911858bb0bSChristoph Hellwig enum xlog_iclog_state ic_state; 192eef983ffSDave Chinner unsigned int ic_flags; 193decb545fSChristoph Hellwig void *ic_datap; /* pointer to iclog data */ 19489ae379dSChristoph Hellwig struct list_head ic_callbacks; 195114d23aaSDavid Chinner 196114d23aaSDavid Chinner /* reference counts need their own cacheline */ 197114d23aaSDavid Chinner atomic_t ic_refcnt ____cacheline_aligned_in_smp; 198b28708d6SChristoph Hellwig xlog_in_core_2_t *ic_data; 199b28708d6SChristoph Hellwig #define ic_header ic_data->hic_header 200366fc4b8SChristoph Hellwig #ifdef DEBUG 201366fc4b8SChristoph Hellwig bool ic_fail_crc : 1; 202366fc4b8SChristoph Hellwig #endif 20379b54d9bSChristoph Hellwig struct semaphore ic_sema; 20479b54d9bSChristoph Hellwig struct work_struct ic_end_io_work; 20579b54d9bSChristoph Hellwig struct bio ic_bio; 20679b54d9bSChristoph Hellwig struct bio_vec ic_bvec[]; 2071da177e4SLinus Torvalds } xlog_in_core_t; 2081da177e4SLinus Torvalds 2091da177e4SLinus Torvalds /* 21071e330b5SDave Chinner * The CIL context is used to aggregate per-transaction details as well be 21171e330b5SDave Chinner * passed to the iclog for checkpoint post-commit processing. After being 21271e330b5SDave Chinner * passed to the iclog, another context needs to be allocated for tracking the 21371e330b5SDave Chinner * next set of transactions to be aggregated into a checkpoint. 21471e330b5SDave Chinner */ 21571e330b5SDave Chinner struct xfs_cil; 21671e330b5SDave Chinner 21771e330b5SDave Chinner struct xfs_cil_ctx { 21871e330b5SDave Chinner struct xfs_cil *cil; 2195f9b4b0dSDave Chinner xfs_csn_t sequence; /* chkpt sequence # */ 22071e330b5SDave Chinner xfs_lsn_t start_lsn; /* first LSN of chkpt commit */ 22171e330b5SDave Chinner xfs_lsn_t commit_lsn; /* chkpt commit record lsn */ 222caa80090SDave Chinner struct xlog_in_core *commit_iclog; 22371e330b5SDave Chinner struct xlog_ticket *ticket; /* chkpt ticket */ 22471e330b5SDave Chinner int space_used; /* aggregate size of regions */ 22571e330b5SDave Chinner struct list_head busy_extents; /* busy extents in chkpt */ 22671e330b5SDave Chinner struct xfs_log_vec *lv_chain; /* logvecs being pushed */ 22789ae379dSChristoph Hellwig struct list_head iclog_entry; 22871e330b5SDave Chinner struct list_head committing; /* ctx committing list */ 2294560e78fSChristoph Hellwig struct work_struct discard_endio_work; 23039823d0fSDave Chinner struct work_struct push_work; 23171e330b5SDave Chinner }; 23271e330b5SDave Chinner 23371e330b5SDave Chinner /* 23471e330b5SDave Chinner * Committed Item List structure 23571e330b5SDave Chinner * 23671e330b5SDave Chinner * This structure is used to track log items that have been committed but not 23771e330b5SDave Chinner * yet written into the log. It is used only when the delayed logging mount 23871e330b5SDave Chinner * option is enabled. 23971e330b5SDave Chinner * 24071e330b5SDave Chinner * This structure tracks the list of committing checkpoint contexts so 24171e330b5SDave Chinner * we can avoid the problem of having to hold out new transactions during a 24271e330b5SDave Chinner * flush until we have a the commit record LSN of the checkpoint. We can 24371e330b5SDave Chinner * traverse the list of committing contexts in xlog_cil_push_lsn() to find a 24471e330b5SDave Chinner * sequence match and extract the commit LSN directly from there. If the 24571e330b5SDave Chinner * checkpoint is still in the process of committing, we can block waiting for 24671e330b5SDave Chinner * the commit LSN to be determined as well. This should make synchronous 24771e330b5SDave Chinner * operations almost as efficient as the old logging methods. 24871e330b5SDave Chinner */ 24971e330b5SDave Chinner struct xfs_cil { 250ad223e60SMark Tinguely struct xlog *xc_log; 25171e330b5SDave Chinner struct list_head xc_cil; 25271e330b5SDave Chinner spinlock_t xc_cil_lock; 25333c0dd78SDave Chinner struct workqueue_struct *xc_push_wq; 2544bb928cdSDave Chinner 2554bb928cdSDave Chinner struct rw_semaphore xc_ctx_lock ____cacheline_aligned_in_smp; 25671e330b5SDave Chinner struct xfs_cil_ctx *xc_ctx; 2574bb928cdSDave Chinner 2584bb928cdSDave Chinner spinlock_t xc_push_lock ____cacheline_aligned_in_smp; 2595f9b4b0dSDave Chinner xfs_csn_t xc_push_seq; 2600020a190SDave Chinner bool xc_push_commit_stable; 26171e330b5SDave Chinner struct list_head xc_committing; 262eb40a875SDave Chinner wait_queue_head_t xc_commit_wait; 26368a74dcaSDave Chinner wait_queue_head_t xc_start_wait; 2645f9b4b0dSDave Chinner xfs_csn_t xc_current_sequence; 265c7f87f39SDave Chinner wait_queue_head_t xc_push_wait; /* background push throttle */ 2664bb928cdSDave Chinner } ____cacheline_aligned_in_smp; 26771e330b5SDave Chinner 26871e330b5SDave Chinner /* 26980168676SDave Chinner * The amount of log space we allow the CIL to aggregate is difficult to size. 27080168676SDave Chinner * Whatever we choose, we have to make sure we can get a reservation for the 27180168676SDave Chinner * log space effectively, that it is large enough to capture sufficient 27280168676SDave Chinner * relogging to reduce log buffer IO significantly, but it is not too large for 27380168676SDave Chinner * the log or induces too much latency when writing out through the iclogs. We 27480168676SDave Chinner * track both space consumed and the number of vectors in the checkpoint 27580168676SDave Chinner * context, so we need to decide which to use for limiting. 276df806158SDave Chinner * 277df806158SDave Chinner * Every log buffer we write out during a push needs a header reserved, which 278df806158SDave Chinner * is at least one sector and more for v2 logs. Hence we need a reservation of 279df806158SDave Chinner * at least 512 bytes per 32k of log space just for the LR headers. That means 280df806158SDave Chinner * 16KB of reservation per megabyte of delayed logging space we will consume, 281df806158SDave Chinner * plus various headers. The number of headers will vary based on the num of 282df806158SDave Chinner * io vectors, so limiting on a specific number of vectors is going to result 283df806158SDave Chinner * in transactions of varying size. IOWs, it is more consistent to track and 284df806158SDave Chinner * limit space consumed in the log rather than by the number of objects being 285df806158SDave Chinner * logged in order to prevent checkpoint ticket overruns. 286df806158SDave Chinner * 287df806158SDave Chinner * Further, use of static reservations through the log grant mechanism is 288df806158SDave Chinner * problematic. It introduces a lot of complexity (e.g. reserve grant vs write 289df806158SDave Chinner * grant) and a significant deadlock potential because regranting write space 290df806158SDave Chinner * can block on log pushes. Hence if we have to regrant log space during a log 291df806158SDave Chinner * push, we can deadlock. 292df806158SDave Chinner * 293df806158SDave Chinner * However, we can avoid this by use of a dynamic "reservation stealing" 294df806158SDave Chinner * technique during transaction commit whereby unused reservation space in the 295df806158SDave Chinner * transaction ticket is transferred to the CIL ctx commit ticket to cover the 296df806158SDave Chinner * space needed by the checkpoint transaction. This means that we never need to 297df806158SDave Chinner * specifically reserve space for the CIL checkpoint transaction, nor do we 298df806158SDave Chinner * need to regrant space once the checkpoint completes. This also means the 299df806158SDave Chinner * checkpoint transaction ticket is specific to the checkpoint context, rather 300df806158SDave Chinner * than the CIL itself. 301df806158SDave Chinner * 30280168676SDave Chinner * With dynamic reservations, we can effectively make up arbitrary limits for 30380168676SDave Chinner * the checkpoint size so long as they don't violate any other size rules. 30480168676SDave Chinner * Recovery imposes a rule that no transaction exceed half the log, so we are 30580168676SDave Chinner * limited by that. Furthermore, the log transaction reservation subsystem 30680168676SDave Chinner * tries to keep 25% of the log free, so we need to keep below that limit or we 30780168676SDave Chinner * risk running out of free log space to start any new transactions. 30880168676SDave Chinner * 309108a4235SDave Chinner * In order to keep background CIL push efficient, we only need to ensure the 310108a4235SDave Chinner * CIL is large enough to maintain sufficient in-memory relogging to avoid 311108a4235SDave Chinner * repeated physical writes of frequently modified metadata. If we allow the CIL 312108a4235SDave Chinner * to grow to a substantial fraction of the log, then we may be pinning hundreds 313108a4235SDave Chinner * of megabytes of metadata in memory until the CIL flushes. This can cause 314108a4235SDave Chinner * issues when we are running low on memory - pinned memory cannot be reclaimed, 315108a4235SDave Chinner * and the CIL consumes a lot of memory. Hence we need to set an upper physical 316108a4235SDave Chinner * size limit for the CIL that limits the maximum amount of memory pinned by the 317108a4235SDave Chinner * CIL but does not limit performance by reducing relogging efficiency 318108a4235SDave Chinner * significantly. 319108a4235SDave Chinner * 320108a4235SDave Chinner * As such, the CIL push threshold ends up being the smaller of two thresholds: 321108a4235SDave Chinner * - a threshold large enough that it allows CIL to be pushed and progress to be 322108a4235SDave Chinner * made without excessive blocking of incoming transaction commits. This is 323108a4235SDave Chinner * defined to be 12.5% of the log space - half the 25% push threshold of the 324108a4235SDave Chinner * AIL. 325108a4235SDave Chinner * - small enough that it doesn't pin excessive amounts of memory but maintains 326108a4235SDave Chinner * close to peak relogging efficiency. This is defined to be 16x the iclog 327108a4235SDave Chinner * buffer window (32MB) as measurements have shown this to be roughly the 328108a4235SDave Chinner * point of diminishing performance increases under highly concurrent 329108a4235SDave Chinner * modification workloads. 3300e7ab7efSDave Chinner * 3310e7ab7efSDave Chinner * To prevent the CIL from overflowing upper commit size bounds, we introduce a 3320e7ab7efSDave Chinner * new threshold at which we block committing transactions until the background 3330e7ab7efSDave Chinner * CIL commit commences and switches to a new context. While this is not a hard 3340e7ab7efSDave Chinner * limit, it forces the process committing a transaction to the CIL to block and 3350e7ab7efSDave Chinner * yeild the CPU, giving the CIL push work a chance to be scheduled and start 3360e7ab7efSDave Chinner * work. This prevents a process running lots of transactions from overfilling 3370e7ab7efSDave Chinner * the CIL because it is not yielding the CPU. We set the blocking limit at 3380e7ab7efSDave Chinner * twice the background push space threshold so we keep in line with the AIL 3390e7ab7efSDave Chinner * push thresholds. 3400e7ab7efSDave Chinner * 3410e7ab7efSDave Chinner * Note: this is not a -hard- limit as blocking is applied after the transaction 3420e7ab7efSDave Chinner * is inserted into the CIL and the push has been triggered. It is largely a 3430e7ab7efSDave Chinner * throttling mechanism that allows the CIL push to be scheduled and run. A hard 3440e7ab7efSDave Chinner * limit will be difficult to implement without introducing global serialisation 3450e7ab7efSDave Chinner * in the CIL commit fast path, and it's not at all clear that we actually need 3460e7ab7efSDave Chinner * such hard limits given the ~7 years we've run without a hard limit before 3470e7ab7efSDave Chinner * finding the first situation where a checkpoint size overflow actually 3480e7ab7efSDave Chinner * occurred. Hence the simple throttle, and an ASSERT check to tell us that 3490e7ab7efSDave Chinner * we've overrun the max size. 350df806158SDave Chinner */ 351108a4235SDave Chinner #define XLOG_CIL_SPACE_LIMIT(log) \ 352108a4235SDave Chinner min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4) 353df806158SDave Chinner 3540e7ab7efSDave Chinner #define XLOG_CIL_BLOCKING_SPACE_LIMIT(log) \ 3550e7ab7efSDave Chinner (XLOG_CIL_SPACE_LIMIT(log) * 2) 3560e7ab7efSDave Chinner 357df806158SDave Chinner /* 35828496968SChristoph Hellwig * ticket grant locks, queues and accounting have their own cachlines 35928496968SChristoph Hellwig * as these are quite hot and can be operated on concurrently. 36028496968SChristoph Hellwig */ 36128496968SChristoph Hellwig struct xlog_grant_head { 36228496968SChristoph Hellwig spinlock_t lock ____cacheline_aligned_in_smp; 36328496968SChristoph Hellwig struct list_head waiters; 36428496968SChristoph Hellwig atomic64_t grant; 36528496968SChristoph Hellwig }; 36628496968SChristoph Hellwig 36728496968SChristoph Hellwig /* 3681da177e4SLinus Torvalds * The reservation head lsn is not made up of a cycle number and block number. 3691da177e4SLinus Torvalds * Instead, it uses a cycle number and byte number. Logs don't expect to 3701da177e4SLinus Torvalds * overflow 31 bits worth of byte offset, so using a byte number will mean 3711da177e4SLinus Torvalds * that round off problems won't occur when releasing partial reservations. 3721da177e4SLinus Torvalds */ 3739a8d2fdbSMark Tinguely struct xlog { 3744679b2d3SDavid Chinner /* The following fields don't need locking */ 3754679b2d3SDavid Chinner struct xfs_mount *l_mp; /* mount point */ 376a9c21c1bSDavid Chinner struct xfs_ail *l_ailp; /* AIL log is working with */ 37771e330b5SDave Chinner struct xfs_cil *l_cilp; /* CIL log is working with */ 3784679b2d3SDavid Chinner struct xfs_buftarg *l_targ; /* buftarg of log */ 3791058d0f5SChristoph Hellwig struct workqueue_struct *l_ioend_workqueue; /* for I/O completions */ 380f661f1e0SDave Chinner struct delayed_work l_work; /* background flush work */ 381e1d06e5fSDave Chinner long l_opstate; /* operational state */ 3824679b2d3SDavid Chinner uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */ 383d5689eaaSChristoph Hellwig struct list_head *l_buf_cancel_table; 3844679b2d3SDavid Chinner int l_iclog_hsize; /* size of iclog header */ 3854679b2d3SDavid Chinner int l_iclog_heads; /* # of iclog header sectors */ 38648389ef1SAlex Elder uint l_sectBBsize; /* sector size in BBs (2^n) */ 3874679b2d3SDavid Chinner int l_iclog_size; /* size of log in bytes */ 3884679b2d3SDavid Chinner int l_iclog_bufs; /* number of iclog buffers */ 3894679b2d3SDavid Chinner xfs_daddr_t l_logBBstart; /* start block of log */ 3904679b2d3SDavid Chinner int l_logsize; /* size of log in bytes */ 3914679b2d3SDavid Chinner int l_logBBsize; /* size of log in BB chunks */ 3924679b2d3SDavid Chinner 3931da177e4SLinus Torvalds /* The following block of fields are changed while holding icloglock */ 394eb40a875SDave Chinner wait_queue_head_t l_flush_wait ____cacheline_aligned_in_smp; 395d748c623SMatthew Wilcox /* waiting for iclog flush */ 3961da177e4SLinus Torvalds int l_covered_state;/* state of "covering disk 3971da177e4SLinus Torvalds * log entries" */ 3981da177e4SLinus Torvalds xlog_in_core_t *l_iclog; /* head log queue */ 399b22cd72cSEric Sandeen spinlock_t l_icloglock; /* grab to change iclog state */ 4001da177e4SLinus Torvalds int l_curr_cycle; /* Cycle number of log writes */ 4011da177e4SLinus Torvalds int l_prev_cycle; /* Cycle number before last 4021da177e4SLinus Torvalds * block increment */ 4031da177e4SLinus Torvalds int l_curr_block; /* current logical log block */ 4041da177e4SLinus Torvalds int l_prev_block; /* previous logical log block */ 4051da177e4SLinus Torvalds 40684f3c683SDave Chinner /* 4071c3cb9ecSDave Chinner * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and 4081c3cb9ecSDave Chinner * read without needing to hold specific locks. To avoid operations 4091c3cb9ecSDave Chinner * contending with other hot objects, place each of them on a separate 4101c3cb9ecSDave Chinner * cacheline. 41184f3c683SDave Chinner */ 41284f3c683SDave Chinner /* lsn of last LR on disk */ 41384f3c683SDave Chinner atomic64_t l_last_sync_lsn ____cacheline_aligned_in_smp; 4141c3cb9ecSDave Chinner /* lsn of 1st LR with unflushed * buffers */ 4151c3cb9ecSDave Chinner atomic64_t l_tail_lsn ____cacheline_aligned_in_smp; 41684f3c683SDave Chinner 41728496968SChristoph Hellwig struct xlog_grant_head l_reserve_head; 41828496968SChristoph Hellwig struct xlog_grant_head l_write_head; 4193f16b985SDave Chinner 420baff4e44SBrian Foster struct xfs_kobj l_kobj; 421baff4e44SBrian Foster 42212818d24SBrian Foster /* log recovery lsn tracking (for buffer submission */ 42312818d24SBrian Foster xfs_lsn_t l_recovery_lsn; 424a6a65fefSDave Chinner 425a6a65fefSDave Chinner uint32_t l_iclog_roundoff;/* padding roundoff */ 4262b73a2c8SDarrick J. Wong 4272b73a2c8SDarrick J. Wong /* Users of log incompat features should take a read lock. */ 4282b73a2c8SDarrick J. Wong struct rw_semaphore l_incompat_users; 4299a8d2fdbSMark Tinguely }; 4301da177e4SLinus Torvalds 431d5689eaaSChristoph Hellwig #define XLOG_BUF_CANCEL_BUCKET(log, blkno) \ 432c8ce540dSDarrick J. Wong ((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE)) 433d5689eaaSChristoph Hellwig 434e1d06e5fSDave Chinner /* 435e1d06e5fSDave Chinner * Bits for operational state 436e1d06e5fSDave Chinner */ 437e1d06e5fSDave Chinner #define XLOG_ACTIVE_RECOVERY 0 /* in the middle of recovery */ 438e1d06e5fSDave Chinner #define XLOG_RECOVERY_NEEDED 1 /* log was recovered */ 439e1d06e5fSDave Chinner #define XLOG_IO_ERROR 2 /* log hit an I/O error, and being 440e1d06e5fSDave Chinner shutdown */ 441e1d06e5fSDave Chinner #define XLOG_TAIL_WARN 3 /* log tail verify warning issued */ 442e1d06e5fSDave Chinner 443e1d06e5fSDave Chinner static inline bool 444e1d06e5fSDave Chinner xlog_recovery_needed(struct xlog *log) 445e1d06e5fSDave Chinner { 446e1d06e5fSDave Chinner return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate); 447e1d06e5fSDave Chinner } 448e1d06e5fSDave Chinner 449e1d06e5fSDave Chinner static inline bool 450e1d06e5fSDave Chinner xlog_in_recovery(struct xlog *log) 451e1d06e5fSDave Chinner { 452e1d06e5fSDave Chinner return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); 453e1d06e5fSDave Chinner } 454e1d06e5fSDave Chinner 4552039a272SDave Chinner static inline bool 4562039a272SDave Chinner xlog_is_shutdown(struct xlog *log) 4572039a272SDave Chinner { 458e1d06e5fSDave Chinner return test_bit(XLOG_IO_ERROR, &log->l_opstate); 4592039a272SDave Chinner } 460cfcbbbd0SNathan Scott 46141e63621SDave Chinner /* 46241e63621SDave Chinner * Wait until the xlog_force_shutdown() has marked the log as shut down 46341e63621SDave Chinner * so xlog_is_shutdown() will always return true. 46441e63621SDave Chinner */ 46541e63621SDave Chinner static inline void 46641e63621SDave Chinner xlog_shutdown_wait( 46741e63621SDave Chinner struct xlog *log) 46841e63621SDave Chinner { 46941e63621SDave Chinner wait_var_event(&log->l_opstate, xlog_is_shutdown(log)); 47041e63621SDave Chinner } 47141e63621SDave Chinner 4721da177e4SLinus Torvalds /* common routines */ 4739a8d2fdbSMark Tinguely extern int 4749a8d2fdbSMark Tinguely xlog_recover( 4759a8d2fdbSMark Tinguely struct xlog *log); 4769a8d2fdbSMark Tinguely extern int 4779a8d2fdbSMark Tinguely xlog_recover_finish( 4789a8d2fdbSMark Tinguely struct xlog *log); 479a7a9250eSHariprasad Kelam extern void 480f0b2efadSBrian Foster xlog_recover_cancel(struct xlog *); 4810e446be4SChristoph Hellwig 482f9668a09SDave Chinner extern __le32 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead, 4830e446be4SChristoph Hellwig char *dp, int size); 4841da177e4SLinus Torvalds 485182696fbSDarrick J. Wong extern struct kmem_cache *xfs_log_ticket_cache; 486c7610dceSDave Chinner struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes, 487c7610dceSDave Chinner int count, bool permanent); 488e6b1f273SChristoph Hellwig 48971e330b5SDave Chinner void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket); 490d4ca1d55SBrian Foster void xlog_print_trans(struct xfs_trans *); 491c45aba40SDave Chinner int xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx, 492c45aba40SDave Chinner struct xfs_log_vec *log_vector, struct xlog_ticket *tic, 49314b07ecdSDave Chinner uint32_t len); 4948b41e3f9SChristoph Hellwig void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket); 4958b41e3f9SChristoph Hellwig void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket); 49671e330b5SDave Chinner 4970020a190SDave Chinner void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog, 4980020a190SDave Chinner int eventual_size); 499919edbadSDave Chinner int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog); 500eef983ffSDave Chinner 50171e330b5SDave Chinner /* 5021c3cb9ecSDave Chinner * When we crack an atomic LSN, we sample it first so that the value will not 5031c3cb9ecSDave Chinner * change while we are cracking it into the component values. This means we 5041c3cb9ecSDave Chinner * will always get consistent component values to work from. This should always 50525985edcSLucas De Marchi * be used to sample and crack LSNs that are stored and updated in atomic 5061c3cb9ecSDave Chinner * variables. 5071c3cb9ecSDave Chinner */ 5081c3cb9ecSDave Chinner static inline void 5091c3cb9ecSDave Chinner xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block) 5101c3cb9ecSDave Chinner { 5111c3cb9ecSDave Chinner xfs_lsn_t val = atomic64_read(lsn); 5121c3cb9ecSDave Chinner 5131c3cb9ecSDave Chinner *cycle = CYCLE_LSN(val); 5141c3cb9ecSDave Chinner *block = BLOCK_LSN(val); 5151c3cb9ecSDave Chinner } 5161c3cb9ecSDave Chinner 5171c3cb9ecSDave Chinner /* 5181c3cb9ecSDave Chinner * Calculate and assign a value to an atomic LSN variable from component pieces. 5191c3cb9ecSDave Chinner */ 5201c3cb9ecSDave Chinner static inline void 5211c3cb9ecSDave Chinner xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block) 5221c3cb9ecSDave Chinner { 5231c3cb9ecSDave Chinner atomic64_set(lsn, xlog_assign_lsn(cycle, block)); 5241c3cb9ecSDave Chinner } 5251c3cb9ecSDave Chinner 5261c3cb9ecSDave Chinner /* 527d0eb2f38SDave Chinner * When we crack the grant head, we sample it first so that the value will not 528a69ed03cSDave Chinner * change while we are cracking it into the component values. This means we 529a69ed03cSDave Chinner * will always get consistent component values to work from. 530a69ed03cSDave Chinner */ 531a69ed03cSDave Chinner static inline void 532d0eb2f38SDave Chinner xlog_crack_grant_head_val(int64_t val, int *cycle, int *space) 533a69ed03cSDave Chinner { 534a69ed03cSDave Chinner *cycle = val >> 32; 535a69ed03cSDave Chinner *space = val & 0xffffffff; 536a69ed03cSDave Chinner } 537a69ed03cSDave Chinner 538a69ed03cSDave Chinner static inline void 539d0eb2f38SDave Chinner xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space) 540d0eb2f38SDave Chinner { 541d0eb2f38SDave Chinner xlog_crack_grant_head_val(atomic64_read(head), cycle, space); 542d0eb2f38SDave Chinner } 543d0eb2f38SDave Chinner 544d0eb2f38SDave Chinner static inline int64_t 545d0eb2f38SDave Chinner xlog_assign_grant_head_val(int cycle, int space) 546d0eb2f38SDave Chinner { 547d0eb2f38SDave Chinner return ((int64_t)cycle << 32) | space; 548d0eb2f38SDave Chinner } 549d0eb2f38SDave Chinner 550d0eb2f38SDave Chinner static inline void 551c8a09ff8SDave Chinner xlog_assign_grant_head(atomic64_t *head, int cycle, int space) 552a69ed03cSDave Chinner { 553d0eb2f38SDave Chinner atomic64_set(head, xlog_assign_grant_head_val(cycle, space)); 554a69ed03cSDave Chinner } 555a69ed03cSDave Chinner 556a69ed03cSDave Chinner /* 55771e330b5SDave Chinner * Committed Item List interfaces 55871e330b5SDave Chinner */ 5592c6e24ceSDave Chinner int xlog_cil_init(struct xlog *log); 5602c6e24ceSDave Chinner void xlog_cil_init_post_recovery(struct xlog *log); 5612c6e24ceSDave Chinner void xlog_cil_destroy(struct xlog *log); 5622c6e24ceSDave Chinner bool xlog_cil_empty(struct xlog *log); 5635f9b4b0dSDave Chinner void xlog_cil_commit(struct xlog *log, struct xfs_trans *tp, 5645f9b4b0dSDave Chinner xfs_csn_t *commit_seq, bool regrant); 565c45aba40SDave Chinner void xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx, 566c45aba40SDave Chinner struct xlog_in_core *iclog); 567c45aba40SDave Chinner 56871e330b5SDave Chinner 569a44f13edSDave Chinner /* 570a44f13edSDave Chinner * CIL force routines 571a44f13edSDave Chinner */ 5720020a190SDave Chinner void xlog_cil_flush(struct xlog *log); 5735f9b4b0dSDave Chinner xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence); 574a44f13edSDave Chinner 575a44f13edSDave Chinner static inline void 576ad223e60SMark Tinguely xlog_cil_force(struct xlog *log) 577a44f13edSDave Chinner { 5785f9b4b0dSDave Chinner xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence); 579a44f13edSDave Chinner } 58071e330b5SDave Chinner 581955e47adSTim Shimmin /* 582eb40a875SDave Chinner * Wrapper function for waiting on a wait queue serialised against wakeups 583eb40a875SDave Chinner * by a spinlock. This matches the semantics of all the wait queues used in the 584eb40a875SDave Chinner * log code. 585eb40a875SDave Chinner */ 586f7559793SDarrick J. Wong static inline void 587f7559793SDarrick J. Wong xlog_wait( 588f7559793SDarrick J. Wong struct wait_queue_head *wq, 589f7559793SDarrick J. Wong struct spinlock *lock) 590f7559793SDarrick J. Wong __releases(lock) 591eb40a875SDave Chinner { 592eb40a875SDave Chinner DECLARE_WAITQUEUE(wait, current); 593eb40a875SDave Chinner 594eb40a875SDave Chinner add_wait_queue_exclusive(wq, &wait); 595eb40a875SDave Chinner __set_current_state(TASK_UNINTERRUPTIBLE); 596eb40a875SDave Chinner spin_unlock(lock); 597eb40a875SDave Chinner schedule(); 598eb40a875SDave Chinner remove_wait_queue(wq, &wait); 599eb40a875SDave Chinner } 6001da177e4SLinus Torvalds 601a79b28c2SDave Chinner int xlog_wait_on_iclog(struct xlog_in_core *iclog); 602a79b28c2SDave Chinner 603a45086e2SBrian Foster /* 604a45086e2SBrian Foster * The LSN is valid so long as it is behind the current LSN. If it isn't, this 605a45086e2SBrian Foster * means that the next log record that includes this metadata could have a 606a45086e2SBrian Foster * smaller LSN. In turn, this means that the modification in the log would not 607a45086e2SBrian Foster * replay. 608a45086e2SBrian Foster */ 609a45086e2SBrian Foster static inline bool 610a45086e2SBrian Foster xlog_valid_lsn( 611a45086e2SBrian Foster struct xlog *log, 612a45086e2SBrian Foster xfs_lsn_t lsn) 613a45086e2SBrian Foster { 614a45086e2SBrian Foster int cur_cycle; 615a45086e2SBrian Foster int cur_block; 616a45086e2SBrian Foster bool valid = true; 617a45086e2SBrian Foster 618a45086e2SBrian Foster /* 619a45086e2SBrian Foster * First, sample the current lsn without locking to avoid added 620a45086e2SBrian Foster * contention from metadata I/O. The current cycle and block are updated 621a45086e2SBrian Foster * (in xlog_state_switch_iclogs()) and read here in a particular order 622a45086e2SBrian Foster * to avoid false negatives (e.g., thinking the metadata LSN is valid 623a45086e2SBrian Foster * when it is not). 624a45086e2SBrian Foster * 625a45086e2SBrian Foster * The current block is always rewound before the cycle is bumped in 626a45086e2SBrian Foster * xlog_state_switch_iclogs() to ensure the current LSN is never seen in 627a45086e2SBrian Foster * a transiently forward state. Instead, we can see the LSN in a 628a45086e2SBrian Foster * transiently behind state if we happen to race with a cycle wrap. 629a45086e2SBrian Foster */ 6306aa7de05SMark Rutland cur_cycle = READ_ONCE(log->l_curr_cycle); 631a45086e2SBrian Foster smp_rmb(); 6326aa7de05SMark Rutland cur_block = READ_ONCE(log->l_curr_block); 633a45086e2SBrian Foster 634a45086e2SBrian Foster if ((CYCLE_LSN(lsn) > cur_cycle) || 635a45086e2SBrian Foster (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) { 636a45086e2SBrian Foster /* 637a45086e2SBrian Foster * If the metadata LSN appears invalid, it's possible the check 638a45086e2SBrian Foster * above raced with a wrap to the next log cycle. Grab the lock 639a45086e2SBrian Foster * to check for sure. 640a45086e2SBrian Foster */ 641a45086e2SBrian Foster spin_lock(&log->l_icloglock); 642a45086e2SBrian Foster cur_cycle = log->l_curr_cycle; 643a45086e2SBrian Foster cur_block = log->l_curr_block; 644a45086e2SBrian Foster spin_unlock(&log->l_icloglock); 645a45086e2SBrian Foster 646a45086e2SBrian Foster if ((CYCLE_LSN(lsn) > cur_cycle) || 647a45086e2SBrian Foster (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) 648a45086e2SBrian Foster valid = false; 649a45086e2SBrian Foster } 650a45086e2SBrian Foster 651a45086e2SBrian Foster return valid; 652a45086e2SBrian Foster } 653a45086e2SBrian Foster 654*45ff8b47SDave Chinner /* 655*45ff8b47SDave Chinner * Log vector and shadow buffers can be large, so we need to use kvmalloc() here 656*45ff8b47SDave Chinner * to ensure success. Unfortunately, kvmalloc() only allows GFP_KERNEL contexts 657*45ff8b47SDave Chinner * to fall back to vmalloc, so we can't actually do anything useful with gfp 658*45ff8b47SDave Chinner * flags to control the kmalloc() behaviour within kvmalloc(). Hence kmalloc() 659*45ff8b47SDave Chinner * will do direct reclaim and compaction in the slow path, both of which are 660*45ff8b47SDave Chinner * horrendously expensive. We just want kmalloc to fail fast and fall back to 661*45ff8b47SDave Chinner * vmalloc if it can't get somethign straight away from the free lists or 662*45ff8b47SDave Chinner * buddy allocator. Hence we have to open code kvmalloc outselves here. 663*45ff8b47SDave Chinner * 664*45ff8b47SDave Chinner * This assumes that the caller uses memalloc_nofs_save task context here, so 665*45ff8b47SDave Chinner * despite the use of GFP_KERNEL here, we are going to be doing GFP_NOFS 666*45ff8b47SDave Chinner * allocations. This is actually the only way to make vmalloc() do GFP_NOFS 667*45ff8b47SDave Chinner * allocations, so lets just all pretend this is a GFP_KERNEL context 668*45ff8b47SDave Chinner * operation.... 669*45ff8b47SDave Chinner */ 670*45ff8b47SDave Chinner static inline void * 671*45ff8b47SDave Chinner xlog_kvmalloc( 672*45ff8b47SDave Chinner size_t buf_size) 673*45ff8b47SDave Chinner { 674*45ff8b47SDave Chinner gfp_t flags = GFP_KERNEL; 675*45ff8b47SDave Chinner void *p; 676*45ff8b47SDave Chinner 677*45ff8b47SDave Chinner flags &= ~__GFP_DIRECT_RECLAIM; 678*45ff8b47SDave Chinner flags |= __GFP_NOWARN | __GFP_NORETRY; 679*45ff8b47SDave Chinner do { 680*45ff8b47SDave Chinner p = kmalloc(buf_size, flags); 681*45ff8b47SDave Chinner if (!p) 682*45ff8b47SDave Chinner p = vmalloc(buf_size); 683*45ff8b47SDave Chinner } while (!p); 684*45ff8b47SDave Chinner 685*45ff8b47SDave Chinner return p; 686*45ff8b47SDave Chinner } 687*45ff8b47SDave Chinner 6881da177e4SLinus Torvalds #endif /* __XFS_LOG_PRIV_H__ */ 689