xref: /linux/fs/xfs/xfs_mount.c (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_bit.h"
25 #include "xfs_inum.h"
26 #include "xfs_sb.h"
27 #include "xfs_ag.h"
28 #include "xfs_mount.h"
29 #include "xfs_da_format.h"
30 #include "xfs_inode.h"
31 #include "xfs_dir2.h"
32 #include "xfs_ialloc.h"
33 #include "xfs_alloc.h"
34 #include "xfs_rtalloc.h"
35 #include "xfs_bmap.h"
36 #include "xfs_trans.h"
37 #include "xfs_trans_priv.h"
38 #include "xfs_log.h"
39 #include "xfs_error.h"
40 #include "xfs_quota.h"
41 #include "xfs_fsops.h"
42 #include "xfs_trace.h"
43 #include "xfs_icache.h"
44 #include "xfs_dinode.h"
45 #include "xfs_sysfs.h"
46 
47 
48 #ifdef HAVE_PERCPU_SB
49 STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
50 						int);
51 STATIC void	xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
52 						int);
53 STATIC void	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
54 #else
55 
56 #define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
57 #define xfs_icsb_balance_counter_locked(mp, a, b)	do { } while (0)
58 #endif
59 
60 static DEFINE_MUTEX(xfs_uuid_table_mutex);
61 static int xfs_uuid_table_size;
62 static uuid_t *xfs_uuid_table;
63 
64 extern struct kset *xfs_kset;
65 
66 /*
67  * See if the UUID is unique among mounted XFS filesystems.
68  * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
69  */
70 STATIC int
71 xfs_uuid_mount(
72 	struct xfs_mount	*mp)
73 {
74 	uuid_t			*uuid = &mp->m_sb.sb_uuid;
75 	int			hole, i;
76 
77 	if (mp->m_flags & XFS_MOUNT_NOUUID)
78 		return 0;
79 
80 	if (uuid_is_nil(uuid)) {
81 		xfs_warn(mp, "Filesystem has nil UUID - can't mount");
82 		return -EINVAL;
83 	}
84 
85 	mutex_lock(&xfs_uuid_table_mutex);
86 	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
87 		if (uuid_is_nil(&xfs_uuid_table[i])) {
88 			hole = i;
89 			continue;
90 		}
91 		if (uuid_equal(uuid, &xfs_uuid_table[i]))
92 			goto out_duplicate;
93 	}
94 
95 	if (hole < 0) {
96 		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
97 			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
98 			xfs_uuid_table_size  * sizeof(*xfs_uuid_table),
99 			KM_SLEEP);
100 		hole = xfs_uuid_table_size++;
101 	}
102 	xfs_uuid_table[hole] = *uuid;
103 	mutex_unlock(&xfs_uuid_table_mutex);
104 
105 	return 0;
106 
107  out_duplicate:
108 	mutex_unlock(&xfs_uuid_table_mutex);
109 	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
110 	return -EINVAL;
111 }
112 
113 STATIC void
114 xfs_uuid_unmount(
115 	struct xfs_mount	*mp)
116 {
117 	uuid_t			*uuid = &mp->m_sb.sb_uuid;
118 	int			i;
119 
120 	if (mp->m_flags & XFS_MOUNT_NOUUID)
121 		return;
122 
123 	mutex_lock(&xfs_uuid_table_mutex);
124 	for (i = 0; i < xfs_uuid_table_size; i++) {
125 		if (uuid_is_nil(&xfs_uuid_table[i]))
126 			continue;
127 		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
128 			continue;
129 		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
130 		break;
131 	}
132 	ASSERT(i < xfs_uuid_table_size);
133 	mutex_unlock(&xfs_uuid_table_mutex);
134 }
135 
136 
137 STATIC void
138 __xfs_free_perag(
139 	struct rcu_head	*head)
140 {
141 	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
142 
143 	ASSERT(atomic_read(&pag->pag_ref) == 0);
144 	kmem_free(pag);
145 }
146 
147 /*
148  * Free up the per-ag resources associated with the mount structure.
149  */
150 STATIC void
151 xfs_free_perag(
152 	xfs_mount_t	*mp)
153 {
154 	xfs_agnumber_t	agno;
155 	struct xfs_perag *pag;
156 
157 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
158 		spin_lock(&mp->m_perag_lock);
159 		pag = radix_tree_delete(&mp->m_perag_tree, agno);
160 		spin_unlock(&mp->m_perag_lock);
161 		ASSERT(pag);
162 		ASSERT(atomic_read(&pag->pag_ref) == 0);
163 		call_rcu(&pag->rcu_head, __xfs_free_perag);
164 	}
165 }
166 
167 /*
168  * Check size of device based on the (data/realtime) block count.
169  * Note: this check is used by the growfs code as well as mount.
170  */
171 int
172 xfs_sb_validate_fsb_count(
173 	xfs_sb_t	*sbp,
174 	__uint64_t	nblocks)
175 {
176 	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
177 	ASSERT(sbp->sb_blocklog >= BBSHIFT);
178 
179 	/* Limited by ULONG_MAX of page cache index */
180 	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
181 		return -EFBIG;
182 	return 0;
183 }
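/*
 * For illustration, assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12) and 4 KiB
 * filesystem blocks (sb_blocklog == 12): the shift above is zero, so nblocks
 * itself must fit in ULONG_MAX.  On a 32-bit kernel that is roughly 2^32
 * blocks, i.e. about 16 TiB of data.  With 512 byte blocks the shift is 3 and
 * ~2^35 blocks pass, which is again ~16 TiB, since the real limit is the
 * number of page cache pages, not blocks.
 */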
184 
185 int
186 xfs_initialize_perag(
187 	xfs_mount_t	*mp,
188 	xfs_agnumber_t	agcount,
189 	xfs_agnumber_t	*maxagi)
190 {
191 	xfs_agnumber_t	index;
192 	xfs_agnumber_t	first_initialised = 0;
193 	xfs_perag_t	*pag;
194 	xfs_agino_t	agino;
195 	xfs_ino_t	ino;
196 	xfs_sb_t	*sbp = &mp->m_sb;
197 	int		error = -ENOMEM;
198 
199 	/*
200 	 * Walk the current per-ag tree so we don't try to initialise AGs
201 	 * that already exist (growfs case). Allocate and insert all the
202 	 * AGs we don't find ready for initialisation.
203 	 */
204 	for (index = 0; index < agcount; index++) {
205 		pag = xfs_perag_get(mp, index);
206 		if (pag) {
207 			xfs_perag_put(pag);
208 			continue;
209 		}
210 		if (!first_initialised)
211 			first_initialised = index;
212 
213 		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
214 		if (!pag)
215 			goto out_unwind;
216 		pag->pag_agno = index;
217 		pag->pag_mount = mp;
218 		spin_lock_init(&pag->pag_ici_lock);
219 		mutex_init(&pag->pag_ici_reclaim_lock);
220 		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
221 		spin_lock_init(&pag->pag_buf_lock);
222 		pag->pag_buf_tree = RB_ROOT;
223 
224 		if (radix_tree_preload(GFP_NOFS))
225 			goto out_unwind;
226 
227 		spin_lock(&mp->m_perag_lock);
228 		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
229 			BUG();
230 			spin_unlock(&mp->m_perag_lock);
231 			radix_tree_preload_end();
232 			error = -EEXIST;
233 			goto out_unwind;
234 		}
235 		spin_unlock(&mp->m_perag_lock);
236 		radix_tree_preload_end();
237 	}
238 
239 	/*
240 	 * If we mount with the inode64 option, or if no inode overflows
241 	 * the legacy 32-bit address space, clear the inode32 option.
242 	 */
243 	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
244 	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
245 
246 	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
247 		mp->m_flags |= XFS_MOUNT_32BITINODES;
248 	else
249 		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
250 
251 	if (mp->m_flags & XFS_MOUNT_32BITINODES)
252 		index = xfs_set_inode32(mp, agcount);
253 	else
254 		index = xfs_set_inode64(mp, agcount);
255 
256 	if (maxagi)
257 		*maxagi = index;
258 	return 0;
259 
260 out_unwind:
261 	kmem_free(pag);
262 	for (; index > first_initialised; index--) {
263 		pag = radix_tree_delete(&mp->m_perag_tree, index);
264 		kmem_free(pag);
265 	}
266 	return error;
267 }
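/*
 * A minimal sketch of how the per-ag structures set up here are typically
 * consumed elsewhere (the get/put pair mirrors the lookup in the loop above;
 * agno is a hypothetical AG number):
 *
 *	struct xfs_perag *pag = xfs_perag_get(mp, agno);
 *	if (pag) {
 *		... use pag->pag_ici_root, pag->pag_buf_tree, etc. ...
 *		xfs_perag_put(pag);
 *	}
 */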
268 
269 /*
270  * xfs_readsb
271  *
272  * Does the initial read of the superblock.
273  */
274 int
275 xfs_readsb(
276 	struct xfs_mount *mp,
277 	int		flags)
278 {
279 	unsigned int	sector_size;
280 	struct xfs_buf	*bp;
281 	struct xfs_sb	*sbp = &mp->m_sb;
282 	int		error;
283 	int		loud = !(flags & XFS_MFSI_QUIET);
284 	const struct xfs_buf_ops *buf_ops;
285 
286 	ASSERT(mp->m_sb_bp == NULL);
287 	ASSERT(mp->m_ddev_targp != NULL);
288 
289 	/*
290 	 * For the initial read, we must guess at the sector
291 	 * size based on the block device.  It's enough to
292 	 * get the sb_sectsize out of the superblock and
293 	 * then reread with the proper length.
294 	 * We don't verify it yet, because it may not be complete.
295 	 */
296 	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
297 	buf_ops = NULL;
298 
299 	/*
300 	 * Allocate a (locked) buffer to hold the superblock.
301 	 * This will be kept around at all times to optimize
302 	 * access to the superblock.
303 	 */
304 reread:
305 	bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
306 				   BTOBB(sector_size), 0, buf_ops);
307 	if (!bp) {
308 		if (loud)
309 			xfs_warn(mp, "SB buffer read failed");
310 		return -EIO;
311 	}
312 	if (bp->b_error) {
313 		error = bp->b_error;
314 		if (loud)
315 			xfs_warn(mp, "SB validate failed with error %d.", error);
316 		/* bad CRC means corrupted metadata */
317 		if (error == -EFSBADCRC)
318 			error = -EFSCORRUPTED;
319 		goto release_buf;
320 	}
321 
322 	/*
323 	 * Initialize the mount structure from the superblock.
324 	 */
325 	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
326 
327 	/*
328 	 * If we haven't validated the superblock, do so now before we try
329 	 * to check the sector size and reread the superblock appropriately.
330 	 */
331 	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
332 		if (loud)
333 			xfs_warn(mp, "Invalid superblock magic number");
334 		error = -EINVAL;
335 		goto release_buf;
336 	}
337 
338 	/*
339 	 * We must be able to do sector-sized and sector-aligned IO.
340 	 */
341 	if (sector_size > sbp->sb_sectsize) {
342 		if (loud)
343 			xfs_warn(mp, "device supports %u byte sectors (not %u)",
344 				sector_size, sbp->sb_sectsize);
345 		error = -ENOSYS;
346 		goto release_buf;
347 	}
348 
349 	if (buf_ops == NULL) {
350 		/*
351 		 * Re-read the superblock so the buffer is correctly sized,
352 		 * and properly verified.
353 		 */
354 		xfs_buf_relse(bp);
355 		sector_size = sbp->sb_sectsize;
356 		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
357 		goto reread;
358 	}
359 
360 	/* Initialize per-cpu counters */
361 	xfs_icsb_reinit_counters(mp);
362 
363 	/* no need to be quiet anymore, so reset the buf ops */
364 	bp->b_ops = &xfs_sb_buf_ops;
365 
366 	mp->m_sb_bp = bp;
367 	xfs_buf_unlock(bp);
368 	return 0;
369 
370 release_buf:
371 	xfs_buf_relse(bp);
372 	return error;
373 }
374 
375 /*
376  * Update alignment values based on mount options and sb values
377  */
378 STATIC int
379 xfs_update_alignment(xfs_mount_t *mp)
380 {
381 	xfs_sb_t	*sbp = &(mp->m_sb);
382 
383 	if (mp->m_dalign) {
384 		/*
385 		 * If stripe unit and stripe width are not multiples
386 		 * of the fs blocksize turn off alignment.
387 		 */
388 		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
389 		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
390 			xfs_warn(mp,
391 		"alignment check failed: sunit/swidth vs. blocksize(%d)",
392 				sbp->sb_blocksize);
393 			return -EINVAL;
394 		} else {
395 			/*
396 			 * Convert the stripe unit and width to FSBs.
397 			 */
398 			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
399 			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
400 				xfs_warn(mp,
401 			"alignment check failed: sunit/swidth vs. agsize(%d)",
402 					 sbp->sb_agblocks);
403 				return -EINVAL;
404 			} else if (mp->m_dalign) {
405 				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
406 			} else {
407 				xfs_warn(mp,
408 			"alignment check failed: sunit(%d) less than bsize(%d)",
409 					 mp->m_dalign, sbp->sb_blocksize);
410 				return -EINVAL;
411 			}
412 		}
413 
414 		/*
415 		 * Update superblock with new values
416 		 * and log changes
417 		 */
418 		if (xfs_sb_version_hasdalign(sbp)) {
419 			if (sbp->sb_unit != mp->m_dalign) {
420 				sbp->sb_unit = mp->m_dalign;
421 				mp->m_update_flags |= XFS_SB_UNIT;
422 			}
423 			if (sbp->sb_width != mp->m_swidth) {
424 				sbp->sb_width = mp->m_swidth;
425 				mp->m_update_flags |= XFS_SB_WIDTH;
426 			}
427 		} else {
428 			xfs_warn(mp,
429 	"cannot change alignment: superblock does not support data alignment");
430 			return -EINVAL;
431 		}
432 	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
433 		    xfs_sb_version_hasdalign(&mp->m_sb)) {
434 			mp->m_dalign = sbp->sb_unit;
435 			mp->m_swidth = sbp->sb_width;
436 	}
437 
438 	return 0;
439 }
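/*
 * A worked example of the conversion above, assuming a 4 KiB block filesystem
 * mounted with sunit=128,swidth=512 (both in 512 byte basic blocks):
 * BBTOB(128) = 64 KiB is a multiple of the block size, so the check passes,
 * and XFS_BB_TO_FSBT() gives m_dalign = 128/8 = 16 filesystem blocks and
 * m_swidth = 512/8 = 64 filesystem blocks, which are then written back into
 * sb_unit/sb_width if they differ.
 */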
440 
441 /*
442  * Set the maximum inode count for this filesystem
443  */
444 STATIC void
445 xfs_set_maxicount(xfs_mount_t *mp)
446 {
447 	xfs_sb_t	*sbp = &(mp->m_sb);
448 	__uint64_t	icount;
449 
450 	if (sbp->sb_imax_pct) {
451 		/*
452 		 * Make sure the maximum inode count is a multiple
453 		 * of the units we allocate inodes in.
454 		 */
455 		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
456 		do_div(icount, 100);
457 		do_div(icount, mp->m_ialloc_blks);
458 		mp->m_maxicount = (icount * mp->m_ialloc_blks)  <<
459 				   sbp->sb_inopblog;
460 	} else {
461 		mp->m_maxicount = 0;
462 	}
463 }
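/*
 * A worked example, assuming imax_pct = 25 on a filesystem with 1,000,000
 * 4 KiB blocks, 256 byte inodes (16 per block, so sb_inopblog = 4) and
 * m_ialloc_blks = 4: icount = 1000000 * 25 / 100 = 250000 blocks, rounded
 * down to a multiple of 4 (no change here), then 250000 << 4 = 4,000,000
 * inodes for m_maxicount.
 */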
464 
465 /*
466  * Set the default minimum read and write sizes unless
467  * already specified in a mount option.
468  * We use smaller I/O sizes when the file system
469  * is being used for NFS service (wsync mount option).
470  */
471 STATIC void
472 xfs_set_rw_sizes(xfs_mount_t *mp)
473 {
474 	xfs_sb_t	*sbp = &(mp->m_sb);
475 	int		readio_log, writeio_log;
476 
477 	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
478 		if (mp->m_flags & XFS_MOUNT_WSYNC) {
479 			readio_log = XFS_WSYNC_READIO_LOG;
480 			writeio_log = XFS_WSYNC_WRITEIO_LOG;
481 		} else {
482 			readio_log = XFS_READIO_LOG_LARGE;
483 			writeio_log = XFS_WRITEIO_LOG_LARGE;
484 		}
485 	} else {
486 		readio_log = mp->m_readio_log;
487 		writeio_log = mp->m_writeio_log;
488 	}
489 
490 	if (sbp->sb_blocklog > readio_log) {
491 		mp->m_readio_log = sbp->sb_blocklog;
492 	} else {
493 		mp->m_readio_log = readio_log;
494 	}
495 	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
496 	if (sbp->sb_blocklog > writeio_log) {
497 		mp->m_writeio_log = sbp->sb_blocklog;
498 	} else {
499 		mp->m_writeio_log = writeio_log;
500 	}
501 	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
502 }
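/*
 * For example, if the chosen readio_log/writeio_log is 16 (a 64 KiB preferred
 * I/O size) on a 4 KiB block filesystem (sb_blocklog = 12), the code above
 * yields m_readio_blocks = m_writeio_blocks = 1 << (16 - 12) = 16 filesystem
 * blocks.  A block size larger than the preferred I/O size simply clamps the
 * log values to sb_blocklog, i.e. one block per I/O.
 */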
503 
504 /*
505  * precalculate the low space thresholds for dynamic speculative preallocation.
506  */
507 void
508 xfs_set_low_space_thresholds(
509 	struct xfs_mount	*mp)
510 {
511 	int i;
512 
513 	for (i = 0; i < XFS_LOWSP_MAX; i++) {
514 		__uint64_t space = mp->m_sb.sb_dblocks;
515 
516 		do_div(space, 100);
517 		mp->m_low_space[i] = space * (i + 1);
518 	}
519 }
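/*
 * The loop above leaves m_low_space[i] holding (i + 1) percent of sb_dblocks,
 * i.e. thresholds at 1%, 2%, ... up to XFS_LOWSP_MAX percent of the data
 * device.  For a 10,000,000 block filesystem the first threshold is 100,000
 * blocks and each subsequent one adds another 100,000.
 */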
520 
521 
522 /*
523  * Set whether we're using inode alignment.
524  */
525 STATIC void
526 xfs_set_inoalignment(xfs_mount_t *mp)
527 {
528 	if (xfs_sb_version_hasalign(&mp->m_sb) &&
529 	    mp->m_sb.sb_inoalignmt >=
530 	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
531 		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
532 	else
533 		mp->m_inoalign_mask = 0;
534 	/*
535 	 * If we are using stripe alignment, check whether
536 	 * the stripe unit is a multiple of the inode alignment
537 	 */
538 	if (mp->m_dalign && mp->m_inoalign_mask &&
539 	    !(mp->m_dalign & mp->m_inoalign_mask))
540 		mp->m_sinoalign = mp->m_dalign;
541 	else
542 		mp->m_sinoalign = 0;
543 }
544 
545 /*
546  * Check that the data (and log if separate) is an ok size.
547  */
548 STATIC int
549 xfs_check_sizes(xfs_mount_t *mp)
550 {
551 	xfs_buf_t	*bp;
552 	xfs_daddr_t	d;
553 
554 	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
555 	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
556 		xfs_warn(mp, "filesystem size mismatch detected");
557 		return -EFBIG;
558 	}
559 	bp = xfs_buf_read_uncached(mp->m_ddev_targp,
560 					d - XFS_FSS_TO_BB(mp, 1),
561 					XFS_FSS_TO_BB(mp, 1), 0, NULL);
562 	if (!bp) {
563 		xfs_warn(mp, "last sector read failed");
564 		return -EIO;
565 	}
566 	xfs_buf_relse(bp);
567 
568 	if (mp->m_logdev_targp != mp->m_ddev_targp) {
569 		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
570 		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
571 			xfs_warn(mp, "log size mismatch detected");
572 			return -EFBIG;
573 		}
574 		bp = xfs_buf_read_uncached(mp->m_logdev_targp,
575 					d - XFS_FSB_TO_BB(mp, 1),
576 					XFS_FSB_TO_BB(mp, 1), 0, NULL);
577 		if (!bp) {
578 			xfs_warn(mp, "log device read failed");
579 			return -EIO;
580 		}
581 		xfs_buf_relse(bp);
582 	}
583 	return 0;
584 }
585 
586 /*
587  * Clear the quotaflags in memory and in the superblock.
588  */
589 int
590 xfs_mount_reset_sbqflags(
591 	struct xfs_mount	*mp)
592 {
593 	int			error;
594 	struct xfs_trans	*tp;
595 
596 	mp->m_qflags = 0;
597 
598 	/*
599 	 * It is OK to look at sb_qflags here in mount path,
600 	 * without m_sb_lock.
601 	 */
602 	if (mp->m_sb.sb_qflags == 0)
603 		return 0;
604 	spin_lock(&mp->m_sb_lock);
605 	mp->m_sb.sb_qflags = 0;
606 	spin_unlock(&mp->m_sb_lock);
607 
608 	/*
609 	 * If the fs is readonly, let the incore superblock run
610 	 * with quotas off but don't flush the update out to disk
611 	 */
612 	if (mp->m_flags & XFS_MOUNT_RDONLY)
613 		return 0;
614 
615 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
616 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
617 	if (error) {
618 		xfs_trans_cancel(tp, 0);
619 		xfs_alert(mp, "%s: Superblock update failed!", __func__);
620 		return error;
621 	}
622 
623 	xfs_mod_sb(tp, XFS_SB_QFLAGS);
624 	return xfs_trans_commit(tp, 0);
625 }
626 
627 __uint64_t
628 xfs_default_resblks(xfs_mount_t *mp)
629 {
630 	__uint64_t resblks;
631 
632 	/*
633 	 * We default to 5% or 8192 fsbs of space reserved, whichever is
634 	 * smaller.  This is intended to cover concurrent allocation
635 	 * transactions when we initially hit enospc. These each require a 4
636 	 * block reservation. Hence by default we cover roughly 2000 concurrent
637 	 * allocation reservations.
638 	 */
639 	resblks = mp->m_sb.sb_dblocks;
640 	do_div(resblks, 20);
641 	resblks = min_t(__uint64_t, resblks, 8192);
642 	return resblks;
643 }
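/*
 * A couple of example values for the default above: a small 100,000 block
 * filesystem reserves 100000 / 20 = 5000 blocks (the 5% case), while anything
 * with more than 8192 * 20 = 163,840 data blocks hits the cap and reserves
 * exactly 8192 blocks (32 MiB at a 4 KiB block size).
 */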
644 
645 /*
646  * This function does the following on an initial mount of a file system:
647  *	- reads the superblock from disk and init the mount struct
648  *	- if we're a 32-bit kernel, do a size check on the superblock
649  *		so we don't mount terabyte filesystems
650  *	- init mount struct realtime fields
651  *	- allocate inode hash table for fs
652  *	- init directory manager
653  *	- perform recovery and init the log manager
654  */
655 int
656 xfs_mountfs(
657 	xfs_mount_t	*mp)
658 {
659 	xfs_sb_t	*sbp = &(mp->m_sb);
660 	xfs_inode_t	*rip;
661 	__uint64_t	resblks;
662 	uint		quotamount = 0;
663 	uint		quotaflags = 0;
664 	int		error = 0;
665 
666 	xfs_sb_mount_common(mp, sbp);
667 
668 	/*
669 	 * Check for mismatched features2 values.  Older kernels
670 	 * read & wrote into the wrong sb offset for sb_features2
671 	 * on some platforms due to xfs_sb_t not being 64-bit size aligned
672 	 * when sb_features2 was added, which made older superblock
673 	 * reading/writing routines swap it as a 64-bit value.
674 	 *
675 	 * For backwards compatibility, we make both slots equal.
676 	 *
677 	 * If we detect a mismatched field, we OR the set bits into the
678 	 * existing features2 field in case it has already been modified; we
679 	 * don't want to lose any features.  We then update the bad location
680 	 * with the ORed value so that older kernels will see any features2
681 	 * flags, and mark the two fields as needing updates once the
682 	 * transaction subsystem is online.
683 	 */
684 	if (xfs_sb_has_mismatched_features2(sbp)) {
685 		xfs_warn(mp, "correcting sb_features alignment problem");
686 		sbp->sb_features2 |= sbp->sb_bad_features2;
687 		sbp->sb_bad_features2 = sbp->sb_features2;
688 		mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
689 
690 		/*
691 		 * Re-check for ATTR2 in case it was found in bad_features2
692 		 * slot.
693 		 */
694 		if (xfs_sb_version_hasattr2(&mp->m_sb) &&
695 		   !(mp->m_flags & XFS_MOUNT_NOATTR2))
696 			mp->m_flags |= XFS_MOUNT_ATTR2;
697 	}
698 
699 	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
700 	   (mp->m_flags & XFS_MOUNT_NOATTR2)) {
701 		xfs_sb_version_removeattr2(&mp->m_sb);
702 		mp->m_update_flags |= XFS_SB_FEATURES2;
703 
704 		/* update sb_versionnum for the clearing of the morebits */
705 		if (!sbp->sb_features2)
706 			mp->m_update_flags |= XFS_SB_VERSIONNUM;
707 	}
708 
709 	/* always use v2 inodes by default now */
710 	if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
711 		mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
712 		mp->m_update_flags |= XFS_SB_VERSIONNUM;
713 	}
714 
715 	/*
716 	 * Check if sb_agblocks is aligned at stripe boundary
717 	 * If sb_agblocks is NOT aligned turn off m_dalign since
718 	 * allocator alignment is within an ag, therefore ag has
719 	 * to be aligned at stripe boundary.
720 	 */
721 	error = xfs_update_alignment(mp);
722 	if (error)
723 		goto out;
724 
725 	xfs_alloc_compute_maxlevels(mp);
726 	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
727 	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
728 	xfs_ialloc_compute_maxlevels(mp);
729 
730 	xfs_set_maxicount(mp);
731 
732 	mp->m_kobj.kobject.kset = xfs_kset;
733 	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
734 	if (error)
735 		goto out;
736 
737 	error = xfs_uuid_mount(mp);
738 	if (error)
739 		goto out_remove_sysfs;
740 
741 	/*
742 	 * Set the minimum read and write sizes
743 	 */
744 	xfs_set_rw_sizes(mp);
745 
746 	/* set the low space thresholds for dynamic preallocation */
747 	xfs_set_low_space_thresholds(mp);
748 
749 	/*
750 	 * Set the inode cluster size.
751 	 * This may still be overridden by the file system
752 	 * block size if it is larger than the chosen cluster size.
753 	 *
754 	 * For v5 filesystems, scale the cluster size with the inode size to
755 	 * keep a constant ratio of inode per cluster buffer, but only if mkfs
756 	 * has set the inode alignment value appropriately for larger cluster
757 	 * sizes.
758 	 */
759 	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
760 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
761 		int	new_size = mp->m_inode_cluster_size;
762 
763 		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
764 		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
765 			mp->m_inode_cluster_size = new_size;
766 	}
767 
768 	/*
769 	 * Set inode alignment fields
770 	 */
771 	xfs_set_inoalignment(mp);
772 
773 	/*
774 	 * Check that the data (and log if separate) is an ok size.
775 	 */
776 	error = xfs_check_sizes(mp);
777 	if (error)
778 		goto out_remove_uuid;
779 
780 	/*
781 	 * Initialize realtime fields in the mount structure
782 	 */
783 	error = xfs_rtmount_init(mp);
784 	if (error) {
785 		xfs_warn(mp, "RT mount failed");
786 		goto out_remove_uuid;
787 	}
788 
789 	/*
790 	 *  Copies the low order bits of the timestamp and the randomly
791 	 *  set "sequence" number out of a UUID.
792 	 */
793 	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
794 
795 	mp->m_dmevmask = 0;	/* not persistent; set after each mount */
796 
797 	error = xfs_da_mount(mp);
798 	if (error) {
799 		xfs_warn(mp, "Failed dir/attr init: %d", error);
800 		goto out_remove_uuid;
801 	}
802 
803 	/*
804 	 * Initialize the precomputed transaction reservations values.
805 	 */
806 	xfs_trans_init(mp);
807 
808 	/*
809 	 * Allocate and initialize the per-ag data.
810 	 */
811 	spin_lock_init(&mp->m_perag_lock);
812 	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
813 	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
814 	if (error) {
815 		xfs_warn(mp, "Failed per-ag init: %d", error);
816 		goto out_free_dir;
817 	}
818 
819 	if (!sbp->sb_logblocks) {
820 		xfs_warn(mp, "no log defined");
821 		XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
822 		error = -EFSCORRUPTED;
823 		goto out_free_perag;
824 	}
825 
826 	/*
827 	 * log's mount-time initialization. Perform 1st part recovery if needed
828 	 */
829 	error = xfs_log_mount(mp, mp->m_logdev_targp,
830 			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
831 			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
832 	if (error) {
833 		xfs_warn(mp, "log mount failed");
834 		goto out_fail_wait;
835 	}
836 
837 	/*
838 	 * Now the log is mounted, we know if it was an unclean shutdown or
839 	 * not. If it was, the first phase of recovery has completed and we
840 	 * have consistent AG blocks on disk. We have not recovered EFIs yet,
841 	 * but they are recovered transactionally in the second recovery phase
842 	 * later.
843 	 *
844 	 * Hence we can safely re-initialise incore superblock counters from
845 	 * the per-ag data. These may not be correct if the filesystem was not
846 	 * cleanly unmounted, so we need to wait for recovery to finish before
847 	 * doing this.
848 	 *
849 	 * If the filesystem was cleanly unmounted, then we can trust the
850 	 * values in the superblock to be correct and we don't need to do
851 	 * anything here.
852 	 *
853 	 * If we are currently making the filesystem, the initialisation will
854 	 * fail as the perag data is in an undefined state.
855 	 */
856 	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
857 	    !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
858 	     !mp->m_sb.sb_inprogress) {
859 		error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
860 		if (error)
861 			goto out_log_dealloc;
862 	}
863 
864 	/*
865 	 * Get and sanity-check the root inode.
866 	 * Save the pointer to it in the mount structure.
867 	 */
868 	error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
869 	if (error) {
870 		xfs_warn(mp, "failed to read root inode");
871 		goto out_log_dealloc;
872 	}
873 
874 	ASSERT(rip != NULL);
875 
876 	if (unlikely(!S_ISDIR(rip->i_d.di_mode))) {
877 		xfs_warn(mp, "corrupted root inode %llu: not a directory",
878 			(unsigned long long)rip->i_ino);
879 		xfs_iunlock(rip, XFS_ILOCK_EXCL);
880 		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
881 				 mp);
882 		error = -EFSCORRUPTED;
883 		goto out_rele_rip;
884 	}
885 	mp->m_rootip = rip;	/* save it */
886 
887 	xfs_iunlock(rip, XFS_ILOCK_EXCL);
888 
889 	/*
890 	 * Initialize realtime inode pointers in the mount structure
891 	 */
892 	error = xfs_rtmount_inodes(mp);
893 	if (error) {
894 		/*
895 		 * Free up the root inode.
896 		 */
897 		xfs_warn(mp, "failed to read RT inodes");
898 		goto out_rele_rip;
899 	}
900 
901 	/*
902 	 * If this is a read-only mount, defer the superblock updates until
903 	 * the next remount into writeable mode.  Otherwise we would never
904 	 * perform the update e.g. for the root filesystem.
905 	 */
906 	if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
907 		error = xfs_mount_log_sb(mp, mp->m_update_flags);
908 		if (error) {
909 			xfs_warn(mp, "failed to write sb changes");
910 			goto out_rtunmount;
911 		}
912 	}
913 
914 	/*
915 	 * Initialise the XFS quota management subsystem for this mount
916 	 */
917 	if (XFS_IS_QUOTA_RUNNING(mp)) {
918 		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
919 		if (error)
920 			goto out_rtunmount;
921 	} else {
922 		ASSERT(!XFS_IS_QUOTA_ON(mp));
923 
924 		/*
925 		 * If a file system had quotas running earlier, but decided to
926 		 * mount without -o uquota/pquota/gquota options, revoke the
927 		 * quotachecked license.
928 		 */
929 		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
930 			xfs_notice(mp, "resetting quota flags");
931 			error = xfs_mount_reset_sbqflags(mp);
932 			if (error)
933 				goto out_rtunmount;
934 		}
935 	}
936 
937 	/*
938 	 * Finish recovering the file system.  This part needed to be
939 	 * delayed until after the root and real-time bitmap inodes
940 	 * were consistently read in.
941 	 */
942 	error = xfs_log_mount_finish(mp);
943 	if (error) {
944 		xfs_warn(mp, "log mount finish failed");
945 		goto out_rtunmount;
946 	}
947 
948 	/*
949 	 * Complete the quota initialisation, post-log-replay component.
950 	 */
951 	if (quotamount) {
952 		ASSERT(mp->m_qflags == 0);
953 		mp->m_qflags = quotaflags;
954 
955 		xfs_qm_mount_quotas(mp);
956 	}
957 
958 	/*
959 	 * Now we are mounted, reserve a small amount of unused space for
960 	 * privileged transactions. This is needed so that transaction
961 	 * space required for critical operations can dip into this pool
962 	 * when at ENOSPC. This is needed for operations like create with
963 	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
964 	 * are not allowed to use this reserved space.
965 	 *
966 	 * This may drive us straight to ENOSPC on mount, but that implies
967 	 * we were already there on the last unmount. Warn if this occurs.
968 	 */
969 	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
970 		resblks = xfs_default_resblks(mp);
971 		error = xfs_reserve_blocks(mp, &resblks, NULL);
972 		if (error)
973 			xfs_warn(mp,
974 	"Unable to allocate reserve blocks. Continuing without reserve pool.");
975 	}
976 
977 	return 0;
978 
979  out_rtunmount:
980 	xfs_rtunmount_inodes(mp);
981  out_rele_rip:
982 	IRELE(rip);
983  out_log_dealloc:
984 	xfs_log_unmount(mp);
985  out_fail_wait:
986 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
987 		xfs_wait_buftarg(mp->m_logdev_targp);
988 	xfs_wait_buftarg(mp->m_ddev_targp);
989  out_free_perag:
990 	xfs_free_perag(mp);
991  out_free_dir:
992 	xfs_da_unmount(mp);
993  out_remove_uuid:
994 	xfs_uuid_unmount(mp);
995  out_remove_sysfs:
996 	xfs_sysfs_del(&mp->m_kobj);
997  out:
998 	return error;
999 }
1000 
1001 /*
1002  * This flushes out the inodes, dquots and the superblock, unmounts the
1003  * log and makes sure that incore structures are freed.
1004  */
1005 void
1006 xfs_unmountfs(
1007 	struct xfs_mount	*mp)
1008 {
1009 	__uint64_t		resblks;
1010 	int			error;
1011 
1012 	cancel_delayed_work_sync(&mp->m_eofblocks_work);
1013 
1014 	xfs_qm_unmount_quotas(mp);
1015 	xfs_rtunmount_inodes(mp);
1016 	IRELE(mp->m_rootip);
1017 
1018 	/*
1019 	 * We can potentially deadlock here if we have an inode cluster
1020 	 * that has been freed but still has its buffer pinned in memory
1021 	 * because the transaction is still sitting in an iclog. The stale
1022 	 * inodes on that buffer will have their flush locks held until the
1023 	 * transaction hits the disk and the callbacks run. The inode flush
1024 	 * takes the flush lock unconditionally, and with nothing to push
1025 	 * out the iclog we will never get that unlocked. Hence we need to
1026 	 * force the log first.
1027 	 */
1028 	xfs_log_force(mp, XFS_LOG_SYNC);
1029 
1030 	/*
1031 	 * Flush all pending changes from the AIL.
1032 	 */
1033 	xfs_ail_push_all_sync(mp->m_ail);
1034 
1035 	/*
1036 	 * And reclaim all inodes.  At this point there should be no dirty
1037 	 * inodes and none should be pinned or locked, but use synchronous
1038 	 * reclaim just to be sure. We can stop background inode reclaim
1039 	 * here as well if it is still running.
1040 	 */
1041 	cancel_delayed_work_sync(&mp->m_reclaim_work);
1042 	xfs_reclaim_inodes(mp, SYNC_WAIT);
1043 
1044 	xfs_qm_unmount(mp);
1045 
1046 	/*
1047 	 * Unreserve any blocks we have so that when we unmount we don't account
1048 	 * the reserved free space as used. This is really only necessary for
1049 	 * lazy superblock counting because it trusts the incore superblock
1050 	 * counters to be absolutely correct on clean unmount.
1051 	 *
1052 	 * We don't bother correcting this elsewhere for lazy superblock
1053 	 * counting because on mount of an unclean filesystem we reconstruct the
1054 	 * correct counter value and this is irrelevant.
1055 	 *
1056 	 * For non-lazy counter filesystems, this doesn't matter at all because
1057 	 * we only ever apply deltas to the superblock and hence the incore
1058 	 * value does not matter....
1059 	 */
1060 	resblks = 0;
1061 	error = xfs_reserve_blocks(mp, &resblks, NULL);
1062 	if (error)
1063 		xfs_warn(mp, "Unable to free reserved block pool. "
1064 				"Freespace may not be correct on next mount.");
1065 
1066 	error = xfs_log_sbcount(mp);
1067 	if (error)
1068 		xfs_warn(mp, "Unable to update superblock counters. "
1069 				"Freespace may not be correct on next mount.");
1070 
1071 	xfs_log_unmount(mp);
1072 	xfs_da_unmount(mp);
1073 	xfs_uuid_unmount(mp);
1074 
1075 #if defined(DEBUG)
1076 	xfs_errortag_clearall(mp, 0);
1077 #endif
1078 	xfs_free_perag(mp);
1079 
1080 	xfs_sysfs_del(&mp->m_kobj);
1081 }
1082 
1083 int
1084 xfs_fs_writable(xfs_mount_t *mp)
1085 {
1086 	return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) ||
1087 		(mp->m_flags & XFS_MOUNT_RDONLY));
1088 }
1089 
1090 /*
1091  * xfs_log_sbcount
1092  *
1093  * Sync the superblock counters to disk.
1094  *
1095  * Note this code can be called during the process of freezing, so
1096  * we may need to use the transaction allocator which does not
1097  * block when the transaction subsystem is in its frozen state.
1098  */
1099 int
1100 xfs_log_sbcount(xfs_mount_t *mp)
1101 {
1102 	xfs_trans_t	*tp;
1103 	int		error;
1104 
1105 	if (!xfs_fs_writable(mp))
1106 		return 0;
1107 
1108 	xfs_icsb_sync_counters(mp, 0);
1109 
1110 	/*
1111 	 * we don't need to do this if we are updating the superblock
1112 	 * counters on every modification.
1113 	 */
1114 	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1115 		return 0;
1116 
1117 	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
1118 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
1119 	if (error) {
1120 		xfs_trans_cancel(tp, 0);
1121 		return error;
1122 	}
1123 
1124 	xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
1125 	xfs_trans_set_sync(tp);
1126 	error = xfs_trans_commit(tp, 0);
1127 	return error;
1128 }
1129 
1130 /*
1131  * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
1132  * a delta to a specified field in the in-core superblock.  Simply
1133  * switch on the field indicated and apply the delta to that field.
1134  * Fields are not allowed to dip below zero, so if the delta would
1135  * do this, do not apply it and return -EINVAL.
1136  *
1137  * The m_sb_lock must be held when this routine is called.
1138  */
1139 STATIC int
1140 xfs_mod_incore_sb_unlocked(
1141 	xfs_mount_t	*mp,
1142 	xfs_sb_field_t	field,
1143 	int64_t		delta,
1144 	int		rsvd)
1145 {
1146 	int		scounter;	/* short counter for 32 bit fields */
1147 	long long	lcounter;	/* long counter for 64 bit fields */
1148 	long long	res_used, rem;
1149 
1150 	/*
1151 	 * With the in-core superblock spin lock held, switch
1152 	 * on the indicated field.  Apply the delta to the
1153 	 * proper field.  If the field's value would dip below
1154 	 * 0, then do not apply the delta and return -EINVAL.
1155 	 */
1156 	switch (field) {
1157 	case XFS_SBS_ICOUNT:
1158 		lcounter = (long long)mp->m_sb.sb_icount;
1159 		lcounter += delta;
1160 		if (lcounter < 0) {
1161 			ASSERT(0);
1162 			return -EINVAL;
1163 		}
1164 		mp->m_sb.sb_icount = lcounter;
1165 		return 0;
1166 	case XFS_SBS_IFREE:
1167 		lcounter = (long long)mp->m_sb.sb_ifree;
1168 		lcounter += delta;
1169 		if (lcounter < 0) {
1170 			ASSERT(0);
1171 			return -EINVAL;
1172 		}
1173 		mp->m_sb.sb_ifree = lcounter;
1174 		return 0;
1175 	case XFS_SBS_FDBLOCKS:
1176 		lcounter = (long long)
1177 			mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1178 		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
1179 
1180 		if (delta > 0) {		/* Putting blocks back */
1181 			if (res_used > delta) {
1182 				mp->m_resblks_avail += delta;
1183 			} else {
1184 				rem = delta - res_used;
1185 				mp->m_resblks_avail = mp->m_resblks;
1186 				lcounter += rem;
1187 			}
1188 		} else {				/* Taking blocks away */
1189 			lcounter += delta;
1190 			if (lcounter >= 0) {
1191 				mp->m_sb.sb_fdblocks = lcounter +
1192 							XFS_ALLOC_SET_ASIDE(mp);
1193 				return 0;
1194 			}
1195 
1196 			/*
1197 			 * We are out of blocks, use any available reserved
1198 			 * blocks if we're allowed to.
1199 			 */
1200 			if (!rsvd)
1201 				return -ENOSPC;
1202 
1203 			lcounter = (long long)mp->m_resblks_avail + delta;
1204 			if (lcounter >= 0) {
1205 				mp->m_resblks_avail = lcounter;
1206 				return 0;
1207 			}
1208 			printk_once(KERN_WARNING
1209 				"Filesystem \"%s\": reserve blocks depleted! "
1210 				"Consider increasing reserve pool size.",
1211 				mp->m_fsname);
1212 			return -ENOSPC;
1213 		}
1214 
1215 		mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
1216 		return 0;
1217 	case XFS_SBS_FREXTENTS:
1218 		lcounter = (long long)mp->m_sb.sb_frextents;
1219 		lcounter += delta;
1220 		if (lcounter < 0) {
1221 			return -ENOSPC;
1222 		}
1223 		mp->m_sb.sb_frextents = lcounter;
1224 		return 0;
1225 	case XFS_SBS_DBLOCKS:
1226 		lcounter = (long long)mp->m_sb.sb_dblocks;
1227 		lcounter += delta;
1228 		if (lcounter < 0) {
1229 			ASSERT(0);
1230 			return -EINVAL;
1231 		}
1232 		mp->m_sb.sb_dblocks = lcounter;
1233 		return 0;
1234 	case XFS_SBS_AGCOUNT:
1235 		scounter = mp->m_sb.sb_agcount;
1236 		scounter += delta;
1237 		if (scounter < 0) {
1238 			ASSERT(0);
1239 			return -EINVAL;
1240 		}
1241 		mp->m_sb.sb_agcount = scounter;
1242 		return 0;
1243 	case XFS_SBS_IMAX_PCT:
1244 		scounter = mp->m_sb.sb_imax_pct;
1245 		scounter += delta;
1246 		if (scounter < 0) {
1247 			ASSERT(0);
1248 			return -EINVAL;
1249 		}
1250 		mp->m_sb.sb_imax_pct = scounter;
1251 		return 0;
1252 	case XFS_SBS_REXTSIZE:
1253 		scounter = mp->m_sb.sb_rextsize;
1254 		scounter += delta;
1255 		if (scounter < 0) {
1256 			ASSERT(0);
1257 			return -EINVAL;
1258 		}
1259 		mp->m_sb.sb_rextsize = scounter;
1260 		return 0;
1261 	case XFS_SBS_RBMBLOCKS:
1262 		scounter = mp->m_sb.sb_rbmblocks;
1263 		scounter += delta;
1264 		if (scounter < 0) {
1265 			ASSERT(0);
1266 			return -EINVAL;
1267 		}
1268 		mp->m_sb.sb_rbmblocks = scounter;
1269 		return 0;
1270 	case XFS_SBS_RBLOCKS:
1271 		lcounter = (long long)mp->m_sb.sb_rblocks;
1272 		lcounter += delta;
1273 		if (lcounter < 0) {
1274 			ASSERT(0);
1275 			return -EINVAL;
1276 		}
1277 		mp->m_sb.sb_rblocks = lcounter;
1278 		return 0;
1279 	case XFS_SBS_REXTENTS:
1280 		lcounter = (long long)mp->m_sb.sb_rextents;
1281 		lcounter += delta;
1282 		if (lcounter < 0) {
1283 			ASSERT(0);
1284 			return -EINVAL;
1285 		}
1286 		mp->m_sb.sb_rextents = lcounter;
1287 		return 0;
1288 	case XFS_SBS_REXTSLOG:
1289 		scounter = mp->m_sb.sb_rextslog;
1290 		scounter += delta;
1291 		if (scounter < 0) {
1292 			ASSERT(0);
1293 			return -EINVAL;
1294 		}
1295 		mp->m_sb.sb_rextslog = scounter;
1296 		return 0;
1297 	default:
1298 		ASSERT(0);
1299 		return -EINVAL;
1300 	}
1301 }
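/*
 * A worked example of the XFS_SBS_FDBLOCKS "putting blocks back" path above:
 * with m_resblks = 8192, m_resblks_avail = 8000 (so res_used = 192) and
 * delta = +1000, the first 192 blocks top the reserve pool back up to 8192
 * and the remaining 808 are added to sb_fdblocks.  Had delta been 100 (less
 * than res_used), all of it would have gone to m_resblks_avail and
 * sb_fdblocks would be unchanged.
 */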
1302 
1303 /*
1304  * xfs_mod_incore_sb() is used to change a field in the in-core
1305  * superblock structure by the specified delta.  This modification
1306  * is protected by the m_sb_lock.  Just use the xfs_mod_incore_sb_unlocked()
1307  * routine to do the work.
1308  */
1309 int
1310 xfs_mod_incore_sb(
1311 	struct xfs_mount	*mp,
1312 	xfs_sb_field_t		field,
1313 	int64_t			delta,
1314 	int			rsvd)
1315 {
1316 	int			status;
1317 
1318 #ifdef HAVE_PERCPU_SB
1319 	ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS);
1320 #endif
1321 	spin_lock(&mp->m_sb_lock);
1322 	status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
1323 	spin_unlock(&mp->m_sb_lock);
1324 
1325 	return status;
1326 }
1327 
1328 /*
1329  * Change more than one field in the in-core superblock structure at a time.
1330  *
1331  * The fields and changes to those fields are specified in the array of
1332  * xfs_mod_sb structures passed in.  Either all of the specified deltas
1333  * will be applied or none of them will.  If any modified field dips below 0,
1334  * then all modifications will be backed out and EINVAL will be returned.
1335  *
1336  * Note that this function may not be used for the superblock values that
1337  * are tracked with the in-memory per-cpu counters - a direct call to
1338  * xfs_icsb_modify_counters is required for these.
1339  */
1340 int
1341 xfs_mod_incore_sb_batch(
1342 	struct xfs_mount	*mp,
1343 	xfs_mod_sb_t		*msb,
1344 	uint			nmsb,
1345 	int			rsvd)
1346 {
1347 	xfs_mod_sb_t		*msbp;
1348 	int			error = 0;
1349 
1350 	/*
1351 	 * Loop through the array of mod structures and apply each individually.
1352 	 * If any fail, then back out all those which have already been applied.
1353 	 * Do all of this within the scope of the m_sb_lock so that all of the
1354 	 * changes will be atomic.
1355 	 */
1356 	spin_lock(&mp->m_sb_lock);
1357 	for (msbp = msb; msbp < (msb + nmsb); msbp++) {
1358 		ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
1359 		       msbp->msb_field > XFS_SBS_FDBLOCKS);
1360 
1361 		error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
1362 						   msbp->msb_delta, rsvd);
1363 		if (error)
1364 			goto unwind;
1365 	}
1366 	spin_unlock(&mp->m_sb_lock);
1367 	return 0;
1368 
1369 unwind:
1370 	while (--msbp >= msb) {
1371 		error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
1372 						   -msbp->msb_delta, rsvd);
1373 		ASSERT(error == 0);
1374 	}
1375 	spin_unlock(&mp->m_sb_lock);
1376 	return error;
1377 }
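/*
 * A minimal sketch of a caller-side batch (field names follow the
 * xfs_mod_sb_t usage above; the deltas are made up for illustration):
 *
 *	xfs_mod_sb_t msb[2] = {
 *		{ .msb_field = XFS_SBS_FREXTENTS, .msb_delta = -4 },
 *		{ .msb_field = XFS_SBS_RBLOCKS,   .msb_delta = 16 },
 *	};
 *	error = xfs_mod_incore_sb_batch(mp, msb, 2, 0);
 *
 * Either both deltas are applied or, if one would drive its field negative,
 * both are backed out and the error is returned.
 */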
1378 
1379 /*
1380  * xfs_getsb() is called to obtain the buffer for the superblock.
1381  * The buffer is returned locked and read in from disk.
1382  * The buffer should be released with a call to xfs_buf_relse().
1383  *
1384  * If the flags parameter is XBF_TRYLOCK, then we'll only return
1385  * the superblock buffer if it can be locked without sleeping.
1386  * If it can't then we'll return NULL.
1387  */
1388 struct xfs_buf *
1389 xfs_getsb(
1390 	struct xfs_mount	*mp,
1391 	int			flags)
1392 {
1393 	struct xfs_buf		*bp = mp->m_sb_bp;
1394 
1395 	if (!xfs_buf_trylock(bp)) {
1396 		if (flags & XBF_TRYLOCK)
1397 			return NULL;
1398 		xfs_buf_lock(bp);
1399 	}
1400 
1401 	xfs_buf_hold(bp);
1402 	ASSERT(XFS_BUF_ISDONE(bp));
1403 	return bp;
1404 }
1405 
1406 /*
1407  * Used to free the superblock along various error paths.
1408  */
1409 void
1410 xfs_freesb(
1411 	struct xfs_mount	*mp)
1412 {
1413 	struct xfs_buf		*bp = mp->m_sb_bp;
1414 
1415 	xfs_buf_lock(bp);
1416 	mp->m_sb_bp = NULL;
1417 	xfs_buf_relse(bp);
1418 }
1419 
1420 /*
1421  * Used to log changes to the superblock unit and width fields which could
1422  * be altered by the mount options, as well as any potential sb_features2
1423  * fixup. Only the first superblock is updated.
1424  */
1425 int
1426 xfs_mount_log_sb(
1427 	xfs_mount_t	*mp,
1428 	__int64_t	fields)
1429 {
1430 	xfs_trans_t	*tp;
1431 	int		error;
1432 
1433 	ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
1434 			 XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
1435 			 XFS_SB_VERSIONNUM));
1436 
1437 	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
1438 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
1439 	if (error) {
1440 		xfs_trans_cancel(tp, 0);
1441 		return error;
1442 	}
1443 	xfs_mod_sb(tp, fields);
1444 	error = xfs_trans_commit(tp, 0);
1445 	return error;
1446 }
1447 
1448 /*
1449  * If the underlying (data/log/rt) device is readonly, there are some
1450  * operations that cannot proceed.
1451  */
1452 int
1453 xfs_dev_is_read_only(
1454 	struct xfs_mount	*mp,
1455 	char			*message)
1456 {
1457 	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
1458 	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
1459 	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
1460 		xfs_notice(mp, "%s required on read-only device.", message);
1461 		xfs_notice(mp, "write access unavailable, cannot proceed.");
1462 		return -EROFS;
1463 	}
1464 	return 0;
1465 }
1466 
1467 #ifdef HAVE_PERCPU_SB
1468 /*
1469  * Per-cpu incore superblock counters
1470  *
1471  * Simple concept, difficult implementation
1472  *
1473  * Basically, replace the incore superblock counters with a distributed per cpu
1474  * Basically, replace the incore superblock counters with a distributed per-cpu
1475  * counter for contended fields (e.g. free block count).
1476  * Difficulties arise in that the incore sb is used for ENOSPC checking, and
1477  * hence needs to be accurately read when we are running low on space. Hence
1478  * there is a method to enable and disable the per-cpu counters based on how
1479  * much "stuff" is available in them.
1480  *
1481  * Basically, a counter is enabled if there is enough free resource to justify
1482  * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
1483  * ENOSPC), then we disable the counters to synchronise all callers and
1484  * re-distribute the available resources.
1485  *
1486  * If, once we redistributed the available resources, we still get a failure,
1487  * we disable the per-cpu counter and go through the slow path.
1488  *
1489  * The slow path is the current xfs_mod_incore_sb() function.  This means that
1490  * when we disable a per-cpu counter, we need to drain its resources back to
1491  * the global superblock. We do this after disabling the counter to prevent
1492  * more threads from queueing up on the counter.
1493  *
1494  * Essentially, this means that we still need a lock in the fast path to enable
1495  * synchronisation between the global counters and the per-cpu counters. This
1496  * is not a problem because the lock will be local to a CPU almost all the time
1497  * and have little contention except when we get to ENOSPC conditions.
1498  *
1499  * Basically, this lock becomes a barrier that enables us to lock out the fast
1500  * path while we do things like enabling and disabling counters and
1501  * synchronising the counters.
1502  *
1503  * Locking rules:
1504  *
1505  * 	1. m_sb_lock before picking up per-cpu locks
1506  * 	2. per-cpu locks always picked up via for_each_online_cpu() order
1507  * 	3. accurate counter sync requires m_sb_lock + per cpu locks
1508  * 	4. modifying per-cpu counters requires holding per-cpu lock
1509  * 	5. modifying global counters requires holding m_sb_lock
1510  *	6. enabling or disabling a counter requires holding the m_sb_lock
1511  *	   and _none_ of the per-cpu locks.
1512  *
1513  * Disabled counters are only ever re-enabled by a balance operation
1514  * that results in more free resources per CPU than a given threshold.
1515  * To ensure counters don't remain disabled, they are rebalanced when
1516  * the global resource goes above a higher threshold (i.e. some hysteresis
1517  * is present to prevent thrashing).
1518  */
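/*
 * To make the balance concrete: if sb_fdblocks holds 10,000 free blocks and
 * four CPUs are online, a rebalance divides it as 2,500 blocks per CPU, with
 * the division remainder (here zero) credited to the first CPU visited, and
 * then re-enables the per-cpu counter.  If the per-CPU share fell below the
 * re-enable threshold instead, the counter would stay disabled and all
 * callers would fall back to the locked global path.  (Numbers are purely
 * illustrative.)
 */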
1519 
1520 #ifdef CONFIG_HOTPLUG_CPU
1521 /*
1522  * hot-plug CPU notifier support.
1523  *
1524  * We need a notifier per filesystem as we need to be able to identify
1525  * the filesystem to balance the counters out. This is achieved by
1526  * having a notifier block embedded in the xfs_mount_t and doing pointer
1527  * magic to get the mount pointer from the notifier block address.
1528  */
1529 STATIC int
1530 xfs_icsb_cpu_notify(
1531 	struct notifier_block *nfb,
1532 	unsigned long action,
1533 	void *hcpu)
1534 {
1535 	xfs_icsb_cnts_t *cntp;
1536 	xfs_mount_t	*mp;
1537 
1538 	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
1539 	cntp = (xfs_icsb_cnts_t *)
1540 			per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
1541 	switch (action) {
1542 	case CPU_UP_PREPARE:
1543 	case CPU_UP_PREPARE_FROZEN:
1544 		/* Easy Case - initialize the area and locks, and
1545 		 * then rebalance when online does everything else for us. */
1546 		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1547 		break;
1548 	case CPU_ONLINE:
1549 	case CPU_ONLINE_FROZEN:
1550 		xfs_icsb_lock(mp);
1551 		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
1552 		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
1553 		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
1554 		xfs_icsb_unlock(mp);
1555 		break;
1556 	case CPU_DEAD:
1557 	case CPU_DEAD_FROZEN:
1558 		/* Disable all the counters, then fold the dead cpu's
1559 		 * count into the total on the global superblock and
1560 		 * re-enable the counters. */
1561 		xfs_icsb_lock(mp);
1562 		spin_lock(&mp->m_sb_lock);
1563 		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
1564 		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
1565 		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
1566 
1567 		mp->m_sb.sb_icount += cntp->icsb_icount;
1568 		mp->m_sb.sb_ifree += cntp->icsb_ifree;
1569 		mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
1570 
1571 		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1572 
1573 		xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
1574 		xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
1575 		xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
1576 		spin_unlock(&mp->m_sb_lock);
1577 		xfs_icsb_unlock(mp);
1578 		break;
1579 	}
1580 
1581 	return NOTIFY_OK;
1582 }
1583 #endif /* CONFIG_HOTPLUG_CPU */
1584 
1585 int
1586 xfs_icsb_init_counters(
1587 	xfs_mount_t	*mp)
1588 {
1589 	xfs_icsb_cnts_t *cntp;
1590 	int		i;
1591 
1592 	mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
1593 	if (mp->m_sb_cnts == NULL)
1594 		return -ENOMEM;
1595 
1596 	for_each_online_cpu(i) {
1597 		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1598 		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1599 	}
1600 
1601 	mutex_init(&mp->m_icsb_mutex);
1602 
1603 	/*
1604 	 * start with all counters disabled so that the
1605 	 * initial balance kicks us off correctly
1606 	 */
1607 	mp->m_icsb_counters = -1;
1608 
1609 #ifdef CONFIG_HOTPLUG_CPU
1610 	mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
1611 	mp->m_icsb_notifier.priority = 0;
1612 	register_hotcpu_notifier(&mp->m_icsb_notifier);
1613 #endif /* CONFIG_HOTPLUG_CPU */
1614 
1615 	return 0;
1616 }
1617 
1618 void
1619 xfs_icsb_reinit_counters(
1620 	xfs_mount_t	*mp)
1621 {
1622 	xfs_icsb_lock(mp);
1623 	/*
1624 	 * start with all counters disabled so that the
1625 	 * initial balance kicks us off correctly
1626 	 */
1627 	mp->m_icsb_counters = -1;
1628 	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
1629 	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
1630 	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
1631 	xfs_icsb_unlock(mp);
1632 }
1633 
1634 void
1635 xfs_icsb_destroy_counters(
1636 	xfs_mount_t	*mp)
1637 {
1638 	if (mp->m_sb_cnts) {
1639 		unregister_hotcpu_notifier(&mp->m_icsb_notifier);
1640 		free_percpu(mp->m_sb_cnts);
1641 	}
1642 	mutex_destroy(&mp->m_icsb_mutex);
1643 }
1644 
1645 STATIC void
1646 xfs_icsb_lock_cntr(
1647 	xfs_icsb_cnts_t	*icsbp)
1648 {
1649 	while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
1650 		ndelay(1000);
1651 	}
1652 }
1653 
1654 STATIC void
1655 xfs_icsb_unlock_cntr(
1656 	xfs_icsb_cnts_t	*icsbp)
1657 {
1658 	clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
1659 }
1660 
1661 
1662 STATIC void
1663 xfs_icsb_lock_all_counters(
1664 	xfs_mount_t	*mp)
1665 {
1666 	xfs_icsb_cnts_t *cntp;
1667 	int		i;
1668 
1669 	for_each_online_cpu(i) {
1670 		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1671 		xfs_icsb_lock_cntr(cntp);
1672 	}
1673 }
1674 
1675 STATIC void
1676 xfs_icsb_unlock_all_counters(
1677 	xfs_mount_t	*mp)
1678 {
1679 	xfs_icsb_cnts_t *cntp;
1680 	int		i;
1681 
1682 	for_each_online_cpu(i) {
1683 		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1684 		xfs_icsb_unlock_cntr(cntp);
1685 	}
1686 }
1687 
1688 STATIC void
1689 xfs_icsb_count(
1690 	xfs_mount_t	*mp,
1691 	xfs_icsb_cnts_t	*cnt,
1692 	int		flags)
1693 {
1694 	xfs_icsb_cnts_t *cntp;
1695 	int		i;
1696 
1697 	memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
1698 
1699 	if (!(flags & XFS_ICSB_LAZY_COUNT))
1700 		xfs_icsb_lock_all_counters(mp);
1701 
1702 	for_each_online_cpu(i) {
1703 		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1704 		cnt->icsb_icount += cntp->icsb_icount;
1705 		cnt->icsb_ifree += cntp->icsb_ifree;
1706 		cnt->icsb_fdblocks += cntp->icsb_fdblocks;
1707 	}
1708 
1709 	if (!(flags & XFS_ICSB_LAZY_COUNT))
1710 		xfs_icsb_unlock_all_counters(mp);
1711 }
1712 
1713 STATIC int
1714 xfs_icsb_counter_disabled(
1715 	xfs_mount_t	*mp,
1716 	xfs_sb_field_t	field)
1717 {
1718 	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
1719 	return test_bit(field, &mp->m_icsb_counters);
1720 }
1721 
1722 STATIC void
1723 xfs_icsb_disable_counter(
1724 	xfs_mount_t	*mp,
1725 	xfs_sb_field_t	field)
1726 {
1727 	xfs_icsb_cnts_t	cnt;
1728 
1729 	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
1730 
1731 	/*
1732 	 * If we are already disabled, then there is nothing to do
1733 	 * here. We check before locking all the counters to avoid
1734 	 * the expensive lock operation when being called in the
1735 	 * slow path and the counter is already disabled. This is
1736 	 * safe because the only time we set or clear this state is under
1737 	 * the m_icsb_mutex.
1738 	 */
1739 	if (xfs_icsb_counter_disabled(mp, field))
1740 		return;
1741 
1742 	xfs_icsb_lock_all_counters(mp);
1743 	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
1744 		/* drain back to superblock */
1745 
1746 		xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
1747 		switch(field) {
1748 		case XFS_SBS_ICOUNT:
1749 			mp->m_sb.sb_icount = cnt.icsb_icount;
1750 			break;
1751 		case XFS_SBS_IFREE:
1752 			mp->m_sb.sb_ifree = cnt.icsb_ifree;
1753 			break;
1754 		case XFS_SBS_FDBLOCKS:
1755 			mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
1756 			break;
1757 		default:
1758 			BUG();
1759 		}
1760 	}
1761 
1762 	xfs_icsb_unlock_all_counters(mp);
1763 }
1764 
1765 STATIC void
1766 xfs_icsb_enable_counter(
1767 	xfs_mount_t	*mp,
1768 	xfs_sb_field_t	field,
1769 	uint64_t	count,
1770 	uint64_t	resid)
1771 {
1772 	xfs_icsb_cnts_t	*cntp;
1773 	int		i;
1774 
1775 	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
1776 
1777 	xfs_icsb_lock_all_counters(mp);
1778 	for_each_online_cpu(i) {
1779 		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
1780 		switch (field) {
1781 		case XFS_SBS_ICOUNT:
1782 			cntp->icsb_icount = count + resid;
1783 			break;
1784 		case XFS_SBS_IFREE:
1785 			cntp->icsb_ifree = count + resid;
1786 			break;
1787 		case XFS_SBS_FDBLOCKS:
1788 			cntp->icsb_fdblocks = count + resid;
1789 			break;
1790 		default:
1791 			BUG();
1792 			break;
1793 		}
1794 		resid = 0;
1795 	}
1796 	clear_bit(field, &mp->m_icsb_counters);
1797 	xfs_icsb_unlock_all_counters(mp);
1798 }
1799 
1800 void
1801 xfs_icsb_sync_counters_locked(
1802 	xfs_mount_t	*mp,
1803 	int		flags)
1804 {
1805 	xfs_icsb_cnts_t	cnt;
1806 
1807 	xfs_icsb_count(mp, &cnt, flags);
1808 
1809 	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
1810 		mp->m_sb.sb_icount = cnt.icsb_icount;
1811 	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
1812 		mp->m_sb.sb_ifree = cnt.icsb_ifree;
1813 	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
1814 		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
1815 }
1816 
1817 /*
1818  * Accurate update of per-cpu counters to incore superblock
1819  */
1820 void
1821 xfs_icsb_sync_counters(
1822 	xfs_mount_t	*mp,
1823 	int		flags)
1824 {
1825 	spin_lock(&mp->m_sb_lock);
1826 	xfs_icsb_sync_counters_locked(mp, flags);
1827 	spin_unlock(&mp->m_sb_lock);
1828 }
1829 
1830 /*
1831  * Balance and enable/disable counters as necessary.
1832  *
1833  * Thresholds for re-enabling counters are somewhat magic.  Inode counts are
1834  * chosen to be the same number as a single on-disk allocation chunk per CPU,
1835  * and free blocks is something far enough from zero that we aren't going to
1836  * thrash when we get near ENOSPC.  We also need to supply a minimum we
1837  * require per cpu to prevent looping endlessly when xfs_alloc_space asks for
1838  * more than will be distributed to a single CPU but each CPU has enough
1839  * blocks to be re-enabled.
1840  *
1841  * Note that we can be called when counters are already disabled.
1842  * xfs_icsb_disable_counter() optimises the counter locking in this case to
1843  * prevent locking every per-cpu counter needlessly.
1844  */
1845 
1846 #define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
1847 #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
1848 		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
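/*
 * For example, with eight CPUs online and min_per_cpu == 0, the fdblocks
 * counter is only re-enabled when sb_fdblocks / 8 is at least
 * 512 + XFS_ALLOC_SET_ASIDE(mp) blocks; below that the counter stays disabled
 * and modifications keep taking the m_sb_lock slow path.
 */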
1849 STATIC void
1850 xfs_icsb_balance_counter_locked(
1851 	xfs_mount_t	*mp,
1852 	xfs_sb_field_t  field,
1853 	int		min_per_cpu)
1854 {
1855 	uint64_t	count, resid;
1856 	int		weight = num_online_cpus();
1857 	uint64_t	min = (uint64_t)min_per_cpu;
1858 
1859 	/* disable counter and sync counter */
1860 	xfs_icsb_disable_counter(mp, field);
1861 
1862 	/* update counters - first CPU gets residual */
1863 	switch (field) {
1864 	case XFS_SBS_ICOUNT:
1865 		count = mp->m_sb.sb_icount;
1866 		resid = do_div(count, weight);
1867 		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
1868 			return;
1869 		break;
1870 	case XFS_SBS_IFREE:
1871 		count = mp->m_sb.sb_ifree;
1872 		resid = do_div(count, weight);
1873 		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
1874 			return;
1875 		break;
1876 	case XFS_SBS_FDBLOCKS:
1877 		count = mp->m_sb.sb_fdblocks;
1878 		resid = do_div(count, weight);
1879 		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
1880 			return;
1881 		break;
1882 	default:
1883 		BUG();
1884 		count = resid = 0;	/* quiet, gcc */
1885 		break;
1886 	}
1887 
1888 	xfs_icsb_enable_counter(mp, field, count, resid);
1889 }
1890 
1891 STATIC void
1892 xfs_icsb_balance_counter(
1893 	xfs_mount_t	*mp,
1894 	xfs_sb_field_t  fields,
1895 	int		min_per_cpu)
1896 {
1897 	spin_lock(&mp->m_sb_lock);
1898 	xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
1899 	spin_unlock(&mp->m_sb_lock);
1900 }
1901 
1902 int
1903 xfs_icsb_modify_counters(
1904 	xfs_mount_t	*mp,
1905 	xfs_sb_field_t	field,
1906 	int64_t		delta,
1907 	int		rsvd)
1908 {
1909 	xfs_icsb_cnts_t	*icsbp;
1910 	long long	lcounter;	/* long counter for 64 bit fields */
1911 	int		ret = 0;
1912 
1913 	might_sleep();
1914 again:
1915 	preempt_disable();
1916 	icsbp = this_cpu_ptr(mp->m_sb_cnts);
1917 
1918 	/*
1919 	 * if the counter is disabled, go to slow path
1920 	 */
1921 	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
1922 		goto slow_path;
1923 	xfs_icsb_lock_cntr(icsbp);
1924 	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
1925 		xfs_icsb_unlock_cntr(icsbp);
1926 		goto slow_path;
1927 	}
1928 
1929 	switch (field) {
1930 	case XFS_SBS_ICOUNT:
1931 		lcounter = icsbp->icsb_icount;
1932 		lcounter += delta;
1933 		if (unlikely(lcounter < 0))
1934 			goto balance_counter;
1935 		icsbp->icsb_icount = lcounter;
1936 		break;
1937 
1938 	case XFS_SBS_IFREE:
1939 		lcounter = icsbp->icsb_ifree;
1940 		lcounter += delta;
1941 		if (unlikely(lcounter < 0))
1942 			goto balance_counter;
1943 		icsbp->icsb_ifree = lcounter;
1944 		break;
1945 
1946 	case XFS_SBS_FDBLOCKS:
1947 		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
1948 
1949 		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1950 		lcounter += delta;
1951 		if (unlikely(lcounter < 0))
1952 			goto balance_counter;
1953 		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
1954 		break;
1955 	default:
1956 		BUG();
1957 		break;
1958 	}
1959 	xfs_icsb_unlock_cntr(icsbp);
1960 	preempt_enable();
1961 	return 0;
1962 
1963 slow_path:
1964 	preempt_enable();
1965 
1966 	/*
1967 	 * serialise with a mutex so we don't burn lots of cpu on
1968 	 * the superblock lock. We still need to hold the superblock
1969 	 * lock, however, when we modify the global structures.
1970 	 */
1971 	xfs_icsb_lock(mp);
1972 
1973 	/*
1974 	 * Now running atomically.
1975 	 *
1976 	 * If the counter is enabled, someone has beaten us to rebalancing.
1977 	 * Drop the lock and try again in the fast path....
1978 	 */
1979 	if (!(xfs_icsb_counter_disabled(mp, field))) {
1980 		xfs_icsb_unlock(mp);
1981 		goto again;
1982 	}
1983 
1984 	/*
1985 	 * The counter is currently disabled. Because we are
1986 	 * running atomically here, we know a rebalance cannot
1987 	 * be in progress. Hence we can go straight to operating
1988 	 * on the global superblock. We do not call xfs_mod_incore_sb()
1989 	 * here even though we need to get the m_sb_lock. Doing so
1990 	 * will cause us to re-enter this function and deadlock.
1991 	 * Hence we get the m_sb_lock ourselves and then call
1992 	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
1993 	 * directly on the global counters.
1994 	 */
1995 	spin_lock(&mp->m_sb_lock);
1996 	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
1997 	spin_unlock(&mp->m_sb_lock);
1998 
1999 	/*
2000 	 * Now that we've modified the global superblock, we
2001 	 * may be able to re-enable the distributed counters
2002 	 * (e.g. lots of space just got freed). After that
2003 	 * we are done.
2004 	 */
2005 	if (ret != -ENOSPC)
2006 		xfs_icsb_balance_counter(mp, field, 0);
2007 	xfs_icsb_unlock(mp);
2008 	return ret;
2009 
2010 balance_counter:
2011 	xfs_icsb_unlock_cntr(icsbp);
2012 	preempt_enable();
2013 
2014 	/*
2015 	 * We may have multiple threads here if multiple per-cpu
2016 	 * counters run dry at the same time. This will mean we can
2017 	 * do more balances than strictly necessary but it is not
2018 	 * the common slowpath case.
2019 	 */
2020 	xfs_icsb_lock(mp);
2021 
2022 	/*
2023 	 * running atomically.
2024 	 *
2025 	 * This will leave the counter in the correct state for future
2026 	 * accesses. After the rebalance, we simply try again and our retry
2027 	 * will either succeed through the fast path or slow path without
2028 	 * another balance operation being required.
2029 	 */
2030 	xfs_icsb_balance_counter(mp, field, delta);
2031 	xfs_icsb_unlock(mp);
2032 	goto again;
2033 }
2034 
2035 #endif
2036