xref: /linux/fs/xfs/xfs_fsops.c (revision 04a65666a69508fa0022c7343026c5a3d41d166d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs_platform.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans_space.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_trace.h"
#include "xfs_rtalloc.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_rtrefcount_btree.h"
#include "xfs_metafile.h"
#include "xfs_healthmon.h"

#include <linux/fserror.h>
/*
 * Write new AG headers to disk. These writes are non-transactional, but
 * they must be written and completed prior to the growfs transaction
 * being logged. To do this, we use a delayed write buffer list and wait
 * for submission and IO completion of the list as a whole. This allows
 * the IO subsystem to merge all the AG headers in a single AG into a
 * single IO and hide most of the latency of the IO from us.
 *
 * This also means that if we get an error whilst building the buffer
 * list to write, we can cancel the entire list without having written
 * anything.
 */
static int
xfs_resizefs_init_new_ags(
	struct xfs_trans	*tp,
	struct aghdr_init_data	*id,
	xfs_agnumber_t		oagcount,
	xfs_agnumber_t		nagcount,
	xfs_rfsblock_t		delta,
	struct xfs_perag	*last_pag,
	bool			*lastag_extended)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_rfsblock_t		nb = mp->m_sb.sb_dblocks + delta;
	int			error;

	*lastag_extended = false;

	INIT_LIST_HEAD(&id->buffer_list);
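	/*
	 * Walk the new AGs from highest to lowest. Only the very last AG may
	 * be shorter than sb_agblocks; every other new AG is full sized.
	 * Each iteration consumes id->agsize blocks from delta, so whatever
	 * remains afterwards belongs to the old last AG.
	 */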
	for (id->agno = nagcount - 1;
	     id->agno >= oagcount;
	     id->agno--, delta -= id->agsize) {

		if (id->agno == nagcount - 1)
			id->agsize = nb - (id->agno *
					(xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			id->agsize = mp->m_sb.sb_agblocks;

		error = xfs_ag_init_headers(mp, id);
		if (error) {
			xfs_buf_delwri_cancel(&id->buffer_list);
			return error;
		}
	}

	error = xfs_buf_delwri_submit(&id->buffer_list);
	if (error)
		return error;

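	/*
	 * Any delta left over after initialising whole new AGs is growth
	 * within the old last AG, so extend its free space accordingly.
	 */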
	if (delta) {
		*lastag_extended = true;
		error = xfs_ag_extend_space(last_pag, tp, delta);
	}
	return error;
}

/*
 * growfs operations
 */
static int
xfs_growfs_data_private(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	struct xfs_growfs_data	*in)		/* growfs data input struct */
{
	xfs_agnumber_t		oagcount = mp->m_sb.sb_agcount;
	struct xfs_buf		*bp;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_div, nb_mod;
	int64_t			delta;
	bool			lastag_extended = false;
	struct xfs_trans	*tp;
	struct aghdr_init_data	id = {};
	struct xfs_perag	*last_pag;

	nb = in->newblocks;
	error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
	if (error)
		return error;

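	/*
	 * When growing, probe the underlying device by reading what will
	 * become the last sector of the filesystem; if that read fails,
	 * the device cannot hold the requested size.
	 */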
	if (nb > mp->m_sb.sb_dblocks) {
		error = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), &bp, NULL);
		if (error)
			return error;
		xfs_buf_relse(bp);
	}

	/* Make sure the new fs size won't cause problems with the log. */
	error = xfs_growfs_check_rtgeom(mp, nb, mp->m_sb.sb_rblocks,
			mp->m_sb.sb_rextsize);
	if (error)
		return error;

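	/*
	 * Compute the new AG count. A trailing runt AG smaller than
	 * XFS_MIN_AG_BLOCKS is not allowed, so in that case round the new
	 * size down to a whole number of AGs instead. Also clamp the AG
	 * count to the largest AG number the on-disk format supports.
	 */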
	nb_div = nb;
	nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
	if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS)
		nb_div++;
	else if (nb_mod)
		nb = nb_div * mp->m_sb.sb_agblocks;

	if (nb_div > XFS_MAX_AGNUMBER + 1) {
		nb_div = XFS_MAX_AGNUMBER + 1;
		nb = nb_div * mp->m_sb.sb_agblocks;
	}
	nagcount = nb_div;
	delta = nb - mp->m_sb.sb_dblocks;
	/*
	 * Reject filesystems with a single AG because they are not
	 * supported, and reject a shrink operation that would cause a
	 * filesystem to become unsupported.
	 */
	if (delta < 0 && nagcount < 2)
		return -EINVAL;

	/* No work to do */
	if (delta == 0)
		return 0;

	/* TODO: shrinking the filesystem by entire AGs is not yet supported */
	if (nagcount < oagcount)
		return -EINVAL;

	/* allocate the new per-ag structures */
	error = xfs_initialize_perag(mp, oagcount, nagcount, nb, &nagimax);
	if (error)
		return error;

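	/*
	 * A grow needs only a small fixed block reservation and is allowed
	 * to dip into the reserve pool (XFS_TRANS_RESERVE). A shrink must
	 * instead reserve the -delta blocks being removed up front so they
	 * cannot be reallocated while the shrink is in progress.
	 */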
	if (delta > 0)
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
				XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	else
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata, -delta, 0,
				0, &tp);
	if (error)
		goto out_free_unused_perag;

	last_pag = xfs_perag_get(mp, oagcount - 1);
	if (delta > 0) {
		error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
				delta, last_pag, &lastag_extended);
	} else {
		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_SHRINK);
		error = xfs_ag_shrink_space(last_pag, &tp, -delta);
	}
	xfs_perag_put(last_pag);
	if (error)
		goto out_trans_cancel;

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (delta)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
	if (id.nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);

	/*
	 * Sync sb counters now to reflect the updated values. This is
	 * particularly important for shrink because the write verifier
	 * will fail if sb_fdblocks is ever larger than sb_dblocks.
	 */
	if (xfs_has_lazysbcount(mp))
		xfs_log_sb(tp);

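	/*
	 * Force a synchronous commit so the geometry change is on stable
	 * storage before anyone acts on the new size.
	 */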
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	xfs_set_low_space_thresholds(mp);
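	/* The allocation set-aside scales with the AG count; recompute it. */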
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	if (delta > 0) {
		/*
		 * If we expanded the last AG, free the per-AG reservation
		 * so we can reinitialize it with the new size.
		 */
		if (lastag_extended) {
			struct xfs_perag	*pag;

			pag = xfs_perag_get(mp, id.agno);
			xfs_ag_resv_free(pag);
			xfs_perag_put(pag);
		}
		/*
		 * Reserve AG metadata blocks. ENOSPC here does not mean there
		 * was a growfs failure, just that there still isn't space for
		 * new user data after the grow has been run.
		 */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error == -ENOSPC)
			error = 0;

		/* Compute new maxlevels for rt btrees. */
		xfs_rtrmapbt_compute_maxlevels(mp);
		xfs_rtrefcountbt_compute_maxlevels(mp);
	}

	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
out_free_unused_perag:
	if (nagcount > oagcount)
		xfs_free_perag_range(mp, oagcount, nagcount);
	return error;
}

static int
xfs_growfs_log_private(
	struct xfs_mount	*mp,	/* mount point for filesystem */
	struct xfs_growfs_log	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return -EINVAL;
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return -EINVAL;
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return -ENOSYS;
}

static int
xfs_growfs_imaxpct(
	struct xfs_mount	*mp,
	__u32			imaxpct)
{
	struct xfs_trans	*tp;
	int			dpct;
	int			error;

	if (imaxpct > 100)
		return -EINVAL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

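	/*
	 * sb_imax_pct is logged as a signed delta, so lowering the limit is
	 * just as valid as raising it.
	 */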
	dpct = imaxpct - mp->m_sb.sb_imax_pct;
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Protected versions of the growfs functions that acquire and release locks
 * on the mount point - exported through the ioctls XFS_IOC_FSGROWFSDATA,
 * XFS_IOC_FSGROWFSLOG and XFS_IOC_FSGROWFSRT.
 */
int
xfs_growfs_data(
	struct xfs_mount	*mp,
	struct xfs_growfs_data	*in)
{
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
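	/* Fail fast instead of blocking if another grow is already running. */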
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;

	/* we can't grow the data section when an internal RT section exists */
	if (in->newblocks != mp->m_sb.sb_dblocks && mp->m_sb.sb_rtstart) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* update imaxpct separately to the physical grow of the filesystem */
	if (in->imaxpct != mp->m_sb.sb_imax_pct) {
		error = xfs_growfs_imaxpct(mp, in->imaxpct);
		if (error)
			goto out_unlock;
	}

	if (in->newblocks != mp->m_sb.sb_dblocks) {
		error = xfs_growfs_data_private(mp, in);
		if (error)
			goto out_unlock;
	}

	/* Post growfs calculations needed to reflect new state in operations */
	if (mp->m_sb.sb_imax_pct) {
		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
	} else
		M_IGEO(mp)->maxicount = 0;

	/* Update secondary superblocks now the physical grow has completed */
	error = xfs_update_secondary_sbs(mp);

	/*
	 * Increment the generation unconditionally, after trying to update the
	 * secondary superblocks, as the new size is live already at this point.
	 */
	mp->m_generation++;
out_unlock:
	mutex_unlock(&mp->m_growlock);
	return error;
}

int
xfs_growfs_log(
	xfs_mount_t		*mp,
	struct xfs_growfs_log	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

/*
 * Reserve the requested number of blocks if available. Otherwise reserve
 * as many as possible to satisfy the request. The reservation state is
 * tracked in mp->m_free[ctr].
 */
int
xfs_reserve_blocks(
	struct xfs_mount	*mp,
	enum xfs_free_counter	ctr,
	uint64_t		request)
{
	int64_t			lcounter, delta;
	int64_t			fdblks_delta = 0;
	int64_t			free;
	int			error = 0;

	ASSERT(ctr < XC_FREE_NR);

	/*
	 * With per-cpu counters, this becomes an interesting problem. We need
	 * to work out if we are freeing or allocating blocks first, then we
	 * can do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
	 * hold out any changes while we work out what to do. This means that
	 * the amount of free space can change while we do this, so we need to
	 * retry if we end up trying to reserve more space than is available.
	 */
	spin_lock(&mp->m_sb_lock);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool. Modify the resblks
	 * counters directly since we shouldn't have any problems unreserving
	 * space.
	 */
	if (mp->m_free[ctr].res_total > request) {
		lcounter = mp->m_free[ctr].res_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_free[ctr].res_avail -= lcounter;
		}
		mp->m_free[ctr].res_total = request;
		if (fdblks_delta) {
			spin_unlock(&mp->m_sb_lock);
			xfs_add_freecounter(mp, ctr, fdblks_delta);
			spin_lock(&mp->m_sb_lock);
		}

		goto out;
	}

	/*
	 * If the request is larger than the current reservation, reserve the
	 * blocks before we update the reserve counters. Sample m_free and
	 * perform a partial reservation if the request exceeds free space.
	 *
	 * The code below estimates how many blocks it can request from
	 * fdblocks to stash in the reserve pool.  This is a classic TOCTOU
	 * race since fdblocks updates are not always coordinated via
	 * m_sb_lock.  Set the reserve size even if there's not enough free
	 * space to fill it because the freecounter code will refill an
	 * undersized reserve when it can.
	 */
	free = xfs_sum_freecounter_raw(mp, ctr) -
		xfs_freecounter_unavailable(mp, ctr);
	delta = request - mp->m_free[ctr].res_total;
	mp->m_free[ctr].res_total = request;
	if (delta > 0 && free > 0) {
		/*
		 * We'll either succeed in getting space from the free block
		 * count or we'll get an ENOSPC. Don't pass the "reserved"
		 * flag here - this allocation must not be allowed to dip
		 * into the existing reserve pool.
		 *
		 * The desired reserve size can change after we drop the lock.
		 * Use the freecounter helpers to put the space into the
		 * reserve or into fdblocks as appropriate.
		 */
		fdblks_delta = min(free, delta);
		spin_unlock(&mp->m_sb_lock);
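		/*
		 * Pull the blocks out of the free counter, then hand them
		 * straight back: xfs_add_freecounter refills an undersized
		 * reserve before returning space to the free pool, so this
		 * round trip is what actually fills the reserve.
		 */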
		error = xfs_dec_freecounter(mp, ctr, fdblks_delta, 0);
		if (!error)
			xfs_add_freecounter(mp, ctr, fdblks_delta);
		spin_lock(&mp->m_sb_lock);
	}
out:
	spin_unlock(&mp->m_sb_lock);
	return error;
}

int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
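		/*
		 * Freeze flushes dirty data and quiesces the filesystem;
		 * only shut down (and thaw) if the freeze succeeded, i.e.
		 * bdev_freeze() returned zero.
		 */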
		if (!bdev_freeze(mp->m_super->s_bdev)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			bdev_thaw(mp->m_super->s_bdev);
		}
		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; we just shut down the shop and make
 * sure that absolutely nothing persistent happens to this filesystem after
 * this point.
 *
 * The shutdown state change is atomic, so the first and only the first
 * shutdown call processes the shutdown. This means we only shut down the log
 * once, as it requires, and we don't spam the logs when multiple concurrent
 * shutdowns race to set the shutdown flags.
 */
void
xfs_do_force_shutdown(
	struct xfs_mount *mp,
	uint32_t	flags,
	char		*fname,
	int		lnnum)
{
	int		tag;
	const char	*why;
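	/*
	 * Only the first caller to set the shutdown state does the work;
	 * everyone else waits for the log shutdown to complete and returns.
	 */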
	if (xfs_set_shutdown(mp)) {
		xlog_shutdown_wait(mp->m_log);
		return;
	}
	if (mp->m_sb_bp)
		mp->m_sb_bp->b_flags |= XBF_DONE;

	if (flags & SHUTDOWN_FORCE_UMOUNT)
		xfs_alert(mp, "User initiated shutdown received.");

	if (xlog_force_shutdown(mp->m_log, flags)) {
		tag = XFS_PTAG_SHUTDOWN_LOGERROR;
		why = "Log I/O Error";
	} else if (flags & SHUTDOWN_CORRUPT_INCORE) {
		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
		why = "Corruption of in-memory data";
	} else if (flags & SHUTDOWN_CORRUPT_ONDISK) {
		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
		why = "Corruption of on-disk metadata";
	} else if (flags & SHUTDOWN_DEVICE_REMOVED) {
		tag = XFS_PTAG_SHUTDOWN_IOERROR;
		why = "Block device removal";
	} else {
		tag = XFS_PTAG_SHUTDOWN_IOERROR;
		why = "Metadata I/O Error";
	}

	trace_xfs_force_shutdown(mp, tag, flags, fname, lnnum);

	xfs_alert_tag(mp, tag,
"%s (0x%x) detected at %pS (%s:%d).  Shutting down filesystem.",
			why, flags, __return_address, fname, lnnum);
	xfs_alert(mp,
		"Please unmount the filesystem and rectify the problem(s)");
	if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
		xfs_stack_trace();

	fserror_report_shutdown(mp->m_super, GFP_KERNEL);
	xfs_healthmon_report_shutdown(mp, flags);
}

/*
 * Reserve free space for per-AG metadata.
 */
int
xfs_fs_reserve_ag_blocks(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag = NULL;
	int			error = 0;
	int			err2;

	mp->m_finobt_nores = false;
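	/*
	 * Initialise the reservation for every AG even if one of them fails;
	 * remember the first error so the caller sees it.
	 */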
	while ((pag = xfs_perag_next(mp, pag))) {
		err2 = xfs_ag_resv_init(pag, NULL);
		if (err2 && !error)
			error = err2;
	}

	if (error && error != -ENOSPC) {
		xfs_warn(mp,
	"Error %d reserving per-AG metadata reserve pool.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	err2 = xfs_metafile_resv_init(mp);
	if (err2 && err2 != -ENOSPC) {
		xfs_warn(mp,
	"Error %d reserving realtime metadata reserve pool.", err2);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

		if (!error)
			error = err2;
	}

	return error;
}

/*
 * Free space reserved for per-AG metadata.
 */
void
xfs_fs_unreserve_ag_blocks(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag = NULL;

	xfs_metafile_resv_free(mp);
	while ((pag = xfs_perag_next(mp, pag)))
		xfs_ag_resv_free(pag);
}
602