// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans_space.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_trace.h"

/*
 * Write new AG headers to disk. Non-transactional, but need to be
 * written and completed prior to the growfs transaction being logged.
 * To do this, we use a delayed write buffer list and wait for
 * submission and IO completion of the list as a whole. This allows the
 * IO subsystem to merge all the AG headers in a single AG into a single
 * IO and hide most of the latency of the IO from us.
 *
 * This also means that if we get an error whilst building the buffer
 * list to write, we can cancel the entire list without having written
 * anything.
 */
static int
xfs_resizefs_init_new_ags(
	struct xfs_trans	*tp,
	struct aghdr_init_data	*id,
	xfs_agnumber_t		oagcount,
	xfs_agnumber_t		nagcount,
	xfs_rfsblock_t		delta,
	struct xfs_perag	*last_pag,
	bool			*lastag_extended)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_rfsblock_t		nb = mp->m_sb.sb_dblocks + delta;
	int			error;

	*lastag_extended = false;

	INIT_LIST_HEAD(&id->buffer_list);
	for (id->agno = nagcount - 1;
	     id->agno >= oagcount;
	     id->agno--, delta -= id->agsize) {

		if (id->agno == nagcount - 1)
			id->agsize = nb - (id->agno *
					(xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			id->agsize = mp->m_sb.sb_agblocks;

		error = xfs_ag_init_headers(mp, id);
		if (error) {
			xfs_buf_delwri_cancel(&id->buffer_list);
			return error;
		}
	}

	error = xfs_buf_delwri_submit(&id->buffer_list);
	if (error)
		return error;

	if (delta) {
		*lastag_extended = true;
		error = xfs_ag_extend_space(last_pag, tp, delta);
	}
	return error;
}

/*
 * growfs operations
 */
static int
xfs_growfs_data_private(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	struct xfs_growfs_data	*in)		/* growfs data input struct */
{
	xfs_agnumber_t		oagcount = mp->m_sb.sb_agcount;
	struct xfs_buf		*bp;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_div, nb_mod;
	int64_t			delta;
	bool			lastag_extended = false;
	struct xfs_trans	*tp;
	struct aghdr_init_data	id = {};
	struct xfs_perag	*last_pag;

	nb = in->newblocks;
	error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
	if (error)
		return error;

	if (nb > mp->m_sb.sb_dblocks) {
		error = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
		if (error)
			return error;
		xfs_buf_relse(bp);
	}

	nb_div = nb;
	nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
	if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS)
		nb_div++;
	else if (nb_mod)
		nb = nb_div * mp->m_sb.sb_agblocks;

	if (nb_div > XFS_MAX_AGNUMBER + 1) {
		nb_div = XFS_MAX_AGNUMBER + 1;
		nb = nb_div * mp->m_sb.sb_agblocks;
	}
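	/*
	 * Worked example with made-up numbers (and assuming the usual
	 * XFS_MIN_AG_BLOCKS of 64): with sb_agblocks = 1000, a request of
	 * nb = 3500 gives nb_div = 3 and nb_mod = 500 >= 64, so nb_div is
	 * bumped to 4 and the new last AG is a short one of 500 blocks.
	 * A request of nb = 3030 gives nb_mod = 30 < 64, so nb is rounded
	 * down to 3000 and the trailing 30 blocks are simply not used.
	 */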
	nagcount = nb_div;
	delta = nb - mp->m_sb.sb_dblocks;
	/*
	 * Reject filesystems with a single AG because they are not
	 * supported, and reject a shrink operation that would cause a
	 * filesystem to become unsupported.
	 */
	if (delta < 0 && nagcount < 2)
		return -EINVAL;

	/* No work to do */
	if (delta == 0)
		return 0;

	/* TODO: shrinking away entire AGs is not yet supported */
	if (nagcount < oagcount)
		return -EINVAL;

	/* allocate the new per-ag structures */
	error = xfs_initialize_perag(mp, oagcount, nagcount, nb, &nagimax);
	if (error)
		return error;

	if (delta > 0)
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
				XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	else
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata, -delta, 0,
				0, &tp);
	if (error)
		goto out_free_unused_perag;

	last_pag = xfs_perag_get(mp, oagcount - 1);
	if (delta > 0) {
		error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
				delta, last_pag, &lastag_extended);
	} else {
		xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
	"EXPERIMENTAL online shrink feature in use. Use at your own risk!");

		error = xfs_ag_shrink_space(last_pag, &tp, -delta);
	}
	xfs_perag_put(last_pag);
	if (error)
		goto out_trans_cancel;

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (delta)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
	if (id.nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);

	/*
	 * Sync sb counters now to reflect the updated values. This is
	 * particularly important for shrink because the write verifier
	 * will fail if sb_fdblocks is ever larger than sb_dblocks.
	 */
	if (xfs_has_lazysbcount(mp))
		xfs_log_sb(tp);

	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	xfs_set_low_space_thresholds(mp);
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	if (delta > 0) {
		/*
		 * If we expanded the last AG, free the per-AG reservation
		 * so we can reinitialize it with the new size.
		 */
		if (lastag_extended) {
			struct xfs_perag	*pag;

			pag = xfs_perag_get(mp, id.agno);
			xfs_ag_resv_free(pag);
			xfs_perag_put(pag);
		}
		/*
		 * Reserve AG metadata blocks. ENOSPC here does not mean there
		 * was a growfs failure, just that there still isn't space for
		 * new user data after the grow has been run.
		 */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error == -ENOSPC)
			error = 0;
	}
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
out_free_unused_perag:
	if (nagcount > oagcount)
		xfs_free_perag_range(mp, oagcount, nagcount);
	return error;
}

static int
xfs_growfs_log_private(
	struct xfs_mount	*mp,	/* mount point for filesystem */
	struct xfs_growfs_log	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return -EINVAL;
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return -EINVAL;
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return -ENOSYS;
}

static int
xfs_growfs_imaxpct(
	struct xfs_mount	*mp,
	__u32			imaxpct)
{
	struct xfs_trans	*tp;
	int			dpct;
	int			error;

	if (imaxpct > 100)
		return -EINVAL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	dpct = imaxpct - mp->m_sb.sb_imax_pct;
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * The protected versions of the growfs functions acquire and release locks on
 * the mount point - exported through ioctls: XFS_IOC_FSGROWFSDATA,
 * XFS_IOC_FSGROWFSLOG, XFS_IOC_FSGROWFSRT
 */
int
xfs_growfs_data(
	struct xfs_mount	*mp,
	struct xfs_growfs_data	*in)
{
	int			error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;

	/* update imaxpct separately to the physical grow of the filesystem */
	if (in->imaxpct != mp->m_sb.sb_imax_pct) {
		error = xfs_growfs_imaxpct(mp, in->imaxpct);
		if (error)
			goto out_error;
	}

	if (in->newblocks != mp->m_sb.sb_dblocks) {
		error = xfs_growfs_data_private(mp, in);
		if (error)
			goto out_error;
	}

	/* Post growfs calculations needed to reflect new state in operations */
	if (mp->m_sb.sb_imax_pct) {
		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
	} else
		M_IGEO(mp)->maxicount = 0;

	/* Update secondary superblocks now the physical grow has completed */
	error = xfs_update_secondary_sbs(mp);

out_error:
	/*
	 * Increment the generation unconditionally, the error could be from
	 * updating the secondary superblocks, in which case the new size
	 * is live already.
	 */
	mp->m_generation++;
	mutex_unlock(&mp->m_growlock);
	return error;
}
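
/*
 * Illustrative userspace sketch, not part of this kernel file: growing the
 * data section through the XFS_IOC_FSGROWFSDATA ioctl, which is what ends up
 * in xfs_growfs_data() above.  The header path, mount point and numbers are
 * assumptions; real tools read the current geometry first (XFS_IOC_FSGEOMETRY)
 * and pass the existing imaxpct back unchanged.  Needs CAP_SYS_ADMIN.
 */
#if 0	/* standalone example, not compiled with this file */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>		/* xfsprogs uapi header; path may vary */

int
main(void)
{
	struct xfs_growfs_data	in = {
		.newblocks	= 26214400,	/* new data size in fs blocks */
		.imaxpct	= 25,		/* guess at the current imaxpct */
	};
	int			fd;

	/* any open file or directory on the mounted filesystem will do */
	fd = open("/mnt/xfs", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, XFS_IOC_FSGROWFSDATA, &in) < 0)
		perror("XFS_IOC_FSGROWFSDATA");
	close(fd);
	return 0;
}
#endif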

int
xfs_growfs_log(
	xfs_mount_t		*mp,
	struct xfs_growfs_log	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}
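
/*
 * Illustrative userspace sketch, not part of this kernel file: requesting a
 * log grow via XFS_IOC_FSGROWFSLOG.  As the comment in
 * xfs_growfs_log_private() says, resizing an existing log is not implemented,
 * so a well-formed request is expected to fail with ENOSYS.  Header path,
 * mount point and sizes are assumptions.
 */
#if 0	/* standalone example, not compiled with this file */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>

int
main(void)
{
	struct xfs_growfs_log	in = { .newblocks = 8192, .isint = 1 };
	int			fd;

	fd = open("/mnt/xfs", O_RDONLY);
	if (fd < 0)
		return 1;
	ioctl(fd, XFS_IOC_FSGROWFSLOG, &in);	/* currently -> ENOSYS */
	close(fd);
	return 0;
}
#endif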

/*
 * Reserve the requested number of blocks if available. Otherwise reserve
 * as many blocks as possible to satisfy the request. The resulting
 * reservation is reflected in mp->m_resblks and mp->m_resblks_avail.
 */
int
xfs_reserve_blocks(
	struct xfs_mount	*mp,
	uint64_t		request)
{
	int64_t			lcounter, delta;
	int64_t			fdblks_delta = 0;
	int64_t			free;
	int			error = 0;

	/*
	 * With per-cpu counters, this becomes an interesting problem. We need
	 * to work out if we are freeing or allocating blocks first, then we
	 * can do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
	 * hold out any changes while we work out what to do. This means that
	 * the amount of free space can change while we do this, so we need to
	 * retry if we end up trying to reserve more space than is available.
	 */
	spin_lock(&mp->m_sb_lock);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool. Modify the resblks
	 * counters directly since we shouldn't have any problems unreserving
	 * space.
	 */
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
		if (fdblks_delta) {
			spin_unlock(&mp->m_sb_lock);
			xfs_add_fdblocks(mp, fdblks_delta);
			spin_lock(&mp->m_sb_lock);
		}

		goto out;
	}

	/*
	 * If the request is larger than the current reservation, reserve the
	 * blocks before we update the reserve counters. Sample m_fdblocks and
	 * perform a partial reservation if the request exceeds free space.
	 *
	 * The code below estimates how many blocks it can request from
	 * fdblocks to stash in the reserve pool.  This is a classic TOCTOU
	 * race since fdblocks updates are not always coordinated via
	 * m_sb_lock.  Set the reserve size even if there's not enough free
	 * space to fill it because mod_fdblocks will refill an undersized
	 * reserve when it can.
	 */
	free = percpu_counter_sum(&mp->m_fdblocks) -
						xfs_fdblocks_unavailable(mp);
	delta = request - mp->m_resblks;
	mp->m_resblks = request;
	if (delta > 0 && free > 0) {
		/*
		 * We'll either succeed in getting space from the free block
		 * count or we'll get an ENOSPC.  Don't set the reserved flag
		 * here - we don't want to reserve the extra reserve blocks
		 * from the reserve.
		 *
		 * The desired reserve size can change after we drop the lock.
		 * Use mod_fdblocks to put the space into the reserve or into
		 * fdblocks as appropriate.
		 */
		fdblks_delta = min(free, delta);
		spin_unlock(&mp->m_sb_lock);
		error = xfs_dec_fdblocks(mp, fdblks_delta, 0);
		if (!error)
			xfs_add_fdblocks(mp, fdblks_delta);
		spin_lock(&mp->m_sb_lock);
	}
out:
	spin_unlock(&mp->m_sb_lock);
	return error;
}
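
/*
 * Illustrative userspace sketch, not part of this kernel file: resizing the
 * reserve block pool with XFS_IOC_SET_RESBLKS, the ioctl that calls
 * xfs_reserve_blocks() above.  The struct xfs_fsop_resblks fields and the
 * header path come from the xfsprogs uapi headers and are worth
 * double-checking; the mount point and pool size are assumptions.  Needs
 * CAP_SYS_ADMIN; the resulting reservation is reported back in the same
 * structure.
 */
#if 0	/* standalone example, not compiled with this file */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>

int
main(void)
{
	struct xfs_fsop_resblks	res = {
		.resblks = 8192,	/* requested pool size in fs blocks */
	};
	int			fd;

	fd = open("/mnt/xfs", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, XFS_IOC_SET_RESBLKS, &res) < 0)
		perror("XFS_IOC_SET_RESBLKS");
	else
		printf("reserved %llu blocks, %llu available\n",
				(unsigned long long)res.resblks,
				(unsigned long long)res.resblks_avail);
	close(fd);
	return 0;
}
#endif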

int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
		if (!bdev_freeze(mp->m_super->s_bdev)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			bdev_thaw(mp->m_super->s_bdev);
		}
		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
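
/*
 * Illustrative userspace sketch, not part of this kernel file: triggering a
 * controlled shutdown through XFS_IOC_GOINGDOWN, which lands in
 * xfs_fs_goingdown() above (this is what xfs_io's "shutdown" command uses).
 * The mount point is an assumption, and the operation kills the running
 * filesystem until it is unmounted and mounted again, so only point it at a
 * scratch mount.
 */
#if 0	/* standalone example, not compiled with this file */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <xfs/xfs_fs.h>

int
main(void)
{
	__u32	flags = XFS_FSOP_GOING_FLAGS_LOGFLUSH;	/* flush the log, not data */
	int	fd;

	fd = open("/mnt/scratch", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, XFS_IOC_GOINGDOWN, &flags) < 0)
		perror("XFS_IOC_GOINGDOWN");
	close(fd);
	return 0;
}
#endif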

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shutdown the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 *
 * The shutdown state change is atomic, resulting in the first and only the
 * first shutdown call processing the shutdown. This means we only shutdown the
 * log once as it requires, and we don't spam the logs when multiple concurrent
 * shutdowns race to set the shutdown flags.
 */
void
xfs_do_force_shutdown(
	struct xfs_mount *mp,
	uint32_t	flags,
	char		*fname,
	int		lnnum)
{
	int		tag;
	const char	*why;


	if (xfs_set_shutdown(mp)) {
		xlog_shutdown_wait(mp->m_log);
		return;
	}
	if (mp->m_sb_bp)
		mp->m_sb_bp->b_flags |= XBF_DONE;

	if (flags & SHUTDOWN_FORCE_UMOUNT)
		xfs_alert(mp, "User initiated shutdown received.");

	if (xlog_force_shutdown(mp->m_log, flags)) {
		tag = XFS_PTAG_SHUTDOWN_LOGERROR;
		why = "Log I/O Error";
	} else if (flags & SHUTDOWN_CORRUPT_INCORE) {
		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
		why = "Corruption of in-memory data";
	} else if (flags & SHUTDOWN_CORRUPT_ONDISK) {
		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
		why = "Corruption of on-disk metadata";
	} else if (flags & SHUTDOWN_DEVICE_REMOVED) {
		tag = XFS_PTAG_SHUTDOWN_IOERROR;
		why = "Block device removal";
	} else {
		tag = XFS_PTAG_SHUTDOWN_IOERROR;
		why = "Metadata I/O Error";
	}

	trace_xfs_force_shutdown(mp, tag, flags, fname, lnnum);

	xfs_alert_tag(mp, tag,
"%s (0x%x) detected at %pS (%s:%d).  Shutting down filesystem.",
			why, flags, __return_address, fname, lnnum);
	xfs_alert(mp,
		"Please unmount the filesystem and rectify the problem(s)");
	if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
		xfs_stack_trace();
}

/*
 * Reserve free space for per-AG metadata.
 */
int
xfs_fs_reserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	mp->m_finobt_nores = false;
	for_each_perag(mp, agno, pag) {
		err2 = xfs_ag_resv_init(pag, NULL);
		if (err2 && !error)
			error = err2;
	}

	if (error && error != -ENOSPC) {
		xfs_warn(mp,
	"Error %d reserving per-AG metadata reserve pool.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}

	return error;
}

/*
 * Free space reserved for per-AG metadata.
 */
void
xfs_fs_unreserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;

	for_each_perag(mp, agno, pag)
		xfs_ag_resv_free(pag);
}
565