xref: /linux/fs/xfs/xfs_attr_inactive.c (revision 722ecdbce68a87de2d9296f91308f44ea900a039)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_attr.h"
#include "xfs_attr_remote.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr_leaf.h"
#include "xfs_quota.h"
#include "xfs_dir2.h"
#include "xfs_error.h"

/*
 * Invalidate any incore buffers associated with this remote attribute value
 * extent.  We never log remote attribute value buffers, which means that they
 * won't be attached to a transaction and are therefore safe to mark stale.
 * The actual bunmapi will be taken care of later.
 */
STATIC int
xfs_attr3_rmt_stale(
	struct xfs_inode	*dp,
	xfs_dablk_t		blkno,
	int			blkcnt)
{
	struct xfs_bmbt_irec	map;
	int			nmap;
	int			error;

	/*
	 * Roll through the "value", invalidating the attribute value's
	 * blocks.
	 */
	while (blkcnt > 0) {
		/*
		 * Map the next extent of the remote value so we know which
		 * blocks to invalidate.
		 */
		nmap = 1;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)blkno, blkcnt,
				       &map, &nmap, XFS_BMAPI_ATTRFORK);
		if (error)
			return error;
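		/*
		 * We asked for a single mapping; getting anything else back
		 * means the attr fork extent map is corrupt.
		 */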
		if (XFS_IS_CORRUPT(dp->i_mount, nmap != 1))
			return -EFSCORRUPTED;

		/*
		 * Mark any incore buffers for the remote value as stale.  We
		 * never log remote attr value buffers, so the buffer should be
		 * easy to kill.
		 */
		error = xfs_attr_rmtval_stale(dp, &map, 0);
		if (error)
			return error;

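		/* Step past the extent we just handled and keep going. */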
		blkno += map.br_blockcount;
		blkcnt -= map.br_blockcount;
	}

	return 0;
}

/*
 * Invalidate all of the "remote" value regions pointed to by a particular
 * leaf block.
 * Note that we must release the lock on the buffer so that we are not
 * caught holding something that the logging code wants to flush to disk.
 */
STATIC int
xfs_attr3_leaf_inactive(
	struct xfs_trans		**trans,
	struct xfs_inode		*dp,
	struct xfs_buf			*bp)
{
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_mount		*mp = bp->b_mount;
	struct xfs_attr_leafblock	*leaf = bp->b_addr;
	struct xfs_attr_leaf_entry	*entry;
	struct xfs_attr_leaf_name_remote *name_rmt;
	int				error = 0;
	int				i;

	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);

	/*
	 * Find the remote value extents for this leaf and invalidate their
	 * incore buffers.
	 */
	entry = xfs_attr3_leaf_entryp(leaf);
	for (i = 0; i < ichdr.count; entry++, i++) {
		int		blkcnt;

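		/*
		 * Skip incomplete entries and attrs whose value is stored
		 * locally in the leaf; only remote values occupy separate
		 * blocks that need invalidating.
		 */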
		if (!entry->nameidx || (entry->flags & XFS_ATTR_LOCAL))
			continue;

		name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
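		/* Nothing to invalidate if no remote block was ever assigned. */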
		if (!name_rmt->valueblk)
			continue;

		blkcnt = xfs_attr3_rmt_blocks(dp->i_mount,
				be32_to_cpu(name_rmt->valuelen));
		error = xfs_attr3_rmt_stale(dp,
				be32_to_cpu(name_rmt->valueblk), blkcnt);
		if (error)
			goto err;
	}

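	/*
	 * We only needed to stale the remote value buffers; the leaf buffer
	 * itself is invalidated by our caller.
	 */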
	xfs_trans_brelse(*trans, bp);
err:
	return error;
}

/*
 * Recurse (gasp!) through the attribute nodes until we find leaves.
 * We're doing a depth-first traversal in order to invalidate everything.
 */
STATIC int
xfs_attr3_node_inactive(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			level)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		child_fsb;
	xfs_daddr_t		parent_blkno, child_blkno;
	struct xfs_buf		*child_bp;
	struct xfs_da3_icnode_hdr ichdr;
	int			error, i;

	/*
	 * Since this code is recursive (gasp!) we must protect ourselves from
	 * runaway recursion: a tree deeper than XFS_DA_NODE_MAXDEPTH is
	 * corrupt.
	 */
	if (level > XFS_DA_NODE_MAXDEPTH) {
		xfs_buf_mark_corrupt(bp);
		xfs_trans_brelse(*trans, bp);	/* no locks for later trans */
		return -EFSCORRUPTED;
	}

	xfs_da3_node_hdr_from_disk(dp->i_mount, &ichdr, bp->b_addr);
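	/*
	 * Remember the parent's disk address so we can re-read it between
	 * children to pick up the next child pointer.
	 */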
	parent_blkno = xfs_buf_daddr(bp);
	if (!ichdr.count) {
		xfs_trans_brelse(*trans, bp);
		return 0;
	}
	child_fsb = be32_to_cpu(ichdr.btree[0].before);
	xfs_trans_brelse(*trans, bp);	/* no locks for later trans */

	/*
	 * If this is the node level just above the leaves, simply loop
	 * over the leaves removing all of them.  If this is higher up
	 * in the tree, recurse downward.
	 */
	for (i = 0; i < ichdr.count; i++) {
		/*
		 * Read the subsidiary block to see what we have to work with.
		 * Don't do this in a transaction.  This is a depth-first
		 * traversal of the tree so we may deal with many blocks
		 * before we come back to this one.
		 */
		error = xfs_da3_node_read(*trans, dp, child_fsb, &child_bp,
					  XFS_ATTR_FORK);
		if (error)
			return error;

		/* save for re-read later */
		child_blkno = xfs_buf_daddr(child_bp);

		/*
		 * Invalidate the subtree, in whatever way the child's block
		 * type requires.
		 */
		info = child_bp->b_addr;
		switch (info->magic) {
		case cpu_to_be16(XFS_DA_NODE_MAGIC):
		case cpu_to_be16(XFS_DA3_NODE_MAGIC):
			error = xfs_attr3_node_inactive(trans, dp, child_bp,
							level + 1);
			break;
		case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
		case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
			error = xfs_attr3_leaf_inactive(trans, dp, child_bp);
			break;
		default:
			xfs_buf_mark_corrupt(child_bp);
			xfs_trans_brelse(*trans, child_bp);
			error = -EFSCORRUPTED;
			break;
		}
		if (error)
			return error;

		/*
		 * Remove the subsidiary block from the cache and from the log.
		 */
		error = xfs_trans_get_buf(*trans, mp->m_ddev_targp,
				child_blkno,
				XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0,
				&child_bp);
		if (error)
			return error;
		error = child_bp->b_error;
		if (error) {
			xfs_trans_brelse(*trans, child_bp);
			return error;
		}
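		/*
		 * Stale the child buffer so it is neither written back nor
		 * replayed by log recovery.
		 */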
		xfs_trans_binval(*trans, child_bp);

		/*
		 * If we're not done, re-read the parent to get the next
		 * child block number.
		 */
		if (i + 1 < ichdr.count) {
			struct xfs_da3_icnode_hdr phdr;

			error = xfs_da3_node_read_mapped(*trans, dp,
					parent_blkno, &bp, XFS_ATTR_FORK);
			if (error)
				return error;
			xfs_da3_node_hdr_from_disk(dp->i_mount, &phdr,
						  bp->b_addr);
			child_fsb = be32_to_cpu(phdr.btree[i + 1].before);
			xfs_trans_brelse(*trans, bp);
		}
		/*
		 * Commit the invalidation of this child and get a fresh
		 * transaction before moving on to the next one.
		 */
		error = xfs_trans_roll_inode(trans, dp);
		if (error)
			return error;
	}

	return 0;
}

/*
 * Invalidate the incore buffers for the entire attribute fork, walking down
 * from the root.  The extents themselves are removed later by the caller.
 *
 * Recurse (gasp!) through the attribute nodes until we find leaves.
 * We're doing a depth-first traversal in order to invalidate everything.
 */
static int
xfs_attr3_root_inactive(
	struct xfs_trans	**trans,
	struct xfs_inode	*dp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_da_blkinfo	*info;
	struct xfs_buf		*bp;
	xfs_daddr_t		blkno;
	int			error;

	/*
	 * Read block 0 to see what we have to work with.
	 * We only get here if we have extents, and since we remove the
	 * extents in reverse order, the extent containing block 0 must
	 * still be there.
	 */
	error = xfs_da3_node_read(*trans, dp, 0, &bp, XFS_ATTR_FORK);
	if (error)
		return error;
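	/*
	 * Remember where the root lives on disk so we can invalidate its
	 * incore buffer once the rest of the tree has been dealt with.
	 */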
	blkno = xfs_buf_daddr(bp);

	/*
	 * Invalidate the tree, even if the "tree" is only a single leaf block.
	 * This is a depth-first traversal!
	 */
	info = bp->b_addr;
	switch (info->magic) {
	case cpu_to_be16(XFS_DA_NODE_MAGIC):
	case cpu_to_be16(XFS_DA3_NODE_MAGIC):
		error = xfs_attr3_node_inactive(trans, dp, bp, 1);
		break;
	case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
	case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
		error = xfs_attr3_leaf_inactive(trans, dp, bp);
		break;
	default:
		error = -EFSCORRUPTED;
		xfs_buf_mark_corrupt(bp);
		xfs_trans_brelse(*trans, bp);
		break;
	}
	if (error)
		return error;

	/*
	 * Invalidate the incore copy of the root block.
	 */
	error = xfs_trans_get_buf(*trans, mp->m_ddev_targp, blkno,
			XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0, &bp);
	if (error)
		return error;
	error = bp->b_error;
	if (error) {
		xfs_trans_brelse(*trans, bp);
		return error;
	}
	xfs_trans_binval(*trans, bp);	/* remove from cache */
	/*
	 * Commit the invalidate and start the next transaction.
	 */
	error = xfs_trans_roll_inode(trans, dp);

	return error;
}

/*
 * xfs_attr_inactive kills all traces of an attribute fork on an inode. It
 * removes the attribute fork from both the on-disk and the in-memory inode.
 * Note that this also has to handle the condition of inodes without
 * attributes but with an attribute fork configured, so we can't use
 * xfs_inode_hasattr() here.
 *
 * The in-memory attribute fork is removed even on error.
 */
int
xfs_attr_inactive(
	struct xfs_inode	*dp)
{
	struct xfs_trans	*trans;
	struct xfs_mount	*mp;
	int			lock_mode = XFS_ILOCK_SHARED;
	int			error = 0;

	mp = dp->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, dp));

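	/*
	 * Check for an attr fork under a shared ILOCK first.  We do not hold
	 * the ILOCK across the transaction reservation below, so drop it and
	 * re-check once we hold the lock exclusively.
	 */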
	xfs_ilock(dp, lock_mode);
	if (!XFS_IFORK_Q(dp))
		goto out_destroy_fork;
	xfs_iunlock(dp, lock_mode);

	lock_mode = 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_attrinval, 0, 0, 0, &trans);
	if (error)
		goto out_destroy_fork;

	lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(dp, lock_mode);

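	/*
	 * Re-check now that we hold the ILOCK exclusively, in case the attr
	 * fork went away while the inode was unlocked.
	 */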
	if (!XFS_IFORK_Q(dp))
		goto out_cancel;

	/*
	 * No need to make quota reservations here. We expect to release some
	 * blocks, not allocate, in the common case.
	 */
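	/*
	 * Join the inode without handing the ILOCK to the transaction; the
	 * inode stays locked across the rolls below and we drop the lock
	 * ourselves after the final commit.
	 */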
	xfs_trans_ijoin(trans, dp, 0);

	/*
	 * Invalidate and truncate the attribute fork extents. Make sure the
	 * fork actually has attributes; otherwise the invalidation has no
	 * blocks to read and returns an error, in which case we just do the
	 * fork removal below.
	 */
	if (xfs_inode_hasattr(dp) &&
	    dp->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
		error = xfs_attr3_root_inactive(&trans, dp);
		if (error)
			goto out_cancel;

		error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
		if (error)
			goto out_cancel;
	}

	/* Reset the attribute fork - this also destroys the in-core fork */
	xfs_attr_fork_remove(dp, trans);

	error = xfs_trans_commit(trans);
	xfs_iunlock(dp, lock_mode);
	return error;

out_cancel:
	xfs_trans_cancel(trans);
out_destroy_fork:
	/* kill the in-core attr fork before we drop the inode lock */
	if (dp->i_afp) {
		xfs_idestroy_fork(dp->i_afp);
		kmem_cache_free(xfs_ifork_cache, dp->i_afp);
		dp->i_afp = NULL;
	}
	if (lock_mode)
		xfs_iunlock(dp, lock_mode);
	return error;
}