// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
 * Copyright (c) 2014 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_alloc.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_ag_resv.h"

struct xfs_fstrm_item {
	struct xfs_mru_cache_elem	mru;
	xfs_agnumber_t			ag; /* AG in use for this directory */
};

enum xfs_fstrm_alloc {
	XFS_PICK_USERDATA = 1,
	XFS_PICK_LOWSPACE = 2,
};

/*
 * Allocation group filestream associations are tracked with per-ag atomic
 * counters.  These counters allow xfs_filestream_pick_ag() to tell whether a
 * particular AG already has active filestreams associated with it.  The mount
 * point's m_peraglock is used to protect these counters from per-ag array
 * re-allocation during a growfs operation.  When xfs_growfs_data_private() is
 * about to reallocate the array, it calls xfs_filestream_flush() with the
 * m_peraglock held in write mode.
 *
 * Since xfs_mru_cache_flush() guarantees that all the free functions for all
 * the cache elements have finished executing before it returns, it's safe for
 * the free functions to use the atomic counters without m_peraglock protection.
 * This allows the implementation of xfs_fstrm_free_func() to be agnostic about
 * whether it was called with the m_peraglock held in read mode, write mode or
 * not held at all.  The race condition this addresses is the following:
 *
 *  - The work queue scheduler fires and pulls a filestream directory cache
 *    element off the LRU end of the cache for deletion, then gets pre-empted.
 *  - A growfs operation grabs the m_peraglock in write mode, flushes all the
 *    remaining items from the cache and reallocates the mount point's per-ag
 *    array, resetting all the counters to zero.
 *  - The work queue thread resumes and calls the free function for the element
 *    it started cleaning up earlier.  In the process it decrements the
 *    filestreams counter for an AG that now has no references.
 *
 * With a shrinkfs feature, the above scenario could panic the system.
 *
 * All other uses of the following macros should be protected by either the
 * m_peraglock held in read mode, or the cache's internal locking exposed by the
 * interval between a call to xfs_mru_cache_lookup() and a call to
 * xfs_mru_cache_done().  In addition, the m_peraglock must be held in read mode
 * when new elements are added to the cache.
 *
 * Combined, these locking rules ensure that no associations will ever exist in
 * the cache that reference per-ag array elements that have since been
 * reallocated.
 */
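/*
 * Editorial sketch (not part of the driver logic): the lookup()/done()
 * interval described above is what makes it safe to read a cache element
 * without taking m_peraglock.  A caller that only needs the cached AG number
 * follows this pattern, as xfs_filestream_lookup_ag() does further down
 * (dir_ino, mru and ag are illustrative locals):
 *
 *	mru = xfs_mru_cache_lookup(mp->m_filestream, dir_ino);
 *	if (mru) {
 *		ag = container_of(mru, struct xfs_fstrm_item, mru)->ag;
 *		xfs_mru_cache_done(mp->m_filestream);
 *	}
 *
 * Between lookup() and done() the element cannot be freed, so item->ag is
 * stable; once done() has been called the element may be reclaimed at any
 * time and must not be dereferenced again.
 */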
int
xfs_filestream_peek_ag(
	xfs_mount_t		*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			ret;

	pag = xfs_perag_get(mp, agno);
	ret = atomic_read(&pag->pagf_fstrms);
	xfs_perag_put(pag);
	return ret;
}

static int
xfs_filestream_get_ag(
	xfs_mount_t		*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			ret;

	pag = xfs_perag_get(mp, agno);
	ret = atomic_inc_return(&pag->pagf_fstrms);
	xfs_perag_put(pag);
	return ret;
}

static void
xfs_filestream_put_ag(
	xfs_mount_t		*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, agno);
	atomic_dec(&pag->pagf_fstrms);
	xfs_perag_put(pag);
}

static void
xfs_fstrm_free_func(
	void			*data,
	struct xfs_mru_cache_elem *mru)
{
	struct xfs_mount	*mp = data;
	struct xfs_fstrm_item	*item =
		container_of(mru, struct xfs_fstrm_item, mru);

	xfs_filestream_put_ag(mp, item->ag);
	trace_xfs_filestream_free(mp, mru->key, item->ag);

	kmem_free(item);
}

/*
 * Scan the AGs starting at startag looking for an AG that isn't in use and has
 * at least minlen blocks free.
 */
static int
xfs_filestream_pick_ag(
	struct xfs_inode	*ip,
	xfs_agnumber_t		startag,
	xfs_agnumber_t		*agp,
	int			flags,
	xfs_extlen_t		minlen)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_fstrm_item	*item;
	struct xfs_perag	*pag;
	xfs_extlen_t		longest, free = 0, minfree, maxfree = 0;
	xfs_agnumber_t		ag, max_ag = NULLAGNUMBER;
	int			err, trylock, nscan;

	ASSERT(S_ISDIR(VFS_I(ip)->i_mode));

	/* 2% of an AG's blocks must be free for it to be chosen. */
	minfree = mp->m_sb.sb_agblocks / 50;

	ag = startag;
	*agp = NULLAGNUMBER;

	/* For the first pass, don't sleep trying to init the per-AG. */
	trylock = XFS_ALLOC_FLAG_TRYLOCK;

	for (nscan = 0; 1; nscan++) {
		trace_xfs_filestream_scan(mp, ip->i_ino, ag);

		pag = xfs_perag_get(mp, ag);

		if (!pag->pagf_init) {
			err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
			if (err && !trylock) {
				xfs_perag_put(pag);
				return err;
			}
		}

		/* Might fail sometimes during the 1st pass with trylock set. */
		if (!pag->pagf_init)
			goto next_ag;

		/* Keep track of the AG with the most free blocks. */
		if (pag->pagf_freeblks > maxfree) {
			maxfree = pag->pagf_freeblks;
			max_ag = ag;
		}

		/*
		 * The AG reference count does two things: it enforces mutual
		 * exclusion when examining the suitability of an AG in this
		 * loop, and it guards against two filestreams being established
		 * in the same AG as each other.
		 */
		if (xfs_filestream_get_ag(mp, ag) > 1) {
			xfs_filestream_put_ag(mp, ag);
			goto next_ag;
		}

		longest = xfs_alloc_longest_free_extent(pag,
				xfs_alloc_min_freelist(mp, pag),
				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
		if (((minlen && longest >= minlen) ||
		     (!minlen && pag->pagf_freeblks >= minfree)) &&
		    (!pag->pagf_metadata || !(flags & XFS_PICK_USERDATA) ||
		     (flags & XFS_PICK_LOWSPACE))) {

			/* Break out, retaining the reference on the AG. */
			free = pag->pagf_freeblks;
			xfs_perag_put(pag);
			*agp = ag;
			break;
		}

		/* Drop the reference on this AG, it's not usable. */
		xfs_filestream_put_ag(mp, ag);
next_ag:
		xfs_perag_put(pag);
		/* Move to the next AG, wrapping to AG 0 if necessary. */
		if (++ag >= mp->m_sb.sb_agcount)
			ag = 0;

		/* If a full pass of the AGs hasn't been done yet, continue. */
		if (ag != startag)
			continue;

		/* Allow sleeping in xfs_alloc_pagf_init() on the 2nd pass. */
		if (trylock != 0) {
			trylock = 0;
			continue;
		}

		/* Finally, if lowspace wasn't set, set it for the 3rd pass. */
		if (!(flags & XFS_PICK_LOWSPACE)) {
			flags |= XFS_PICK_LOWSPACE;
			continue;
		}

		/*
		 * Take the AG with the most free space, regardless of whether
		 * it's already in use by another filestream.
		 */
		if (max_ag != NULLAGNUMBER) {
			xfs_filestream_get_ag(mp, max_ag);
			free = maxfree;
			*agp = max_ag;
			break;
		}

		/* take AG 0 if none matched */
		trace_xfs_filestream_pick(ip, *agp, free, nscan);
		*agp = 0;
		return 0;
	}

	trace_xfs_filestream_pick(ip, *agp, free, nscan);

	if (*agp == NULLAGNUMBER)
		return 0;

	err = -ENOMEM;
	item = kmem_alloc(sizeof(*item), KM_MAYFAIL);
	if (!item)
		goto out_put_ag;

	item->ag = *agp;

	err = xfs_mru_cache_insert(mp->m_filestream, ip->i_ino, &item->mru);
	if (err) {
		if (err == -EEXIST)
			err = 0;
		goto out_free_item;
	}

	return 0;

out_free_item:
	kmem_free(item);
out_put_ag:
	xfs_filestream_put_ag(mp, *agp);
	return err;
}
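/*
 * Worked example for the thresholds above (editorial sketch; the geometry is
 * an assumption): with sb_agblocks = 1048576, i.e. a 4 GiB AG at a 4 KiB
 * block size, minfree = 1048576 / 50 = 20971 blocks, so an AG needs roughly
 * 80 MiB (2%) free before xfs_filestream_pick_ag() will consider it when no
 * minimum extent length was requested.  When the caller does pass a minlen
 * (as xfs_filestream_new_ag() does), the longest-free-extent test replaces
 * the 2% test.  An AG already hosting another filestream is skipped, and
 * metadata-preferred AGs only become eligible for user data once
 * XFS_PICK_LOWSPACE has been set on the third scan pass.
 */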
static struct xfs_inode *
xfs_filestream_get_parent(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip), *dir = NULL;
	struct dentry		*dentry, *parent;

	dentry = d_find_alias(inode);
	if (!dentry)
		goto out;

	parent = dget_parent(dentry);
	if (!parent)
		goto out_dput;

	dir = igrab(d_inode(parent));
	dput(parent);

out_dput:
	dput(dentry);
out:
	return dir ? XFS_I(dir) : NULL;
}

/*
 * Find the right allocation group for a file, either by finding an
 * existing file stream or creating a new one.
 *
 * Returns NULLAGNUMBER in case of an error.
 */
xfs_agnumber_t
xfs_filestream_lookup_ag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_inode	*pip = NULL;
	xfs_agnumber_t		startag, ag = NULLAGNUMBER;
	struct xfs_mru_cache_elem *mru;

	ASSERT(S_ISREG(VFS_I(ip)->i_mode));

	pip = xfs_filestream_get_parent(ip);
	if (!pip)
		return NULLAGNUMBER;

	mru = xfs_mru_cache_lookup(mp->m_filestream, pip->i_ino);
	if (mru) {
		ag = container_of(mru, struct xfs_fstrm_item, mru)->ag;
		xfs_mru_cache_done(mp->m_filestream);

		trace_xfs_filestream_lookup(mp, ip->i_ino, ag);
		goto out;
	}

	/*
	 * Set the starting AG using the rotor for inode32, otherwise
	 * use the directory inode's AG.
	 */
	if (mp->m_flags & XFS_MOUNT_32BITINODES) {
		xfs_agnumber_t	rotorstep = xfs_rotorstep;
		startag = (mp->m_agfrotor / rotorstep) % mp->m_sb.sb_agcount;
		mp->m_agfrotor = (mp->m_agfrotor + 1) %
				 (mp->m_sb.sb_agcount * rotorstep);
	} else
		startag = XFS_INO_TO_AGNO(mp, pip->i_ino);

	if (xfs_filestream_pick_ag(pip, startag, &ag, 0, 0))
		ag = NULLAGNUMBER;
out:
	IRELE(pip);
	return ag;
}
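/*
 * Illustrative walk-through of the inode32 rotor above (editorial sketch;
 * the values are assumptions): with sb_agcount = 4 and the rotorstep tunable
 * at 1, successive uncached directories start their scans at AGs 0, 1, 2, 3,
 * 0, ... so new streams are spread round-robin across the filesystem.
 * Raising xfs_rotorstep to 3 would hand out each starting AG three times
 * before advancing: 0, 0, 0, 1, 1, 1, ...  For non-inode32 mounts the scan
 * simply starts at the AG that holds the parent directory's inode.
 */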
351 */ 352 int 353 xfs_filestream_new_ag( 354 struct xfs_bmalloca *ap, 355 xfs_agnumber_t *agp) 356 { 357 struct xfs_inode *ip = ap->ip, *pip; 358 struct xfs_mount *mp = ip->i_mount; 359 xfs_extlen_t minlen = ap->length; 360 xfs_agnumber_t startag = 0; 361 int flags = 0; 362 int err = 0; 363 struct xfs_mru_cache_elem *mru; 364 365 *agp = NULLAGNUMBER; 366 367 pip = xfs_filestream_get_parent(ip); 368 if (!pip) 369 goto exit; 370 371 mru = xfs_mru_cache_remove(mp->m_filestream, pip->i_ino); 372 if (mru) { 373 struct xfs_fstrm_item *item = 374 container_of(mru, struct xfs_fstrm_item, mru); 375 startag = (item->ag + 1) % mp->m_sb.sb_agcount; 376 } 377 378 if (xfs_alloc_is_userdata(ap->datatype)) 379 flags |= XFS_PICK_USERDATA; 380 if (ap->dfops->dop_low) 381 flags |= XFS_PICK_LOWSPACE; 382 383 err = xfs_filestream_pick_ag(pip, startag, agp, flags, minlen); 384 385 /* 386 * Only free the item here so we skip over the old AG earlier. 387 */ 388 if (mru) 389 xfs_fstrm_free_func(mp, mru); 390 391 IRELE(pip); 392 exit: 393 if (*agp == NULLAGNUMBER) 394 *agp = 0; 395 return err; 396 } 397 398 void 399 xfs_filestream_deassociate( 400 struct xfs_inode *ip) 401 { 402 xfs_mru_cache_delete(ip->i_mount->m_filestream, ip->i_ino); 403 } 404 405 int 406 xfs_filestream_mount( 407 xfs_mount_t *mp) 408 { 409 /* 410 * The filestream timer tunable is currently fixed within the range of 411 * one second to four minutes, with five seconds being the default. The 412 * group count is somewhat arbitrary, but it'd be nice to adhere to the 413 * timer tunable to within about 10 percent. This requires at least 10 414 * groups. 415 */ 416 return xfs_mru_cache_create(&mp->m_filestream, mp, 417 xfs_fstrm_centisecs * 10, 10, xfs_fstrm_free_func); 418 } 419 420 void 421 xfs_filestream_unmount( 422 xfs_mount_t *mp) 423 { 424 xfs_mru_cache_destroy(mp->m_filestream); 425 } 426