/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
#include "xfs_log_priv.h"
#include "xfs_trans_priv.h"
#include "xfs_filestream.h"
#include "xfs_da_btree.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_sync.h"
#include "xfs_trace.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/parser.h>

static const struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;

#define MNTOPT_LOGBUFS	"logbufs"	/* number of XFS log buffers */
#define MNTOPT_LOGBSIZE	"logbsize"	/* size of XFS log buffers */
#define MNTOPT_LOGDEV	"logdev"	/* log device */
#define MNTOPT_RTDEV	"rtdev"		/* realtime I/O device */
#define MNTOPT_BIOSIZE	"biosize"	/* log2 of preferred buffered io size */
#define MNTOPT_WSYNC	"wsync"		/* safe-mode nfs compatible mount */
#define MNTOPT_NOALIGN	"noalign"	/* turn off stripe alignment */
#define MNTOPT_SWALLOC	"swalloc"	/* turn on stripe width allocation */
#define MNTOPT_SUNIT	"sunit"		/* data volume stripe unit */
#define MNTOPT_SWIDTH	"swidth"	/* data volume stripe width */
#define MNTOPT_NOUUID	"nouuid"	/* ignore filesystem UUID */
#define MNTOPT_MTPT	"mtpt"		/* filesystem mount point */
#define MNTOPT_GRPID	"grpid"		/* group-ID from parent directory */
#define MNTOPT_NOGRPID	"nogrpid"	/* group-ID from current process */
#define MNTOPT_BSDGROUPS    "bsdgroups"    /* group-ID from parent directory */
#define MNTOPT_SYSVGROUPS   "sysvgroups"   /* group-ID from current process */
#define MNTOPT_ALLOCSIZE    "allocsize"    /* preferred allocation size */
#define MNTOPT_NORECOVERY   "norecovery"   /* don't run XFS recovery */
#define MNTOPT_BARRIER	"barrier"	/* use writer barriers for log write and
					 * unwritten extent conversion */
#define MNTOPT_NOBARRIER "nobarrier"	/* .. disable */
#define MNTOPT_64BITINODE   "inode64"	/* inodes can be allocated anywhere */
#define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
#define MNTOPT_NOIKEEP	"noikeep"	/* free empty inode clusters */
#define MNTOPT_LARGEIO	   "largeio"	/* report large I/O sizes in stat() */
#define MNTOPT_NOLARGEIO   "nolargeio"	/* do not report large I/O sizes
					 * in stat(). */
#define MNTOPT_ATTR2	"attr2"		/* do use attr2 attribute format */
#define MNTOPT_NOATTR2	"noattr2"	/* do not use attr2 attribute format */
#define MNTOPT_FILESTREAM  "filestreams" /* use filestreams allocator */
#define MNTOPT_QUOTA	"quota"		/* disk quotas (user) */
#define MNTOPT_NOQUOTA	"noquota"	/* no quotas */
#define MNTOPT_USRQUOTA	"usrquota"	/* user quota enabled */
#define MNTOPT_GRPQUOTA	"grpquota"	/* group quota enabled */
#define MNTOPT_PRJQUOTA	"prjquota"	/* project quota enabled */
#define MNTOPT_UQUOTA	"uquota"	/* user quota (IRIX variant) */
#define MNTOPT_GQUOTA	"gquota"	/* group quota (IRIX variant) */
#define MNTOPT_PQUOTA	"pquota"	/* project quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota accounting, limits not enforced */
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota accounting, limits not enforced */
#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota accounting, limits not enforced */
#define MNTOPT_QUOTANOENF  "qnoenforce"	/* same as uqnoenforce */
#define MNTOPT_DELAYLOG    "delaylog"	/* Delayed logging enabled */
#define MNTOPT_NODELAYLOG  "nodelaylog"	/* Delayed logging disabled */
#define MNTOPT_DISCARD	   "discard"	/* Discard unused blocks */
#define MNTOPT_NODISCARD   "nodiscard"	/* Do not discard unused blocks */
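
/*
 * Illustrative example (option values are arbitrary): several of the
 * options above combined on a single command line:
 *
 *	mount -t xfs -o logbufs=8,logbsize=64k,sunit=512,swidth=2048,inode64 \
 *		/dev/sdb1 /mnt
 */
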
/*
 * Table driven mount option parser.
 *
 * Currently only used for remount, but it will be used for mount
 * in the future, too.
 */
enum {
	Opt_barrier, Opt_nobarrier, Opt_err
};

static const match_table_t tokens = {
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_err, NULL}
};


STATIC unsigned long
suffix_strtoul(char *s, char **endp, unsigned int base)
{
	int	last, shift_left_factor = 0;
	char	*value = s;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
}
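
/*
 * Worked example (illustrative): for s = "64k" the trailing 'k' is
 * stripped and shift_left_factor becomes 10, so the result is
 * 64 << 10 = 65536.  A plain "65536" parses to the same value with no
 * shift applied.
 */
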
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock has _not_ yet been read in.
 *
 * Note that this function leaks the various device name allocations on
 * failure.  The caller takes care of them.
 */
STATIC int
xfs_parseargs(
	struct xfs_mount	*mp,
	char			*options)
{
	struct super_block	*sb = mp->m_super;
	char			*this_char, *value, *eov;
	int			dsunit = 0;
	int			dswidth = 0;
	int			iosize = 0;
	__uint8_t		iosizelog = 0;

	/*
	 * set up the mount name first so all the errors will refer to the
	 * correct device.
	 */
	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
	if (!mp->m_fsname)
		return ENOMEM;
	mp->m_fsname_len = strlen(mp->m_fsname) + 1;

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (sb->s_flags & MS_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (sb->s_flags & MS_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	/*
	 * Set some default flags that could be cleared by the mount option
	 * parsing.
	 */
	mp->m_flags |= XFS_MOUNT_BARRIER;
	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
	mp->m_flags |= XFS_MOUNT_SMALL_INUMS;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;

	if (!options)
		goto done;

	while ((this_char = strsep(&options, ",")) != NULL) {
		if (!*this_char)
			continue;
		if ((value = strchr(this_char, '=')) != NULL)
			*value++ = 0;

		if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logbufs = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logbsize = suffix_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
			if (!mp->m_logname)
				return ENOMEM;
		} else if (!strcmp(this_char, MNTOPT_MTPT)) {
			xfs_warn(mp, "%s option not allowed on this system",
				this_char);
			return EINVAL;
		} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
			if (!mp->m_rtname)
				return ENOMEM;
		} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			iosize = simple_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			iosize = suffix_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_GRPID) ||
			   !strcmp(this_char, MNTOPT_BSDGROUPS)) {
			mp->m_flags |= XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
			   !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
			mp->m_flags &= ~XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
			mp->m_flags |= XFS_MOUNT_WSYNC;
		} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
			mp->m_flags |= XFS_MOUNT_NORECOVERY;
		} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
			mp->m_flags |= XFS_MOUNT_NOALIGN;
		} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
			mp->m_flags |= XFS_MOUNT_SWALLOC;
		} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			dsunit = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			dswidth = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
#if !XFS_BIG_INUMS
			xfs_warn(mp, "%s option not allowed on this system",
				this_char);
			return EINVAL;
#endif
		} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
			mp->m_flags |= XFS_MOUNT_NOUUID;
		} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
			mp->m_flags |= XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
			mp->m_flags |= XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
			mp->m_flags &= ~XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
			mp->m_flags |= XFS_MOUNT_ATTR2;
		} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
			mp->m_flags &= ~XFS_MOUNT_ATTR2;
			mp->m_flags |= XFS_MOUNT_NOATTR2;
		} else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		} else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
			mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					  XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					  XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					  XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
			   !strcmp(this_char, MNTOPT_UQUOTA) ||
			   !strcmp(this_char, MNTOPT_USRQUOTA)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					 XFS_UQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
			   !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
			   !strcmp(this_char, MNTOPT_PRJQUOTA)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					 XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
			   !strcmp(this_char, MNTOPT_GRPQUOTA)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					 XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
			xfs_warn(mp,
	"delaylog is the default now, option is deprecated.");
		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
			xfs_warn(mp,
	"nodelaylog support has been removed, option is deprecated.");
		} else if (!strcmp(this_char, MNTOPT_DISCARD)) {
			mp->m_flags |= XFS_MOUNT_DISCARD;
		} else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		} else if (!strcmp(this_char, "ihashsize")) {
			xfs_warn(mp,
	"ihashsize no longer used, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisdsync")) {
			xfs_warn(mp,
	"osyncisdsync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisosync")) {
			xfs_warn(mp,
	"osyncisosync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "irixsgid")) {
			xfs_warn(mp,
	"irixsgid is now a sysctl(2) variable, option is deprecated.");
		} else {
			xfs_warn(mp, "unknown mount option [%s].", this_char);
			return EINVAL;
		}
	}

	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return EINVAL;
	}

#ifndef CONFIG_XFS_QUOTA
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return EINVAL;
	}
#endif

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
		xfs_warn(mp, "cannot mount with both project and group quota");
		return EINVAL;
	}

	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return EINVAL;
	}

	if (dsunit && (dswidth % dsunit != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			dswidth, dsunit);
		return EINVAL;
	}

done:
	if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
		/*
		 * At this point the superblock has not been read
		 * in, therefore we do not know the block size.
		 * Before the mount call ends we will convert
		 * these to FSBs.
		 */
		if (dsunit) {
			mp->m_dalign = dsunit;
			mp->m_flags |= XFS_MOUNT_RETERR;
		}

		if (dswidth)
			mp->m_swidth = dswidth;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return XFS_ERROR(EINVAL);
	}
	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return XFS_ERROR(EINVAL);
	}

	if (iosizelog) {
		if (iosizelog > XFS_MAX_IO_LOG ||
		    iosizelog < XFS_MIN_IO_LOG) {
			xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
				iosizelog, XFS_MIN_IO_LOG,
				XFS_MAX_IO_LOG);
			return XFS_ERROR(EINVAL);
		}

		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
		mp->m_readio_log = iosizelog;
		mp->m_writeio_log = iosizelog;
	}

	return 0;
}
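
/*
 * Illustrative failure cases for the checks above: "sunit=512" without a
 * matching "swidth" is rejected, "logbsize=96k" is rejected because it is
 * not a power of two, and "norecovery" without "ro" is rejected because
 * no-recovery mounts must be read-only.
 */
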
struct proc_xfs_info {
	int	flag;
	char	*str;
};

STATIC int
xfs_showargs(
	struct xfs_mount	*mp,
	struct seq_file		*m)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		"," MNTOPT_IKEEP },
		{ XFS_MOUNT_WSYNC,		"," MNTOPT_WSYNC },
		{ XFS_MOUNT_NOALIGN,		"," MNTOPT_NOALIGN },
		{ XFS_MOUNT_SWALLOC,		"," MNTOPT_SWALLOC },
		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
		{ XFS_MOUNT_ATTR2,		"," MNTOPT_ATTR2 },
		{ XFS_MOUNT_FILESTREAMS,	"," MNTOPT_FILESTREAM },
		{ XFS_MOUNT_GRPID,		"," MNTOPT_GRPID },
		{ XFS_MOUNT_DISCARD,		"," MNTOPT_DISCARD },
		{ 0, NULL }
	};
	static struct proc_xfs_info xfs_info_unset[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_COMPAT_IOSIZE,	"," MNTOPT_LARGEIO },
		{ XFS_MOUNT_BARRIER,		"," MNTOPT_NOBARRIER },
		{ XFS_MOUNT_SMALL_INUMS,	"," MNTOPT_64BITINODE },
		{ 0, NULL }
	};
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}
	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
		if (!(mp->m_flags & xfs_infop->flag))
			seq_puts(m, xfs_infop->str);
	}

	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
				(int)(1 << mp->m_writeio_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
	if (mp->m_rtname)
		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, "," MNTOPT_SUNIT "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, "," MNTOPT_USRQUOTA);
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, "," MNTOPT_UQUOTANOENF);

	/* Either project or group quotas can be active, not both */

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_PRJQUOTA);
		else
			seq_puts(m, "," MNTOPT_PQUOTANOENF);
	} else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_GRPQUOTA);
		else
			seq_puts(m, "," MNTOPT_GQUOTANOENF);
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, "," MNTOPT_NOQUOTA);

	return 0;
}
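
/*
 * Illustrative output: for a filesystem mounted with an external log and
 * enforced user quotas, xfs_showargs() would emit something like
 *
 *	,logdev=/dev/sdc1,usrquota
 *
 * Each fragment carries a leading comma so it can be appended directly
 * to the base options shown in /proc/mounts.
 */
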
__uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_write_begin does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBDAF)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}
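
/*
 * Worked examples (illustrative): on a 64-bit kernel pagefactor stays 1
 * and bitshift is 63, giving a limit of 2^63 - 1.  On a 32-bit kernel
 * with CONFIG_LBDAF and 4k pages the limit is (4096 << 32) - 1, i.e.
 * 2^44 - 1 (16 TiB - 1); without CONFIG_LBDAF and with 4k blocks it is
 * (4096 << 31) - 1, i.e. 2^43 - 1, the ~8Tb wrap noted above.
 */
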
STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error);
	}

	/* flip the negative errno from PTR_ERR() to the positive errno
	 * convention used inside XFS */
	return -error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
	}
	xfs_free_buftarg(mp, mp->m_ddev_targp);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
							mp->m_fsname);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
							mp->m_fsname);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp, mp->m_ddev_targp);
 out_close_rtdev:
	if (rtdev)
		xfs_blkdev_put(rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev)
		xfs_blkdev_put(logdev);
 out:
	return error;
}
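
/*
 * Note that the out_* labels above unwind in reverse order of setup:
 * buffer targets are torn down before the block devices backing them
 * are released, and only the devices actually opened here are put.
 */
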
/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
				    mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    mp->m_sb.sb_blocksize,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_blocksize,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	XFS_STATS_INC(vn_reclaim);

	/* bad inode, get out here ASAP */
	if (is_bad_inode(inode))
		goto out_reclaim;

	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the
	 * inode is clean, it still may be under IO and hence we have
	 * to take the flush lock.  The background reclaim path handles
	 * this more efficiently than we can here, so simply let background
	 * reclaim tear down all inodes.
	 */
out_reclaim:
	xfs_inode_set_reclaim_tag(ip);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab.  This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
 * we catch unlogged VFS level updates to the inode.
 *
 * We need the barrier() to maintain correct ordering between unlogged
 * updates and the transaction commit code that clears the i_update_core
 * field.  This requires all updates to be completed before marking the
 * inode dirty.
 */
STATIC void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flags)
{
	barrier();
	XFS_I(inode)->i_update_core = 1;
}

STATIC int
xfs_fs_write_inode(
	struct inode		*inode,
	struct writeback_control *wbc)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = EAGAIN;

	trace_xfs_write_inode(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) {
		/*
		 * Make sure the inode has made it into the log.  Instead
		 * of forcing it all the way to stable storage using a
		 * synchronous transaction we let the log force inside the
		 * ->sync_fs call do that for us, which reduces the number
		 * of synchronous log forces dramatically.
		 */
		error = xfs_log_dirty_inode(ip, NULL, 0);
		if (error)
			goto out;
		return 0;
	} else {
		if (!ip->i_update_core)
			return 0;

		/*
		 * We make this non-blocking if the inode is contended,
		 * returning EAGAIN to indicate to the caller that they
		 * did not succeed.  This prevents the flush path from
		 * blocking on inodes inside another operation right now,
		 * they get caught later by xfs_sync.
		 */
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
			goto out;

		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
			goto out_unlock;

		/*
		 * Now we have the flush lock and the inode is not pinned, we
		 * can check if the inode is really clean as we know that
		 * there are no pending transaction completions, it is not
		 * waiting on the delayed write queue and there is no IO in
		 * progress.
		 */
		if (xfs_inode_clean(ip)) {
			xfs_ifunlock(ip);
			error = 0;
			goto out_unlock;
		}
		error = xfs_iflush(ip, SYNC_TRYLOCK);
	}

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 out:
	/*
	 * if we failed to write out the inode then mark
	 * it dirty again so we'll try again later.
	 */
	if (error)
		xfs_mark_inode_dirty_sync(ip);
	return -error;
}

STATIC void
xfs_fs_evict_inode(
	struct inode		*inode)
{
	xfs_inode_t		*ip = XFS_I(inode);

	trace_xfs_evict_inode(ip);

	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);
	XFS_STATS_INC(vn_rele);
	XFS_STATS_INC(vn_remove);
	XFS_STATS_DEC(vn_active);

	/*
	 * The iolock is used by the file system to coordinate reads,
	 * writes, and block truncates.  Up to this point the lock
	 * protected concurrent accesses by users of the inode.  But
	 * from here forward we're doing some final processing of the
	 * inode because we're done with it, and although we reuse the
	 * iolock for protection it is really a distinct lock class
	 * (in the lockdep sense) from before.  To keep lockdep happy
	 * (and basically indicate what we are doing), we explicitly
	 * re-init the iolock here.
	 */
	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");

	xfs_inactive(ip);
}

STATIC void
xfs_free_fsname(
	struct xfs_mount	*mp)
{
	kfree(mp->m_fsname);
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
}

STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_syncd_stop(mp);

	/*
	 * Blow away any referenced inode in the filestreams cache.
	 * This can and will cause log traffic as inodes go inactive
	 * here.
	 */
	xfs_filestream_unmount(mp);

	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	xfs_unmountfs(mp);
	xfs_freesb(mp);
	xfs_icsb_destroy_counters(mp);
	xfs_close_devices(mp);
	xfs_free_fsname(mp);
	kfree(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	error = xfs_quiesce_data(mp);
	if (error)
		return -error;

	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work_sync(&mp->m_sync_work);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
	__uint64_t		fakeinos, id;
	xfs_extlen_t		lsize;
	__int64_t		ffree;

	statp->f_type = XFS_SB_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	statp->f_bfree = statp->f_bavail =
				sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	fakeinos = statp->f_bfree << sbp->sb_inopblog;
	statp->f_files =
	    MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
	if (mp->m_maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					mp->m_maxicount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
	statp->f_ffree = max_t(__int64_t, ffree, 0);

	spin_unlock(&mp->m_sb_lock);

	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);
	return 0;
}
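
/*
 * Worked example (illustrative): with sb_inopblog = 4 (16 inodes per
 * block) and 1000 free blocks, fakeinos is 1000 << 4 = 16000.  The
 * reported f_files thus assumes every free block could still be turned
 * into inodes, clamped to XFS_MAXINUMBER and, if set, m_maxicount.
 */
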
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	__uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	__uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	struct xfs_mount	*mp = XFS_M(sb);
	substring_t		args[MAX_OPT_ARGS];
	char			*p;
	int			error;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_barrier:
			mp->m_flags |= XFS_MOUNT_BARRIER;
			break;
		case Opt_nobarrier:
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
			break;
		default:
			/*
			 * Logically we would return an error here to prevent
			 * users from believing they might have changed
			 * mount options using remount which can't be changed.
			 *
			 * But unfortunately mount(8) adds all options from
			 * mtab and fstab to the mount arguments in some cases
			 * so we can't blindly reject options, but have to
			 * check for each specified option if it actually
			 * differs from the currently set option and only
			 * reject it if that's the case.
			 *
			 * Until that is implemented we return success for
			 * every remount request, and silently ignore all
			 * options that we can't actually change.
			 */
#if 0
			xfs_info(mp,
		"mount option \"%s\" not supported for remount\n", p);
			return -EINVAL;
#else
			break;
#endif
		}
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
		mp->m_flags &= ~XFS_MOUNT_RDONLY;

		/*
		 * If this is the first remount to writeable state we
		 * might have some superblock changes to update.
		 */
		if (mp->m_update_flags) {
			error = xfs_mount_log_sb(mp, mp->m_update_flags);
			if (error) {
				xfs_warn(mp, "failed to write sb changes");
				return error;
			}
			mp->m_update_flags = 0;
		}

		/*
		 * Fill out the reserve pool if it is empty.  Use the stashed
		 * value if it is non-zero, otherwise go with the default.
		 */
		xfs_restore_resvblks(mp);
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
		/*
		 * After we have synced the data but before we sync the
		 * metadata, we need to free up the reserve block pool so that
		 * the used block count in the superblock on disk is correct at
		 * the end of the remount.  Stash the current reserve pool size
		 * so that if we get remounted rw, we can return it to the same
		 * size.
		 */

		xfs_quiesce_data(mp);
		xfs_save_resvblks(mp);
		xfs_quiesce_attr(mp);
		mp->m_flags |= XFS_MOUNT_RDONLY;
	}

	return 0;
}
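
/*
 * Illustrative remount examples: "mount -o remount,nobarrier /mnt"
 * clears XFS_MOUNT_BARRIER above, while "mount -o remount,ro /mnt"
 * syncs data, empties the reserve block pool and quiesces the log
 * before the filesystem goes read-only.
 */
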
/*
 * Second stage of a freeze.  The data is already frozen so we only
 * need to take care of the metadata.  Once that's done write a dummy
 * record to dirty the log in case of a crash while frozen.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	return -xfs_fs_log_dummy(mp);
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	return 0;
}

STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	return -xfs_showargs(XFS_M(root->d_sb), m);
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return XFS_ERROR(EINVAL);
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return XFS_ERROR(EINVAL);
		}
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return XFS_ERROR(EROFS);
	}

	return 0;
}
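
/*
 * Illustrative cases for the version 2 log check above: with
 * sb_logsunit = 64k and no logbsize option, m_logbsize is raised to 64k
 * automatically; an explicit logbsize=32k on the same filesystem is
 * rejected because the log buffers would be smaller than the log stripe
 * unit.
 */
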
STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*root;
	struct xfs_mount	*mp = NULL;
	int			flags = 0, error = ENOMEM;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
	if (!mp)
		goto out;

	spin_lock_init(&mp->m_sb_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);

	mp->m_super = sb;
	sb->s_fs_info = mp;

	error = xfs_parseargs(mp, (char *)data);
	if (error)
		goto out_free_fsname;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
#endif
	sb->s_op = &xfs_super_operations;

	if (silent)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_fsname;

	error = xfs_icsb_init_counters(mp);
	if (error)
		goto out_close_devices;

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_destroy_counters;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 * For the same reason we must also initialise the syncd and register
	 * the inode cache shrinker so that inodes can be reclaimed during
	 * operations like a quotacheck that iterate all inodes in the
	 * filesystem.
	 */
	sb->s_magic = XFS_SB_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	error = xfs_syncd_init(mp);
	if (error)
		goto out_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = ENOENT;
		goto out_syncd_stop;
	}
	if (is_bad_inode(root)) {
		error = EINVAL;
		goto out_syncd_stop;
	}
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		error = ENOMEM;
		goto out_iput;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_destroy_counters:
	xfs_icsb_destroy_counters(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_fsname:
	xfs_free_fsname(mp);
	kfree(mp);
 out:
	return -error;

 out_iput:
	iput(root);
 out_syncd_stop:
	xfs_syncd_stop(mp);
 out_unmount:
	/*
	 * Blow away any referenced inode in the filestreams cache.
	 * This can and will cause log traffic as inodes go inactive
	 * here.
	 */
	xfs_filestream_unmount(mp);

	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	xfs_unmountfs(mp);
	goto out_free_sb;
}

STATIC struct dentry *
xfs_fs_mount(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data)
{
	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}

static int
xfs_fs_nr_cached_objects(
	struct super_block	*sb)
{
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static void
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	int			nr_to_scan)
{
	xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.write_inode		= xfs_fs_write_inode,
	.evict_inode		= xfs_fs_evict_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.mount			= xfs_fs_mount,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};

STATIC int __init
xfs_init_zones(void)
{

	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out;

	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
						  xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_destroy_ioend_zone;

	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
						"xfs_log_ticket");
	if (!xfs_log_ticket_zone)
		goto out_destroy_ioend_pool;

	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
						"xfs_bmap_free_item");
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
						"xfs_btree_cur");
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
						"xfs_da_state");
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
	if (!xfs_dabuf_zone)
		goto out_destroy_da_state_zone;

	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
	if (!xfs_ifork_zone)
		goto out_destroy_dabuf_zone;

	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	xfs_log_item_desc_zone =
		kmem_zone_init(sizeof(struct xfs_log_item_desc),
			       "xfs_log_item_desc");
	if (!xfs_log_item_desc_zone)
		goto out_destroy_trans_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
				(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
				  NBWORD) * sizeof(int))), "xfs_buf_item");
	if (!xfs_buf_item_zone)
		goto out_destroy_log_item_desc_zone;

	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
				 sizeof(xfs_extent_t))), "xfs_efd_item");
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
				sizeof(xfs_extent_t))), "xfs_efi_item");
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
			xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
					KM_ZONE_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	return 0;

 out_destroy_inode_zone:
	kmem_zone_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_zone_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_zone_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_zone_destroy(xfs_buf_item_zone);
 out_destroy_log_item_desc_zone:
	kmem_zone_destroy(xfs_log_item_desc_zone);
 out_destroy_trans_zone:
	kmem_zone_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_zone_destroy(xfs_ifork_zone);
 out_destroy_dabuf_zone:
	kmem_zone_destroy(xfs_dabuf_zone);
 out_destroy_da_state_zone:
	kmem_zone_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_zone_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_zone_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_zone_destroy(xfs_log_ticket_zone);
 out_destroy_ioend_pool:
	mempool_destroy(xfs_ioend_pool);
 out_destroy_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out:
	return -ENOMEM;
}
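
/*
 * Worked example for the buf log item sizing above (illustrative,
 * assuming the usual constants: XFS_MAX_BLOCKSIZE of 64k, 128-byte
 * XFS_BLF_CHUNK, 32-bit NBWORD): the dirty bitmap needs
 * (65536 / 128) / 32 = 16 words, so 16 * sizeof(int) = 64 extra bytes
 * are allocated per item beyond sizeof(xfs_buf_log_item_t).
 */
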
STATIC void
xfs_destroy_zones(void)
{
	kmem_zone_destroy(xfs_ili_zone);
	kmem_zone_destroy(xfs_inode_zone);
	kmem_zone_destroy(xfs_efi_zone);
	kmem_zone_destroy(xfs_efd_zone);
	kmem_zone_destroy(xfs_buf_item_zone);
	kmem_zone_destroy(xfs_log_item_desc_zone);
	kmem_zone_destroy(xfs_trans_zone);
	kmem_zone_destroy(xfs_ifork_zone);
	kmem_zone_destroy(xfs_dabuf_zone);
	kmem_zone_destroy(xfs_da_state_zone);
	kmem_zone_destroy(xfs_btree_cur_zone);
	kmem_zone_destroy(xfs_bmap_free_item_zone);
	kmem_zone_destroy(xfs_log_ticket_zone);
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_ioend_zone);

}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * We never want the same work item to run twice: reclaiming inodes
	 * or idling the log is not going to get any faster by multiple CPUs
	 * competing for resources.  Use the default large max_active value
	 * so that even lots of filesystems can perform these tasks in
	 * parallel.
	 */
	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_NON_REENTRANT, 0);
	if (!xfs_syncd_wq)
		return -ENOMEM;
	return 0;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_syncd_wq);
}

STATIC int __init
init_xfs_fs(void)
{
	int			error;

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_filestream_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_buf_init();
	if (error)
		goto out_filestream_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	vfs_initquota();

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_sysctl_unregister;
	return 0;

 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_filestream_uninit:
	xfs_filestream_uninit();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	vfs_exitquota();
	unregister_filesystem(&xfs_fs_type);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_filestream_uninit();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");