Lines matching refs:ip (each fragment below is prefixed with its line number in the matched source file)
54 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards to
67 struct xfs_inode *ip)
71 if (xfs_need_iread_extents(&ip->i_df))
73 xfs_ilock(ip, lock_mode);
79 struct xfs_inode *ip)
83 if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
85 xfs_ilock(ip, lock_mode);
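The comment fragment at source line 54 gives the rationale for the two wrappers that follow: the ILOCK also guards reading extents in from disk, so callers that only want to read a fork's extent map cannot unconditionally take XFS_ILOCK_SHARED. A minimal reconstruction of the two helpers, assembled from the matched fragments; the exact bodies in the searched file may differ:

    uint
    xfs_ilock_data_map_shared(
            struct xfs_inode        *ip)
    {
            uint                    lock_mode = XFS_ILOCK_SHARED;

            /* Reading extents in from disk modifies the fork: go exclusive. */
            if (xfs_need_iread_extents(&ip->i_df))
                    lock_mode = XFS_ILOCK_EXCL;
            xfs_ilock(ip, lock_mode);
            return lock_mode;
    }

    uint
    xfs_ilock_attr_map_shared(
            struct xfs_inode        *ip)
    {
            uint                    lock_mode = XFS_ILOCK_SHARED;

            /* Same pattern for the attribute fork, when one exists. */
            if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
                    lock_mode = XFS_ILOCK_EXCL;
            xfs_ilock(ip, lock_mode);
            return lock_mode;
    }

Callers keep the returned lock_mode and pass it back to xfs_iunlock() when done.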
141 xfs_inode_t *ip,
144 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
149 down_write_nested(&VFS_I(ip)->i_rwsem,
152 down_read_nested(&VFS_I(ip)->i_rwsem,
157 down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
160 down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
165 down_write_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
167 down_read_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
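The xfs_ilock() fragments above show one entry point multiplexing three lock classes: XFS_IOLOCK_* maps to the VFS i_rwsem, XFS_MMAPLOCK_* to i_mapping->invalidate_lock, and XFS_ILOCK_* to the XFS-internal i_lock. A hypothetical caller sketch (not from the matched file) illustrating the acquisition order these locks require, outermost first:

    /* Sketch: acquire in IOLOCK -> MMAPLOCK -> ILOCK order, release in reverse. */
    xfs_ilock(ip, XFS_IOLOCK_EXCL);         /* VFS i_rwsem: serializes file I/O */
    xfs_ilock(ip, XFS_MMAPLOCK_EXCL);       /* invalidate_lock: holds off page faults */
    xfs_ilock(ip, XFS_ILOCK_EXCL);          /* i_lock: protects inode metadata */
    /* ... modify the inode ... */
    xfs_iunlock(ip, XFS_ILOCK_EXCL);
    xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
    xfs_iunlock(ip, XFS_IOLOCK_EXCL);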
177 * ip -- the inode being locked
184 xfs_inode_t *ip,
187 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
192 if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
195 if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
200 if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
203 if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
208 if (!down_write_trylock(&ip->i_lock))
211 if (!down_read_trylock(&ip->i_lock))
218 up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
220 up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
223 up_write(&VFS_I(ip)->i_rwsem);
225 up_read(&VFS_I(ip)->i_rwsem);
236 * ip -- the inode being unlocked
244 xfs_inode_t *ip,
250 up_write(&VFS_I(ip)->i_rwsem);
252 up_read(&VFS_I(ip)->i_rwsem);
255 up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
257 up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
260 up_write(&ip->i_lock);
262 up_read(&ip->i_lock);
264 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
273 xfs_inode_t *ip,
281 downgrade_write(&ip->i_lock);
283 downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
285 downgrade_write(&VFS_I(ip)->i_rwsem);
287 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
292 struct xfs_inode *ip,
300 rwsem_assert_held(&ip->i_lock);
302 rwsem_assert_held_write_nolockdep(&ip->i_lock);
305 rwsem_assert_held(&VFS_I(ip)->i_mapping->invalidate_lock);
307 rwsem_assert_held_write(&VFS_I(ip)->i_mapping->invalidate_lock);
310 rwsem_assert_held(&VFS_I(ip)->i_rwsem);
312 rwsem_assert_held_write(&VFS_I(ip)->i_rwsem);
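The xfs_assert_ilocked() fragments map each lock-flag class onto an rwsem assertion. A reconstruction, assuming a plain if/else split per class; the _nolockdep variant on the exclusive ILOCK check comes straight from the fragment at source line 302 (lockdep cannot verify the owner when the unlock happens from a workqueue):

    void
    xfs_assert_ilocked(
            struct xfs_inode        *ip,
            uint                    lock_flags)
    {
            if (lock_flags & XFS_ILOCK_SHARED)
                    rwsem_assert_held(&ip->i_lock);
            else if (lock_flags & XFS_ILOCK_EXCL)
                    rwsem_assert_held_write_nolockdep(&ip->i_lock);

            if (lock_flags & XFS_MMAPLOCK_SHARED)
                    rwsem_assert_held(&VFS_I(ip)->i_mapping->invalidate_lock);
            else if (lock_flags & XFS_MMAPLOCK_EXCL)
                    rwsem_assert_held_write(&VFS_I(ip)->i_mapping->invalidate_lock);

            if (lock_flags & XFS_IOLOCK_SHARED)
                    rwsem_assert_held(&VFS_I(ip)->i_rwsem);
            else if (lock_flags & XFS_IOLOCK_EXCL)
                    rwsem_assert_held_write(&VFS_I(ip)->i_rwsem);
    }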
593 struct xfs_inode *ip = NULL;
600 error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
604 ASSERT(ip != NULL);
605 xfs_trans_ijoin(tp, ip, 0);
606 xfs_inode_init(tp, args, ip);
609 xfs_setup_inode(ip);
611 *ipp = ip;
721 error = xfs_icreate(tp, ino, args, &du.ip);
751 xfs_qm_vop_create_dqattach(tp, du.ip, udqp, gdqp, pdqp);
761 *ipp = du.ip;
762 xfs_iunlock(du.ip, XFS_ILOCK_EXCL);
775 if (du.ip) {
776 xfs_iunlock(du.ip, XFS_ILOCK_EXCL);
777 xfs_finish_inode_setup(du.ip);
778 xfs_irele(du.ip);
799 struct xfs_inode *ip = NULL;
829 error = xfs_icreate(tp, ino, args, &ip);
841 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
843 error = xfs_iunlink(tp, ip);
855 *ipp = ip;
856 xfs_iunlock(ip, XFS_ILOCK_EXCL);
867 if (ip) {
868 xfs_iunlock(ip, XFS_ILOCK_EXCL);
869 xfs_finish_inode_setup(ip);
870 xfs_irele(ip);
889 .ip = sip,
988 struct xfs_inode *ip)
993 if (!xfs_is_reflink_inode(ip))
995 dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
996 cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
998 ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1000 xfs_inode_clear_cowblocks_tag(ip);
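Fragments 988-1000 outline the post-truncate reflink cleanup: once truncation empties the forks, the inode can drop its reflink flag and its COW-blocks tag. A reconstruction; the two if_bytes emptiness tests are assumptions based on the upstream helper:

    static void
    xfs_itruncate_clear_reflink_flags(
            struct xfs_inode        *ip)
    {
            struct xfs_ifork        *dfork;
            struct xfs_ifork        *cfork;

            if (!xfs_is_reflink_inode(ip))
                    return;
            dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
            cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
            /* Assumed conditions: clear the flag only once both forks are empty. */
            if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
                    ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
            if (cfork->if_bytes == 0)
                    xfs_inode_clear_cowblocks_tag(ip);
    }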
1027 struct xfs_inode *ip,
1032 struct xfs_mount *mp = ip->i_mount;
1037 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1038 if (atomic_read(&VFS_I(ip)->i_count))
1039 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
1040 ASSERT(new_size <= XFS_ISIZE(ip));
1042 ASSERT(ip->i_itemp != NULL);
1043 ASSERT(ip->i_itemp->ili_lock_flags == 0);
1044 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1046 trace_xfs_itruncate_extents_start(ip, new_size);
1066 error = xfs_bunmapi_range(&tp, ip, flags, first_unmap_block,
1073 error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1078 xfs_itruncate_clear_reflink_flags(ip);
1085 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1087 trace_xfs_itruncate_extents_end(ip, new_size);
1146 struct xfs_inode *ip)
1148 struct xfs_mount *mp = ip->i_mount;
1157 xfs_ilock(ip, XFS_ILOCK_EXCL);
1158 xfs_trans_ijoin(tp, ip, 0);
1165 ip->i_disk_size = 0;
1166 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1168 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1172 ASSERT(ip->i_df.if_nextents == 0);
1178 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1184 xfs_iunlock(ip, XFS_ILOCK_EXCL);
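Fragments 1146-1184 trace the canonical XFS transaction shape around a truncate: allocate a transaction, take XFS_ILOCK_EXCL, join the inode, zero and log the size, unmap the extents, commit, unlock. A condensed sketch of xfs_inactive_truncate() along those lines; the tr_itruncate reservation is assumed from the upstream source and the error paths are simplified relative to it:

    STATIC int
    xfs_inactive_truncate(
            struct xfs_inode        *ip)
    {
            struct xfs_mount        *mp = ip->i_mount;
            struct xfs_trans        *tp;
            int                     error;

            error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
            if (error)
                    return error;
            xfs_ilock(ip, XFS_ILOCK_EXCL);
            xfs_trans_ijoin(tp, ip, 0);

            /* Log the zeroed size first to avoid stale data exposure on crash. */
            ip->i_disk_size = 0;
            xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

            error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
            if (error) {
                    xfs_trans_cancel(tp);
                    xfs_iunlock(ip, XFS_ILOCK_EXCL);
                    return error;
            }
            ASSERT(ip->i_df.if_nextents == 0);

            error = xfs_trans_commit(tp);
            xfs_iunlock(ip, XFS_ILOCK_EXCL);
            return error;
    }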
1195 struct xfs_inode *ip)
1197 struct xfs_mount *mp = ip->i_mount;
1250 xfs_ilock(ip, XFS_ILOCK_EXCL);
1251 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1253 error = xfs_ifree(tp, ip);
1254 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1273 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1286 struct xfs_inode *ip)
1288 struct xfs_mount *mp = ip->i_mount;
1289 struct xfs_ifork *cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
1295 if (VFS_I(ip)->i_mode == 0)
1310 if (xfs_is_internal_inode(ip))
1318 if (VFS_I(ip)->i_nlink == 0)
1329 return xfs_can_free_eofblocks(ip);
1338 struct xfs_inode *ip)
1340 struct xfs_mount *mp = ip->i_mount;
1345 xfs_inode_measure_sickness(ip, &sick, &checked);
1349 trace_xfs_inode_unfixed_corruption(ip, sick);
1354 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1375 xfs_inode_t *ip)
1385 if (VFS_I(ip)->i_mode == 0) {
1386 ASSERT(ip->i_df.if_broot_bytes == 0);
1390 mp = ip->i_mount;
1391 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1393 xfs_inactive_health(ip);
1403 if (xfs_is_internal_inode(ip))
1407 if (xfs_inode_has_cow_data(ip)) {
1408 error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1413 if (VFS_I(ip)->i_nlink != 0) {
1419 if (xfs_can_free_eofblocks(ip))
1420 error = xfs_free_eofblocks(ip);
1425 if (S_ISREG(VFS_I(ip)->i_mode) &&
1426 (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1427 xfs_inode_has_filedata(ip)))
1430 if (xfs_iflags_test(ip, XFS_IQUOTAUNCHECKED)) {
1439 xfs_qm_dqdetach(ip);
1441 error = xfs_qm_dqattach(ip);
1446 if (S_ISDIR(VFS_I(ip)->i_mode) && ip->i_df.if_nextents > 0) {
1447 xfs_inactive_dir(ip);
1451 if (S_ISLNK(VFS_I(ip)->i_mode))
1452 error = xfs_inactive_symlink(ip);
1454 error = xfs_inactive_truncate(ip);
1463 if (xfs_inode_has_attr_fork(ip)) {
1464 error = xfs_attr_inactive(ip);
1469 ASSERT(ip->i_forkoff == 0);
1474 error = xfs_inactive_ifree(ip);
1481 xfs_qm_dqdetach(ip);
1496 struct xfs_inode *ip;
1499 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
1500 if (!ip) {
1510 if (WARN_ON_ONCE(!ip->i_ino)) {
1514 ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
1516 return ip;
1592 struct xfs_inode *ip;
1596 ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
1599 if (!ip) {
1610 spin_lock(&ip->i_flags_lock);
1611 if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
1620 if (ip != free_ip) {
1621 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
1622 spin_unlock(&ip->i_flags_lock);
1628 ip->i_flags |= XFS_ISTALE;
1635 iip = ip->i_itemp;
1636 if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
1651 __xfs_iflags_set(ip, XFS_IFLUSHING);
1652 spin_unlock(&ip->i_flags_lock);
1663 if (ip != free_ip)
1664 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1668 if (ip != free_ip)
1669 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1671 spin_unlock(&ip->i_flags_lock);
1781 struct xfs_inode *ip)
1783 struct xfs_mount *mp = ip->i_mount;
1786 struct xfs_inode_log_item *iip = ip->i_itemp;
1789 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1790 ASSERT(VFS_I(ip)->i_nlink == 0);
1791 ASSERT(ip->i_df.if_nextents == 0);
1792 ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
1793 ASSERT(ip->i_nblocks == 0);
1795 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1797 error = xfs_inode_uninit(tp, pag, ip, &xic);
1801 if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
1802 xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
1810 error = xfs_ifree_cluster(tp, pag, ip, &xic);
1823 struct xfs_inode *ip)
1825 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED);
1827 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
1830 xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
1836 struct xfs_inode *ip)
1838 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
1839 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
1841 xfs_iunpin(ip);
1845 if (xfs_ipincount(ip))
1847 } while (xfs_ipincount(ip));
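Fragments 1836-1847 are the classic wait-bit idiom: force the log so the inode can be unpinned, then sleep on the pin-count bit until it drains. Reconstructed; the prepare_to_wait()/io_schedule()/finish_wait() plumbing between the matched lines is assumed from the upstream source:

    static void
    __xfs_iunpin_wait(
            struct xfs_inode        *ip)
    {
            wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
            DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

            xfs_iunpin(ip);         /* kick the log up to the inode's commit seq */

            do {
                    prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
                    if (xfs_ipincount(ip))
                            io_schedule();
            } while (xfs_ipincount(ip));
            finish_wait(wq, &wait.wq_entry);
    }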
1853 struct xfs_inode *ip)
1855 if (xfs_ipincount(ip))
1856 __xfs_iunpin_wait(ip);
1890 struct xfs_inode *ip)
1895 .ip = ip,
1899 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
1915 error = xfs_qm_dqattach(ip);
1935 error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
1958 if (is_dir && xfs_inode_is_filestream(ip))
1959 xfs_filestream_deassociate(ip);
1961 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1969 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2120 .ip = src_ip,
2125 .ip = target_ip,
2151 &du_wip.ip);
2159 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, du_wip.ip,
2166 if (du_wip.ip) {
2181 target_name->len, du_wip.ip != NULL);
2228 if (du_wip.ip)
2229 xfs_trans_ijoin(tp, du_wip.ip, 0);
2297 if (inodes[i] == du_wip.ip ||
2317 if (du_wip.ip) {
2323 VFS_I(du_wip.ip)->i_state &= ~I_LINKABLE;
2349 if (du_wip.ip)
2350 xfs_irele(du_wip.ip);
2358 struct xfs_inode *ip,
2361 struct xfs_inode_log_item *iip = ip->i_itemp;
2363 struct xfs_mount *mp = ip->i_mount;
2366 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED);
2367 ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
2368 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
2369 ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
2372 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
2385 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
2388 if (ip->i_df.if_format == XFS_DINODE_FMT_META_BTREE) {
2389 if (!S_ISREG(VFS_I(ip)->i_mode) ||
2390 !(ip->i_diflags2 & XFS_DIFLAG2_METADATA)) {
2393 __func__, xfs_metafile_type_str(ip->i_metatype),
2394 ip->i_ino, ip);
2397 } else if (S_ISREG(VFS_I(ip)->i_mode)) {
2399 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
2400 ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
2404 __func__, ip->i_ino, ip);
2407 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
2409 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
2410 ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
2411 ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
2415 __func__, ip->i_ino, ip);
2419 if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
2420 ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
2424 __func__, ip->i_ino,
2425 ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
2426 ip->i_nblocks, ip);
2429 if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
2433 __func__, ip->i_ino, ip->i_forkoff, ip);
2437 if (xfs_inode_has_attr_fork(ip) &&
2438 ip->i_af.if_format == XFS_DINODE_FMT_META_BTREE) {
2441 __func__, ip->i_ino, ip);
2454 ip->i_flushiter++;
2460 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
2461 xfs_ifork_verify_local_data(ip))
2463 if (xfs_inode_has_attr_fork(ip) &&
2464 ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
2465 xfs_ifork_verify_local_attr(ip))
2473 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
2477 if (ip->i_flushiter == DI_MAX_FLUSH)
2478 ip->i_flushiter = 0;
2481 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
2482 if (xfs_inode_has_attr_fork(ip))
2483 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
2520 xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
2543 struct xfs_inode *ip;
2554 ip = iip->ili_inode;
2559 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
2561 if (xfs_ipincount(ip))
2571 spin_lock(&ip->i_flags_lock);
2572 ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
2573 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
2574 spin_unlock(&ip->i_flags_lock);
2584 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
2585 spin_unlock(&ip->i_flags_lock);
2588 __xfs_iflags_set(ip, XFS_IFLUSHING);
2589 spin_unlock(&ip->i_flags_lock);
2599 xfs_iunpin_wait(ip);
2600 xfs_iflush_abort(ip);
2601 xfs_iunlock(ip, XFS_ILOCK_SHARED);
2607 if (xfs_ipincount(ip)) {
2608 xfs_iflags_clear(ip, XFS_IFLUSHING);
2609 xfs_iunlock(ip, XFS_ILOCK_SHARED);
2613 if (!xfs_inode_clean(ip))
2614 error = xfs_iflush(ip, bp);
2616 xfs_iflags_clear(ip, XFS_IFLUSHING);
2617 xfs_iunlock(ip, XFS_ILOCK_SHARED);
2652 struct xfs_inode *ip)
2654 trace_xfs_irele(ip, _RET_IP_);
2655 iput(VFS_I(ip));
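The final reference drop is fully visible in the two fragments above; assembled, it is just a traced handoff to the VFS:

    void
    xfs_irele(
            struct xfs_inode        *ip)
    {
            trace_xfs_irele(ip, _RET_IP_);
            iput(VFS_I(ip));
    }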
2663 struct xfs_inode *ip)
2667 xfs_ilock(ip, XFS_ILOCK_SHARED);
2668 if (xfs_ipincount(ip))
2669 seq = ip->i_itemp->ili_commit_seq;
2670 xfs_iunlock(ip, XFS_ILOCK_SHARED);
2674 return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
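Nearly all of xfs_log_force_inode() appears in fragments 2663-2674: sample the commit sequence under the shared ILOCK, drop the lock, then force the log to that sequence. Reconstructed; the return type and the early return when the inode is not pinned are assumed:

    int
    xfs_log_force_inode(
            struct xfs_inode        *ip)
    {
            xfs_csn_t               seq = 0;

            xfs_ilock(ip, XFS_ILOCK_SHARED);
            if (xfs_ipincount(ip))
                    seq = ip->i_itemp->ili_commit_seq;
            xfs_iunlock(ip, XFS_ILOCK_SHARED);

            if (!seq)
                    return 0;
            return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
    }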
2850 struct xfs_inode *ip)
2856 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2857 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2875 if (!xfs_inode_unlinked_incomplete(ip)) {
2883 trace_xfs_inode_reload_unlinked_bucket(ip);
2896 next_ip = ip;
2938 struct xfs_inode *ip)
2943 error = xfs_trans_alloc_empty(ip->i_mount, &tp);
2947 xfs_ilock(ip, XFS_ILOCK_SHARED);
2948 if (xfs_inode_unlinked_incomplete(ip))
2949 error = xfs_inode_reload_unlinked_bucket(tp, ip);
2950 xfs_iunlock(ip, XFS_ILOCK_SHARED);
2959 const struct xfs_inode *ip,
2966 switch (ip->i_vnode.i_mode & S_IFMT) {
2974 return ip->i_sick & (XFS_SICK_INO_BMBTD_ZAPPED | datamask);
2976 return ip->i_sick & XFS_SICK_INO_BMBTA_ZAPPED;
2986 struct xfs_inode *ip,
2990 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
2993 if (XFS_IS_REALTIME_INODE(ip))
2995 *dblocks = ip->i_nblocks - *rblocks;
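Fragments 2986-2995 show the data/realtime block-count split: for realtime inodes the data fork's mapped blocks count as rtblocks, and dblocks falls out by subtraction from i_nblocks. A sketch; the function name and the xfs_bmap_count_leaves() call are assumptions based on the upstream helper:

    void
    xfs_inode_count_blocks(
            struct xfs_trans        *tp,
            struct xfs_inode        *ip,
            xfs_filblks_t           *dblocks,
            xfs_filblks_t           *rblocks)
    {
            struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);

            *rblocks = 0;
            if (XFS_IS_REALTIME_INODE(ip))
                    xfs_bmap_count_leaves(ifp, rblocks);
            *dblocks = ip->i_nblocks - *rblocks;
    }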
3002 struct xfs_inode *ip = XFS_I(inode);
3004 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
3006 xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
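Fragments 3002-3006 drop and retake XFS_MMAPLOCK_EXCL so whoever holds a pinned DAX page can make progress before the layout-break retry. Reconstructed; the schedule() call between unlock and relock is an assumption from the upstream source:

    static void
    xfs_wait_dax_page(
            struct inode            *inode)
    {
            struct xfs_inode        *ip = XFS_I(inode);

            xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
            schedule();
            xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
    }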
3062 struct xfs_inode *ip)
3066 if (XFS_IS_REALTIME_INODE(ip))
3067 blocks = ip->i_mount->m_sb.sb_rextsize;
3069 return XFS_FSB_TO_B(ip->i_mount, blocks);
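The allocation-unit helper at fragments 3062-3069 is nearly complete: one filesystem block normally, the realtime extent size for realtime inodes, converted to bytes. Reconstructed (the function name, assumed here to be xfs_inode_alloc_unitsize, is not in the matched lines):

    unsigned int
    xfs_inode_alloc_unitsize(
            struct xfs_inode        *ip)
    {
            unsigned int            blocks = 1;

            if (XFS_IS_REALTIME_INODE(ip))
                    blocks = ip->i_mount->m_sb.sb_rextsize;
            return XFS_FSB_TO_B(ip->i_mount, blocks);
    }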
3075 const struct xfs_inode *ip)
3077 return ip->i_mount->m_always_cow && xfs_has_reflink(ip->i_mount);
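The last two fragments form a one-line predicate: always-COW behavior requires both the mount flag and reflink support. Assembled (the name xfs_is_always_cow_inode is an assumption; only the parameter and the return expression are in the matched lines):

    bool
    xfs_is_always_cow_inode(
            const struct xfs_inode  *ip)
    {
            return ip->i_mount->m_always_cow && xfs_has_reflink(ip->i_mount);
    }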