// SPDX-License-Identifier: GPL-2.0
/*
 * I/O and data path helper functionality.
 *
 * Borrowed from NFS Copyright (c) 2016 Trond Myklebust
 */

#include <linux/kernel.h>
#include <linux/netfs.h>
#include "internal.h"

/*
 * netfs_inode_dio_wait_interruptible - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually inode->i_rwsem held exclusively.
 */
static int netfs_inode_dio_wait_interruptible(struct inode *inode)
{
	if (inode_dio_finished(inode))
		return 0;

	inode_dio_wait_interruptible(inode);
	return !inode_dio_finished(inode) ? -ERESTARTSYS : 0;
}

/* Call with exclusively locked inode->i_rwsem */
static int netfs_block_o_direct(struct netfs_inode *ictx)
{
	if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags))
		return 0;
	clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
	return netfs_inode_dio_wait_interruptible(&ictx->inode);
}

/**
 * netfs_start_io_read - declare the file is being used for buffered reads
 * @inode: file inode
 *
 * Declare that a buffered read operation is about to start, and ensure
 * that we block all direct I/O.
 * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is unset,
 * and holds a shared lock on inode->i_rwsem to ensure that the flag
 * cannot be changed.
 * In practice, this means that buffered read operations are allowed to
 * execute in parallel, thanks to the shared lock, whereas direct I/O
 * operations need to wait to grab an exclusive lock in order to set
 * NETFS_ICTX_ODIRECT.
 * Note that buffered writes and truncates both take a write lock on
 * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
 */
int netfs_start_io_read(struct inode *inode)
	__acquires(inode->i_rwsem)
{
	struct netfs_inode *ictx = netfs_inode(inode);

	/* Be an optimist! */
	if (down_read_interruptible(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) == 0)
		return 0;
	up_read(&inode->i_rwsem);

	/* Slow path.... */
	if (down_write_killable(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (netfs_block_o_direct(ictx) < 0) {
		up_write(&inode->i_rwsem);
		return -ERESTARTSYS;
	}
	downgrade_write(&inode->i_rwsem);
	return 0;
}
EXPORT_SYMBOL(netfs_start_io_read);

/**
 * netfs_end_io_read - declare that the buffered read operation is done
 * @inode: file inode
 *
 * Declare that a buffered read operation is done, and release the shared
 * lock on inode->i_rwsem.
 */
void netfs_end_io_read(struct inode *inode)
	__releases(inode->i_rwsem)
{
	up_read(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_read);
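
/*
 * Example (illustrative sketch only, not part of this file): a network
 * filesystem's ->read_iter() could bracket a buffered read with the two
 * helpers above.  myfs_file_read_iter() is a hypothetical name used purely
 * for illustration:
 *
 *	static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		ret = netfs_start_io_read(inode);
 *		if (ret < 0)
 *			return ret;
 *		ret = generic_file_read_iter(iocb, to);
 *		netfs_end_io_read(inode);
 *		return ret;
 *	}
 */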

/**
 * netfs_start_io_write - declare the file is being used for buffered writes
 * @inode: file inode
 *
 * Declare that a buffered write operation is about to start, and ensure
 * that we block all direct I/O.
 * On exit, the function holds an exclusive lock on inode->i_rwsem, which
 * serialises the write against buffered reads, other buffered writes and
 * direct I/O until netfs_end_io_write() is called.
 */
int netfs_start_io_write(struct inode *inode)
	__acquires(inode->i_rwsem)
{
	struct netfs_inode *ictx = netfs_inode(inode);

	if (down_write_killable(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (netfs_block_o_direct(ictx) < 0) {
		up_write(&inode->i_rwsem);
		return -ERESTARTSYS;
	}
	return 0;
}
EXPORT_SYMBOL(netfs_start_io_write);

/**
 * netfs_end_io_write - declare that the buffered write operation is done
 * @inode: file inode
 *
 * Declare that a buffered write operation is done, and release the
 * exclusive lock on inode->i_rwsem.
 */
void netfs_end_io_write(struct inode *inode)
	__releases(inode->i_rwsem)
{
	up_write(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_write);
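
/*
 * Example (illustrative sketch only): a buffered ->write_iter() path could
 * take the exclusive lock for the duration of the write.  The names
 * myfs_file_write_iter() and myfs_perform_write() are hypothetical and only
 * stand in for the filesystem's own buffered write code:
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		ret = netfs_start_io_write(inode);
 *		if (ret < 0)
 *			return ret;
 *		ret = myfs_perform_write(iocb, from);
 *		netfs_end_io_write(inode);
 *		return ret;
 *	}
 */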

/* Call with exclusively locked inode->i_rwsem */
static int netfs_block_buffered(struct inode *inode)
{
	struct netfs_inode *ictx = netfs_inode(inode);
	int ret;

	if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags)) {
		set_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
		if (inode->i_mapping->nrpages != 0) {
			unmap_mapping_range(inode->i_mapping, 0, 0, 0);
			ret = filemap_fdatawait(inode->i_mapping);
			if (ret < 0) {
				clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * netfs_start_io_direct - declare the file is being used for direct i/o
 * @inode: file inode
 *
 * Declare that a direct I/O operation is about to start, and ensure
 * that we block all buffered I/O.
 * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is set,
 * and holds a shared lock on inode->i_rwsem to ensure that the flag
 * cannot be changed.
 * In practice, this means that direct I/O operations are allowed to
 * execute in parallel, thanks to the shared lock, whereas buffered I/O
 * operations need to wait to grab an exclusive lock in order to clear
 * NETFS_ICTX_ODIRECT.
 * Note that buffered writes and truncates both take a write lock on
 * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
 */
int netfs_start_io_direct(struct inode *inode)
	__acquires(inode->i_rwsem)
{
	struct netfs_inode *ictx = netfs_inode(inode);
	int ret;

	/* Be an optimist! */
	if (down_read_interruptible(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) != 0)
		return 0;
	up_read(&inode->i_rwsem);

	/* Slow path.... */
	if (down_write_killable(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	ret = netfs_block_buffered(inode);
	if (ret < 0) {
		up_write(&inode->i_rwsem);
		return ret;
	}
	downgrade_write(&inode->i_rwsem);
	return 0;
}
EXPORT_SYMBOL(netfs_start_io_direct);

/**
 * netfs_end_io_direct - declare that the direct i/o operation is done
 * @inode: file inode
 *
 * Declare that a direct I/O operation is done, and release the shared
 * lock on inode->i_rwsem.
 */
void netfs_end_io_direct(struct inode *inode)
	__releases(inode->i_rwsem)
{
	up_read(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_direct);
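
/*
 * Example (illustrative sketch only): a direct I/O path could take the shared
 * "O_DIRECT" lock before issuing the request, so that concurrent direct I/O
 * proceeds in parallel while buffered I/O is excluded.  myfs_issue_dio() is a
 * hypothetical helper standing in for the filesystem's own direct I/O code:
 *
 *	ret = netfs_start_io_direct(inode);
 *	if (ret < 0)
 *		return ret;
 *	ret = myfs_issue_dio(iocb, iter);
 *	netfs_end_io_direct(inode);
 *	return ret;
 */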