// SPDX-License-Identifier: GPL-2.0
/*
 * I/O and data path helper functionality.
 *
 * Borrowed from NFS Copyright (c) 2016 Trond Myklebust
 */

#include <linux/kernel.h>
#include <linux/netfs.h>
#include "internal.h"

/*
 * netfs_inode_dio_wait_interruptible - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually inode->i_rwsem held exclusively.
 */
static int netfs_inode_dio_wait_interruptible(struct inode *inode)
{
	if (inode_dio_finished(inode))
		return 0;

	inode_dio_wait_interruptible(inode);
	return !inode_dio_finished(inode) ? -ERESTARTSYS : 0;
}

/* Call with exclusively locked inode->i_rwsem */
static int netfs_block_o_direct(struct netfs_inode *ictx)
{
	if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags))
		return 0;
	clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
	return netfs_inode_dio_wait_interruptible(&ictx->inode);
}

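/*
 * Summary of the NETFS_ICTX_ODIRECT handling implemented below: the flag is
 * set by netfs_block_buffered() on behalf of netfs_start_io_direct(), and
 * cleared by netfs_block_o_direct() on behalf of netfs_start_io_read() and
 * netfs_start_io_write(), in both cases whilst inode->i_rwsem is held
 * exclusively.  The fast paths in netfs_start_io_read() and
 * netfs_start_io_direct() only test the flag under a shared hold of
 * inode->i_rwsem.
 */
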
/**
 * netfs_start_io_read - declare the file is being used for buffered reads
 * @inode: file inode
 *
 * Declare that a buffered read operation is about to start, and ensure
 * that we block all direct I/O.
 * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is unset,
 * and holds a shared lock on inode->i_rwsem to ensure that the flag
 * cannot be changed.
 * In practice, this means that buffered read operations are allowed to
 * execute in parallel, thanks to the shared lock, whereas direct I/O
 * operations need to wait to grab an exclusive lock in order to set
 * NETFS_ICTX_ODIRECT.
 * Note that truncates take an exclusive lock on inode->i_rwsem for their
 * whole duration and so are serialised against these reads, whereas
 * buffered writes (see netfs_start_io_write()) only hold it exclusively
 * while blocking direct I/O, before downgrading to a shared lock.
 */
int netfs_start_io_read(struct inode *inode)
	__acquires(inode->i_rwsem)
{
	struct netfs_inode *ictx = netfs_inode(inode);

	/* Be an optimist! */
	if (down_read_interruptible(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) == 0)
		return 0;
	up_read(&inode->i_rwsem);

	/* Slow path.... */
	if (down_write_killable(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (netfs_block_o_direct(ictx) < 0) {
		up_write(&inode->i_rwsem);
		return -ERESTARTSYS;
	}
	downgrade_write(&inode->i_rwsem);
	return 0;
}
EXPORT_SYMBOL(netfs_start_io_read);

/**
 * netfs_end_io_read - declare that the buffered read operation is done
 * @inode: file inode
 *
 * Declare that a buffered read operation is done, and release the shared
 * lock on inode->i_rwsem.
 */
void netfs_end_io_read(struct inode *inode)
	__releases(inode->i_rwsem)
{
	up_read(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_read);

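/*
 * Illustrative sketch (not part of this file): a network filesystem's
 * buffered read path would typically bracket the pagecache read with
 * netfs_start_io_read()/netfs_end_io_read().  The function below is
 * hypothetical and only shows the locking pattern; the actual read could be
 * done with filemap_read() or a netfs-specific helper.
 *
 *	static ssize_t example_buffered_read_iter(struct kiocb *iocb,
 *						  struct iov_iter *iter)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		ret = netfs_start_io_read(inode);	// shared i_rwsem; O_DIRECT excluded
 *		if (ret < 0)
 *			return ret;
 *		ret = filemap_read(iocb, iter, 0);	// read via the pagecache
 *		netfs_end_io_read(inode);		// drop the shared lock
 *		return ret;
 *	}
 */
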
/**
 * netfs_start_io_write - declare the file is being used for buffered writes
 * @inode: file inode
 *
 * Declare that a buffered write operation is about to start, and ensure
 * that we block all direct I/O.  On exit, the NETFS_ICTX_ODIRECT flag is
 * clear and a shared lock is held on inode->i_rwsem (taken exclusively to
 * block direct I/O, then downgraded).
 */
int netfs_start_io_write(struct inode *inode)
	__acquires(inode->i_rwsem)
{
	struct netfs_inode *ictx = netfs_inode(inode);

	if (down_write_killable(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (netfs_block_o_direct(ictx) < 0) {
		up_write(&inode->i_rwsem);
		return -ERESTARTSYS;
	}
	downgrade_write(&inode->i_rwsem);
	return 0;
}
EXPORT_SYMBOL(netfs_start_io_write);

/**
 * netfs_end_io_write - declare that the buffered write operation is done
 * @inode: file inode
 *
 * Declare that a buffered write operation is done, and release the shared
 * lock on inode->i_rwsem left held by netfs_start_io_write().
 */
void netfs_end_io_write(struct inode *inode)
	__releases(inode->i_rwsem)
{
	up_read(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_write);

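/*
 * Illustrative sketch (not part of this file): a buffered write path pairing
 * netfs_start_io_write()/netfs_end_io_write().  The function below is
 * hypothetical; the middle step would be whatever copies data into the
 * pagecache for the filesystem in question.
 *
 *	static ssize_t example_buffered_write_iter(struct kiocb *iocb,
 *						   struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		ret = netfs_start_io_write(inode);	// blocks O_DIRECT, then holds
 *							// i_rwsem shared (downgraded)
 *		if (ret < 0)
 *			return ret;
 *		ret = generic_perform_write(iocb, from);	// or a netfs-specific writer
 *		netfs_end_io_write(inode);		// up_read() the downgraded lock
 *		return ret;
 *	}
 */
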
/* Call with exclusively locked inode->i_rwsem */
static int netfs_block_buffered(struct inode *inode)
{
	struct netfs_inode *ictx = netfs_inode(inode);
	int ret;

	if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags)) {
		set_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
		if (inode->i_mapping->nrpages != 0) {
			/* Zap userspace mappings of the file and wait for any
			 * writeback already in flight to complete.
			 */
			unmap_mapping_range(inode->i_mapping, 0, 0, 0);
			ret = filemap_fdatawait(inode->i_mapping);
			if (ret < 0) {
				clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * netfs_start_io_direct - declare the file is being used for direct i/o
 * @inode: file inode
 *
 * Declare that a direct I/O operation is about to start, and ensure
 * that we block all buffered I/O.
 * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is set,
 * and holds a shared lock on inode->i_rwsem to ensure that the flag
 * cannot be changed.
 * In practice, this means that direct I/O operations are allowed to
 * execute in parallel, thanks to the shared lock, whereas buffered I/O
 * operations need to wait to grab an exclusive lock in order to clear
 * NETFS_ICTX_ODIRECT.
 * Note that truncates take an exclusive lock on inode->i_rwsem and so are
 * serialised against O_DIRECT, whereas buffered writes exclude direct I/O
 * by clearing NETFS_ICTX_ODIRECT under the exclusive lock (see
 * netfs_start_io_write()) before downgrading to a shared lock.
 */
int netfs_start_io_direct(struct inode *inode)
	__acquires(inode->i_rwsem)
{
	struct netfs_inode *ictx = netfs_inode(inode);
	int ret;

	/* Be an optimist! */
	if (down_read_interruptible(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) != 0)
		return 0;
	up_read(&inode->i_rwsem);

	/* Slow path.... */
	if (down_write_killable(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	ret = netfs_block_buffered(inode);
	if (ret < 0) {
		up_write(&inode->i_rwsem);
		return ret;
	}
	downgrade_write(&inode->i_rwsem);
	return 0;
}
EXPORT_SYMBOL(netfs_start_io_direct);

/**
 * netfs_end_io_direct - declare that the direct i/o operation is done
 * @inode: file inode
 *
 * Declare that a direct I/O operation is done, and release the shared
 * lock on inode->i_rwsem.
 */
void netfs_end_io_direct(struct inode *inode)
	__releases(inode->i_rwsem)
{
	up_read(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_direct);
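
/*
 * Illustrative sketch (not part of this file): a direct I/O path pairing
 * netfs_start_io_direct()/netfs_end_io_direct().  The function below is
 * hypothetical and only shows the locking; issuing the actual I/O to the
 * server is left as a placeholder.
 *
 *	static ssize_t example_dio_read_iter(struct kiocb *iocb,
 *					     struct iov_iter *iter)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		ret = netfs_start_io_direct(inode);	// shared i_rwsem;
 *							// NETFS_ICTX_ODIRECT set
 *		if (ret < 0)
 *			return ret;
 *		ret = 0;				// placeholder: issue the direct I/O here
 *		netfs_end_io_direct(inode);		// drop the shared lock
 *		return ret;
 *	}
 */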
206