xref: /linux/fs/smb/client/file.c (revision 75f99f8cf445d577132ed97514032d9a3d3e2758)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/filelock.h>
13 #include <linux/backing-dev.h>
14 #include <linux/stat.h>
15 #include <linux/fcntl.h>
16 #include <linux/pagemap.h>
17 #include <linux/pagevec.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifspdu.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
53 	struct TCP_Server_Info *server;
54 	struct cifsFileInfo *open_file = req->cfile;
55 	struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
56 	size_t wsize = req->rreq.wsize;
57 	int rc;
58 
59 	if (!wdata->have_xid) {
60 		wdata->xid = get_xid();
61 		wdata->have_xid = true;
62 	}
63 
64 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
65 	wdata->server = server;
66 
67 	if (cifs_sb->ctx->wsize == 0)
68 		cifs_negotiate_wsize(server, cifs_sb->ctx,
69 				     tlink_tcon(req->cfile->tlink));
70 
71 retry:
72 	if (open_file->invalidHandle) {
73 		rc = cifs_reopen_file(open_file, false);
74 		if (rc < 0) {
75 			if (rc == -EAGAIN)
76 				goto retry;
77 			subreq->error = rc;
78 			return netfs_prepare_write_failed(subreq);
79 		}
80 	}
81 
82 	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
83 					   &wdata->credits);
84 	if (rc < 0) {
85 		subreq->error = rc;
86 		return netfs_prepare_write_failed(subreq);
87 	}
88 
89 	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
90 	wdata->credits.rreq_debug_index = subreq->debug_index;
91 	wdata->credits.in_flight_check = 1;
92 	trace_smb3_rw_credits(wdata->rreq->debug_id,
93 			      wdata->subreq.debug_index,
94 			      wdata->credits.value,
95 			      server->credits, server->in_flight,
96 			      wdata->credits.value,
97 			      cifs_trace_rw_credits_write_prepare);
98 
99 #ifdef CONFIG_CIFS_SMB_DIRECT
100 	if (server->smbd_conn)
101 		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
102 #endif
103 }
104 
105 /*
106  * Issue a subrequest to upload to the server.
107  */
108 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
109 {
110 	struct cifs_io_subrequest *wdata =
111 		container_of(subreq, struct cifs_io_subrequest, subreq);
112 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
113 	int rc;
114 
115 	if (cifs_forced_shutdown(sbi)) {
116 		rc = -EIO;
117 		goto fail;
118 	}
119 
120 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
121 	if (rc)
122 		goto fail;
123 
124 	rc = -EAGAIN;
125 	if (wdata->req->cfile->invalidHandle)
126 		goto fail;
127 
128 	wdata->server->ops->async_writev(wdata);
129 out:
130 	return;
131 
132 fail:
133 	if (rc == -EAGAIN)
134 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
135 	else
136 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
137 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
138 	cifs_write_subrequest_terminated(wdata, rc);
139 	goto out;
140 }
141 
142 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
143 {
144 	cifs_invalidate_cache(wreq->inode, 0);
145 }
146 
147 /*
148  * Negotiate the size of a read operation on behalf of the netfs library.
149  */
150 static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
151 {
152 	struct netfs_io_request *rreq = subreq->rreq;
153 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
154 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
155 	struct TCP_Server_Info *server;
156 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
157 	size_t size;
158 	int rc = 0;
159 
160 	if (!rdata->have_xid) {
161 		rdata->xid = get_xid();
162 		rdata->have_xid = true;
163 	}
164 
165 	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
166 	rdata->server = server;
167 
168 	if (cifs_sb->ctx->rsize == 0)
169 		cifs_negotiate_rsize(server, cifs_sb->ctx,
170 				     tlink_tcon(req->cfile->tlink));
171 
172 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
173 					   &size, &rdata->credits);
174 	if (rc)
175 		return rc;
176 
177 	rreq->io_streams[0].sreq_max_len = size;
178 
179 	rdata->credits.in_flight_check = 1;
180 	rdata->credits.rreq_debug_id = rreq->debug_id;
181 	rdata->credits.rreq_debug_index = subreq->debug_index;
182 
183 	trace_smb3_rw_credits(rdata->rreq->debug_id,
184 			      rdata->subreq.debug_index,
185 			      rdata->credits.value,
186 			      server->credits, server->in_flight, 0,
187 			      cifs_trace_rw_credits_read_submit);
188 
189 #ifdef CONFIG_CIFS_SMB_DIRECT
190 	if (server->smbd_conn)
191 		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
192 #endif
193 	return 0;
194 }
195 
196 /*
197  * Issue a read operation on behalf of the netfs helper functions.  We're asked
198  * to make a read of a certain size at a point in the file.  We are permitted
199  * to only read a portion of that, but as long as we read something, the netfs
200  * helper will call us again so that we can issue another read.
201  */
202 static void cifs_issue_read(struct netfs_io_subrequest *subreq)
203 {
204 	struct netfs_io_request *rreq = subreq->rreq;
205 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
206 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
207 	struct TCP_Server_Info *server = rdata->server;
208 	int rc = 0;
209 
210 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
211 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
212 		 subreq->transferred, subreq->len);
213 
214 	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
215 	if (rc)
216 		goto failed;
217 
218 	if (req->cfile->invalidHandle) {
219 		do {
220 			rc = cifs_reopen_file(req->cfile, true);
221 		} while (rc == -EAGAIN);
222 		if (rc)
223 			goto failed;
224 	}
225 
226 	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
227 	    subreq->rreq->origin != NETFS_DIO_READ)
228 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
229 
230 	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
231 	rc = rdata->server->ops->async_readv(rdata);
232 	if (rc)
233 		goto failed;
234 	return;
235 
236 failed:
237 	subreq->error = rc;
238 	netfs_read_subreq_terminated(subreq);
239 }
240 
241 /*
242  * Writeback calls this when it finds a folio that needs uploading.  This isn't
243  * called if writeback only has copy-to-cache to deal with.
244  */
245 static void cifs_begin_writeback(struct netfs_io_request *wreq)
246 {
247 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
248 	int ret;
249 
250 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
251 	if (ret) {
252 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
253 		return;
254 	}
255 
256 	wreq->io_streams[0].avail = true;
257 }
258 
259 /*
260  * Initialise a request.
261  */
262 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
263 {
264 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
265 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
266 	struct cifsFileInfo *open_file = NULL;
267 
268 	rreq->rsize = cifs_sb->ctx->rsize;
269 	rreq->wsize = cifs_sb->ctx->wsize;
270 	req->pid = current->tgid; // NB: this may be called from a workqueue, so current may not be the opening task
271 
272 	if (file) {
273 		open_file = file->private_data;
274 		rreq->netfs_priv = file->private_data;
275 		req->cfile = cifsFileInfo_get(open_file);
276 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
277 			req->pid = req->cfile->pid;
278 	} else if (rreq->origin != NETFS_WRITEBACK) {
279 		WARN_ON_ONCE(1);
280 		return -EIO;
281 	}
282 
283 	return 0;
284 }
285 
286 /*
287  * Completion of a request operation.
288  */
289 static void cifs_rreq_done(struct netfs_io_request *rreq)
290 {
291 	struct timespec64 atime, mtime;
292 	struct inode *inode = rreq->inode;
293 
294 	/* we do not want atime to be less than mtime; that broke some apps */
295 	atime = inode_set_atime_to_ts(inode, current_time(inode));
296 	mtime = inode_get_mtime(inode);
297 	if (timespec64_compare(&atime, &mtime))
298 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
299 }
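/*
 * Editor's note (not in the original source): timespec64_compare() returns
 * non-zero whenever the two stamps differ, so the check above resets atime
 * to mtime on any mismatch, not only when atime < mtime.
 */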
300 
301 static void cifs_free_request(struct netfs_io_request *rreq)
302 {
303 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
304 
305 	if (req->cfile)
306 		cifsFileInfo_put(req->cfile);
307 }
308 
309 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
310 {
311 	struct cifs_io_subrequest *rdata =
312 		container_of(subreq, struct cifs_io_subrequest, subreq);
313 	int rc = subreq->error;
314 
315 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
316 #ifdef CONFIG_CIFS_SMB_DIRECT
317 		if (rdata->mr) {
318 			smbd_deregister_mr(rdata->mr);
319 			rdata->mr = NULL;
320 		}
321 #endif
322 	}
323 
324 	if (rdata->credits.value != 0) {
325 		trace_smb3_rw_credits(rdata->rreq->debug_id,
326 				      rdata->subreq.debug_index,
327 				      rdata->credits.value,
328 				      rdata->server ? rdata->server->credits : 0,
329 				      rdata->server ? rdata->server->in_flight : 0,
330 				      -rdata->credits.value,
331 				      cifs_trace_rw_credits_free_subreq);
332 		if (rdata->server)
333 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
334 		else
335 			rdata->credits.value = 0;
336 	}
337 
338 	if (rdata->have_xid)
339 		free_xid(rdata->xid);
340 }
341 
342 const struct netfs_request_ops cifs_req_ops = {
343 	.request_pool		= &cifs_io_request_pool,
344 	.subrequest_pool	= &cifs_io_subrequest_pool,
345 	.init_request		= cifs_init_request,
346 	.free_request		= cifs_free_request,
347 	.free_subrequest	= cifs_free_subrequest,
348 	.prepare_read		= cifs_prepare_read,
349 	.issue_read		= cifs_issue_read,
350 	.done			= cifs_rreq_done,
351 	.begin_writeback	= cifs_begin_writeback,
352 	.prepare_write		= cifs_prepare_write,
353 	.issue_write		= cifs_issue_write,
354 	.invalidate_cache	= cifs_netfs_invalidate_cache,
355 };
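/*
 * Editor's sketch (not part of the source): for a buffered read, the netfs
 * library roughly drives the table above as follows -- assuming the usual
 * netfs calling order rather than any cifs-specific behaviour:
 *
 *	cifs_init_request()                     once per netfs request
 *	loop:
 *		cifs_prepare_read()             negotiate size / credits
 *		cifs_issue_read()               fire the SMB read
 *	cifs_rreq_done()                        request-level completion
 *	cifs_free_subrequest() / cifs_free_request()
 *
 * Writeback follows the same shape via cifs_begin_writeback(),
 * cifs_prepare_write() and cifs_issue_write(); splitting and retrying of
 * subrequests live in the netfs core, not here.
 */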
356 
357 /*
358  * Mark all open files on the tree connection as invalid, since they
359  * were closed when the session to the server was lost.
360  */
361 void
362 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
363 {
364 	struct cifsFileInfo *open_file = NULL;
365 	struct list_head *tmp;
366 	struct list_head *tmp1;
367 
368 	/* only send once per connect */
369 	spin_lock(&tcon->tc_lock);
370 	if (tcon->need_reconnect)
371 		tcon->status = TID_NEED_RECON;
372 
373 	if (tcon->status != TID_NEED_RECON) {
374 		spin_unlock(&tcon->tc_lock);
375 		return;
376 	}
377 	tcon->status = TID_IN_FILES_INVALIDATE;
378 	spin_unlock(&tcon->tc_lock);
379 
380 	/* list all files open on tree connection and mark them invalid */
381 	spin_lock(&tcon->open_file_lock);
382 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
383 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
384 		open_file->invalidHandle = true;
385 		open_file->oplock_break_cancelled = true;
386 	}
387 	spin_unlock(&tcon->open_file_lock);
388 
389 	invalidate_all_cached_dirs(tcon);
390 	spin_lock(&tcon->tc_lock);
391 	if (tcon->status == TID_IN_FILES_INVALIDATE)
392 		tcon->status = TID_NEED_TCON;
393 	spin_unlock(&tcon->tc_lock);
394 
395 	/*
396 	 * BB Add call to evict_inodes(sb) for all superblocks mounted
397 	 * to this tcon.
398 	 */
399 }
400 
401 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
402 {
403 	if ((flags & O_ACCMODE) == O_RDONLY)
404 		return GENERIC_READ;
405 	else if ((flags & O_ACCMODE) == O_WRONLY)
406 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
407 	else if ((flags & O_ACCMODE) == O_RDWR) {
408 		/* GENERIC_ALL is too much permission to request; it can
409 		   cause unnecessary access-denied errors on create */
410 		/* return GENERIC_ALL; */
411 		return (GENERIC_READ | GENERIC_WRITE);
412 	}
413 
414 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
415 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
416 		FILE_READ_DATA);
417 }
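/*
 * Editor's illustration (derived from the mapping above, not in the
 * original source):
 *
 *	cifs_convert_flags(O_RDONLY, 0) == GENERIC_READ
 *	cifs_convert_flags(O_WRONLY, 0) == GENERIC_WRITE
 *	cifs_convert_flags(O_WRONLY, 1) == GENERIC_READ | GENERIC_WRITE
 *	cifs_convert_flags(O_RDWR,   0) == GENERIC_READ | GENERIC_WRITE
 *
 * The rdwr_for_fscache == 1 case asks for read access on a write-only open
 * so the local cache can be filled in around partial writes.
 */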
418 
419 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
420 static u32 cifs_posix_convert_flags(unsigned int flags)
421 {
422 	u32 posix_flags = 0;
423 
424 	if ((flags & O_ACCMODE) == O_RDONLY)
425 		posix_flags = SMB_O_RDONLY;
426 	else if ((flags & O_ACCMODE) == O_WRONLY)
427 		posix_flags = SMB_O_WRONLY;
428 	else if ((flags & O_ACCMODE) == O_RDWR)
429 		posix_flags = SMB_O_RDWR;
430 
431 	if (flags & O_CREAT) {
432 		posix_flags |= SMB_O_CREAT;
433 		if (flags & O_EXCL)
434 			posix_flags |= SMB_O_EXCL;
435 	} else if (flags & O_EXCL)
436 		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
437 			 current->comm, current->tgid);
438 
439 	if (flags & O_TRUNC)
440 		posix_flags |= SMB_O_TRUNC;
441 	/* be safe and imply O_SYNC for O_DSYNC */
442 	if (flags & O_DSYNC)
443 		posix_flags |= SMB_O_SYNC;
444 	if (flags & O_DIRECTORY)
445 		posix_flags |= SMB_O_DIRECTORY;
446 	if (flags & O_NOFOLLOW)
447 		posix_flags |= SMB_O_NOFOLLOW;
448 	if (flags & O_DIRECT)
449 		posix_flags |= SMB_O_DIRECT;
450 
451 	return posix_flags;
452 }
453 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
454 
455 static inline int cifs_get_disposition(unsigned int flags)
456 {
457 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
458 		return FILE_CREATE;
459 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
460 		return FILE_OVERWRITE_IF;
461 	else if ((flags & O_CREAT) == O_CREAT)
462 		return FILE_OPEN_IF;
463 	else if ((flags & O_TRUNC) == O_TRUNC)
464 		return FILE_OVERWRITE;
465 	else
466 		return FILE_OPEN;
467 }
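/*
 * Editor's illustration (derived from the function above, not in the
 * original source):
 *
 *	open(path, O_CREAT|O_EXCL)  -> FILE_CREATE       (fail if it exists)
 *	open(path, O_CREAT|O_TRUNC) -> FILE_OVERWRITE_IF (create or truncate)
 *	open(path, O_CREAT)         -> FILE_OPEN_IF      (create or open)
 *	open(path, O_TRUNC)         -> FILE_OVERWRITE    (must exist; truncate)
 *	open(path, 0)               -> FILE_OPEN         (must exist)
 */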
468 
469 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
470 int cifs_posix_open(const char *full_path, struct inode **pinode,
471 			struct super_block *sb, int mode, unsigned int f_flags,
472 			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
473 {
474 	int rc;
475 	FILE_UNIX_BASIC_INFO *presp_data;
476 	__u32 posix_flags = 0;
477 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
478 	struct cifs_fattr fattr;
479 	struct tcon_link *tlink;
480 	struct cifs_tcon *tcon;
481 
482 	cifs_dbg(FYI, "posix open %s\n", full_path);
483 
484 	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
485 	if (presp_data == NULL)
486 		return -ENOMEM;
487 
488 	tlink = cifs_sb_tlink(cifs_sb);
489 	if (IS_ERR(tlink)) {
490 		rc = PTR_ERR(tlink);
491 		goto posix_open_ret;
492 	}
493 
494 	tcon = tlink_tcon(tlink);
495 	mode &= ~current_umask();
496 
497 	posix_flags = cifs_posix_convert_flags(f_flags);
498 	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
499 			     poplock, full_path, cifs_sb->local_nls,
500 			     cifs_remap(cifs_sb));
501 	cifs_put_tlink(tlink);
502 
503 	if (rc)
504 		goto posix_open_ret;
505 
506 	if (presp_data->Type == cpu_to_le32(-1))
507 		goto posix_open_ret; /* open ok, caller does qpathinfo */
508 
509 	if (!pinode)
510 		goto posix_open_ret; /* caller does not need info */
511 
512 	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
513 
514 	/* get new inode and set it up */
515 	if (*pinode == NULL) {
516 		cifs_fill_uniqueid(sb, &fattr);
517 		*pinode = cifs_iget(sb, &fattr);
518 		if (!*pinode) {
519 			rc = -ENOMEM;
520 			goto posix_open_ret;
521 		}
522 	} else {
523 		cifs_revalidate_mapping(*pinode);
524 		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
525 	}
526 
527 posix_open_ret:
528 	kfree(presp_data);
529 	return rc;
530 }
531 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
532 
533 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
534 			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
535 			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
536 {
537 	int rc;
538 	int desired_access;
539 	int disposition;
540 	int create_options = CREATE_NOT_DIR;
541 	struct TCP_Server_Info *server = tcon->ses->server;
542 	struct cifs_open_parms oparms;
543 	int rdwr_for_fscache = 0;
544 
545 	if (!server->ops->open)
546 		return -ENOSYS;
547 
548 	/* If we're caching, we need to be able to fill in around partial writes. */
549 	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
550 		rdwr_for_fscache = 1;
551 
552 	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
553 
554 /*********************************************************************
555  *  open flag mapping table:
556  *
557  *	POSIX Flag            CIFS Disposition
558  *	----------            ----------------
559  *	O_CREAT               FILE_OPEN_IF
560  *	O_CREAT | O_EXCL      FILE_CREATE
561  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
562  *	O_TRUNC               FILE_OVERWRITE
563  *	none of the above     FILE_OPEN
564  *
565  *	Note that no POSIX flag combination maps to the disposition
566  *	FILE_SUPERSEDE (ie create whether or not the file exists).
567  *	O_CREAT | O_TRUNC is similar, but it truncates the existing
568  *	file rather than creating a new one as FILE_SUPERSEDE does
569  *	(which uses the attributes / metadata passed in on the open call).
570  *
571  *	O_SYNC is a reasonable match to the CIFS writethrough flag
572  *	and the read/write flags match reasonably.  O_LARGEFILE
573  *	is irrelevant because largefile support is always used
574  *	by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
575  *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
576  *********************************************************************/
577 
578 	disposition = cifs_get_disposition(f_flags);
579 
580 	/* BB pass O_SYNC flag through on file attributes .. BB */
581 
582 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
583 	if (f_flags & O_SYNC)
584 		create_options |= CREATE_WRITE_THROUGH;
585 
586 	if (f_flags & O_DIRECT)
587 		create_options |= CREATE_NO_BUFFER;
588 
589 retry_open:
590 	oparms = (struct cifs_open_parms) {
591 		.tcon = tcon,
592 		.cifs_sb = cifs_sb,
593 		.desired_access = desired_access,
594 		.create_options = cifs_create_options(cifs_sb, create_options),
595 		.disposition = disposition,
596 		.path = full_path,
597 		.fid = fid,
598 	};
599 
600 	rc = server->ops->open(xid, &oparms, oplock, buf);
601 	if (rc) {
602 		if (rc == -EACCES && rdwr_for_fscache == 1) {
603 			desired_access = cifs_convert_flags(f_flags, 0);
604 			rdwr_for_fscache = 2;
605 			goto retry_open;
606 		}
607 		return rc;
608 	}
609 	if (rdwr_for_fscache == 2)
610 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
611 
612 	/* TODO: Add support for calling posix query info, passing in the fid */
613 	if (tcon->unix_ext)
614 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
615 					      xid);
616 	else
617 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
618 					 xid, fid);
619 
620 	if (rc) {
621 		server->ops->close(xid, tcon, fid);
622 		if (rc == -ESTALE)
623 			rc = -EOPENSTALE;
624 	}
625 
626 	return rc;
627 }
628 
629 static bool
630 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
631 {
632 	struct cifs_fid_locks *cur;
633 	bool has_locks = false;
634 
635 	down_read(&cinode->lock_sem);
636 	list_for_each_entry(cur, &cinode->llist, llist) {
637 		if (!list_empty(&cur->locks)) {
638 			has_locks = true;
639 			break;
640 		}
641 	}
642 	up_read(&cinode->lock_sem);
643 	return has_locks;
644 }
645 
646 void
647 cifs_down_write(struct rw_semaphore *sem)
648 {
649 	while (!down_write_trylock(sem))
650 		msleep(10);
651 }
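/*
 * Editor's illustration (taken from cifs_lock_add() later in this file):
 * writers of cinode->lock_sem take it through the polling helper above and
 * pair it with a plain up_write():
 *
 *	cifs_down_write(&cinode->lock_sem);
 *	list_add_tail(&lock->llist, &cfile->llist->locks);
 *	up_write(&cinode->lock_sem);
 */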
652 
653 static void cifsFileInfo_put_work(struct work_struct *work);
654 void serverclose_work(struct work_struct *work);
655 
656 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
657 				       struct tcon_link *tlink, __u32 oplock,
658 				       const char *symlink_target)
659 {
660 	struct dentry *dentry = file_dentry(file);
661 	struct inode *inode = d_inode(dentry);
662 	struct cifsInodeInfo *cinode = CIFS_I(inode);
663 	struct cifsFileInfo *cfile;
664 	struct cifs_fid_locks *fdlocks;
665 	struct cifs_tcon *tcon = tlink_tcon(tlink);
666 	struct TCP_Server_Info *server = tcon->ses->server;
667 
668 	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
669 	if (cfile == NULL)
670 		return cfile;
671 
672 	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
673 	if (!fdlocks) {
674 		kfree(cfile);
675 		return NULL;
676 	}
677 
678 	if (symlink_target) {
679 		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
680 		if (!cfile->symlink_target) {
681 			kfree(fdlocks);
682 			kfree(cfile);
683 			return NULL;
684 		}
685 	}
686 
687 	INIT_LIST_HEAD(&fdlocks->locks);
688 	fdlocks->cfile = cfile;
689 	cfile->llist = fdlocks;
690 
691 	cfile->count = 1;
692 	cfile->pid = current->tgid;
693 	cfile->uid = current_fsuid();
694 	cfile->dentry = dget(dentry);
695 	cfile->f_flags = file->f_flags;
696 	cfile->invalidHandle = false;
697 	cfile->deferred_close_scheduled = false;
698 	cfile->tlink = cifs_get_tlink(tlink);
699 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
700 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
701 	INIT_WORK(&cfile->serverclose, serverclose_work);
702 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
703 	mutex_init(&cfile->fh_mutex);
704 	spin_lock_init(&cfile->file_info_lock);
705 
706 	cifs_sb_active(inode->i_sb);
707 
708 	/*
709 	 * If the server returned a read oplock and we have mandatory brlocks,
710 	 * set oplock level to None.
711 	 */
712 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
713 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
714 		oplock = 0;
715 	}
716 
717 	cifs_down_write(&cinode->lock_sem);
718 	list_add(&fdlocks->llist, &cinode->llist);
719 	up_write(&cinode->lock_sem);
720 
721 	spin_lock(&tcon->open_file_lock);
722 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
723 		oplock = fid->pending_open->oplock;
724 	list_del(&fid->pending_open->olist);
725 
726 	fid->purge_cache = false;
727 	server->ops->set_fid(cfile, fid, oplock);
728 
729 	list_add(&cfile->tlist, &tcon->openFileList);
730 	atomic_inc(&tcon->num_local_opens);
731 
732 	/* if readable file instance, put it first in the list */
733 	spin_lock(&cinode->open_file_lock);
734 	if (file->f_mode & FMODE_READ)
735 		list_add(&cfile->flist, &cinode->openFileList);
736 	else
737 		list_add_tail(&cfile->flist, &cinode->openFileList);
738 	spin_unlock(&cinode->open_file_lock);
739 	spin_unlock(&tcon->open_file_lock);
740 
741 	if (fid->purge_cache)
742 		cifs_zap_mapping(inode);
743 
744 	file->private_data = cfile;
745 	return cfile;
746 }
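/*
 * Editor's note: fid->purge_cache is cleared just before the
 * server->ops->set_fid() call above and tested just after it because the
 * set_fid() implementation may set it -- presumably (an assumption, not
 * stated in this file) when the resulting oplock state requires the
 * pagecache to be zapped.
 */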
747 
748 struct cifsFileInfo *
749 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
750 {
751 	spin_lock(&cifs_file->file_info_lock);
752 	cifsFileInfo_get_locked(cifs_file);
753 	spin_unlock(&cifs_file->file_info_lock);
754 	return cifs_file;
755 }
756 
757 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
758 {
759 	struct inode *inode = d_inode(cifs_file->dentry);
760 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
761 	struct cifsLockInfo *li, *tmp;
762 	struct super_block *sb = inode->i_sb;
763 
764 	/*
765 	 * Delete any outstanding lock records. We'll lose them when the file
766 	 * is closed anyway.
767 	 */
768 	cifs_down_write(&cifsi->lock_sem);
769 	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
770 		list_del(&li->llist);
771 		cifs_del_lock_waiters(li);
772 		kfree(li);
773 	}
774 	list_del(&cifs_file->llist->llist);
775 	kfree(cifs_file->llist);
776 	up_write(&cifsi->lock_sem);
777 
778 	cifs_put_tlink(cifs_file->tlink);
779 	dput(cifs_file->dentry);
780 	cifs_sb_deactive(sb);
781 	kfree(cifs_file->symlink_target);
782 	kfree(cifs_file);
783 }
784 
785 static void cifsFileInfo_put_work(struct work_struct *work)
786 {
787 	struct cifsFileInfo *cifs_file = container_of(work,
788 			struct cifsFileInfo, put);
789 
790 	cifsFileInfo_put_final(cifs_file);
791 }
792 
793 void serverclose_work(struct work_struct *work)
794 {
795 	struct cifsFileInfo *cifs_file = container_of(work,
796 			struct cifsFileInfo, serverclose);
797 
798 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
799 
800 	struct TCP_Server_Info *server = tcon->ses->server;
801 	int rc = 0;
802 	int retries = 0;
803 	int MAX_RETRIES = 4;
804 
805 	do {
806 		if (server->ops->close_getattr)
807 			rc = server->ops->close_getattr(0, tcon, cifs_file);
808 		else if (server->ops->close)
809 			rc = server->ops->close(0, tcon, &cifs_file->fid);
810 
811 		if (rc == -EBUSY || rc == -EAGAIN) {
812 			retries++;
813 			msleep(250);
814 		}
815 	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));
816 
817 
818 	if (retries == MAX_RETRIES)
819 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
820 
821 	if (cifs_file->offload)
822 		queue_work(fileinfo_put_wq, &cifs_file->put);
823 	else
824 		cifsFileInfo_put_final(cifs_file);
825 }
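/*
 * Editor's note: the worker above retries the server-side close at most
 * MAX_RETRIES (4) times, sleeping 250 ms between attempts on -EBUSY or
 * -EAGAIN, and frees the cifsFileInfo either way -- via fileinfo_put_wq
 * when ->offload is set, or synchronously otherwise.
 */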
826 
827 /**
828  * cifsFileInfo_put - release a reference of file priv data
829  *
830  * Always potentially wait for oplock handler. See _cifsFileInfo_put().
831  *
832  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
833  */
834 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
835 {
836 	_cifsFileInfo_put(cifs_file, true, true);
837 }
838 
839 /**
840  * _cifsFileInfo_put - release a reference of file priv data
841  *
842  * This may involve closing the filehandle @cifs_file out on the
843  * server. Must be called without holding tcon->open_file_lock,
844  * cinode->open_file_lock and cifs_file->file_info_lock.
845  *
846  * If @wait_for_oplock_handler is true and we are releasing the last
847  * reference, wait for any running oplock break handler of the file
848  * and cancel any pending one.
849  *
850  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
851  * @wait_oplock_handler: must be false if called from oplock_break_handler
852  * @offload:	if true, defer the final put to a work queue (false on close and oplock breaks)
853  *
854  */
855 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
856 		       bool wait_oplock_handler, bool offload)
857 {
858 	struct inode *inode = d_inode(cifs_file->dentry);
859 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
860 	struct TCP_Server_Info *server = tcon->ses->server;
861 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
862 	struct super_block *sb = inode->i_sb;
863 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
864 	struct cifs_fid fid = {};
865 	struct cifs_pending_open open;
866 	bool oplock_break_cancelled;
867 	bool serverclose_offloaded = false;
868 
869 	spin_lock(&tcon->open_file_lock);
870 	spin_lock(&cifsi->open_file_lock);
871 	spin_lock(&cifs_file->file_info_lock);
872 
873 	cifs_file->offload = offload;
874 	if (--cifs_file->count > 0) {
875 		spin_unlock(&cifs_file->file_info_lock);
876 		spin_unlock(&cifsi->open_file_lock);
877 		spin_unlock(&tcon->open_file_lock);
878 		return;
879 	}
880 	spin_unlock(&cifs_file->file_info_lock);
881 
882 	if (server->ops->get_lease_key)
883 		server->ops->get_lease_key(inode, &fid);
884 
885 	/* store open in pending opens to make sure we don't miss lease break */
886 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
887 
888 	/* remove it from the lists */
889 	list_del(&cifs_file->flist);
890 	list_del(&cifs_file->tlist);
891 	atomic_dec(&tcon->num_local_opens);
892 
893 	if (list_empty(&cifsi->openFileList)) {
894 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
895 			 d_inode(cifs_file->dentry));
896 		/*
897 		 * In strict cache mode we need to invalidate the mapping on the
898 		 * last close, because otherwise it may cause an error when we
899 		 * open this file again and get at least a level II oplock.
900 		 */
901 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
902 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
903 		cifs_set_oplock_level(cifsi, 0);
904 	}
905 
906 	spin_unlock(&cifsi->open_file_lock);
907 	spin_unlock(&tcon->open_file_lock);
908 
909 	oplock_break_cancelled = wait_oplock_handler ?
910 		cancel_work_sync(&cifs_file->oplock_break) : false;
911 
912 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
913 		struct TCP_Server_Info *server = tcon->ses->server;
914 		unsigned int xid;
915 		int rc = 0;
916 
917 		xid = get_xid();
918 		if (server->ops->close_getattr)
919 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
920 		else if (server->ops->close)
921 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
922 		_free_xid(xid);
923 
924 		if (rc == -EBUSY || rc == -EAGAIN) {
925 			// Server close failed, hence offloading it as an async op
926 			queue_work(serverclose_wq, &cifs_file->serverclose);
927 			serverclose_offloaded = true;
928 		}
929 	}
930 
931 	if (oplock_break_cancelled)
932 		cifs_done_oplock_break(cifsi);
933 
934 	cifs_del_pending_open(&open);
935 
936 	// If the server close has been offloaded to the workqueue (on failure),
937 	// it will handle offloading the put as well. If the server close was not
938 	// offloaded, we need to handle offloading the put here.
939 	if (!serverclose_offloaded) {
940 		if (offload)
941 			queue_work(fileinfo_put_wq, &cifs_file->put);
942 		else
943 			cifsFileInfo_put_final(cifs_file);
944 	}
945 }
946 
947 int cifs_open(struct inode *inode, struct file *file)
948 
949 {
950 	int rc = -EACCES;
951 	unsigned int xid;
952 	__u32 oplock;
953 	struct cifs_sb_info *cifs_sb;
954 	struct TCP_Server_Info *server;
955 	struct cifs_tcon *tcon;
956 	struct tcon_link *tlink;
957 	struct cifsFileInfo *cfile = NULL;
958 	void *page;
959 	const char *full_path;
960 	bool posix_open_ok = false;
961 	struct cifs_fid fid = {};
962 	struct cifs_pending_open open;
963 	struct cifs_open_info_data data = {};
964 
965 	xid = get_xid();
966 
967 	cifs_sb = CIFS_SB(inode->i_sb);
968 	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
969 		free_xid(xid);
970 		return -EIO;
971 	}
972 
973 	tlink = cifs_sb_tlink(cifs_sb);
974 	if (IS_ERR(tlink)) {
975 		free_xid(xid);
976 		return PTR_ERR(tlink);
977 	}
978 	tcon = tlink_tcon(tlink);
979 	server = tcon->ses->server;
980 
981 	page = alloc_dentry_path();
982 	full_path = build_path_from_dentry(file_dentry(file), page);
983 	if (IS_ERR(full_path)) {
984 		rc = PTR_ERR(full_path);
985 		goto out;
986 	}
987 
988 	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
989 		 inode, file->f_flags, full_path);
990 
991 	if (file->f_flags & O_DIRECT &&
992 	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
993 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
994 			file->f_op = &cifs_file_direct_nobrl_ops;
995 		else
996 			file->f_op = &cifs_file_direct_ops;
997 	}
998 
999 	/* Get the cached handle as SMB2 close is deferred */
1000 	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
1001 		rc = cifs_get_writable_path(tcon, full_path, FIND_WR_FSUID_ONLY, &cfile);
1002 	} else {
1003 		rc = cifs_get_readable_path(tcon, full_path, &cfile);
1004 	}
1005 	if (rc == 0) {
1006 		unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
1007 		unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
1008 
1009 		if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) &&
1010 		    (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) {
1011 			file->private_data = cfile;
1012 			spin_lock(&CIFS_I(inode)->deferred_lock);
1013 			cifs_del_deferred_close(cfile);
1014 			spin_unlock(&CIFS_I(inode)->deferred_lock);
1015 			goto use_cache;
1016 		}
1017 		_cifsFileInfo_put(cfile, true, false);
1018 	} else {
1019 		/* hard link on the deferred-close file */
1020 		rc = cifs_get_hardlink_path(tcon, inode, file);
1021 		if (rc)
1022 			cifs_close_deferred_file(CIFS_I(inode));
1023 	}
1024 
1025 	if (server->oplocks)
1026 		oplock = REQ_OPLOCK;
1027 	else
1028 		oplock = 0;
1029 
1030 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1031 	if (!tcon->broken_posix_open && tcon->unix_ext &&
1032 	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1033 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1034 		/* can not refresh inode info since size could be stale */
1035 		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
1036 				cifs_sb->ctx->file_mode /* ignored */,
1037 				file->f_flags, &oplock, &fid.netfid, xid);
1038 		if (rc == 0) {
1039 			cifs_dbg(FYI, "posix open succeeded\n");
1040 			posix_open_ok = true;
1041 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1042 			if (tcon->ses->serverNOS)
1043 				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
1044 					 tcon->ses->ip_addr,
1045 					 tcon->ses->serverNOS);
1046 			tcon->broken_posix_open = true;
1047 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
1048 			 (rc != -EOPNOTSUPP)) /* path not found or net err */
1049 			goto out;
1050 		/*
1051 		 * Else fall through to retry the open the old way on network
1052 		 * I/O or DFS errors.
1053 		 */
1054 	}
1055 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1056 
1057 	if (server->ops->get_lease_key)
1058 		server->ops->get_lease_key(inode, &fid);
1059 
1060 	cifs_add_pending_open(&fid, tlink, &open);
1061 
1062 	if (!posix_open_ok) {
1063 		if (server->ops->get_lease_key)
1064 			server->ops->get_lease_key(inode, &fid);
1065 
1066 		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1067 				  xid, &data);
1068 		if (rc) {
1069 			cifs_del_pending_open(&open);
1070 			goto out;
1071 		}
1072 	}
1073 
1074 	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1075 	if (cfile == NULL) {
1076 		if (server->ops->close)
1077 			server->ops->close(xid, tcon, &fid);
1078 		cifs_del_pending_open(&open);
1079 		rc = -ENOMEM;
1080 		goto out;
1081 	}
1082 
1083 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1084 	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1085 		/*
1086 		 * Time to set the mode, which we could not set earlier due to
1087 		 * problems creating new read-only files.
1088 		 */
1089 		struct cifs_unix_set_info_args args = {
1090 			.mode	= inode->i_mode,
1091 			.uid	= INVALID_UID, /* no change */
1092 			.gid	= INVALID_GID, /* no change */
1093 			.ctime	= NO_CHANGE_64,
1094 			.atime	= NO_CHANGE_64,
1095 			.mtime	= NO_CHANGE_64,
1096 			.device	= 0,
1097 		};
1098 		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1099 				       cfile->pid);
1100 	}
1101 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1102 
1103 use_cache:
1104 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1105 			   file->f_mode & FMODE_WRITE);
1106 	if (!(file->f_flags & O_DIRECT))
1107 		goto out;
1108 	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1109 		goto out;
1110 	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1111 
1112 out:
1113 	free_dentry_path(page);
1114 	free_xid(xid);
1115 	cifs_put_tlink(tlink);
1116 	cifs_free_open_info(&data);
1117 	return rc;
1118 }
1119 
1120 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1121 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1122 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1123 
1124 /*
1125  * Try to reacquire byte range locks that were released when session
1126  * to server was lost.
1127  */
1128 static int
1129 cifs_relock_file(struct cifsFileInfo *cfile)
1130 {
1131 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1132 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1133 	int rc = 0;
1134 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1135 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1136 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1137 
1138 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1139 	if (cinode->can_cache_brlcks) {
1140 		/* can cache locks - no need to relock */
1141 		up_read(&cinode->lock_sem);
1142 		return rc;
1143 	}
1144 
1145 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1146 	if (cap_unix(tcon->ses) &&
1147 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1148 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1149 		rc = cifs_push_posix_locks(cfile);
1150 	else
1151 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1152 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1153 
1154 	up_read(&cinode->lock_sem);
1155 	return rc;
1156 }
1157 
1158 static int
1159 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1160 {
1161 	int rc = -EACCES;
1162 	unsigned int xid;
1163 	__u32 oplock;
1164 	struct cifs_sb_info *cifs_sb;
1165 	struct cifs_tcon *tcon;
1166 	struct TCP_Server_Info *server;
1167 	struct cifsInodeInfo *cinode;
1168 	struct inode *inode;
1169 	void *page;
1170 	const char *full_path;
1171 	int desired_access;
1172 	int disposition = FILE_OPEN;
1173 	int create_options = CREATE_NOT_DIR;
1174 	struct cifs_open_parms oparms;
1175 	int rdwr_for_fscache = 0;
1176 
1177 	xid = get_xid();
1178 	mutex_lock(&cfile->fh_mutex);
1179 	if (!cfile->invalidHandle) {
1180 		mutex_unlock(&cfile->fh_mutex);
1181 		free_xid(xid);
1182 		return 0;
1183 	}
1184 
1185 	inode = d_inode(cfile->dentry);
1186 	cifs_sb = CIFS_SB(inode->i_sb);
1187 	tcon = tlink_tcon(cfile->tlink);
1188 	server = tcon->ses->server;
1189 
1190 	/*
1191 	 * Cannot grab the rename sem here, because various ops, including
1192 	 * those that already hold the rename sem, can end up causing writepage
1193 	 * to get called; if the server was down, that means we end up here, and
1194 	 * we can never tell whether the caller already holds the rename_sem.
1195 	 */
1196 	page = alloc_dentry_path();
1197 	full_path = build_path_from_dentry(cfile->dentry, page);
1198 	if (IS_ERR(full_path)) {
1199 		mutex_unlock(&cfile->fh_mutex);
1200 		free_dentry_path(page);
1201 		free_xid(xid);
1202 		return PTR_ERR(full_path);
1203 	}
1204 
1205 	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1206 		 inode, cfile->f_flags, full_path);
1207 
1208 	if (tcon->ses->server->oplocks)
1209 		oplock = REQ_OPLOCK;
1210 	else
1211 		oplock = 0;
1212 
1213 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1214 	if (tcon->unix_ext && cap_unix(tcon->ses) &&
1215 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1216 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1217 		/*
1218 		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1219 		 * original open. Must mask them off for a reopen.
1220 		 */
1221 		unsigned int oflags = cfile->f_flags &
1222 						~(O_CREAT | O_EXCL | O_TRUNC);
1223 
1224 		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
1225 				     cifs_sb->ctx->file_mode /* ignored */,
1226 				     oflags, &oplock, &cfile->fid.netfid, xid);
1227 		if (rc == 0) {
1228 			cifs_dbg(FYI, "posix reopen succeeded\n");
1229 			oparms.reconnect = true;
1230 			goto reopen_success;
1231 		}
1232 		/*
1233 		 * Fall through to retry the open the old way on errors;
1234 		 * especially in the reconnect path it is important to retry hard.
1235 		 */
1236 	}
1237 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1238 
1239 	/* If we're caching, we need to be able to fill in around partial writes. */
1240 	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1241 		rdwr_for_fscache = 1;
1242 
1243 	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
1244 
1245 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
1246 	if (cfile->f_flags & O_SYNC)
1247 		create_options |= CREATE_WRITE_THROUGH;
1248 
1249 	if (cfile->f_flags & O_DIRECT)
1250 		create_options |= CREATE_NO_BUFFER;
1251 
1252 	if (server->ops->get_lease_key)
1253 		server->ops->get_lease_key(inode, &cfile->fid);
1254 
1255 retry_open:
1256 	oparms = (struct cifs_open_parms) {
1257 		.tcon = tcon,
1258 		.cifs_sb = cifs_sb,
1259 		.desired_access = desired_access,
1260 		.create_options = cifs_create_options(cifs_sb, create_options),
1261 		.disposition = disposition,
1262 		.path = full_path,
1263 		.fid = &cfile->fid,
1264 		.reconnect = true,
1265 	};
1266 
1267 	/*
1268 	 * Cannot refresh the inode by passing in a file_info buf to be returned
1269 	 * by ops->open and then calling get_inode_info with the returned buf,
1270 	 * since the file might have write-behind data that needs to be flushed
1271 	 * and the server's version of the file size can be stale. If we knew for
1272 	 * sure that the inode was not dirty locally, we could do this.
1273 	 */
1274 	rc = server->ops->open(xid, &oparms, &oplock, NULL);
1275 	if (rc == -ENOENT && oparms.reconnect == false) {
1276 		/* durable handle timeout is expired - open the file again */
1277 		rc = server->ops->open(xid, &oparms, &oplock, NULL);
1278 		/* indicate that we need to relock the file */
1279 		oparms.reconnect = true;
1280 	}
1281 	if (rc == -EACCES && rdwr_for_fscache == 1) {
1282 		desired_access = cifs_convert_flags(cfile->f_flags, 0);
1283 		rdwr_for_fscache = 2;
1284 		goto retry_open;
1285 	}
1286 
1287 	if (rc) {
1288 		mutex_unlock(&cfile->fh_mutex);
1289 		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1290 		cifs_dbg(FYI, "oplock: %d\n", oplock);
1291 		goto reopen_error_exit;
1292 	}
1293 
1294 	if (rdwr_for_fscache == 2)
1295 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1296 
1297 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1298 reopen_success:
1299 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1300 	cfile->invalidHandle = false;
1301 	mutex_unlock(&cfile->fh_mutex);
1302 	cinode = CIFS_I(inode);
1303 
1304 	if (can_flush) {
1305 		rc = filemap_write_and_wait(inode->i_mapping);
1306 		if (!is_interrupt_error(rc))
1307 			mapping_set_error(inode->i_mapping, rc);
1308 
1309 		if (tcon->posix_extensions) {
1310 			rc = smb311_posix_get_inode_info(&inode, full_path,
1311 							 NULL, inode->i_sb, xid);
1312 		} else if (tcon->unix_ext) {
1313 			rc = cifs_get_inode_info_unix(&inode, full_path,
1314 						      inode->i_sb, xid);
1315 		} else {
1316 			rc = cifs_get_inode_info(&inode, full_path, NULL,
1317 						 inode->i_sb, xid, NULL);
1318 		}
1319 	}
1320 	/*
1321 	 * Else we are already writing data out to the server and could deadlock
1322 	 * if we tried to flush it; and since we do not know whether we have data
1323 	 * that would invalidate the current end of file on the server, we cannot
1324 	 * go to the server to get the new inode info.
1325 	 */
1326 
1327 	/*
1328 	 * If the server returned a read oplock and we have mandatory brlocks,
1329 	 * set oplock level to None.
1330 	 */
1331 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1332 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1333 		oplock = 0;
1334 	}
1335 
1336 	server->ops->set_fid(cfile, &cfile->fid, oplock);
1337 	if (oparms.reconnect)
1338 		cifs_relock_file(cfile);
1339 
1340 reopen_error_exit:
1341 	free_dentry_path(page);
1342 	free_xid(xid);
1343 	return rc;
1344 }
1345 
1346 void smb2_deferred_work_close(struct work_struct *work)
1347 {
1348 	struct cifsFileInfo *cfile = container_of(work,
1349 			struct cifsFileInfo, deferred.work);
1350 
1351 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1352 	cifs_del_deferred_close(cfile);
1353 	cfile->deferred_close_scheduled = false;
1354 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1355 	_cifsFileInfo_put(cfile, true, false);
1356 }
1357 
1358 static bool
1359 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1360 {
1361 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1362 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1363 
1364 	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1365 			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
1366 			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
1367 			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
1368 
1369 }
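/*
 * Editor's summary (derived from the predicate above): a close may be
 * deferred only when all of these hold -- closetimeo is non-zero, a lease
 * was granted, a dclose buffer was allocated, the inode is cached for
 * reading (RH or RHW oplock), and CIFS_INO_CLOSE_ON_LOCK is not set.
 */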
1370 
1371 int cifs_close(struct inode *inode, struct file *file)
1372 {
1373 	struct cifsFileInfo *cfile;
1374 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1375 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1376 	struct cifs_deferred_close *dclose;
1377 
1378 	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1379 
1380 	if (file->private_data != NULL) {
1381 		cfile = file->private_data;
1382 		file->private_data = NULL;
1383 		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
1384 		if ((cfile->status_file_deleted == false) &&
1385 		    (smb2_can_defer_close(inode, dclose))) {
1386 			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
1387 				inode_set_mtime_to_ts(inode,
1388 						      inode_set_ctime_current(inode));
1389 			}
1390 			spin_lock(&cinode->deferred_lock);
1391 			cifs_add_deferred_close(cfile, dclose);
1392 			if (cfile->deferred_close_scheduled &&
1393 			    delayed_work_pending(&cfile->deferred)) {
1394 				/*
1395 				 * If there is no pending work, mod_delayed_work queues new work.
1396 				 * So, increase the ref count to avoid use-after-free.
1397 				 */
1398 				if (!mod_delayed_work(deferredclose_wq,
1399 						&cfile->deferred, cifs_sb->ctx->closetimeo))
1400 					cifsFileInfo_get(cfile);
1401 			} else {
1402 				/* Deferred close for files */
1403 				queue_delayed_work(deferredclose_wq,
1404 						&cfile->deferred, cifs_sb->ctx->closetimeo);
1405 				cfile->deferred_close_scheduled = true;
1406 				spin_unlock(&cinode->deferred_lock);
1407 				return 0;
1408 			}
1409 			spin_unlock(&cinode->deferred_lock);
1410 			_cifsFileInfo_put(cfile, true, false);
1411 		} else {
1412 			_cifsFileInfo_put(cfile, true, false);
1413 			kfree(dclose);
1414 		}
1415 	}
1416 
1417 	/* return code from the ->release op is always ignored */
1418 	return 0;
1419 }
1420 
1421 void
1422 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1423 {
1424 	struct cifsFileInfo *open_file, *tmp;
1425 	LIST_HEAD(tmp_list);
1426 
1427 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1428 		return;
1429 
1430 	tcon->need_reopen_files = false;
1431 
1432 	cifs_dbg(FYI, "Reopen persistent handles\n");
1433 
1434 	/* list all files open on tree connection, reopen resilient handles  */
1435 	spin_lock(&tcon->open_file_lock);
1436 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1437 		if (!open_file->invalidHandle)
1438 			continue;
1439 		cifsFileInfo_get(open_file);
1440 		list_add_tail(&open_file->rlist, &tmp_list);
1441 	}
1442 	spin_unlock(&tcon->open_file_lock);
1443 
1444 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1445 		if (cifs_reopen_file(open_file, false /* do not flush */))
1446 			tcon->need_reopen_files = true;
1447 		list_del_init(&open_file->rlist);
1448 		cifsFileInfo_put(open_file);
1449 	}
1450 }
1451 
1452 int cifs_closedir(struct inode *inode, struct file *file)
1453 {
1454 	int rc = 0;
1455 	unsigned int xid;
1456 	struct cifsFileInfo *cfile = file->private_data;
1457 	struct cifs_tcon *tcon;
1458 	struct TCP_Server_Info *server;
1459 	char *buf;
1460 
1461 	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1462 
1463 	if (cfile == NULL)
1464 		return rc;
1465 
1466 	xid = get_xid();
1467 	tcon = tlink_tcon(cfile->tlink);
1468 	server = tcon->ses->server;
1469 
1470 	cifs_dbg(FYI, "Freeing private data in close dir\n");
1471 	spin_lock(&cfile->file_info_lock);
1472 	if (server->ops->dir_needs_close(cfile)) {
1473 		cfile->invalidHandle = true;
1474 		spin_unlock(&cfile->file_info_lock);
1475 		if (server->ops->close_dir)
1476 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1477 		else
1478 			rc = -ENOSYS;
1479 		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
1480 		/* not much we can do if it fails anyway, ignore rc */
1481 		rc = 0;
1482 	} else
1483 		spin_unlock(&cfile->file_info_lock);
1484 
1485 	buf = cfile->srch_inf.ntwrk_buf_start;
1486 	if (buf) {
1487 		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
1488 		cfile->srch_inf.ntwrk_buf_start = NULL;
1489 		if (cfile->srch_inf.smallBuf)
1490 			cifs_small_buf_release(buf);
1491 		else
1492 			cifs_buf_release(buf);
1493 	}
1494 
1495 	cifs_put_tlink(cfile->tlink);
1496 	kfree(file->private_data);
1497 	file->private_data = NULL;
1498 	/* BB can we lock the filestruct while this is going on? */
1499 	free_xid(xid);
1500 	return rc;
1501 }
1502 
1503 static struct cifsLockInfo *
1504 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1505 {
1506 	struct cifsLockInfo *lock =
1507 		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
1508 	if (!lock)
1509 		return lock;
1510 	lock->offset = offset;
1511 	lock->length = length;
1512 	lock->type = type;
1513 	lock->pid = current->tgid;
1514 	lock->flags = flags;
1515 	INIT_LIST_HEAD(&lock->blist);
1516 	init_waitqueue_head(&lock->block_q);
1517 	return lock;
1518 }
1519 
1520 void
1521 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1522 {
1523 	struct cifsLockInfo *li, *tmp;
1524 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1525 		list_del_init(&li->blist);
1526 		wake_up(&li->block_q);
1527 	}
1528 }
1529 
1530 #define CIFS_LOCK_OP	0
1531 #define CIFS_READ_OP	1
1532 #define CIFS_WRITE_OP	2
1533 
1534 /* @rw_check : 0 - no op, 1 - read, 2 - write */
1535 static bool
1536 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
1537 			    __u64 length, __u8 type, __u16 flags,
1538 			    struct cifsFileInfo *cfile,
1539 			    struct cifsLockInfo **conf_lock, int rw_check)
1540 {
1541 	struct cifsLockInfo *li;
1542 	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
1543 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1544 
1545 	list_for_each_entry(li, &fdlocks->locks, llist) {
1546 		if (offset + length <= li->offset ||
1547 		    offset >= li->offset + li->length)
1548 			continue;
1549 		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1550 		    server->ops->compare_fids(cfile, cur_cfile)) {
1551 			/* shared lock prevents write op through the same fid */
1552 			if (!(li->type & server->vals->shared_lock_type) ||
1553 			    rw_check != CIFS_WRITE_OP)
1554 				continue;
1555 		}
1556 		if ((type & server->vals->shared_lock_type) &&
1557 		    ((server->ops->compare_fids(cfile, cur_cfile) &&
1558 		     current->tgid == li->pid) || type == li->type))
1559 			continue;
1560 		if (rw_check == CIFS_LOCK_OP &&
1561 		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1562 		    server->ops->compare_fids(cfile, cur_cfile))
1563 			continue;
1564 		if (conf_lock)
1565 			*conf_lock = li;
1566 		return true;
1567 	}
1568 	return false;
1569 }
1570 
1571 bool
1572 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1573 			__u8 type, __u16 flags,
1574 			struct cifsLockInfo **conf_lock, int rw_check)
1575 {
1576 	bool rc = false;
1577 	struct cifs_fid_locks *cur;
1578 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1579 
1580 	list_for_each_entry(cur, &cinode->llist, llist) {
1581 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1582 						 flags, cfile, conf_lock,
1583 						 rw_check);
1584 		if (rc)
1585 			break;
1586 	}
1587 
1588 	return rc;
1589 }
1590 
1591 /*
1592  * Check if there is another lock that prevents us from setting the lock
1593  * (mandatory style). If such a lock exists, update the flock structure with
1594  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1595  * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
1596  * send the request to the server, or 1 otherwise.
1597  */
1598 static int
1599 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1600 	       __u8 type, struct file_lock *flock)
1601 {
1602 	int rc = 0;
1603 	struct cifsLockInfo *conf_lock;
1604 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1605 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1606 	bool exist;
1607 
1608 	down_read(&cinode->lock_sem);
1609 
1610 	exist = cifs_find_lock_conflict(cfile, offset, length, type,
1611 					flock->c.flc_flags, &conf_lock,
1612 					CIFS_LOCK_OP);
1613 	if (exist) {
1614 		flock->fl_start = conf_lock->offset;
1615 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1616 		flock->c.flc_pid = conf_lock->pid;
1617 		if (conf_lock->type & server->vals->shared_lock_type)
1618 			flock->c.flc_type = F_RDLCK;
1619 		else
1620 			flock->c.flc_type = F_WRLCK;
1621 	} else if (!cinode->can_cache_brlcks)
1622 		rc = 1;
1623 	else
1624 		flock->c.flc_type = F_UNLCK;
1625 
1626 	up_read(&cinode->lock_sem);
1627 	return rc;
1628 }
1629 
1630 static void
1631 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1632 {
1633 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1634 	cifs_down_write(&cinode->lock_sem);
1635 	list_add_tail(&lock->llist, &cfile->llist->locks);
1636 	up_write(&cinode->lock_sem);
1637 }
1638 
1639 /*
1640  * Set the byte-range lock (mandatory style). Returns:
1641  * 1) 0, if we set the lock and don't need to send a request to the server;
1642  * 2) 1, if no locks prevent us but we need to send a request to the server;
1643  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1644  */
1645 static int
1646 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1647 		 bool wait)
1648 {
1649 	struct cifsLockInfo *conf_lock;
1650 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1651 	bool exist;
1652 	int rc = 0;
1653 
1654 try_again:
1655 	exist = false;
1656 	cifs_down_write(&cinode->lock_sem);
1657 
1658 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1659 					lock->type, lock->flags, &conf_lock,
1660 					CIFS_LOCK_OP);
1661 	if (!exist && cinode->can_cache_brlcks) {
1662 		list_add_tail(&lock->llist, &cfile->llist->locks);
1663 		up_write(&cinode->lock_sem);
1664 		return rc;
1665 	}
1666 
1667 	if (!exist)
1668 		rc = 1;
1669 	else if (!wait)
1670 		rc = -EACCES;
1671 	else {
1672 		list_add_tail(&lock->blist, &conf_lock->blist);
1673 		up_write(&cinode->lock_sem);
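		/*
		 * Wait until a waker (cifs_del_lock_waiters()) removes us from
		 * the conflicting lock's blocked list, i.e. until lock->blist
		 * becomes an empty (self-linked) list again.
		 */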
1674 		rc = wait_event_interruptible(lock->block_q,
1675 					(lock->blist.prev == &lock->blist) &&
1676 					(lock->blist.next == &lock->blist));
1677 		if (!rc)
1678 			goto try_again;
1679 		cifs_down_write(&cinode->lock_sem);
1680 		list_del_init(&lock->blist);
1681 	}
1682 
1683 	up_write(&cinode->lock_sem);
1684 	return rc;
1685 }
1686 
1687 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1688 /*
1689  * Check if there is another lock that prevents us from setting the lock
1690  * (posix style). If such a lock exists, update the flock structure with
1691  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1692  * brlocks or leave it the same if we can't. Returns 0 if we don't need to
1693  * send a request to the server, or 1 otherwise.
1694  */
1695 static int
1696 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1697 {
1698 	int rc = 0;
1699 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1700 	unsigned char saved_type = flock->c.flc_type;
1701 
1702 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1703 		return 1;
1704 
1705 	down_read(&cinode->lock_sem);
1706 	posix_test_lock(file, flock);
1707 
1708 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1709 		flock->c.flc_type = saved_type;
1710 		rc = 1;
1711 	}
1712 
1713 	up_read(&cinode->lock_sem);
1714 	return rc;
1715 }
1716 
1717 /*
1718  * Set the byte-range lock (posix style). Returns:
1719  * 1) <0, if an error occurs while setting the lock;
1720  * 2) 0, if we set the lock and don't need to send a request to the server;
1721  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1722  * 4) FILE_LOCK_DEFERRED + 1, if we need to send a request to the server.
1723  */
1724 static int
1725 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1726 {
1727 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1728 	int rc = FILE_LOCK_DEFERRED + 1;
1729 
1730 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1731 		return rc;
1732 
1733 	cifs_down_write(&cinode->lock_sem);
1734 	if (!cinode->can_cache_brlcks) {
1735 		up_write(&cinode->lock_sem);
1736 		return rc;
1737 	}
1738 
1739 	rc = posix_lock_file(file, flock, NULL);
1740 	up_write(&cinode->lock_sem);
1741 	return rc;
1742 }
1743 
1744 int
1745 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1746 {
1747 	unsigned int xid;
1748 	int rc = 0, stored_rc;
1749 	struct cifsLockInfo *li, *tmp;
1750 	struct cifs_tcon *tcon;
1751 	unsigned int num, max_num, max_buf;
1752 	LOCKING_ANDX_RANGE *buf, *cur;
1753 	static const int types[] = {
1754 		LOCKING_ANDX_LARGE_FILES,
1755 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1756 	};
1757 	int i;
1758 
1759 	xid = get_xid();
1760 	tcon = tlink_tcon(cfile->tlink);
1761 
1762 	/*
1763 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1764 	 * and check it before using.
1765 	 */
1766 	max_buf = tcon->ses->server->maxBuf;
1767 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1768 		free_xid(xid);
1769 		return -EINVAL;
1770 	}
1771 
1772 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1773 		     PAGE_SIZE);
1774 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1775 			PAGE_SIZE);
1776 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1777 						sizeof(LOCKING_ANDX_RANGE);
1778 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1779 	if (!buf) {
1780 		free_xid(xid);
1781 		return -ENOMEM;
1782 	}
1783 
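	/*
	 * Two passes: exclusive and shared ranges are sent in separate
	 * cifs_lockv() calls, since each request carries a single lock type.
	 */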
1784 	for (i = 0; i < 2; i++) {
1785 		cur = buf;
1786 		num = 0;
1787 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1788 			if (li->type != types[i])
1789 				continue;
1790 			cur->Pid = cpu_to_le16(li->pid);
1791 			cur->LengthLow = cpu_to_le32((u32)li->length);
1792 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1793 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1794 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1795 			if (++num == max_num) {
1796 				stored_rc = cifs_lockv(xid, tcon,
1797 						       cfile->fid.netfid,
1798 						       (__u8)li->type, 0, num,
1799 						       buf);
1800 				if (stored_rc)
1801 					rc = stored_rc;
1802 				cur = buf;
1803 				num = 0;
1804 			} else
1805 				cur++;
1806 		}
1807 
1808 		if (num) {
1809 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1810 					       (__u8)types[i], 0, num, buf);
1811 			if (stored_rc)
1812 				rc = stored_rc;
1813 		}
1814 	}
1815 
1816 	kfree(buf);
1817 	free_xid(xid);
1818 	return rc;
1819 }
1820 
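/*
 * Editor's note: the owner pointer is hashed with a boot-time secret so
 * that raw kernel pointers are never sent to the server as lock "pids".
 */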
1821 static __u32
1822 hash_lockowner(fl_owner_t owner)
1823 {
1824 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1825 }
1826 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1827 
1828 struct lock_to_push {
1829 	struct list_head llist;
1830 	__u64 offset;
1831 	__u64 length;
1832 	__u32 pid;
1833 	__u16 netfid;
1834 	__u8 type;
1835 };
1836 
1837 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1838 static int
1839 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1840 {
1841 	struct inode *inode = d_inode(cfile->dentry);
1842 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1843 	struct file_lock *flock;
1844 	struct file_lock_context *flctx = locks_inode_context(inode);
1845 	unsigned int count = 0, i;
1846 	int rc = 0, xid, type;
1847 	struct list_head locks_to_send, *el;
1848 	struct lock_to_push *lck, *tmp;
1849 	__u64 length;
1850 
1851 	xid = get_xid();
1852 
1853 	if (!flctx)
1854 		goto out;
1855 
1856 	spin_lock(&flctx->flc_lock);
1857 	list_for_each(el, &flctx->flc_posix) {
1858 		count++;
1859 	}
1860 	spin_unlock(&flctx->flc_lock);
1861 
1862 	INIT_LIST_HEAD(&locks_to_send);
1863 
1864 	/*
1865 	 * Allocating count locks is enough because no FL_POSIX locks can be
1866 	 * added to the list while we are holding cinode->lock_sem, which
1867 	 * protects the locking operations on this inode.
1868 	 */
1869 	for (i = 0; i < count; i++) {
1870 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1871 		if (!lck) {
1872 			rc = -ENOMEM;
1873 			goto err_out;
1874 		}
1875 		list_add_tail(&lck->llist, &locks_to_send);
1876 	}
1877 
1878 	el = locks_to_send.next;
1879 	spin_lock(&flctx->flc_lock);
1880 	for_each_file_lock(flock, &flctx->flc_posix) {
1881 		unsigned char ftype = flock->c.flc_type;
1882 
1883 		if (el == &locks_to_send) {
1884 			/*
1885 			 * The list ended. We don't have enough allocated
1886 			 * structures - something is really wrong.
1887 			 */
1888 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1889 			break;
1890 		}
1891 		length = cifs_flock_len(flock);
1892 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1893 			type = CIFS_RDLCK;
1894 		else
1895 			type = CIFS_WRLCK;
1896 		lck = list_entry(el, struct lock_to_push, llist);
1897 		lck->pid = hash_lockowner(flock->c.flc_owner);
1898 		lck->netfid = cfile->fid.netfid;
1899 		lck->length = length;
1900 		lck->type = type;
1901 		lck->offset = flock->fl_start;
1902 	}
1903 	spin_unlock(&flctx->flc_lock);
1904 
1905 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1906 		int stored_rc;
1907 
1908 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1909 					     lck->offset, lck->length, NULL,
1910 					     lck->type, 0);
1911 		if (stored_rc)
1912 			rc = stored_rc;
1913 		list_del(&lck->llist);
1914 		kfree(lck);
1915 	}
1916 
1917 out:
1918 	free_xid(xid);
1919 	return rc;
1920 err_out:
1921 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1922 		list_del(&lck->llist);
1923 		kfree(lck);
1924 	}
1925 	goto out;
1926 }
1927 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1928 
1929 static int
1930 cifs_push_locks(struct cifsFileInfo *cfile)
1931 {
1932 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1933 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1934 	int rc = 0;
1935 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1936 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1937 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1938 
1939 	/* we are going to update can_cache_brlcks here - need a write access */
1940 	cifs_down_write(&cinode->lock_sem);
1941 	if (!cinode->can_cache_brlcks) {
1942 		up_write(&cinode->lock_sem);
1943 		return rc;
1944 	}
1945 
1946 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1947 	if (cap_unix(tcon->ses) &&
1948 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1949 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1950 		rc = cifs_push_posix_locks(cfile);
1951 	else
1952 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1953 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1954 
1955 	cinode->can_cache_brlcks = false;
1956 	up_write(&cinode->lock_sem);
1957 	return rc;
1958 }
1959 
1960 static void
1961 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1962 		bool *wait_flag, struct TCP_Server_Info *server)
1963 {
1964 	if (flock->c.flc_flags & FL_POSIX)
1965 		cifs_dbg(FYI, "Posix\n");
1966 	if (flock->c.flc_flags & FL_FLOCK)
1967 		cifs_dbg(FYI, "Flock\n");
1968 	if (flock->c.flc_flags & FL_SLEEP) {
1969 		cifs_dbg(FYI, "Blocking lock\n");
1970 		*wait_flag = true;
1971 	}
1972 	if (flock->c.flc_flags & FL_ACCESS)
1973 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1974 	if (flock->c.flc_flags & FL_LEASE)
1975 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1976 	if (flock->c.flc_flags &
1977 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1978 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1979 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1980 		         flock->c.flc_flags);
1981 
1982 	*type = server->vals->large_lock_type;
1983 	if (lock_is_write(flock)) {
1984 		cifs_dbg(FYI, "F_WRLCK\n");
1985 		*type |= server->vals->exclusive_lock_type;
1986 		*lock = 1;
1987 	} else if (lock_is_unlock(flock)) {
1988 		cifs_dbg(FYI, "F_UNLCK\n");
1989 		*type |= server->vals->unlock_lock_type;
1990 		*unlock = 1;
1991 		/* Check if unlock includes more than one lock range */
1992 	} else if (lock_is_read(flock)) {
1993 		cifs_dbg(FYI, "F_RDLCK\n");
1994 		*type |= server->vals->shared_lock_type;
1995 		*lock = 1;
1996 	} else if (flock->c.flc_type == F_EXLCK) {
1997 		cifs_dbg(FYI, "F_EXLCK\n");
1998 		*type |= server->vals->exclusive_lock_type;
1999 		*lock = 1;
2000 	} else if (flock->c.flc_type == F_SHLCK) {
2001 		cifs_dbg(FYI, "F_SHLCK\n");
2002 		*type |= server->vals->shared_lock_type;
2003 		*lock = 1;
2004 	} else
2005 		cifs_dbg(FYI, "Unknown type of lock\n");
2006 }
2007 
2008 static int
2009 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
2010 	   bool wait_flag, bool posix_lck, unsigned int xid)
2011 {
2012 	int rc = 0;
2013 	__u64 length = cifs_flock_len(flock);
2014 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2015 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2016 	struct TCP_Server_Info *server = tcon->ses->server;
2017 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2018 	__u16 netfid = cfile->fid.netfid;
2019 
2020 	if (posix_lck) {
2021 		int posix_lock_type;
2022 
2023 		rc = cifs_posix_lock_test(file, flock);
2024 		if (!rc)
2025 			return rc;
2026 
2027 		if (type & server->vals->shared_lock_type)
2028 			posix_lock_type = CIFS_RDLCK;
2029 		else
2030 			posix_lock_type = CIFS_WRLCK;
2031 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2032 				      hash_lockowner(flock->c.flc_owner),
2033 				      flock->fl_start, length, flock,
2034 				      posix_lock_type, wait_flag);
2035 		return rc;
2036 	}
2037 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2038 
2039 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2040 	if (!rc)
2041 		return rc;
2042 
2043 	/* BB we could chain these into one lock request BB */
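	/*
	 * Probe by trying to take the lock: if the server grants it, no
	 * conflicting lock exists, so undo it and report F_UNLCK; otherwise
	 * report the probed range as locked.
	 */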
2044 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2045 				    1, 0, false);
2046 	if (rc == 0) {
2047 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2048 					    type, 0, 1, false);
2049 		flock->c.flc_type = F_UNLCK;
2050 		if (rc != 0)
2051 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2052 				 rc);
2053 		return 0;
2054 	}
2055 
2056 	if (type & server->vals->shared_lock_type) {
2057 		flock->c.flc_type = F_WRLCK;
2058 		return 0;
2059 	}
2060 
2061 	type &= ~server->vals->exclusive_lock_type;
2062 
2063 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2064 				    type | server->vals->shared_lock_type,
2065 				    1, 0, false);
2066 	if (rc == 0) {
2067 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2068 			type | server->vals->shared_lock_type, 0, 1, false);
2069 		flock->c.flc_type = F_RDLCK;
2070 		if (rc != 0)
2071 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2072 				 rc);
2073 	} else
2074 		flock->c.flc_type = F_WRLCK;
2075 
2076 	return 0;
2077 }
2078 
2079 void
2080 cifs_move_llist(struct list_head *source, struct list_head *dest)
2081 {
2082 	struct list_head *li, *tmp;
2083 	list_for_each_safe(li, tmp, source)
2084 		list_move(li, dest);
2085 }
2086 
2087 int
2088 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2089 				struct file *file)
2090 {
2091 	struct cifsFileInfo *open_file = NULL;
2092 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2093 	int rc = 0;
2094 
2095 	spin_lock(&tcon->open_file_lock);
2096 	spin_lock(&cinode->open_file_lock);
2097 
2098 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2099 		if (file->f_flags == open_file->f_flags) {
2100 			rc = -EINVAL;
2101 			break;
2102 		}
2103 	}
2104 
2105 	spin_unlock(&cinode->open_file_lock);
2106 	spin_unlock(&tcon->open_file_lock);
2107 	return rc;
2108 }
2109 
2110 void
2111 cifs_free_llist(struct list_head *llist)
2112 {
2113 	struct cifsLockInfo *li, *tmp;
2114 	list_for_each_entry_safe(li, tmp, llist, llist) {
2115 		cifs_del_lock_waiters(li);
2116 		list_del(&li->llist);
2117 		kfree(li);
2118 	}
2119 }
2120 
2121 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2122 int
2123 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2124 		  unsigned int xid)
2125 {
2126 	int rc = 0, stored_rc;
2127 	static const int types[] = {
2128 		LOCKING_ANDX_LARGE_FILES,
2129 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2130 	};
2131 	unsigned int i;
2132 	unsigned int max_num, num, max_buf;
2133 	LOCKING_ANDX_RANGE *buf, *cur;
2134 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2135 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2136 	struct cifsLockInfo *li, *tmp;
2137 	__u64 length = cifs_flock_len(flock);
2138 	LIST_HEAD(tmp_llist);
2139 
2140 	/*
2141 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2142 	 * and check it before using.
2143 	 */
2144 	max_buf = tcon->ses->server->maxBuf;
2145 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2146 		return -EINVAL;
2147 
2148 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2149 		     PAGE_SIZE);
2150 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2151 			PAGE_SIZE);
2152 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2153 						sizeof(LOCKING_ANDX_RANGE);
2154 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2155 	if (!buf)
2156 		return -ENOMEM;
2157 
2158 	cifs_down_write(&cinode->lock_sem);
2159 	for (i = 0; i < 2; i++) {
2160 		cur = buf;
2161 		num = 0;
2162 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2163 			if (flock->fl_start > li->offset ||
2164 			    (flock->fl_start + length) <
2165 			    (li->offset + li->length))
2166 				continue;
2167 			if (current->tgid != li->pid)
2168 				continue;
2169 			if (types[i] != li->type)
2170 				continue;
2171 			if (cinode->can_cache_brlcks) {
2172 				/*
2173 				 * We can cache brlock requests - simply remove
2174 				 * a lock from the file's list.
2175 				 */
2176 				list_del(&li->llist);
2177 				cifs_del_lock_waiters(li);
2178 				kfree(li);
2179 				continue;
2180 			}
2181 			cur->Pid = cpu_to_le16(li->pid);
2182 			cur->LengthLow = cpu_to_le32((u32)li->length);
2183 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2184 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2185 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2186 			/*
2187 			 * We need to save a lock here to let us add it again to
2188 			 * the file's list if the unlock range request fails on
2189 			 * the server.
2190 			 */
2191 			list_move(&li->llist, &tmp_llist);
2192 			if (++num == max_num) {
2193 				stored_rc = cifs_lockv(xid, tcon,
2194 						       cfile->fid.netfid,
2195 						       li->type, num, 0, buf);
2196 				if (stored_rc) {
2197 					/*
2198 					 * We failed on the unlock range
2199 					 * request - add all locks from the tmp
2200 					 * list to the head of the file's list.
2201 					 */
2202 					cifs_move_llist(&tmp_llist,
2203 							&cfile->llist->locks);
2204 					rc = stored_rc;
2205 				} else
2206 					/*
2207 				 * The unlock range request succeeded -
2208 					 * free the tmp list.
2209 					 */
2210 					cifs_free_llist(&tmp_llist);
2211 				cur = buf;
2212 				num = 0;
2213 			} else
2214 				cur++;
2215 		}
2216 		if (num) {
2217 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2218 					       types[i], num, 0, buf);
2219 			if (stored_rc) {
2220 				cifs_move_llist(&tmp_llist,
2221 						&cfile->llist->locks);
2222 				rc = stored_rc;
2223 			} else
2224 				cifs_free_llist(&tmp_llist);
2225 		}
2226 	}
2227 
2228 	up_write(&cinode->lock_sem);
2229 	kfree(buf);
2230 	return rc;
2231 }
2232 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2233 
2234 static int
2235 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2236 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2237 	   unsigned int xid)
2238 {
2239 	int rc = 0;
2240 	__u64 length = cifs_flock_len(flock);
2241 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2242 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2243 	struct TCP_Server_Info *server = tcon->ses->server;
2244 	struct inode *inode = d_inode(cfile->dentry);
2245 
2246 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2247 	if (posix_lck) {
2248 		int posix_lock_type;
2249 
2250 		rc = cifs_posix_lock_set(file, flock);
2251 		if (rc <= FILE_LOCK_DEFERRED)
2252 			return rc;
2253 
2254 		if (type & server->vals->shared_lock_type)
2255 			posix_lock_type = CIFS_RDLCK;
2256 		else
2257 			posix_lock_type = CIFS_WRLCK;
2258 
2259 		if (unlock == 1)
2260 			posix_lock_type = CIFS_UNLCK;
2261 
2262 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2263 				      hash_lockowner(flock->c.flc_owner),
2264 				      flock->fl_start, length,
2265 				      NULL, posix_lock_type, wait_flag);
2266 		goto out;
2267 	}
2268 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2269 	if (lock) {
2270 		struct cifsLockInfo *lock;
2271 
2272 		lock = cifs_lock_init(flock->fl_start, length, type,
2273 				      flock->c.flc_flags);
2274 		if (!lock)
2275 			return -ENOMEM;
2276 
2277 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2278 		if (rc < 0) {
2279 			kfree(lock);
2280 			return rc;
2281 		}
2282 		if (!rc)
2283 			goto out;
2284 
2285 		/*
2286 		 * Windows 7 server can delay breaking lease from read to None
2287 		 * if we set a byte-range lock on a file - break it explicitly
2288 		 * before sending the lock to the server to be sure the next
2289 		 * read won't conflict with non-overlapping locks due to
2290 		 * page reads.
2291 		 */
2292 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2293 					CIFS_CACHE_READ(CIFS_I(inode))) {
2294 			cifs_zap_mapping(inode);
2295 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2296 				 inode);
2297 			CIFS_I(inode)->oplock = 0;
2298 		}
2299 
2300 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2301 					    type, 1, 0, wait_flag);
2302 		if (rc) {
2303 			kfree(lock);
2304 			return rc;
2305 		}
2306 
2307 		cifs_lock_add(cfile, lock);
2308 	} else if (unlock)
2309 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2310 
2311 out:
2312 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2313 		/*
2314 		 * If this is a request to remove all locks because we
2315 		 * are closing the file, it doesn't matter if the
2316 		 * unlocking failed as both cifs.ko and the SMB server
2317 		 * remove the lock on file close.
2318 		 */
2319 		if (rc) {
2320 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2321 			if (!(flock->c.flc_flags & FL_CLOSE))
2322 				return rc;
2323 		}
2324 		rc = locks_lock_file_wait(file, flock);
2325 	}
2326 	return rc;
2327 }
2328 
2329 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2330 {
2331 	int rc, xid;
2332 	int lock = 0, unlock = 0;
2333 	bool wait_flag = false;
2334 	bool posix_lck = false;
2335 	struct cifs_sb_info *cifs_sb;
2336 	struct cifs_tcon *tcon;
2337 	struct cifsFileInfo *cfile;
2338 	__u32 type;
2339 
2340 	xid = get_xid();
2341 
2342 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2343 		rc = -ENOLCK;
2344 		free_xid(xid);
2345 		return rc;
2346 	}
2347 
2348 	cfile = (struct cifsFileInfo *)file->private_data;
2349 	tcon = tlink_tcon(cfile->tlink);
2350 
2351 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2352 			tcon->ses->server);
2353 	cifs_sb = CIFS_FILE_SB(file);
2354 
2355 	if (cap_unix(tcon->ses) &&
2356 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2357 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2358 		posix_lck = true;
2359 
2360 	if (!lock && !unlock) {
2361 		/*
2362 		 * if this is neither a lock nor an unlock request, there is
2363 		 * nothing to do since we do not know what it is
2364 		 */
2365 		rc = -EOPNOTSUPP;
2366 		free_xid(xid);
2367 		return rc;
2368 	}
2369 
2370 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2371 			xid);
2372 	free_xid(xid);
2373 	return rc;
2376 }
2377 
2378 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2379 {
2380 	int rc, xid;
2381 	int lock = 0, unlock = 0;
2382 	bool wait_flag = false;
2383 	bool posix_lck = false;
2384 	struct cifs_sb_info *cifs_sb;
2385 	struct cifs_tcon *tcon;
2386 	struct cifsFileInfo *cfile;
2387 	__u32 type;
2388 
2389 	rc = -EACCES;
2390 	xid = get_xid();
2391 
2392 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2393 		 flock->c.flc_flags, flock->c.flc_type,
2394 		 (long long)flock->fl_start,
2395 		 (long long)flock->fl_end);
2396 
2397 	cfile = (struct cifsFileInfo *)file->private_data;
2398 	tcon = tlink_tcon(cfile->tlink);
2399 
2400 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2401 			tcon->ses->server);
2402 	cifs_sb = CIFS_FILE_SB(file);
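	/*
	 * Editor's note: mark the inode so handles are closed for real
	 * (not deferred) on close, releasing server-side locks promptly.
	 */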
2403 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2404 
2405 	if (cap_unix(tcon->ses) &&
2406 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2407 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2408 		posix_lck = true;
2409 	/*
2410 	 * BB add code here to normalize offset and length to account for
2411 	 * negative length which we can not accept over the wire.
2412 	 */
2413 	if (IS_GETLK(cmd)) {
2414 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2415 		free_xid(xid);
2416 		return rc;
2417 	}
2418 
2419 	if (!lock && !unlock) {
2420 		/*
2421 		 * if this is neither a lock nor an unlock request, there is
2422 		 * nothing to do since we do not know what it is
2423 		 */
2424 		free_xid(xid);
2425 		return -EOPNOTSUPP;
2426 	}
2427 
2428 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2429 			xid);
2430 	free_xid(xid);
2431 	return rc;
2432 }
2433 
2434 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
2435 {
2436 	struct netfs_io_request *wreq = wdata->rreq;
2437 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2438 	loff_t wrend;
2439 
2440 	if (result > 0) {
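	/*
	 * Editor's note: on success, advance our cached notion of how much
	 * of the file the server holds - an unbuffered/DIO write pushes out
	 * zero_point, and any write can extend remote_i_size.
	 */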
2441 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2442 
2443 		if (wrend > ictx->zero_point &&
2444 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2445 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2446 			ictx->zero_point = wrend;
2447 		if (wrend > ictx->remote_i_size)
2448 			netfs_resize_file(ictx, wrend, true);
2449 	}
2450 
2451 	netfs_write_subrequest_terminated(&wdata->subreq, result);
2452 }
2453 
2454 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2455 					bool fsuid_only)
2456 {
2457 	struct cifsFileInfo *open_file = NULL;
2458 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2459 
2460 	/* only filter by fsuid on multiuser mounts */
2461 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2462 		fsuid_only = false;
2463 
2464 	spin_lock(&cifs_inode->open_file_lock);
2465 	/* We could simply get the first list entry since write-only entries
2466 	   are always at the end of the list, but since the first entry might
2467 	   have a close pending, we go through the whole list. */
2468 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2469 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2470 			continue;
2471 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2472 			if ((!open_file->invalidHandle)) {
2473 				/* found a good file */
2474 				/* lock it so it will not be closed on us */
2475 				cifsFileInfo_get(open_file);
2476 				spin_unlock(&cifs_inode->open_file_lock);
2477 				return open_file;
2478 			} /* else might as well continue, and look for
2479 			     another, or simply have the caller reopen it
2480 			     again rather than trying to fix this handle */
2481 		} else /* write only file */
2482 			break; /* write only files are last so must be done */
2483 	}
2484 	spin_unlock(&cifs_inode->open_file_lock);
2485 	return NULL;
2486 }
2487 
2488 /* Return -EBADF if no handle is found and general rc otherwise */
2489 int
2490 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2491 		       struct cifsFileInfo **ret_file)
2492 {
2493 	struct cifsFileInfo *open_file, *inv_file = NULL;
2494 	struct cifs_sb_info *cifs_sb;
2495 	bool any_available = false;
2496 	int rc = -EBADF;
2497 	unsigned int refind = 0;
2498 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2499 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2500 	*ret_file = NULL;
2501 
2502 	/*
2503 	 * Having a null inode here (because mapping->host was set to zero by
2504 	 * the VFS or MM) should not happen, but we had reports of an oops (due
2505 	 * to it being zero) during stress test cases, so we need to check for it
2506 	 */
2507 
2508 	if (cifs_inode == NULL) {
2509 		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
2510 		dump_stack();
2511 		return rc;
2512 	}
2513 
2514 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2515 
2516 	/* only filter by fsuid on multiuser mounts */
2517 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2518 		fsuid_only = false;
2519 
2520 	spin_lock(&cifs_inode->open_file_lock);
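	/*
	 * Scan the open handle list for a writable one; if only an
	 * invalidated handle is found, try reopening it, giving up after
	 * MAX_REOPEN_ATT attempts.
	 */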
2521 refind_writable:
2522 	if (refind > MAX_REOPEN_ATT) {
2523 		spin_unlock(&cifs_inode->open_file_lock);
2524 		return rc;
2525 	}
2526 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2527 		if (!any_available && open_file->pid != current->tgid)
2528 			continue;
2529 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2530 			continue;
2531 		if (with_delete && !(open_file->fid.access & DELETE))
2532 			continue;
2533 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2534 			if (!open_file->invalidHandle) {
2535 				/* found a good writable file */
2536 				cifsFileInfo_get(open_file);
2537 				spin_unlock(&cifs_inode->open_file_lock);
2538 				*ret_file = open_file;
2539 				return 0;
2540 			} else {
2541 				if (!inv_file)
2542 					inv_file = open_file;
2543 			}
2544 		}
2545 	}
2546 	/* couldn't find usable FH with same pid, try any available */
2547 	if (!any_available) {
2548 		any_available = true;
2549 		goto refind_writable;
2550 	}
2551 
2552 	if (inv_file) {
2553 		any_available = false;
2554 		cifsFileInfo_get(inv_file);
2555 	}
2556 
2557 	spin_unlock(&cifs_inode->open_file_lock);
2558 
2559 	if (inv_file) {
2560 		rc = cifs_reopen_file(inv_file, false);
2561 		if (!rc) {
2562 			*ret_file = inv_file;
2563 			return 0;
2564 		}
2565 
2566 		spin_lock(&cifs_inode->open_file_lock);
2567 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2568 		spin_unlock(&cifs_inode->open_file_lock);
2569 		cifsFileInfo_put(inv_file);
2570 		++refind;
2571 		inv_file = NULL;
2572 		spin_lock(&cifs_inode->open_file_lock);
2573 		goto refind_writable;
2574 	}
2575 
2576 	return rc;
2577 }
2578 
2579 struct cifsFileInfo *
2580 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2581 {
2582 	struct cifsFileInfo *cfile;
2583 	int rc;
2584 
2585 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2586 	if (rc)
2587 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2588 
2589 	return cfile;
2590 }
2591 
2592 int
2593 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2594 		       int flags,
2595 		       struct cifsFileInfo **ret_file)
2596 {
2597 	struct cifsFileInfo *cfile;
2598 	void *page = alloc_dentry_path();
2599 
2600 	*ret_file = NULL;
2601 
2602 	spin_lock(&tcon->open_file_lock);
2603 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2604 		struct cifsInodeInfo *cinode;
2605 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2606 		if (IS_ERR(full_path)) {
2607 			spin_unlock(&tcon->open_file_lock);
2608 			free_dentry_path(page);
2609 			return PTR_ERR(full_path);
2610 		}
2611 		if (strcmp(full_path, name))
2612 			continue;
2613 
2614 		cinode = CIFS_I(d_inode(cfile->dentry));
2615 		spin_unlock(&tcon->open_file_lock);
2616 		free_dentry_path(page);
2617 		return cifs_get_writable_file(cinode, flags, ret_file);
2618 	}
2619 
2620 	spin_unlock(&tcon->open_file_lock);
2621 	free_dentry_path(page);
2622 	return -ENOENT;
2623 }
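
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * a caller that only knows a path name can borrow a writable handle:
 *
 *	rc = cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
 *	if (!rc) {
 *		... use cfile ...
 *		cifsFileInfo_put(cfile);
 *	}
 */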
2624 
2625 int
2626 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2627 		       struct cifsFileInfo **ret_file)
2628 {
2629 	struct cifsFileInfo *cfile;
2630 	void *page = alloc_dentry_path();
2631 
2632 	*ret_file = NULL;
2633 
2634 	spin_lock(&tcon->open_file_lock);
2635 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2636 		struct cifsInodeInfo *cinode;
2637 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2638 		if (IS_ERR(full_path)) {
2639 			spin_unlock(&tcon->open_file_lock);
2640 			free_dentry_path(page);
2641 			return PTR_ERR(full_path);
2642 		}
2643 		if (strcmp(full_path, name))
2644 			continue;
2645 
2646 		cinode = CIFS_I(d_inode(cfile->dentry));
2647 		spin_unlock(&tcon->open_file_lock);
2648 		free_dentry_path(page);
2649 		*ret_file = find_readable_file(cinode, 0);
2650 		return *ret_file ? 0 : -ENOENT;
2651 	}
2652 
2653 	spin_unlock(&tcon->open_file_lock);
2654 	free_dentry_path(page);
2655 	return -ENOENT;
2656 }
2657 
2658 /*
2659  * Flush data on a strict file.
2660  */
2661 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2662 		      int datasync)
2663 {
2664 	unsigned int xid;
2665 	int rc = 0;
2666 	struct cifs_tcon *tcon;
2667 	struct TCP_Server_Info *server;
2668 	struct cifsFileInfo *smbfile = file->private_data;
2669 	struct inode *inode = file_inode(file);
2670 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2671 
2672 	rc = file_write_and_wait_range(file, start, end);
2673 	if (rc) {
2674 		trace_cifs_fsync_err(inode->i_ino, rc);
2675 		return rc;
2676 	}
2677 
2678 	xid = get_xid();
2679 
2680 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2681 		 file, datasync);
2682 
2683 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2684 		rc = cifs_zap_mapping(inode);
2685 		if (rc) {
2686 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2687 			rc = 0; /* don't care about it in fsync */
2688 		}
2689 	}
2690 
2691 	tcon = tlink_tcon(smbfile->tlink);
2692 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2693 		server = tcon->ses->server;
2694 		if (server->ops->flush == NULL) {
2695 			rc = -ENOSYS;
2696 			goto strict_fsync_exit;
2697 		}
2698 
2699 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2700 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2701 			if (smbfile) {
2702 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2703 				cifsFileInfo_put(smbfile);
2704 			} else
2705 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2706 		} else
2707 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2708 	}
2709 
2710 strict_fsync_exit:
2711 	free_xid(xid);
2712 	return rc;
2713 }
2714 
2715 /*
2716  * Flush data on a non-strict file.
2717  */
2718 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2719 {
2720 	unsigned int xid;
2721 	int rc = 0;
2722 	struct cifs_tcon *tcon;
2723 	struct TCP_Server_Info *server;
2724 	struct cifsFileInfo *smbfile = file->private_data;
2725 	struct inode *inode = file_inode(file);
2726 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2727 
2728 	rc = file_write_and_wait_range(file, start, end);
2729 	if (rc) {
2730 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2731 		return rc;
2732 	}
2733 
2734 	xid = get_xid();
2735 
2736 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2737 		 file, datasync);
2738 
2739 	tcon = tlink_tcon(smbfile->tlink);
2740 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2741 		server = tcon->ses->server;
2742 		if (server->ops->flush == NULL) {
2743 			rc = -ENOSYS;
2744 			goto fsync_exit;
2745 		}
2746 
2747 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2748 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2749 			if (smbfile) {
2750 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2751 				cifsFileInfo_put(smbfile);
2752 			} else
2753 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2754 		} else
2755 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2756 	}
2757 
2758 fsync_exit:
2759 	free_xid(xid);
2760 	return rc;
2761 }
2762 
2763 /*
2764  * As file closes, flush all cached write data for this inode checking
2765  * for write behind errors.
2766  */
2767 int cifs_flush(struct file *file, fl_owner_t id)
2768 {
2769 	struct inode *inode = file_inode(file);
2770 	int rc = 0;
2771 
2772 	if (file->f_mode & FMODE_WRITE)
2773 		rc = filemap_write_and_wait(inode->i_mapping);
2774 
2775 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2776 	if (rc) {
2777 		/* get more nuanced writeback errors */
2778 		rc = filemap_check_wb_err(file->f_mapping, 0);
2779 		trace_cifs_flush_err(inode->i_ino, rc);
2780 	}
2781 	return rc;
2782 }
2783 
2784 static ssize_t
2785 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2786 {
2787 	struct file *file = iocb->ki_filp;
2788 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2789 	struct inode *inode = file->f_mapping->host;
2790 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2791 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2792 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2793 	ssize_t rc;
2794 
2795 	rc = netfs_start_io_write(inode);
2796 	if (rc < 0)
2797 		return rc;
2798 
2799 	/*
2800 	 * We need to hold the sem to be sure nobody modifies lock list
2801 	 * with a brlock that prevents writing.
2802 	 */
2803 	down_read(&cinode->lock_sem);
2804 
2805 	rc = generic_write_checks(iocb, from);
2806 	if (rc <= 0)
2807 		goto out;
2808 
2809 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2810 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2811 				     server->vals->exclusive_lock_type, 0,
2812 				     NULL, CIFS_WRITE_OP))) {
2813 		rc = -EACCES;
2814 		goto out;
2815 	}
2816 
2817 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2818 
2819 out:
2820 	up_read(&cinode->lock_sem);
2821 	netfs_end_io_write(inode);
2822 	if (rc > 0)
2823 		rc = generic_write_sync(iocb, rc);
2824 	return rc;
2825 }
2826 
2827 ssize_t
2828 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2829 {
2830 	struct inode *inode = file_inode(iocb->ki_filp);
2831 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2832 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2833 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2834 						iocb->ki_filp->private_data;
2835 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2836 	ssize_t written;
2837 
2838 	written = cifs_get_writer(cinode);
2839 	if (written)
2840 		return written;
2841 
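	/*
	 * With a write lease/oplock we can cache writes locally: on
	 * POSIX-capable unix mounts take the generic path, otherwise go
	 * through cifs_writev(), which checks mandatory byte-range locks.
	 */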
2842 	if (CIFS_CACHE_WRITE(cinode)) {
2843 		if (cap_unix(tcon->ses) &&
2844 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2845 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2846 			written = netfs_file_write_iter(iocb, from);
2847 			goto out;
2848 		}
2849 		written = cifs_writev(iocb, from);
2850 		goto out;
2851 	}
2852 	/*
2853 	 * For non-oplocked files in strict cache mode we need to write the data
2854 	 * to the server exactly from pos to pos+len-1 rather than flush all
2855 	 * affected pages because it may cause an error with mandatory locks on
2856 	 * these pages but not on the region from pos to pos+len-1.
2857 	 */
2858 	written = netfs_file_write_iter(iocb, from);
2859 	if (CIFS_CACHE_READ(cinode)) {
2860 		/*
2861 		 * We have read level caching and we have just sent a write
2862 		 * request to the server thus making data in the cache stale.
2863 		 * Zap the cache and set oplock/lease level to NONE to avoid
2864 		 * reading stale data from the cache. All subsequent read
2865 		 * operations will read new data from the server.
2866 		 */
2867 		cifs_zap_mapping(inode);
2868 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2869 			 inode);
2870 		cinode->oplock = 0;
2871 	}
2872 out:
2873 	cifs_put_writer(cinode);
2874 	return written;
2875 }
2876 
2877 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2878 {
2879 	ssize_t rc;
2880 	struct inode *inode = file_inode(iocb->ki_filp);
2881 
2882 	if (iocb->ki_flags & IOCB_DIRECT)
2883 		return netfs_unbuffered_read_iter(iocb, iter);
2884 
2885 	rc = cifs_revalidate_mapping(inode);
2886 	if (rc)
2887 		return rc;
2888 
2889 	return netfs_file_read_iter(iocb, iter);
2890 }
2891 
2892 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2893 {
2894 	struct inode *inode = file_inode(iocb->ki_filp);
2895 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2896 	ssize_t written;
2897 	int rc;
2898 
2899 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2900 		written = netfs_unbuffered_write_iter(iocb, from);
2901 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2902 			cifs_zap_mapping(inode);
2903 			cifs_dbg(FYI,
2904 				 "Set no oplock for inode=%p after a write operation\n",
2905 				 inode);
2906 			cinode->oplock = 0;
2907 		}
2908 		return written;
2909 	}
2910 
2911 	written = cifs_get_writer(cinode);
2912 	if (written)
2913 		return written;
2914 
2915 	written = netfs_file_write_iter(iocb, from);
2916 
2917 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2918 		rc = filemap_fdatawrite(inode->i_mapping);
2919 		if (rc)
2920 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2921 				 rc, inode);
2922 	}
2923 
2924 	cifs_put_writer(cinode);
2925 	return written;
2926 }
2927 
2928 ssize_t
2929 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2930 {
2931 	struct inode *inode = file_inode(iocb->ki_filp);
2932 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2933 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2934 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2935 						iocb->ki_filp->private_data;
2936 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2937 	int rc = -EACCES;
2938 
2939 	/*
2940 	 * In strict cache mode we need to read from the server all the time
2941 	 * if we don't have level II oplock because the server can delay mtime
2942 	 * change - so we can't make a decision about invalidating the inode.
2943 	 * And reading pages can also fail if there are mandatory locks on
2944 	 * pages affected by this read but not on the region from pos to
2945 	 * pos+len-1.
2946 	 */
2947 	if (!CIFS_CACHE_READ(cinode))
2948 		return netfs_unbuffered_read_iter(iocb, to);
2949 
2950 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2951 		if (iocb->ki_flags & IOCB_DIRECT)
2952 			return netfs_unbuffered_read_iter(iocb, to);
2953 		return netfs_buffered_read_iter(iocb, to);
2954 	}
2955 
2956 	/*
2957 	 * We need to hold the sem to be sure nobody modifies lock list
2958 	 * with a brlock that prevents reading.
2959 	 */
2960 	if (iocb->ki_flags & IOCB_DIRECT) {
2961 		rc = netfs_start_io_direct(inode);
2962 		if (rc < 0)
2963 			goto out;
2964 		rc = -EACCES;
2965 		down_read(&cinode->lock_sem);
2966 		if (!cifs_find_lock_conflict(
2967 			    cfile, iocb->ki_pos, iov_iter_count(to),
2968 			    tcon->ses->server->vals->shared_lock_type,
2969 			    0, NULL, CIFS_READ_OP))
2970 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2971 		up_read(&cinode->lock_sem);
2972 		netfs_end_io_direct(inode);
2973 	} else {
2974 		rc = netfs_start_io_read(inode);
2975 		if (rc < 0)
2976 			goto out;
2977 		rc = -EACCES;
2978 		down_read(&cinode->lock_sem);
2979 		if (!cifs_find_lock_conflict(
2980 			    cfile, iocb->ki_pos, iov_iter_count(to),
2981 			    tcon->ses->server->vals->shared_lock_type,
2982 			    0, NULL, CIFS_READ_OP))
2983 			rc = filemap_read(iocb, to, 0);
2984 		up_read(&cinode->lock_sem);
2985 		netfs_end_io_read(inode);
2986 	}
2987 out:
2988 	return rc;
2989 }
2990 
2991 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2992 {
2993 	return netfs_page_mkwrite(vmf, NULL);
2994 }
2995 
2996 static const struct vm_operations_struct cifs_file_vm_ops = {
2997 	.fault = filemap_fault,
2998 	.map_pages = filemap_map_pages,
2999 	.page_mkwrite = cifs_page_mkwrite,
3000 };
3001 
3002 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3003 {
3004 	int xid, rc = 0;
3005 	struct inode *inode = file_inode(file);
3006 
3007 	xid = get_xid();
3008 
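	/* Without a read lease/oplock the page cache may be stale; zap it. */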
3009 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3010 		rc = cifs_zap_mapping(inode);
3011 	if (!rc)
3012 		rc = generic_file_mmap(file, vma);
3013 	if (!rc)
3014 		vma->vm_ops = &cifs_file_vm_ops;
3015 
3016 	free_xid(xid);
3017 	return rc;
3018 }
3019 
3020 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3021 {
3022 	int rc, xid;
3023 
3024 	xid = get_xid();
3025 
3026 	rc = cifs_revalidate_file(file);
3027 	if (rc)
3028 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3029 			 rc);
3030 	if (!rc)
3031 		rc = generic_file_mmap(file, vma);
3032 	if (!rc)
3033 		vma->vm_ops = &cifs_file_vm_ops;
3034 
3035 	free_xid(xid);
3036 	return rc;
3037 }
3038 
3039 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3040 {
3041 	struct cifsFileInfo *open_file;
3042 
3043 	spin_lock(&cifs_inode->open_file_lock);
3044 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3045 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3046 			spin_unlock(&cifs_inode->open_file_lock);
3047 			return 1;
3048 		}
3049 	}
3050 	spin_unlock(&cifs_inode->open_file_lock);
3051 	return 0;
3052 }
3053 
3054 /* We do not want to update the file size from the server for inodes
3055    open for write, to avoid races with writepage extending the file.
3056    In the future we could consider allowing refreshing of the inode
3057    only on increases in the file size, but this is tricky to do
3058    without racing with writebehind page caching in the current Linux
3059    kernel design. */
3060 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3061 			    bool from_readdir)
3062 {
3063 	if (!cifsInode)
3064 		return true;
3065 
3066 	if (is_inode_writable(cifsInode) ||
3067 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3068 		/* This inode is open for write at least once */
3069 		struct cifs_sb_info *cifs_sb;
3070 
3071 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3072 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3073 			/* since no page cache to corrupt on directio
3074 			we can change size safely */
3075 			return true;
3076 		}
3077 
3078 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3079 			return true;
3080 
3081 		return false;
3082 	} else
3083 		return true;
3084 }
3085 
3086 void cifs_oplock_break(struct work_struct *work)
3087 {
3088 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3089 						  oplock_break);
3090 	struct inode *inode = d_inode(cfile->dentry);
3091 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3092 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3093 	struct cifs_tcon *tcon;
3094 	struct TCP_Server_Info *server;
3095 	struct tcon_link *tlink;
3096 	int rc = 0;
3097 	bool purge_cache = false, oplock_break_cancelled;
3098 	__u64 persistent_fid, volatile_fid;
3099 	__u16 net_fid;
3100 
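
	/* Wait for any pending writers to finish before touching oplock state. */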
3101 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3102 			TASK_UNINTERRUPTIBLE);
3103 
3104 	tlink = cifs_sb_tlink(cifs_sb);
3105 	if (IS_ERR(tlink))
3106 		goto out;
3107 	tcon = tlink_tcon(tlink);
3108 	server = tcon->ses->server;
3109 
3110 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3111 				      cfile->oplock_epoch, &purge_cache);
3112 
3113 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3114 						cifs_has_mand_locks(cinode)) {
3115 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3116 			 inode);
3117 		cinode->oplock = 0;
3118 	}
3119 
3120 	if (S_ISREG(inode->i_mode)) {
3121 		if (CIFS_CACHE_READ(cinode))
3122 			break_lease(inode, O_RDONLY);
3123 		else
3124 			break_lease(inode, O_WRONLY);
3125 		rc = filemap_fdatawrite(inode->i_mapping);
3126 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3127 			rc = filemap_fdatawait(inode->i_mapping);
3128 			mapping_set_error(inode->i_mapping, rc);
3129 			cifs_zap_mapping(inode);
3130 		}
3131 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3132 		if (CIFS_CACHE_WRITE(cinode))
3133 			goto oplock_break_ack;
3134 	}
3135 
3136 	rc = cifs_push_locks(cfile);
3137 	if (rc)
3138 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3139 
3140 oplock_break_ack:
3141 	/*
3142 	 * When an oplock break is received and there are no active file
3143 	 * handles, only cached ones, schedule the deferred close immediately
3144 	 * so that a new open will not use the cached handle.
3145 	 */
3146 
3147 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3148 		cifs_close_deferred_file(cinode);
3149 
3150 	persistent_fid = cfile->fid.persistent_fid;
3151 	volatile_fid = cfile->fid.volatile_fid;
3152 	net_fid = cfile->fid.netfid;
3153 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3154 
3155 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
3156 	/*
3157 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3158 	 * an acknowledgment to be sent when the file has already been closed.
3159 	 */
3160 	spin_lock(&cinode->open_file_lock);
3161 	/* check list empty since can race with kill_sb calling tree disconnect */
3162 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3163 		spin_unlock(&cinode->open_file_lock);
3164 		rc = server->ops->oplock_response(tcon, persistent_fid,
3165 						  volatile_fid, net_fid, cinode);
3166 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3167 	} else
3168 		spin_unlock(&cinode->open_file_lock);
3169 
3170 	cifs_put_tlink(tlink);
3171 out:
3172 	cifs_done_oplock_break(cinode);
3173 }
3174 
3175 static int cifs_swap_activate(struct swap_info_struct *sis,
3176 			      struct file *swap_file, sector_t *span)
3177 {
3178 	struct cifsFileInfo *cfile = swap_file->private_data;
3179 	struct inode *inode = swap_file->f_mapping->host;
3180 	unsigned long blocks;
3181 	long long isize;
3182 
3183 	cifs_dbg(FYI, "swap activate\n");
3184 
3185 	if (!swap_file->f_mapping->a_ops->swap_rw)
3186 		/* Cannot support swap */
3187 		return -EINVAL;
3188 
3189 	spin_lock(&inode->i_lock);
3190 	blocks = inode->i_blocks;
3191 	isize = inode->i_size;
3192 	spin_unlock(&inode->i_lock);
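	/*
	 * i_blocks counts 512-byte sectors; if they cover less than i_size,
	 * the file has holes and cannot be used as a swapfile.
	 */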
3193 	if (blocks*512 < isize) {
3194 		pr_warn("swap activate: swapfile has holes\n");
3195 		return -EINVAL;
3196 	}
3197 	*span = sis->pages;
3198 
3199 	pr_warn_once("Swap support over SMB3 is experimental\n");
3200 
3201 	/*
3202 	 * TODO: consider adding ACL (or documenting how) to prevent other
3203 	 * users (on this or other systems) from reading it
3204 	 */
3205 
3207 	/* TODO: add sk_set_memalloc(inet) or similar */
3208 
3209 	if (cfile)
3210 		cfile->swapfile = true;
3211 	/*
3212 	 * TODO: Since file already open, we can't open with DENY_ALL here
3213 	 * but we could add call to grab a byte range lock to prevent others
3214 	 * from reading or writing the file
3215 	 */
3216 
3217 	sis->flags |= SWP_FS_OPS;
3218 	return add_swap_extent(sis, 0, sis->max, 0);
3219 }
3220 
3221 static void cifs_swap_deactivate(struct file *file)
3222 {
3223 	struct cifsFileInfo *cfile = file->private_data;
3224 
3225 	cifs_dbg(FYI, "swap deactivate\n");
3226 
3227 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3228 
3229 	if (cfile)
3230 		cfile->swapfile = false;
3231 
3232 	/* do we need to unpin (or unlock) the file */
3233 }
3234 
3235 /**
3236  * cifs_swap_rw - SMB3 address space operation for swap I/O
3237  * @iocb: target I/O control block
3238  * @iter: I/O buffer
3239  *
3240  * Perform IO to the swap-file.  This is much like direct IO.
3241  */
3242 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3243 {
3244 	ssize_t ret;
3245 
3246 	if (iov_iter_rw(iter) == READ)
3247 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3248 	else
3249 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3250 	if (ret < 0)
3251 		return ret;
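	/* The ->swap_rw() contract is 0 on success, not a byte count. */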
3252 	return 0;
3253 }
3254 
3255 const struct address_space_operations cifs_addr_ops = {
3256 	.read_folio	= netfs_read_folio,
3257 	.readahead	= netfs_readahead,
3258 	.writepages	= netfs_writepages,
3259 	.dirty_folio	= netfs_dirty_folio,
3260 	.release_folio	= netfs_release_folio,
3261 	.direct_IO	= noop_direct_IO,
3262 	.invalidate_folio = netfs_invalidate_folio,
3263 	.migrate_folio	= filemap_migrate_folio,
3264 	/*
3265 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3266 	 * helper if needed
3267 	 */
3268 	.swap_activate	= cifs_swap_activate,
3269 	.swap_deactivate = cifs_swap_deactivate,
3270 	.swap_rw = cifs_swap_rw,
3271 };
3272 
3273 /*
3274  * cifs_readahead requires the server to support a buffer large enough to
3275  * contain the header plus one complete page of data.  Otherwise, we need
3276  * to leave cifs_readahead out of the address space operations.
3277  */
3278 const struct address_space_operations cifs_addr_ops_smallbuf = {
3279 	.read_folio	= netfs_read_folio,
3280 	.writepages	= netfs_writepages,
3281 	.dirty_folio	= netfs_dirty_folio,
3282 	.release_folio	= netfs_release_folio,
3283 	.invalidate_folio = netfs_invalidate_folio,
3284 	.migrate_folio	= filemap_migrate_folio,
3285 };
3286