xref: /linux/fs/smb/client/file.c (revision e51da4a2324e595af54a0cb3b4c35eed87548de4)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/filelock.h>
13 #include <linux/backing-dev.h>
14 #include <linux/stat.h>
15 #include <linux/fcntl.h>
16 #include <linux/pagemap.h>
17 #include <linux/pagevec.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifspdu.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
53 	struct TCP_Server_Info *server;
54 	struct cifsFileInfo *open_file = req->cfile;
55 	size_t wsize = req->rreq.wsize;
56 	int rc;
57 
58 	if (!wdata->have_xid) {
59 		wdata->xid = get_xid();
60 		wdata->have_xid = true;
61 	}
62 
63 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
64 	wdata->server = server;
65 
66 retry:
67 	if (open_file->invalidHandle) {
68 		rc = cifs_reopen_file(open_file, false);
69 		if (rc < 0) {
70 			if (rc == -EAGAIN)
71 				goto retry;
72 			subreq->error = rc;
73 			return netfs_prepare_write_failed(subreq);
74 		}
75 	}
76 
77 	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
78 					   &wdata->credits);
79 	if (rc < 0) {
80 		subreq->error = rc;
81 		return netfs_prepare_write_failed(subreq);
82 	}
83 
84 	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
85 	wdata->credits.rreq_debug_index = subreq->debug_index;
86 	wdata->credits.in_flight_check = 1;
87 	trace_smb3_rw_credits(wdata->rreq->debug_id,
88 			      wdata->subreq.debug_index,
89 			      wdata->credits.value,
90 			      server->credits, server->in_flight,
91 			      wdata->credits.value,
92 			      cifs_trace_rw_credits_write_prepare);
93 
94 #ifdef CONFIG_CIFS_SMB_DIRECT
95 	if (server->smbd_conn)
96 		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
97 #endif
98 }
99 
100 /*
101  * Issue a subrequest to upload to the server.
102  */
103 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
104 {
105 	struct cifs_io_subrequest *wdata =
106 		container_of(subreq, struct cifs_io_subrequest, subreq);
107 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
108 	int rc;
109 
110 	if (cifs_forced_shutdown(sbi)) {
111 		rc = -EIO;
112 		goto fail;
113 	}
114 
115 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
116 	if (rc)
117 		goto fail;
118 
119 	rc = -EAGAIN;
120 	if (wdata->req->cfile->invalidHandle)
121 		goto fail;
122 
123 	wdata->server->ops->async_writev(wdata);
124 out:
125 	return;
126 
127 fail:
128 	if (rc == -EAGAIN)
129 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
130 	else
131 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
132 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
133 	cifs_write_subrequest_terminated(wdata, rc, false);
134 	goto out;
135 }
136 
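/*
 * netfs calls this to throw away the content cache (fscache) for the whole
 * inode when the locally cached data is no longer trustworthy.
 */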
137 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
138 {
139 	cifs_invalidate_cache(wreq->inode, 0);
140 }
141 
142 /*
143  * Negotiate the size of a read operation on behalf of the netfs library.
144  */
145 static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
146 {
147 	struct netfs_io_request *rreq = subreq->rreq;
148 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
149 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
150 	struct TCP_Server_Info *server = req->server;
151 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
152 	size_t size;
153 	int rc = 0;
154 
155 	if (!rdata->have_xid) {
156 		rdata->xid = get_xid();
157 		rdata->have_xid = true;
158 	}
159 	rdata->server = server;
160 
161 	if (cifs_sb->ctx->rsize == 0)
162 		cifs_sb->ctx->rsize =
163 			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
164 						     cifs_sb->ctx);
165 
166 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
167 					   &size, &rdata->credits);
168 	if (rc)
169 		return rc;
170 
171 	rreq->io_streams[0].sreq_max_len = size;
172 
173 	rdata->credits.in_flight_check = 1;
174 	rdata->credits.rreq_debug_id = rreq->debug_id;
175 	rdata->credits.rreq_debug_index = subreq->debug_index;
176 
177 	trace_smb3_rw_credits(rdata->rreq->debug_id,
178 			      rdata->subreq.debug_index,
179 			      rdata->credits.value,
180 			      server->credits, server->in_flight, 0,
181 			      cifs_trace_rw_credits_read_submit);
182 
183 #ifdef CONFIG_CIFS_SMB_DIRECT
184 	if (server->smbd_conn)
185 		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
186 #endif
187 	return 0;
188 }
189 
190 /*
191  * Issue a read operation on behalf of the netfs helper functions.  We're asked
192  * to make a read of a certain size at a point in the file.  We are permitted
193  * to only read a portion of that, but as long as we read something, the netfs
194  * helper will call us again so that we can issue another read.
195  */
196 static void cifs_issue_read(struct netfs_io_subrequest *subreq)
197 {
198 	struct netfs_io_request *rreq = subreq->rreq;
199 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
200 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
201 	struct TCP_Server_Info *server = req->server;
202 	int rc = 0;
203 
204 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
205 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
206 		 subreq->transferred, subreq->len);
207 
208 	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
209 	if (rc)
210 		goto failed;
211 
212 	if (req->cfile->invalidHandle) {
213 		do {
214 			rc = cifs_reopen_file(req->cfile, true);
215 		} while (rc == -EAGAIN);
216 		if (rc)
217 			goto failed;
218 	}
219 
220 	if (subreq->rreq->origin != NETFS_DIO_READ)
221 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
222 
223 	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
224 	rc = rdata->server->ops->async_readv(rdata);
225 	if (rc)
226 		goto failed;
227 	return;
228 
229 failed:
230 	netfs_read_subreq_terminated(subreq, rc, false);
231 }
232 
233 /*
234  * Writeback calls this when it finds a folio that needs uploading.  This isn't
235  * called if writeback only has copy-to-cache to deal with.
236  */
237 static void cifs_begin_writeback(struct netfs_io_request *wreq)
238 {
239 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
240 	int ret;
241 
242 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
243 	if (ret) {
244 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
245 		return;
246 	}
247 
248 	wreq->io_streams[0].avail = true;
249 }
250 
251 /*
252  * Initialise a request.
253  */
254 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
255 {
256 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
257 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
258 	struct cifsFileInfo *open_file = NULL;
259 
260 	rreq->rsize = cifs_sb->ctx->rsize;
261 	rreq->wsize = cifs_sb->ctx->wsize;
262 	req->pid = current->tgid; // Ummm...  This may be a workqueue
263 
264 	if (file) {
265 		open_file = file->private_data;
266 		rreq->netfs_priv = file->private_data;
267 		req->cfile = cifsFileInfo_get(open_file);
268 		req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
269 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
270 			req->pid = req->cfile->pid;
271 	} else if (rreq->origin != NETFS_WRITEBACK) {
272 		WARN_ON_ONCE(1);
273 		return -EIO;
274 	}
275 
276 	return 0;
277 }
278 
279 /*
280  * Completion of a request operation.
281  */
282 static void cifs_rreq_done(struct netfs_io_request *rreq)
283 {
284 	struct timespec64 atime, mtime;
285 	struct inode *inode = rreq->inode;
286 
287 	/* we do not want atime to be less than mtime, it broke some apps */
288 	atime = inode_set_atime_to_ts(inode, current_time(inode));
289 	mtime = inode_get_mtime(inode);
290 	if (timespec64_compare(&atime, &mtime) < 0)
291 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
292 }
293 
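/*
 * Tear down a request: drop the reference we took on the open file handle
 * in cifs_init_request(), if any.
 */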
294 static void cifs_free_request(struct netfs_io_request *rreq)
295 {
296 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
297 
298 	if (req->cfile)
299 		cifsFileInfo_put(req->cfile);
300 }
301 
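/*
 * Tear down a subrequest: deregister any SMB-direct memory region, hand any
 * unused credits back to the server and release the xid.
 */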
302 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
303 {
304 	struct cifs_io_subrequest *rdata =
305 		container_of(subreq, struct cifs_io_subrequest, subreq);
306 	int rc = subreq->error;
307 
308 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
309 #ifdef CONFIG_CIFS_SMB_DIRECT
310 		if (rdata->mr) {
311 			smbd_deregister_mr(rdata->mr);
312 			rdata->mr = NULL;
313 		}
314 #endif
315 	}
316 
317 	if (rdata->credits.value != 0) {
318 		trace_smb3_rw_credits(rdata->rreq->debug_id,
319 				      rdata->subreq.debug_index,
320 				      rdata->credits.value,
321 				      rdata->server ? rdata->server->credits : 0,
322 				      rdata->server ? rdata->server->in_flight : 0,
323 				      -rdata->credits.value,
324 				      cifs_trace_rw_credits_free_subreq);
325 		if (rdata->server)
326 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
327 		else
328 			rdata->credits.value = 0;
329 	}
330 
331 	if (rdata->have_xid)
332 		free_xid(rdata->xid);
333 }
334 
335 const struct netfs_request_ops cifs_req_ops = {
336 	.request_pool		= &cifs_io_request_pool,
337 	.subrequest_pool	= &cifs_io_subrequest_pool,
338 	.init_request		= cifs_init_request,
339 	.free_request		= cifs_free_request,
340 	.free_subrequest	= cifs_free_subrequest,
341 	.prepare_read		= cifs_prepare_read,
342 	.issue_read		= cifs_issue_read,
343 	.done			= cifs_rreq_done,
344 	.begin_writeback	= cifs_begin_writeback,
345 	.prepare_write		= cifs_prepare_write,
346 	.issue_write		= cifs_issue_write,
347 	.invalidate_cache	= cifs_netfs_invalidate_cache,
348 };
349 
350 /*
351  * Mark all open files on tree connections as invalid, since they were
352  * closed when the session to the server was lost.
353  */
354 void
355 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
356 {
357 	struct cifsFileInfo *open_file = NULL;
358 	struct list_head *tmp;
359 	struct list_head *tmp1;
360 
361 	/* only send once per connect */
362 	spin_lock(&tcon->tc_lock);
363 	if (tcon->need_reconnect)
364 		tcon->status = TID_NEED_RECON;
365 
366 	if (tcon->status != TID_NEED_RECON) {
367 		spin_unlock(&tcon->tc_lock);
368 		return;
369 	}
370 	tcon->status = TID_IN_FILES_INVALIDATE;
371 	spin_unlock(&tcon->tc_lock);
372 
373 	/* list all files open on tree connection and mark them invalid */
374 	spin_lock(&tcon->open_file_lock);
375 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
376 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
377 		open_file->invalidHandle = true;
378 		open_file->oplock_break_cancelled = true;
379 	}
380 	spin_unlock(&tcon->open_file_lock);
381 
382 	invalidate_all_cached_dirs(tcon);
383 	spin_lock(&tcon->tc_lock);
384 	if (tcon->status == TID_IN_FILES_INVALIDATE)
385 		tcon->status = TID_NEED_TCON;
386 	spin_unlock(&tcon->tc_lock);
387 
388 	/*
389 	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
390 	 * to this tcon.
391 	 */
392 }
393 
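/*
 * Map POSIX access-mode flags (O_RDONLY/O_WRONLY/O_RDWR) to the SMB
 * desired-access bits we request from the server.  With rdwr_for_fscache
 * set, a write-only open is widened to read/write so the local cache can
 * fill in around partial writes.
 */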
394 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
395 {
396 	if ((flags & O_ACCMODE) == O_RDONLY)
397 		return GENERIC_READ;
398 	else if ((flags & O_ACCMODE) == O_WRONLY)
399 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
400 	else if ((flags & O_ACCMODE) == O_RDWR) {
401 		/* GENERIC_ALL is too much permission to request and
402 		   can cause an unnecessary access-denied error on create */
403 		/* return GENERIC_ALL; */
404 		return (GENERIC_READ | GENERIC_WRITE);
405 	}
406 
407 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
408 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
409 		FILE_READ_DATA);
410 }
411 
412 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
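/* Map POSIX open flags to the SMB_O_* flags used by the POSIX open call. */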
413 static u32 cifs_posix_convert_flags(unsigned int flags)
414 {
415 	u32 posix_flags = 0;
416 
417 	if ((flags & O_ACCMODE) == O_RDONLY)
418 		posix_flags = SMB_O_RDONLY;
419 	else if ((flags & O_ACCMODE) == O_WRONLY)
420 		posix_flags = SMB_O_WRONLY;
421 	else if ((flags & O_ACCMODE) == O_RDWR)
422 		posix_flags = SMB_O_RDWR;
423 
424 	if (flags & O_CREAT) {
425 		posix_flags |= SMB_O_CREAT;
426 		if (flags & O_EXCL)
427 			posix_flags |= SMB_O_EXCL;
428 	} else if (flags & O_EXCL)
429 		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
430 			 current->comm, current->tgid);
431 
432 	if (flags & O_TRUNC)
433 		posix_flags |= SMB_O_TRUNC;
434 	/* be safe and imply O_SYNC for O_DSYNC */
435 	if (flags & O_DSYNC)
436 		posix_flags |= SMB_O_SYNC;
437 	if (flags & O_DIRECTORY)
438 		posix_flags |= SMB_O_DIRECTORY;
439 	if (flags & O_NOFOLLOW)
440 		posix_flags |= SMB_O_NOFOLLOW;
441 	if (flags & O_DIRECT)
442 		posix_flags |= SMB_O_DIRECT;
443 
444 	return posix_flags;
445 }
446 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
447 
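/*
 * Map the O_CREAT/O_EXCL/O_TRUNC combination to the corresponding SMB create
 * disposition (see the open flag mapping table in cifs_nt_open()).
 */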
448 static inline int cifs_get_disposition(unsigned int flags)
449 {
450 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
451 		return FILE_CREATE;
452 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
453 		return FILE_OVERWRITE_IF;
454 	else if ((flags & O_CREAT) == O_CREAT)
455 		return FILE_OPEN_IF;
456 	else if ((flags & O_TRUNC) == O_TRUNC)
457 		return FILE_OVERWRITE;
458 	else
459 		return FILE_OPEN;
460 }
461 
462 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
463 int cifs_posix_open(const char *full_path, struct inode **pinode,
464 			struct super_block *sb, int mode, unsigned int f_flags,
465 			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
466 {
467 	int rc;
468 	FILE_UNIX_BASIC_INFO *presp_data;
469 	__u32 posix_flags = 0;
470 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
471 	struct cifs_fattr fattr;
472 	struct tcon_link *tlink;
473 	struct cifs_tcon *tcon;
474 
475 	cifs_dbg(FYI, "posix open %s\n", full_path);
476 
477 	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
478 	if (presp_data == NULL)
479 		return -ENOMEM;
480 
481 	tlink = cifs_sb_tlink(cifs_sb);
482 	if (IS_ERR(tlink)) {
483 		rc = PTR_ERR(tlink);
484 		goto posix_open_ret;
485 	}
486 
487 	tcon = tlink_tcon(tlink);
488 	mode &= ~current_umask();
489 
490 	posix_flags = cifs_posix_convert_flags(f_flags);
491 	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
492 			     poplock, full_path, cifs_sb->local_nls,
493 			     cifs_remap(cifs_sb));
494 	cifs_put_tlink(tlink);
495 
496 	if (rc)
497 		goto posix_open_ret;
498 
499 	if (presp_data->Type == cpu_to_le32(-1))
500 		goto posix_open_ret; /* open ok, caller does qpathinfo */
501 
502 	if (!pinode)
503 		goto posix_open_ret; /* caller does not need info */
504 
505 	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
506 
507 	/* get new inode and set it up */
508 	if (*pinode == NULL) {
509 		cifs_fill_uniqueid(sb, &fattr);
510 		*pinode = cifs_iget(sb, &fattr);
511 		if (!*pinode) {
512 			rc = -ENOMEM;
513 			goto posix_open_ret;
514 		}
515 	} else {
516 		cifs_revalidate_mapping(*pinode);
517 		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
518 	}
519 
520 posix_open_ret:
521 	kfree(presp_data);
522 	return rc;
523 }
524 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
525 
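/*
 * Open a file over the wire using the regular NT/SMB path (rather than the
 * POSIX extensions), then refresh the inode from the server.
 */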
526 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
527 			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
528 			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
529 {
530 	int rc;
531 	int desired_access;
532 	int disposition;
533 	int create_options = CREATE_NOT_DIR;
534 	struct TCP_Server_Info *server = tcon->ses->server;
535 	struct cifs_open_parms oparms;
536 	int rdwr_for_fscache = 0;
537 
538 	if (!server->ops->open)
539 		return -ENOSYS;
540 
541 	/* If we're caching, we need to be able to fill in around partial writes. */
542 	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
543 		rdwr_for_fscache = 1;
544 
545 	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
546 
547 /*********************************************************************
548  *  open flag mapping table:
549  *
550  *	POSIX Flag            CIFS Disposition
551  *	----------            ----------------
552  *	O_CREAT               FILE_OPEN_IF
553  *	O_CREAT | O_EXCL      FILE_CREATE
554  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
555  *	O_TRUNC               FILE_OVERWRITE
556  *	none of the above     FILE_OPEN
557  *
558  *	Note that there is no direct match for the disposition
559  *	FILE_SUPERSEDE (i.e. create whether or not the file exists);
560  *	O_CREAT | O_TRUNC is similar, but it truncates the existing file
561  *	rather than creating a new one as FILE_SUPERSEDE does
562  *	(FILE_SUPERSEDE uses the attributes / metadata passed in on the
563  *	open call).
564  *
565  *	O_SYNC is a reasonable match to the CIFS writethrough flag, and the
566  *	read/write flags match reasonably.  O_LARGEFILE is irrelevant because
567  *	largefile support is always used by this client.  Flags O_APPEND,
568  *	O_DIRECT, O_DIRECTORY, O_FASYNC, O_NOFOLLOW and O_NONBLOCK need
569  *	further investigation.
 *********************************************************************/
570 
571 	disposition = cifs_get_disposition(f_flags);
572 
573 	/* BB pass O_SYNC flag through on file attributes .. BB */
574 
575 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
576 	if (f_flags & O_SYNC)
577 		create_options |= CREATE_WRITE_THROUGH;
578 
579 	if (f_flags & O_DIRECT)
580 		create_options |= CREATE_NO_BUFFER;
581 
582 retry_open:
583 	oparms = (struct cifs_open_parms) {
584 		.tcon = tcon,
585 		.cifs_sb = cifs_sb,
586 		.desired_access = desired_access,
587 		.create_options = cifs_create_options(cifs_sb, create_options),
588 		.disposition = disposition,
589 		.path = full_path,
590 		.fid = fid,
591 	};
592 
593 	rc = server->ops->open(xid, &oparms, oplock, buf);
594 	if (rc) {
595 		if (rc == -EACCES && rdwr_for_fscache == 1) {
596 			desired_access = cifs_convert_flags(f_flags, 0);
597 			rdwr_for_fscache = 2;
598 			goto retry_open;
599 		}
600 		return rc;
601 	}
602 	if (rdwr_for_fscache == 2)
603 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
604 
605 	/* TODO: Add support for calling posix query info, passing in the fid */
606 	if (tcon->unix_ext)
607 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
608 					      xid);
609 	else
610 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
611 					 xid, fid);
612 
613 	if (rc) {
614 		server->ops->close(xid, tcon, fid);
615 		if (rc == -ESTALE)
616 			rc = -EOPENSTALE;
617 	}
618 
619 	return rc;
620 }
621 
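/*
 * Return true if any handle open on this inode currently holds byte-range
 * (mandatory) locks.
 */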
622 static bool
623 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
624 {
625 	struct cifs_fid_locks *cur;
626 	bool has_locks = false;
627 
628 	down_read(&cinode->lock_sem);
629 	list_for_each_entry(cur, &cinode->llist, llist) {
630 		if (!list_empty(&cur->locks)) {
631 			has_locks = true;
632 			break;
633 		}
634 	}
635 	up_read(&cinode->lock_sem);
636 	return has_locks;
637 }
638 
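/*
 * Take lock_sem for writing by polling down_write_trylock() with a short
 * sleep between attempts instead of blocking in down_write().
 */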
639 void
640 cifs_down_write(struct rw_semaphore *sem)
641 {
642 	while (!down_write_trylock(sem))
643 		msleep(10);
644 }
645 
646 static void cifsFileInfo_put_work(struct work_struct *work);
647 void serverclose_work(struct work_struct *work);
648 
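/*
 * Build the cifsFileInfo for a freshly opened file: initialise its lock list
 * and work items, apply any oplock recorded in the pending open, and link it
 * into the tcon and inode open-file lists.
 */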
649 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
650 				       struct tcon_link *tlink, __u32 oplock,
651 				       const char *symlink_target)
652 {
653 	struct dentry *dentry = file_dentry(file);
654 	struct inode *inode = d_inode(dentry);
655 	struct cifsInodeInfo *cinode = CIFS_I(inode);
656 	struct cifsFileInfo *cfile;
657 	struct cifs_fid_locks *fdlocks;
658 	struct cifs_tcon *tcon = tlink_tcon(tlink);
659 	struct TCP_Server_Info *server = tcon->ses->server;
660 
661 	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
662 	if (cfile == NULL)
663 		return cfile;
664 
665 	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
666 	if (!fdlocks) {
667 		kfree(cfile);
668 		return NULL;
669 	}
670 
671 	if (symlink_target) {
672 		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
673 		if (!cfile->symlink_target) {
674 			kfree(fdlocks);
675 			kfree(cfile);
676 			return NULL;
677 		}
678 	}
679 
680 	INIT_LIST_HEAD(&fdlocks->locks);
681 	fdlocks->cfile = cfile;
682 	cfile->llist = fdlocks;
683 
684 	cfile->count = 1;
685 	cfile->pid = current->tgid;
686 	cfile->uid = current_fsuid();
687 	cfile->dentry = dget(dentry);
688 	cfile->f_flags = file->f_flags;
689 	cfile->invalidHandle = false;
690 	cfile->deferred_close_scheduled = false;
691 	cfile->tlink = cifs_get_tlink(tlink);
692 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
693 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
694 	INIT_WORK(&cfile->serverclose, serverclose_work);
695 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
696 	mutex_init(&cfile->fh_mutex);
697 	spin_lock_init(&cfile->file_info_lock);
698 
699 	cifs_sb_active(inode->i_sb);
700 
701 	/*
702 	 * If the server returned a read oplock and we have mandatory brlocks,
703 	 * set oplock level to None.
704 	 */
705 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
706 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
707 		oplock = 0;
708 	}
709 
710 	cifs_down_write(&cinode->lock_sem);
711 	list_add(&fdlocks->llist, &cinode->llist);
712 	up_write(&cinode->lock_sem);
713 
714 	spin_lock(&tcon->open_file_lock);
715 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
716 		oplock = fid->pending_open->oplock;
717 	list_del(&fid->pending_open->olist);
718 
719 	fid->purge_cache = false;
720 	server->ops->set_fid(cfile, fid, oplock);
721 
722 	list_add(&cfile->tlist, &tcon->openFileList);
723 	atomic_inc(&tcon->num_local_opens);
724 
725 	/* if this is a readable file instance, put it first in the list */
726 	spin_lock(&cinode->open_file_lock);
727 	if (file->f_mode & FMODE_READ)
728 		list_add(&cfile->flist, &cinode->openFileList);
729 	else
730 		list_add_tail(&cfile->flist, &cinode->openFileList);
731 	spin_unlock(&cinode->open_file_lock);
732 	spin_unlock(&tcon->open_file_lock);
733 
734 	if (fid->purge_cache)
735 		cifs_zap_mapping(inode);
736 
737 	file->private_data = cfile;
738 	return cfile;
739 }
740 
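/* Take an extra reference on an open file's private data. */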
741 struct cifsFileInfo *
742 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
743 {
744 	spin_lock(&cifs_file->file_info_lock);
745 	cifsFileInfo_get_locked(cifs_file);
746 	spin_unlock(&cifs_file->file_info_lock);
747 	return cifs_file;
748 }
749 
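/*
 * Final teardown once the last reference is gone: free any remaining lock
 * records and drop the tlink, dentry and superblock references.
 */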
750 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
751 {
752 	struct inode *inode = d_inode(cifs_file->dentry);
753 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
754 	struct cifsLockInfo *li, *tmp;
755 	struct super_block *sb = inode->i_sb;
756 
757 	/*
758 	 * Delete any outstanding lock records. We'll lose them when the file
759 	 * is closed anyway.
760 	 */
761 	cifs_down_write(&cifsi->lock_sem);
762 	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
763 		list_del(&li->llist);
764 		cifs_del_lock_waiters(li);
765 		kfree(li);
766 	}
767 	list_del(&cifs_file->llist->llist);
768 	kfree(cifs_file->llist);
769 	up_write(&cifsi->lock_sem);
770 
771 	cifs_put_tlink(cifs_file->tlink);
772 	dput(cifs_file->dentry);
773 	cifs_sb_deactive(sb);
774 	kfree(cifs_file->symlink_target);
775 	kfree(cifs_file);
776 }
777 
778 static void cifsFileInfo_put_work(struct work_struct *work)
779 {
780 	struct cifsFileInfo *cifs_file = container_of(work,
781 			struct cifsFileInfo, put);
782 
783 	cifsFileInfo_put_final(cifs_file);
784 }
785 
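/*
 * Workqueue handler for a server-side close that failed inline with
 * -EBUSY/-EAGAIN: retry a few times, then release the file info.
 */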
786 void serverclose_work(struct work_struct *work)
787 {
788 	struct cifsFileInfo *cifs_file = container_of(work,
789 			struct cifsFileInfo, serverclose);
790 
791 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
792 
793 	struct TCP_Server_Info *server = tcon->ses->server;
794 	int rc = 0;
795 	int retries = 0;
796 	int MAX_RETRIES = 4;
797 
798 	do {
799 		if (server->ops->close_getattr)
800 			rc = server->ops->close_getattr(0, tcon, cifs_file);
801 		else if (server->ops->close)
802 			rc = server->ops->close(0, tcon, &cifs_file->fid);
803 
804 		if (rc == -EBUSY || rc == -EAGAIN) {
805 			retries++;
806 			msleep(250);
807 		}
808 	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));
810 
811 	if (retries == MAX_RETRIES)
812 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
813 
814 	if (cifs_file->offload)
815 		queue_work(fileinfo_put_wq, &cifs_file->put);
816 	else
817 		cifsFileInfo_put_final(cifs_file);
818 }
819 
820 /**
821  * cifsFileInfo_put - release a reference of file priv data
822  *
823  * Always potentially wait for oplock handler. See _cifsFileInfo_put().
824  *
825  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
826  */
827 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
828 {
829 	_cifsFileInfo_put(cifs_file, true, true);
830 }
831 
832 /**
833  * _cifsFileInfo_put - release a reference of file priv data
834  *
835  * This may involve closing the filehandle @cifs_file out on the
836  * server. Must be called without holding tcon->open_file_lock,
837  * cinode->open_file_lock and cifs_file->file_info_lock.
838  *
839  * If @wait_for_oplock_handler is true and we are releasing the last
840  * reference, wait for any running oplock break handler of the file
841  * and cancel any pending one.
842  *
843  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
844  * @wait_oplock_handler: must be false if called from oplock_break_handler
845  * @offload:	if true, do the final release via a workqueue; close and oplock break paths pass false
846  *
847  */
848 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
849 		       bool wait_oplock_handler, bool offload)
850 {
851 	struct inode *inode = d_inode(cifs_file->dentry);
852 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
853 	struct TCP_Server_Info *server = tcon->ses->server;
854 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
855 	struct super_block *sb = inode->i_sb;
856 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
857 	struct cifs_fid fid = {};
858 	struct cifs_pending_open open;
859 	bool oplock_break_cancelled;
860 	bool serverclose_offloaded = false;
861 
862 	spin_lock(&tcon->open_file_lock);
863 	spin_lock(&cifsi->open_file_lock);
864 	spin_lock(&cifs_file->file_info_lock);
865 
866 	cifs_file->offload = offload;
867 	if (--cifs_file->count > 0) {
868 		spin_unlock(&cifs_file->file_info_lock);
869 		spin_unlock(&cifsi->open_file_lock);
870 		spin_unlock(&tcon->open_file_lock);
871 		return;
872 	}
873 	spin_unlock(&cifs_file->file_info_lock);
874 
875 	if (server->ops->get_lease_key)
876 		server->ops->get_lease_key(inode, &fid);
877 
878 	/* store open in pending opens to make sure we don't miss lease break */
879 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
880 
881 	/* remove it from the lists */
882 	list_del(&cifs_file->flist);
883 	list_del(&cifs_file->tlist);
884 	atomic_dec(&tcon->num_local_opens);
885 
886 	if (list_empty(&cifsi->openFileList)) {
887 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
888 			 d_inode(cifs_file->dentry));
889 		/*
890 		 * In strict cache mode we need to invalidate the mapping on the
891 		 * last close because it may cause an error when we open this
892 		 * file again and get at least a level II oplock.
893 		 */
894 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
895 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
896 		cifs_set_oplock_level(cifsi, 0);
897 	}
898 
899 	spin_unlock(&cifsi->open_file_lock);
900 	spin_unlock(&tcon->open_file_lock);
901 
902 	oplock_break_cancelled = wait_oplock_handler ?
903 		cancel_work_sync(&cifs_file->oplock_break) : false;
904 
905 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
906 		struct TCP_Server_Info *server = tcon->ses->server;
907 		unsigned int xid;
908 		int rc = 0;
909 
910 		xid = get_xid();
911 		if (server->ops->close_getattr)
912 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
913 		else if (server->ops->close)
914 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
915 		_free_xid(xid);
916 
917 		if (rc == -EBUSY || rc == -EAGAIN) {
918 			// Server close failed, hence offloading it as an async op
919 			queue_work(serverclose_wq, &cifs_file->serverclose);
920 			serverclose_offloaded = true;
921 		}
922 	}
923 
924 	if (oplock_break_cancelled)
925 		cifs_done_oplock_break(cifsi);
926 
927 	cifs_del_pending_open(&open);
928 
929 	// if serverclose has been offloaded to wq (on failure), it will
930 	// handle offloading put as well. If serverclose not offloaded,
931 	// we need to handle offloading put here.
932 	if (!serverclose_offloaded) {
933 		if (offload)
934 			queue_work(fileinfo_put_wq, &cifs_file->put);
935 		else
936 			cifsFileInfo_put_final(cifs_file);
937 	}
938 }
939 
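/*
 * VFS ->open() entry point: reuse a deferred-close cached handle when one
 * matches, try a POSIX open where the server supports it, and otherwise fall
 * back to a regular NT-style open before attaching the new cifsFileInfo to
 * the struct file.
 */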
940 int cifs_open(struct inode *inode, struct file *file)
941 
942 {
943 	int rc = -EACCES;
944 	unsigned int xid;
945 	__u32 oplock;
946 	struct cifs_sb_info *cifs_sb;
947 	struct TCP_Server_Info *server;
948 	struct cifs_tcon *tcon;
949 	struct tcon_link *tlink;
950 	struct cifsFileInfo *cfile = NULL;
951 	void *page;
952 	const char *full_path;
953 	bool posix_open_ok = false;
954 	struct cifs_fid fid = {};
955 	struct cifs_pending_open open;
956 	struct cifs_open_info_data data = {};
957 
958 	xid = get_xid();
959 
960 	cifs_sb = CIFS_SB(inode->i_sb);
961 	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
962 		free_xid(xid);
963 		return -EIO;
964 	}
965 
966 	tlink = cifs_sb_tlink(cifs_sb);
967 	if (IS_ERR(tlink)) {
968 		free_xid(xid);
969 		return PTR_ERR(tlink);
970 	}
971 	tcon = tlink_tcon(tlink);
972 	server = tcon->ses->server;
973 
974 	page = alloc_dentry_path();
975 	full_path = build_path_from_dentry(file_dentry(file), page);
976 	if (IS_ERR(full_path)) {
977 		rc = PTR_ERR(full_path);
978 		goto out;
979 	}
980 
981 	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
982 		 inode, file->f_flags, full_path);
983 
984 	if (file->f_flags & O_DIRECT &&
985 	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
986 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
987 			file->f_op = &cifs_file_direct_nobrl_ops;
988 		else
989 			file->f_op = &cifs_file_direct_ops;
990 	}
991 
992 	/* Get the cached handle as SMB2 close is deferred */
993 	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
994 		rc = cifs_get_writable_path(tcon, full_path, FIND_WR_FSUID_ONLY, &cfile);
995 	} else {
996 		rc = cifs_get_readable_path(tcon, full_path, &cfile);
997 	}
998 	if (rc == 0) {
999 		if (file->f_flags == cfile->f_flags) {
1000 			file->private_data = cfile;
1001 			spin_lock(&CIFS_I(inode)->deferred_lock);
1002 			cifs_del_deferred_close(cfile);
1003 			spin_unlock(&CIFS_I(inode)->deferred_lock);
1004 			goto use_cache;
1005 		} else {
1006 			_cifsFileInfo_put(cfile, true, false);
1007 		}
1008 	}
1009 
1010 	if (server->oplocks)
1011 		oplock = REQ_OPLOCK;
1012 	else
1013 		oplock = 0;
1014 
1015 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1016 	if (!tcon->broken_posix_open && tcon->unix_ext &&
1017 	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1018 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1019 		/* can not refresh inode info since size could be stale */
1020 		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
1021 				cifs_sb->ctx->file_mode /* ignored */,
1022 				file->f_flags, &oplock, &fid.netfid, xid);
1023 		if (rc == 0) {
1024 			cifs_dbg(FYI, "posix open succeeded\n");
1025 			posix_open_ok = true;
1026 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1027 			if (tcon->ses->serverNOS)
1028 				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
1029 					 tcon->ses->ip_addr,
1030 					 tcon->ses->serverNOS);
1031 			tcon->broken_posix_open = true;
1032 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
1033 			 (rc != -EOPNOTSUPP)) /* path not found or net err */
1034 			goto out;
1035 		/*
1036 		 * Else fallthrough to retry open the old way on network i/o
1037 		 * or DFS errors.
1038 		 */
1039 	}
1040 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1041 
1042 	if (server->ops->get_lease_key)
1043 		server->ops->get_lease_key(inode, &fid);
1044 
1045 	cifs_add_pending_open(&fid, tlink, &open);
1046 
1047 	if (!posix_open_ok) {
1048 		if (server->ops->get_lease_key)
1049 			server->ops->get_lease_key(inode, &fid);
1050 
1051 		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1052 				  xid, &data);
1053 		if (rc) {
1054 			cifs_del_pending_open(&open);
1055 			goto out;
1056 		}
1057 	}
1058 
1059 	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1060 	if (cfile == NULL) {
1061 		if (server->ops->close)
1062 			server->ops->close(xid, tcon, &fid);
1063 		cifs_del_pending_open(&open);
1064 		rc = -ENOMEM;
1065 		goto out;
1066 	}
1067 
1068 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1069 	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1070 		/*
1071 		 * Time to set mode which we can not set earlier due to
1072 		 * problems creating new read-only files.
1073 		 */
1074 		struct cifs_unix_set_info_args args = {
1075 			.mode	= inode->i_mode,
1076 			.uid	= INVALID_UID, /* no change */
1077 			.gid	= INVALID_GID, /* no change */
1078 			.ctime	= NO_CHANGE_64,
1079 			.atime	= NO_CHANGE_64,
1080 			.mtime	= NO_CHANGE_64,
1081 			.device	= 0,
1082 		};
1083 		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1084 				       cfile->pid);
1085 	}
1086 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1087 
1088 use_cache:
1089 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1090 			   file->f_mode & FMODE_WRITE);
1091 	if (!(file->f_flags & O_DIRECT))
1092 		goto out;
1093 	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1094 		goto out;
1095 	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1096 
1097 out:
1098 	free_dentry_path(page);
1099 	free_xid(xid);
1100 	cifs_put_tlink(tlink);
1101 	cifs_free_open_info(&data);
1102 	return rc;
1103 }
1104 
1105 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1106 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1107 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1108 
1109 /*
1110  * Try to reacquire byte range locks that were released when session
1111  * to server was lost.
1112  */
1113 static int
1114 cifs_relock_file(struct cifsFileInfo *cfile)
1115 {
1116 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1117 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1118 	int rc = 0;
1119 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1120 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1121 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1122 
1123 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1124 	if (cinode->can_cache_brlcks) {
1125 		/* can cache locks - no need to relock */
1126 		up_read(&cinode->lock_sem);
1127 		return rc;
1128 	}
1129 
1130 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1131 	if (cap_unix(tcon->ses) &&
1132 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1133 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1134 		rc = cifs_push_posix_locks(cfile);
1135 	else
1136 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1137 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1138 
1139 	up_read(&cinode->lock_sem);
1140 	return rc;
1141 }
1142 
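/*
 * Reopen a file whose handle has been invalidated (e.g. by reconnect).  If
 * can_flush is set, write back dirty data and refresh the inode, then
 * restore the oplock state and reacquire byte-range locks if required.
 */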
1143 static int
1144 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1145 {
1146 	int rc = -EACCES;
1147 	unsigned int xid;
1148 	__u32 oplock;
1149 	struct cifs_sb_info *cifs_sb;
1150 	struct cifs_tcon *tcon;
1151 	struct TCP_Server_Info *server;
1152 	struct cifsInodeInfo *cinode;
1153 	struct inode *inode;
1154 	void *page;
1155 	const char *full_path;
1156 	int desired_access;
1157 	int disposition = FILE_OPEN;
1158 	int create_options = CREATE_NOT_DIR;
1159 	struct cifs_open_parms oparms;
1160 	int rdwr_for_fscache = 0;
1161 
1162 	xid = get_xid();
1163 	mutex_lock(&cfile->fh_mutex);
1164 	if (!cfile->invalidHandle) {
1165 		mutex_unlock(&cfile->fh_mutex);
1166 		free_xid(xid);
1167 		return 0;
1168 	}
1169 
1170 	inode = d_inode(cfile->dentry);
1171 	cifs_sb = CIFS_SB(inode->i_sb);
1172 	tcon = tlink_tcon(cfile->tlink);
1173 	server = tcon->ses->server;
1174 
1175 	/*
1176 	 * Cannot grab the rename sem here: various ops, including those that
1177 	 * already hold the rename sem, can end up causing writepage to get
1178 	 * called, and if the server was down that means we end up here and can
1179 	 * never tell whether the caller already holds the rename_sem.
1180 	 */
1181 	page = alloc_dentry_path();
1182 	full_path = build_path_from_dentry(cfile->dentry, page);
1183 	if (IS_ERR(full_path)) {
1184 		mutex_unlock(&cfile->fh_mutex);
1185 		free_dentry_path(page);
1186 		free_xid(xid);
1187 		return PTR_ERR(full_path);
1188 	}
1189 
1190 	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1191 		 inode, cfile->f_flags, full_path);
1192 
1193 	if (tcon->ses->server->oplocks)
1194 		oplock = REQ_OPLOCK;
1195 	else
1196 		oplock = 0;
1197 
1198 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1199 	if (tcon->unix_ext && cap_unix(tcon->ses) &&
1200 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1201 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1202 		/*
1203 		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1204 		 * original open. Must mask them off for a reopen.
1205 		 */
1206 		unsigned int oflags = cfile->f_flags &
1207 						~(O_CREAT | O_EXCL | O_TRUNC);
1208 
1209 		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
1210 				     cifs_sb->ctx->file_mode /* ignored */,
1211 				     oflags, &oplock, &cfile->fid.netfid, xid);
1212 		if (rc == 0) {
1213 			cifs_dbg(FYI, "posix reopen succeeded\n");
1214 			oparms.reconnect = true;
1215 			goto reopen_success;
1216 		}
1217 		/*
1218 		 * Fall through to retry the open the old way on errors; in the
1219 		 * reconnect path especially, it is important to retry hard.
1220 		 */
1221 	}
1222 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1223 
1224 	/* If we're caching, we need to be able to fill in around partial writes. */
1225 	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1226 		rdwr_for_fscache = 1;
1227 
1228 	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
1229 
1230 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
1231 	if (cfile->f_flags & O_SYNC)
1232 		create_options |= CREATE_WRITE_THROUGH;
1233 
1234 	if (cfile->f_flags & O_DIRECT)
1235 		create_options |= CREATE_NO_BUFFER;
1236 
1237 	if (server->ops->get_lease_key)
1238 		server->ops->get_lease_key(inode, &cfile->fid);
1239 
1240 retry_open:
1241 	oparms = (struct cifs_open_parms) {
1242 		.tcon = tcon,
1243 		.cifs_sb = cifs_sb,
1244 		.desired_access = desired_access,
1245 		.create_options = cifs_create_options(cifs_sb, create_options),
1246 		.disposition = disposition,
1247 		.path = full_path,
1248 		.fid = &cfile->fid,
1249 		.reconnect = true,
1250 	};
1251 
1252 	/*
1253 	 * Can not refresh inode by passing in file_info buf to be returned by
1254 	 * ops->open and then calling get_inode_info with returned buf since
1255 	 * file might have write behind data that needs to be flushed and server
1256 	 * version of file size can be stale. If we knew for sure that inode was
1257 	 * not dirty locally we could do this.
1258 	 */
1259 	rc = server->ops->open(xid, &oparms, &oplock, NULL);
1260 	if (rc == -ENOENT && oparms.reconnect == false) {
1261 		/* durable handle timeout is expired - open the file again */
1262 		rc = server->ops->open(xid, &oparms, &oplock, NULL);
1263 		/* indicate that we need to relock the file */
1264 		oparms.reconnect = true;
1265 	}
1266 	if (rc == -EACCES && rdwr_for_fscache == 1) {
1267 		desired_access = cifs_convert_flags(cfile->f_flags, 0);
1268 		rdwr_for_fscache = 2;
1269 		goto retry_open;
1270 	}
1271 
1272 	if (rc) {
1273 		mutex_unlock(&cfile->fh_mutex);
1274 		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1275 		cifs_dbg(FYI, "oplock: %d\n", oplock);
1276 		goto reopen_error_exit;
1277 	}
1278 
1279 	if (rdwr_for_fscache == 2)
1280 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1281 
1282 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1283 reopen_success:
1284 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1285 	cfile->invalidHandle = false;
1286 	mutex_unlock(&cfile->fh_mutex);
1287 	cinode = CIFS_I(inode);
1288 
1289 	if (can_flush) {
1290 		rc = filemap_write_and_wait(inode->i_mapping);
1291 		if (!is_interrupt_error(rc))
1292 			mapping_set_error(inode->i_mapping, rc);
1293 
1294 		if (tcon->posix_extensions) {
1295 			rc = smb311_posix_get_inode_info(&inode, full_path,
1296 							 NULL, inode->i_sb, xid);
1297 		} else if (tcon->unix_ext) {
1298 			rc = cifs_get_inode_info_unix(&inode, full_path,
1299 						      inode->i_sb, xid);
1300 		} else {
1301 			rc = cifs_get_inode_info(&inode, full_path, NULL,
1302 						 inode->i_sb, xid, NULL);
1303 		}
1304 	}
1305 	/*
1306 	 * Otherwise we are already writing out data to the server and could
1307 	 * deadlock if we tried to flush it; and since we do not know whether
1308 	 * we have data that would invalidate the current end of file on the
1309 	 * server, we cannot go to the server for the new inode info.
1310 	 */
1311 
1312 	/*
1313 	 * If the server returned a read oplock and we have mandatory brlocks,
1314 	 * set oplock level to None.
1315 	 */
1316 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1317 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1318 		oplock = 0;
1319 	}
1320 
1321 	server->ops->set_fid(cfile, &cfile->fid, oplock);
1322 	if (oparms.reconnect)
1323 		cifs_relock_file(cfile);
1324 
1325 reopen_error_exit:
1326 	free_dentry_path(page);
1327 	free_xid(xid);
1328 	return rc;
1329 }
1330 
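/*
 * Workqueue handler for a deferred close: take the handle off the
 * deferred-close list and drop the reference that kept it open.
 */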
1331 void smb2_deferred_work_close(struct work_struct *work)
1332 {
1333 	struct cifsFileInfo *cfile = container_of(work,
1334 			struct cifsFileInfo, deferred.work);
1335 
1336 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1337 	cifs_del_deferred_close(cfile);
1338 	cfile->deferred_close_scheduled = false;
1339 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1340 	_cifsFileInfo_put(cfile, true, false);
1341 }
1342 
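/*
 * A close may be deferred only if closetimeo is set, we hold a lease with
 * at least read+handle caching and no lock has flagged the inode for
 * close-on-unlock.
 */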
1343 static bool
1344 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1345 {
1346 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1347 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1348 
1349 	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1350 			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
1351 			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
1352 			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
1353 
1354 }
1355 
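/*
 * VFS ->release() entry point: either defer the server-side close, keeping
 * the handle cached for up to closetimeo, or drop the reference now.
 */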
1356 int cifs_close(struct inode *inode, struct file *file)
1357 {
1358 	struct cifsFileInfo *cfile;
1359 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1360 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1361 	struct cifs_deferred_close *dclose;
1362 
1363 	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1364 
1365 	if (file->private_data != NULL) {
1366 		cfile = file->private_data;
1367 		file->private_data = NULL;
1368 		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
1369 		if ((cfile->status_file_deleted == false) &&
1370 		    (smb2_can_defer_close(inode, dclose))) {
1371 			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
1372 				inode_set_mtime_to_ts(inode,
1373 						      inode_set_ctime_current(inode));
1374 			}
1375 			spin_lock(&cinode->deferred_lock);
1376 			cifs_add_deferred_close(cfile, dclose);
1377 			if (cfile->deferred_close_scheduled &&
1378 			    delayed_work_pending(&cfile->deferred)) {
1379 				/*
1380 				 * If there is no pending work, mod_delayed_work queues new work.
1381 				 * So, increase the ref count to avoid use-after-free.
1382 				 */
1383 				if (!mod_delayed_work(deferredclose_wq,
1384 						&cfile->deferred, cifs_sb->ctx->closetimeo))
1385 					cifsFileInfo_get(cfile);
1386 			} else {
1387 				/* Deferred close for files */
1388 				queue_delayed_work(deferredclose_wq,
1389 						&cfile->deferred, cifs_sb->ctx->closetimeo);
1390 				cfile->deferred_close_scheduled = true;
1391 				spin_unlock(&cinode->deferred_lock);
1392 				return 0;
1393 			}
1394 			spin_unlock(&cinode->deferred_lock);
1395 			_cifsFileInfo_put(cfile, true, false);
1396 		} else {
1397 			_cifsFileInfo_put(cfile, true, false);
1398 			kfree(dclose);
1399 		}
1400 	}
1401 
1402 	/* return code from the ->release op is always ignored */
1403 	return 0;
1404 }
1405 
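/*
 * After reconnecting a persistent-handle share, reopen every invalidated
 * handle on the tcon; flag the tcon for another pass if any reopen fails.
 */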
1406 void
1407 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1408 {
1409 	struct cifsFileInfo *open_file, *tmp;
1410 	LIST_HEAD(tmp_list);
1411 
1412 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1413 		return;
1414 
1415 	tcon->need_reopen_files = false;
1416 
1417 	cifs_dbg(FYI, "Reopen persistent handles\n");
1418 
1419 	/* list all files open on tree connection, reopen resilient handles  */
1420 	spin_lock(&tcon->open_file_lock);
1421 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1422 		if (!open_file->invalidHandle)
1423 			continue;
1424 		cifsFileInfo_get(open_file);
1425 		list_add_tail(&open_file->rlist, &tmp_list);
1426 	}
1427 	spin_unlock(&tcon->open_file_lock);
1428 
1429 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1430 		if (cifs_reopen_file(open_file, false /* do not flush */))
1431 			tcon->need_reopen_files = true;
1432 		list_del_init(&open_file->rlist);
1433 		cifsFileInfo_put(open_file);
1434 	}
1435 }
1436 
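/*
 * VFS ->release() for directories: close any in-progress readdir handle on
 * the server and free the search buffers.
 */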
1437 int cifs_closedir(struct inode *inode, struct file *file)
1438 {
1439 	int rc = 0;
1440 	unsigned int xid;
1441 	struct cifsFileInfo *cfile = file->private_data;
1442 	struct cifs_tcon *tcon;
1443 	struct TCP_Server_Info *server;
1444 	char *buf;
1445 
1446 	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1447 
1448 	if (cfile == NULL)
1449 		return rc;
1450 
1451 	xid = get_xid();
1452 	tcon = tlink_tcon(cfile->tlink);
1453 	server = tcon->ses->server;
1454 
1455 	cifs_dbg(FYI, "Freeing private data in close dir\n");
1456 	spin_lock(&cfile->file_info_lock);
1457 	if (server->ops->dir_needs_close(cfile)) {
1458 		cfile->invalidHandle = true;
1459 		spin_unlock(&cfile->file_info_lock);
1460 		if (server->ops->close_dir)
1461 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1462 		else
1463 			rc = -ENOSYS;
1464 		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
1465 		/* not much we can do if it fails anyway, ignore rc */
1466 		rc = 0;
1467 	} else
1468 		spin_unlock(&cfile->file_info_lock);
1469 
1470 	buf = cfile->srch_inf.ntwrk_buf_start;
1471 	if (buf) {
1472 		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
1473 		cfile->srch_inf.ntwrk_buf_start = NULL;
1474 		if (cfile->srch_inf.smallBuf)
1475 			cifs_small_buf_release(buf);
1476 		else
1477 			cifs_buf_release(buf);
1478 	}
1479 
1480 	cifs_put_tlink(cfile->tlink);
1481 	kfree(file->private_data);
1482 	file->private_data = NULL;
1483 	/* BB can we lock the filestruct while this is going on? */
1484 	free_xid(xid);
1485 	return rc;
1486 }
1487 
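/* Allocate and initialise a byte-range lock record owned by this process. */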
1488 static struct cifsLockInfo *
1489 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1490 {
1491 	struct cifsLockInfo *lock =
1492 		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
1493 	if (!lock)
1494 		return lock;
1495 	lock->offset = offset;
1496 	lock->length = length;
1497 	lock->type = type;
1498 	lock->pid = current->tgid;
1499 	lock->flags = flags;
1500 	INIT_LIST_HEAD(&lock->blist);
1501 	init_waitqueue_head(&lock->block_q);
1502 	return lock;
1503 }
1504 
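/* Wake up all lock requests blocked behind this lock record. */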
1505 void
1506 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1507 {
1508 	struct cifsLockInfo *li, *tmp;
1509 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1510 		list_del_init(&li->blist);
1511 		wake_up(&li->block_q);
1512 	}
1513 }
1514 
1515 #define CIFS_LOCK_OP	0
1516 #define CIFS_READ_OP	1
1517 #define CIFS_WRITE_OP	2
1518 
1519 /* @rw_check: CIFS_LOCK_OP - lock op, CIFS_READ_OP - read, CIFS_WRITE_OP - write */
1520 static bool
1521 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
1522 			    __u64 length, __u8 type, __u16 flags,
1523 			    struct cifsFileInfo *cfile,
1524 			    struct cifsLockInfo **conf_lock, int rw_check)
1525 {
1526 	struct cifsLockInfo *li;
1527 	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
1528 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1529 
1530 	list_for_each_entry(li, &fdlocks->locks, llist) {
1531 		if (offset + length <= li->offset ||
1532 		    offset >= li->offset + li->length)
1533 			continue;
1534 		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1535 		    server->ops->compare_fids(cfile, cur_cfile)) {
1536 			/* shared lock prevents write op through the same fid */
1537 			if (!(li->type & server->vals->shared_lock_type) ||
1538 			    rw_check != CIFS_WRITE_OP)
1539 				continue;
1540 		}
1541 		if ((type & server->vals->shared_lock_type) &&
1542 		    ((server->ops->compare_fids(cfile, cur_cfile) &&
1543 		     current->tgid == li->pid) || type == li->type))
1544 			continue;
1545 		if (rw_check == CIFS_LOCK_OP &&
1546 		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1547 		    server->ops->compare_fids(cfile, cur_cfile))
1548 			continue;
1549 		if (conf_lock)
1550 			*conf_lock = li;
1551 		return true;
1552 	}
1553 	return false;
1554 }
1555 
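/*
 * Check every handle open on the inode for a conflicting byte-range lock;
 * the per-handle rules live in cifs_find_fid_lock_conflict().
 */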
1556 bool
1557 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1558 			__u8 type, __u16 flags,
1559 			struct cifsLockInfo **conf_lock, int rw_check)
1560 {
1561 	bool rc = false;
1562 	struct cifs_fid_locks *cur;
1563 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1564 
1565 	list_for_each_entry(cur, &cinode->llist, llist) {
1566 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1567 						 flags, cfile, conf_lock,
1568 						 rw_check);
1569 		if (rc)
1570 			break;
1571 	}
1572 
1573 	return rc;
1574 }
1575 
1576 /*
1577  * Check if there is another lock that prevents us from setting the lock
1578  * (mandatory style). If such a lock exists, update the flock structure with
1579  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1580  * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
1581  * send a request to the server, or 1 otherwise.
1582  */
1583 static int
1584 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1585 	       __u8 type, struct file_lock *flock)
1586 {
1587 	int rc = 0;
1588 	struct cifsLockInfo *conf_lock;
1589 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1590 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1591 	bool exist;
1592 
1593 	down_read(&cinode->lock_sem);
1594 
1595 	exist = cifs_find_lock_conflict(cfile, offset, length, type,
1596 					flock->c.flc_flags, &conf_lock,
1597 					CIFS_LOCK_OP);
1598 	if (exist) {
1599 		flock->fl_start = conf_lock->offset;
1600 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1601 		flock->c.flc_pid = conf_lock->pid;
1602 		if (conf_lock->type & server->vals->shared_lock_type)
1603 			flock->c.flc_type = F_RDLCK;
1604 		else
1605 			flock->c.flc_type = F_WRLCK;
1606 	} else if (!cinode->can_cache_brlcks)
1607 		rc = 1;
1608 	else
1609 		flock->c.flc_type = F_UNLCK;
1610 
1611 	up_read(&cinode->lock_sem);
1612 	return rc;
1613 }
1614 
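/* Append a byte-range lock record to this file handle's lock list. */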
1615 static void
1616 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1617 {
1618 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1619 	cifs_down_write(&cinode->lock_sem);
1620 	list_add_tail(&lock->llist, &cfile->llist->locks);
1621 	up_write(&cinode->lock_sem);
1622 }
1623 
1624 /*
1625  * Set the byte-range lock (mandatory style). Returns:
1626  * 1) 0, if we set the lock and don't need to send a request to the server;
1627  * 2) 1, if no locks prevent us but we need to send a request to the server;
1628  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1629  */
1630 static int
1631 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1632 		 bool wait)
1633 {
1634 	struct cifsLockInfo *conf_lock;
1635 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1636 	bool exist;
1637 	int rc = 0;
1638 
1639 try_again:
1640 	exist = false;
1641 	cifs_down_write(&cinode->lock_sem);
1642 
1643 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1644 					lock->type, lock->flags, &conf_lock,
1645 					CIFS_LOCK_OP);
1646 	if (!exist && cinode->can_cache_brlcks) {
1647 		list_add_tail(&lock->llist, &cfile->llist->locks);
1648 		up_write(&cinode->lock_sem);
1649 		return rc;
1650 	}
1651 
1652 	if (!exist)
1653 		rc = 1;
1654 	else if (!wait)
1655 		rc = -EACCES;
1656 	else {
1657 		list_add_tail(&lock->blist, &conf_lock->blist);
1658 		up_write(&cinode->lock_sem);
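		/*
		 * The wait condition below is an open-coded
		 * list_empty(&lock->blist): sleep until
		 * cifs_del_lock_waiters() unlinks us from the conflicting
		 * lock's blocked list, then retry the conflict check.
		 */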
1659 		rc = wait_event_interruptible(lock->block_q,
1660 					(lock->blist.prev == &lock->blist) &&
1661 					(lock->blist.next == &lock->blist));
1662 		if (!rc)
1663 			goto try_again;
1664 		cifs_down_write(&cinode->lock_sem);
1665 		list_del_init(&lock->blist);
1666 	}
1667 
1668 	up_write(&cinode->lock_sem);
1669 	return rc;
1670 }
1671 
1672 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1673 /*
1674  * Check if there is another lock that prevents us from setting the lock
1675  * (posix style). If such a lock exists, update the flock structure with
1676  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1677  * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
1678  * send a request to the server, or 1 otherwise.
1679  */
1680 static int
1681 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1682 {
1683 	int rc = 0;
1684 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1685 	unsigned char saved_type = flock->c.flc_type;
1686 
1687 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1688 		return 1;
1689 
1690 	down_read(&cinode->lock_sem);
1691 	posix_test_lock(file, flock);
1692 
1693 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1694 		flock->c.flc_type = saved_type;
1695 		rc = 1;
1696 	}
1697 
1698 	up_read(&cinode->lock_sem);
1699 	return rc;
1700 }
1701 
1702 /*
1703  * Set the byte-range lock (posix style). Returns:
1704  * 1) <0, if the error occurs while setting the lock;
1705  * 2) 0, if we set the lock and don't need to request to the server;
1706  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1707  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1708  */
1709 static int
1710 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1711 {
1712 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1713 	int rc = FILE_LOCK_DEFERRED + 1;
1714 
1715 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1716 		return rc;
1717 
1718 	cifs_down_write(&cinode->lock_sem);
1719 	if (!cinode->can_cache_brlcks) {
1720 		up_write(&cinode->lock_sem);
1721 		return rc;
1722 	}
1723 
1724 	rc = posix_lock_file(file, flock, NULL);
1725 	up_write(&cinode->lock_sem);
1726 	return rc;
1727 }
1728 
1729 int
1730 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1731 {
1732 	unsigned int xid;
1733 	int rc = 0, stored_rc;
1734 	struct cifsLockInfo *li, *tmp;
1735 	struct cifs_tcon *tcon;
1736 	unsigned int num, max_num, max_buf;
1737 	LOCKING_ANDX_RANGE *buf, *cur;
1738 	static const int types[] = {
1739 		LOCKING_ANDX_LARGE_FILES,
1740 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1741 	};
1742 	int i;
1743 
1744 	xid = get_xid();
1745 	tcon = tlink_tcon(cfile->tlink);
1746 
1747 	/*
1748 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1749 	 * and check it before using.
1750 	 */
1751 	max_buf = tcon->ses->server->maxBuf;
1752 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1753 		free_xid(xid);
1754 		return -EINVAL;
1755 	}
1756 
1757 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1758 		     PAGE_SIZE);
1759 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1760 			PAGE_SIZE);
1761 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1762 						sizeof(LOCKING_ANDX_RANGE);
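	/*
	 * Editor's note (illustrative arithmetic, struct sizes are
	 * approximate): with a 4 KiB PAGE_SIZE, a ~32-byte SMB header and a
	 * 20-byte 64-bit LOCKING_ANDX_RANGE, on the order of two hundred
	 * ranges fit in one request, so most lock lists go out in one batch.
	 */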
1763 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1764 	if (!buf) {
1765 		free_xid(xid);
1766 		return -ENOMEM;
1767 	}
1768 
1769 	for (i = 0; i < 2; i++) {
1770 		cur = buf;
1771 		num = 0;
1772 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1773 			if (li->type != types[i])
1774 				continue;
1775 			cur->Pid = cpu_to_le16(li->pid);
1776 			cur->LengthLow = cpu_to_le32((u32)li->length);
1777 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1778 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1779 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1780 			if (++num == max_num) {
1781 				stored_rc = cifs_lockv(xid, tcon,
1782 						       cfile->fid.netfid,
1783 						       (__u8)li->type, 0, num,
1784 						       buf);
1785 				if (stored_rc)
1786 					rc = stored_rc;
1787 				cur = buf;
1788 				num = 0;
1789 			} else
1790 				cur++;
1791 		}
1792 
1793 		if (num) {
1794 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1795 					       (__u8)types[i], 0, num, buf);
1796 			if (stored_rc)
1797 				rc = stored_rc;
1798 		}
1799 	}
1800 
1801 	kfree(buf);
1802 	free_xid(xid);
1803 	return rc;
1804 }
1805 
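/*
 * Editor's note: hashing the owner pointer together with the boot-time
 * random cifs_lock_secret yields a stable per-owner identifier that can be
 * sent as the POSIX lock "pid" without leaking a raw kernel pointer.
 */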
1806 static __u32
1807 hash_lockowner(fl_owner_t owner)
1808 {
1809 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1810 }
1811 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1812 
1813 struct lock_to_push {
1814 	struct list_head llist;
1815 	__u64 offset;
1816 	__u64 length;
1817 	__u32 pid;
1818 	__u16 netfid;
1819 	__u8 type;
1820 };
1821 
1822 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1823 static int
1824 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1825 {
1826 	struct inode *inode = d_inode(cfile->dentry);
1827 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1828 	struct file_lock *flock;
1829 	struct file_lock_context *flctx = locks_inode_context(inode);
1830 	unsigned int count = 0, i;
1831 	int rc = 0, xid, type;
1832 	struct list_head locks_to_send, *el;
1833 	struct lock_to_push *lck, *tmp;
1834 	__u64 length;
1835 
1836 	xid = get_xid();
1837 
1838 	if (!flctx)
1839 		goto out;
1840 
1841 	spin_lock(&flctx->flc_lock);
1842 	list_for_each(el, &flctx->flc_posix) {
1843 		count++;
1844 	}
1845 	spin_unlock(&flctx->flc_lock);
1846 
1847 	INIT_LIST_HEAD(&locks_to_send);
1848 
1849 	/*
1850 	 * Allocating count locks is enough because no FL_POSIX locks can be
1851 	 * added to the list while we are holding cinode->lock_sem, which
1852 	 * protects locking operations on this inode.
1853 	 */
1854 	for (i = 0; i < count; i++) {
1855 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1856 		if (!lck) {
1857 			rc = -ENOMEM;
1858 			goto err_out;
1859 		}
1860 		list_add_tail(&lck->llist, &locks_to_send);
1861 	}
1862 
1863 	el = locks_to_send.next;
1864 	spin_lock(&flctx->flc_lock);
1865 	for_each_file_lock(flock, &flctx->flc_posix) {
1866 		unsigned char ftype = flock->c.flc_type;
1867 
1868 		if (el == &locks_to_send) {
1869 			/*
1870 			 * The list ended. We don't have enough allocated
1871 			 * structures - something is really wrong.
1872 			 */
1873 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1874 			break;
1875 		}
1876 		length = cifs_flock_len(flock);
1877 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1878 			type = CIFS_RDLCK;
1879 		else
1880 			type = CIFS_WRLCK;
1881 		lck = list_entry(el, struct lock_to_push, llist);
1882 		lck->pid = hash_lockowner(flock->c.flc_owner);
1883 		lck->netfid = cfile->fid.netfid;
1884 		lck->length = length;
1885 		lck->type = type;
1886 		lck->offset = flock->fl_start;
1887 	}
1888 	spin_unlock(&flctx->flc_lock);
1889 
1890 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1891 		int stored_rc;
1892 
1893 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1894 					     lck->offset, lck->length, NULL,
1895 					     lck->type, 0);
1896 		if (stored_rc)
1897 			rc = stored_rc;
1898 		list_del(&lck->llist);
1899 		kfree(lck);
1900 	}
1901 
1902 out:
1903 	free_xid(xid);
1904 	return rc;
1905 err_out:
1906 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1907 		list_del(&lck->llist);
1908 		kfree(lck);
1909 	}
1910 	goto out;
1911 }
1912 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1913 
1914 static int
1915 cifs_push_locks(struct cifsFileInfo *cfile)
1916 {
1917 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1918 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1919 	int rc = 0;
1920 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1921 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1922 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1923 
1924 	/* we are going to update can_cache_brlcks here - need a write access */
1925 	cifs_down_write(&cinode->lock_sem);
1926 	if (!cinode->can_cache_brlcks) {
1927 		up_write(&cinode->lock_sem);
1928 		return rc;
1929 	}
1930 
1931 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1932 	if (cap_unix(tcon->ses) &&
1933 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1934 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1935 		rc = cifs_push_posix_locks(cfile);
1936 	else
1937 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1938 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1939 
1940 	cinode->can_cache_brlcks = false;
1941 	up_write(&cinode->lock_sem);
1942 	return rc;
1943 }
1944 
1945 static void
1946 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1947 		bool *wait_flag, struct TCP_Server_Info *server)
1948 {
1949 	if (flock->c.flc_flags & FL_POSIX)
1950 		cifs_dbg(FYI, "Posix\n");
1951 	if (flock->c.flc_flags & FL_FLOCK)
1952 		cifs_dbg(FYI, "Flock\n");
1953 	if (flock->c.flc_flags & FL_SLEEP) {
1954 		cifs_dbg(FYI, "Blocking lock\n");
1955 		*wait_flag = true;
1956 	}
1957 	if (flock->c.flc_flags & FL_ACCESS)
1958 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1959 	if (flock->c.flc_flags & FL_LEASE)
1960 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1961 	if (flock->c.flc_flags &
1962 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1963 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1964 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1965 		         flock->c.flc_flags);
1966 
1967 	*type = server->vals->large_lock_type;
1968 	if (lock_is_write(flock)) {
1969 		cifs_dbg(FYI, "F_WRLCK\n");
1970 		*type |= server->vals->exclusive_lock_type;
1971 		*lock = 1;
1972 	} else if (lock_is_unlock(flock)) {
1973 		cifs_dbg(FYI, "F_UNLCK\n");
1974 		*type |= server->vals->unlock_lock_type;
1975 		*unlock = 1;
1976 		/* Check if unlock includes more than one lock range */
1977 	} else if (lock_is_read(flock)) {
1978 		cifs_dbg(FYI, "F_RDLCK\n");
1979 		*type |= server->vals->shared_lock_type;
1980 		*lock = 1;
1981 	} else if (flock->c.flc_type == F_EXLCK) {
1982 		cifs_dbg(FYI, "F_EXLCK\n");
1983 		*type |= server->vals->exclusive_lock_type;
1984 		*lock = 1;
1985 	} else if (flock->c.flc_type == F_SHLCK) {
1986 		cifs_dbg(FYI, "F_SHLCK\n");
1987 		*type |= server->vals->shared_lock_type;
1988 		*lock = 1;
1989 	} else
1990 		cifs_dbg(FYI, "Unknown type of lock\n");
1991 }
1992 
1993 static int
1994 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1995 	   bool wait_flag, bool posix_lck, unsigned int xid)
1996 {
1997 	int rc = 0;
1998 	__u64 length = cifs_flock_len(flock);
1999 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2000 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2001 	struct TCP_Server_Info *server = tcon->ses->server;
2002 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2003 	__u16 netfid = cfile->fid.netfid;
2004 
2005 	if (posix_lck) {
2006 		int posix_lock_type;
2007 
2008 		rc = cifs_posix_lock_test(file, flock);
2009 		if (!rc)
2010 			return rc;
2011 
2012 		if (type & server->vals->shared_lock_type)
2013 			posix_lock_type = CIFS_RDLCK;
2014 		else
2015 			posix_lock_type = CIFS_WRLCK;
2016 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2017 				      hash_lockowner(flock->c.flc_owner),
2018 				      flock->fl_start, length, flock,
2019 				      posix_lock_type, wait_flag);
2020 		return rc;
2021 	}
2022 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2023 
2024 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2025 	if (!rc)
2026 		return rc;
2027 
2028 	/* BB we could chain these into one lock request BB */
2029 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2030 				    1, 0, false);
2031 	if (rc == 0) {
2032 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2033 					    type, 0, 1, false);
2034 		flock->c.flc_type = F_UNLCK;
2035 		if (rc != 0)
2036 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2037 				 rc);
2038 		return 0;
2039 	}
2040 
2041 	if (type & server->vals->shared_lock_type) {
2042 		flock->c.flc_type = F_WRLCK;
2043 		return 0;
2044 	}
2045 
2046 	type &= ~server->vals->exclusive_lock_type;
2047 
2048 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2049 				    type | server->vals->shared_lock_type,
2050 				    1, 0, false);
2051 	if (rc == 0) {
2052 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2053 			type | server->vals->shared_lock_type, 0, 1, false);
2054 		flock->c.flc_type = F_RDLCK;
2055 		if (rc != 0)
2056 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2057 				 rc);
2058 	} else
2059 		flock->c.flc_type = F_WRLCK;
2060 
2061 	return 0;
2062 }
2063 
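/* Splice every lock from @source onto the tail of @dest. */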
2064 void
2065 cifs_move_llist(struct list_head *source, struct list_head *dest)
2066 {
2067 	struct list_head *li, *tmp;
2068 	list_for_each_safe(li, tmp, source)
2069 		list_move(li, dest);
2070 }
2071 
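/* Free every lock on @llist, first waking any waiters blocked on it. */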
2072 void
2073 cifs_free_llist(struct list_head *llist)
2074 {
2075 	struct cifsLockInfo *li, *tmp;
2076 	list_for_each_entry_safe(li, tmp, llist, llist) {
2077 		cifs_del_lock_waiters(li);
2078 		list_del(&li->llist);
2079 		kfree(li);
2080 	}
2081 }
2082 
2083 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2084 int
2085 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2086 		  unsigned int xid)
2087 {
2088 	int rc = 0, stored_rc;
2089 	static const int types[] = {
2090 		LOCKING_ANDX_LARGE_FILES,
2091 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2092 	};
2093 	unsigned int i;
2094 	unsigned int max_num, num, max_buf;
2095 	LOCKING_ANDX_RANGE *buf, *cur;
2096 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2097 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2098 	struct cifsLockInfo *li, *tmp;
2099 	__u64 length = cifs_flock_len(flock);
2100 	LIST_HEAD(tmp_llist);
2101 
2102 	/*
2103 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2104 	 * and check it before using.
2105 	 */
2106 	max_buf = tcon->ses->server->maxBuf;
2107 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2108 		return -EINVAL;
2109 
2110 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2111 		     PAGE_SIZE);
2112 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2113 			PAGE_SIZE);
2114 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2115 						sizeof(LOCKING_ANDX_RANGE);
2116 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2117 	if (!buf)
2118 		return -ENOMEM;
2119 
2120 	cifs_down_write(&cinode->lock_sem);
2121 	for (i = 0; i < 2; i++) {
2122 		cur = buf;
2123 		num = 0;
2124 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2125 			if (flock->fl_start > li->offset ||
2126 			    (flock->fl_start + length) <
2127 			    (li->offset + li->length))
2128 				continue;
2129 			if (current->tgid != li->pid)
2130 				continue;
2131 			if (types[i] != li->type)
2132 				continue;
2133 			if (cinode->can_cache_brlcks) {
2134 				/*
2135 				 * We can cache brlock requests - simply remove
2136 				 * a lock from the file's list.
2137 				 */
2138 				list_del(&li->llist);
2139 				cifs_del_lock_waiters(li);
2140 				kfree(li);
2141 				continue;
2142 			}
2143 			cur->Pid = cpu_to_le16(li->pid);
2144 			cur->LengthLow = cpu_to_le32((u32)li->length);
2145 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2146 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2147 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2148 			/*
2149 			 * We need to save a lock here so we can add it back to
2150 			 * the file's list if the unlock range request fails on
2151 			 * the server.
2152 			 */
2153 			list_move(&li->llist, &tmp_llist);
2154 			if (++num == max_num) {
2155 				stored_rc = cifs_lockv(xid, tcon,
2156 						       cfile->fid.netfid,
2157 						       li->type, num, 0, buf);
2158 				if (stored_rc) {
2159 					/*
2160 					 * We failed on the unlock range
2161 					 * request - add all locks from the tmp
2162 					 * list to the head of the file's list.
2163 					 */
2164 					cifs_move_llist(&tmp_llist,
2165 							&cfile->llist->locks);
2166 					rc = stored_rc;
2167 				} else
2168 					/*
2169 				 * The unlock range request succeeded -
2170 					 * free the tmp list.
2171 					 */
2172 					cifs_free_llist(&tmp_llist);
2173 				cur = buf;
2174 				num = 0;
2175 			} else
2176 				cur++;
2177 		}
2178 		if (num) {
2179 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2180 					       types[i], num, 0, buf);
2181 			if (stored_rc) {
2182 				cifs_move_llist(&tmp_llist,
2183 						&cfile->llist->locks);
2184 				rc = stored_rc;
2185 			} else
2186 				cifs_free_llist(&tmp_llist);
2187 		}
2188 	}
2189 
2190 	up_write(&cinode->lock_sem);
2191 	kfree(buf);
2192 	return rc;
2193 }
2194 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2195 
2196 static int
2197 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2198 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2199 	   unsigned int xid)
2200 {
2201 	int rc = 0;
2202 	__u64 length = cifs_flock_len(flock);
2203 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2204 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2205 	struct TCP_Server_Info *server = tcon->ses->server;
2206 	struct inode *inode = d_inode(cfile->dentry);
2207 
2208 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2209 	if (posix_lck) {
2210 		int posix_lock_type;
2211 
2212 		rc = cifs_posix_lock_set(file, flock);
2213 		if (rc <= FILE_LOCK_DEFERRED)
2214 			return rc;
2215 
2216 		if (type & server->vals->shared_lock_type)
2217 			posix_lock_type = CIFS_RDLCK;
2218 		else
2219 			posix_lock_type = CIFS_WRLCK;
2220 
2221 		if (unlock == 1)
2222 			posix_lock_type = CIFS_UNLCK;
2223 
2224 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2225 				      hash_lockowner(flock->c.flc_owner),
2226 				      flock->fl_start, length,
2227 				      NULL, posix_lock_type, wait_flag);
2228 		goto out;
2229 	}
2230 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2231 	if (lock) {
2232 		struct cifsLockInfo *lock;
2233 
2234 		lock = cifs_lock_init(flock->fl_start, length, type,
2235 				      flock->c.flc_flags);
2236 		if (!lock)
2237 			return -ENOMEM;
2238 
2239 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2240 		if (rc < 0) {
2241 			kfree(lock);
2242 			return rc;
2243 		}
2244 		if (!rc)
2245 			goto out;
2246 
2247 		/*
2248 		 * A Windows 7 server can delay breaking a lease from read to
2249 		 * None if we set a byte-range lock on a file - break it
2250 		 * explicitly before sending the lock to the server to be sure
2251 		 * the next read won't conflict with non-overlapping locks due
2252 		 * to page reading.
2253 		 */
2254 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2255 					CIFS_CACHE_READ(CIFS_I(inode))) {
2256 			cifs_zap_mapping(inode);
2257 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2258 				 inode);
2259 			CIFS_I(inode)->oplock = 0;
2260 		}
2261 
2262 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2263 					    type, 1, 0, wait_flag);
2264 		if (rc) {
2265 			kfree(lock);
2266 			return rc;
2267 		}
2268 
2269 		cifs_lock_add(cfile, lock);
2270 	} else if (unlock)
2271 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2272 
2273 out:
2274 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2275 		/*
2276 		 * If this is a request to remove all locks because we
2277 		 * are closing the file, it doesn't matter if the
2278 		 * unlocking failed as both cifs.ko and the SMB server
2279 		 * remove the lock on file close
2280 		 */
2281 		if (rc) {
2282 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2283 			if (!(flock->c.flc_flags & FL_CLOSE))
2284 				return rc;
2285 		}
2286 		rc = locks_lock_file_wait(file, flock);
2287 	}
2288 	return rc;
2289 }
2290 
2291 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2292 {
2293 	int rc, xid;
2294 	int lock = 0, unlock = 0;
2295 	bool wait_flag = false;
2296 	bool posix_lck = false;
2297 	struct cifs_sb_info *cifs_sb;
2298 	struct cifs_tcon *tcon;
2299 	struct cifsFileInfo *cfile;
2300 	__u32 type;
2301 
2302 	xid = get_xid();
2303 
2304 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2305 		rc = -ENOLCK;
2306 		free_xid(xid);
2307 		return rc;
2308 	}
2309 
2310 	cfile = (struct cifsFileInfo *)file->private_data;
2311 	tcon = tlink_tcon(cfile->tlink);
2312 
2313 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2314 			tcon->ses->server);
2315 	cifs_sb = CIFS_FILE_SB(file);
2316 
2317 	if (cap_unix(tcon->ses) &&
2318 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2319 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2320 		posix_lck = true;
2321 
2322 	if (!lock && !unlock) {
2323 		/*
2324 		 * if no lock or unlock then nothing to do since we do not
2325 		 * know what it is
2326 		 */
2327 		rc = -EOPNOTSUPP;
2328 		free_xid(xid);
2329 		return rc;
2330 	}
2331 
2332 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2333 			xid);
2334 	free_xid(xid);
2335 	return rc;
2338 }
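
/*
 * Editor's note (illustrative, not part of the original source): cifs_flock()
 * above backs the BSD flock(2) call; the VFS hands us a whole-file advisory
 * lock with FL_FLOCK set. For example:
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *		fprintf(stderr, "file is locked elsewhere\n");
 */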
2339 
2340 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2341 {
2342 	int rc, xid;
2343 	int lock = 0, unlock = 0;
2344 	bool wait_flag = false;
2345 	bool posix_lck = false;
2346 	struct cifs_sb_info *cifs_sb;
2347 	struct cifs_tcon *tcon;
2348 	struct cifsFileInfo *cfile;
2349 	__u32 type;
2350 
2351 	rc = -EACCES;
2352 	xid = get_xid();
2353 
2354 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2355 		 flock->c.flc_flags, flock->c.flc_type,
2356 		 (long long)flock->fl_start,
2357 		 (long long)flock->fl_end);
2358 
2359 	cfile = (struct cifsFileInfo *)file->private_data;
2360 	tcon = tlink_tcon(cfile->tlink);
2361 
2362 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2363 			tcon->ses->server);
2364 	cifs_sb = CIFS_FILE_SB(file);
2365 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2366 
2367 	if (cap_unix(tcon->ses) &&
2368 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2369 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2370 		posix_lck = true;
2371 	/*
2372 	 * BB add code here to normalize offset and length to account for
2373 	 * negative length which we can not accept over the wire.
2374 	 */
2375 	if (IS_GETLK(cmd)) {
2376 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2377 		free_xid(xid);
2378 		return rc;
2379 	}
2380 
2381 	if (!lock && !unlock) {
2382 		/*
2383 		 * if no lock or unlock then nothing to do since we do not
2384 		 * know what it is
2385 		 */
2386 		free_xid(xid);
2387 		return -EOPNOTSUPP;
2388 	}
2389 
2390 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2391 			xid);
2392 	free_xid(xid);
2393 	return rc;
2394 }
2395 
2396 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2397 				      bool was_async)
2398 {
2399 	struct netfs_io_request *wreq = wdata->rreq;
2400 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2401 	loff_t wrend;
2402 
2403 	if (result > 0) {
2404 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2405 
2406 		if (wrend > ictx->zero_point &&
2407 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2408 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2409 			ictx->zero_point = wrend;
2410 		if (wrend > ictx->remote_i_size)
2411 			netfs_resize_file(ictx, wrend, true);
2412 	}
2413 
2414 	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2415 }
2416 
2417 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2418 					bool fsuid_only)
2419 {
2420 	struct cifsFileInfo *open_file = NULL;
2421 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2422 
2423 	/* only filter by fsuid on multiuser mounts */
2424 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2425 		fsuid_only = false;
2426 
2427 	spin_lock(&cifs_inode->open_file_lock);
2428 	/* we could simply take the first list entry since write-only entries
2429 	   are always at the end of the list, but since the first entry might
2430 	   have a close pending, we go through the whole list */
2431 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2432 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2433 			continue;
2434 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2435 			if ((!open_file->invalidHandle)) {
2436 				/* found a good file */
2437 				/* lock it so it will not be closed on us */
2438 				cifsFileInfo_get(open_file);
2439 				spin_unlock(&cifs_inode->open_file_lock);
2440 				return open_file;
2441 			} /* else might as well continue, and look for
2442 			     another, or simply have the caller reopen it
2443 			     again rather than trying to fix this handle */
2444 		} else /* write only file */
2445 			break; /* write only files are last so must be done */
2446 	}
2447 	spin_unlock(&cifs_inode->open_file_lock);
2448 	return NULL;
2449 }
2450 
2451 /* Return -EBADF if no handle is found and general rc otherwise */
2452 int
2453 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2454 		       struct cifsFileInfo **ret_file)
2455 {
2456 	struct cifsFileInfo *open_file, *inv_file = NULL;
2457 	struct cifs_sb_info *cifs_sb;
2458 	bool any_available = false;
2459 	int rc = -EBADF;
2460 	unsigned int refind = 0;
2461 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2462 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2463 	*ret_file = NULL;
2464 
2465 	/*
2466 	 * Having a null inode here (because mapping->host was set to zero by
2467 	 * the VFS or MM) should not happen, but we had reports of an oops (due
2468 	 * to it being zero) during stress testcases, so we need to check for it
2469 	 */
2470 
2471 	if (cifs_inode == NULL) {
2472 		cifs_dbg(VFS, "Null inode passed to cifs_get_writable_file\n");
2473 		dump_stack();
2474 		return rc;
2475 	}
2476 
2477 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2478 
2479 	/* only filter by fsuid on multiuser mounts */
2480 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2481 		fsuid_only = false;
2482 
2483 	spin_lock(&cifs_inode->open_file_lock);
2484 refind_writable:
2485 	if (refind > MAX_REOPEN_ATT) {
2486 		spin_unlock(&cifs_inode->open_file_lock);
2487 		return rc;
2488 	}
2489 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2490 		if (!any_available && open_file->pid != current->tgid)
2491 			continue;
2492 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2493 			continue;
2494 		if (with_delete && !(open_file->fid.access & DELETE))
2495 			continue;
2496 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2497 			if (!open_file->invalidHandle) {
2498 				/* found a good writable file */
2499 				cifsFileInfo_get(open_file);
2500 				spin_unlock(&cifs_inode->open_file_lock);
2501 				*ret_file = open_file;
2502 				return 0;
2503 			} else {
2504 				if (!inv_file)
2505 					inv_file = open_file;
2506 			}
2507 		}
2508 	}
2509 	/* couldn't find usable FH with same pid, try any available */
2510 	if (!any_available) {
2511 		any_available = true;
2512 		goto refind_writable;
2513 	}
2514 
2515 	if (inv_file) {
2516 		any_available = false;
2517 		cifsFileInfo_get(inv_file);
2518 	}
2519 
2520 	spin_unlock(&cifs_inode->open_file_lock);
2521 
2522 	if (inv_file) {
2523 		rc = cifs_reopen_file(inv_file, false);
2524 		if (!rc) {
2525 			*ret_file = inv_file;
2526 			return 0;
2527 		}
2528 
2529 		spin_lock(&cifs_inode->open_file_lock);
2530 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2531 		spin_unlock(&cifs_inode->open_file_lock);
2532 		cifsFileInfo_put(inv_file);
2533 		++refind;
2534 		inv_file = NULL;
2535 		spin_lock(&cifs_inode->open_file_lock);
2536 		goto refind_writable;
2537 	}
2538 
2539 	return rc;
2540 }
2541 
2542 struct cifsFileInfo *
2543 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2544 {
2545 	struct cifsFileInfo *cfile;
2546 	int rc;
2547 
2548 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2549 	if (rc)
2550 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2551 
2552 	return cfile;
2553 }
2554 
2555 int
2556 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2557 		       int flags,
2558 		       struct cifsFileInfo **ret_file)
2559 {
2560 	struct cifsFileInfo *cfile;
2561 	void *page = alloc_dentry_path();
2562 
2563 	*ret_file = NULL;
2564 
2565 	spin_lock(&tcon->open_file_lock);
2566 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2567 		struct cifsInodeInfo *cinode;
2568 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2569 		if (IS_ERR(full_path)) {
2570 			spin_unlock(&tcon->open_file_lock);
2571 			free_dentry_path(page);
2572 			return PTR_ERR(full_path);
2573 		}
2574 		if (strcmp(full_path, name))
2575 			continue;
2576 
2577 		cinode = CIFS_I(d_inode(cfile->dentry));
2578 		spin_unlock(&tcon->open_file_lock);
2579 		free_dentry_path(page);
2580 		return cifs_get_writable_file(cinode, flags, ret_file);
2581 	}
2582 
2583 	spin_unlock(&tcon->open_file_lock);
2584 	free_dentry_path(page);
2585 	return -ENOENT;
2586 }
2587 
2588 int
2589 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2590 		       struct cifsFileInfo **ret_file)
2591 {
2592 	struct cifsFileInfo *cfile;
2593 	void *page = alloc_dentry_path();
2594 
2595 	*ret_file = NULL;
2596 
2597 	spin_lock(&tcon->open_file_lock);
2598 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2599 		struct cifsInodeInfo *cinode;
2600 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2601 		if (IS_ERR(full_path)) {
2602 			spin_unlock(&tcon->open_file_lock);
2603 			free_dentry_path(page);
2604 			return PTR_ERR(full_path);
2605 		}
2606 		if (strcmp(full_path, name))
2607 			continue;
2608 
2609 		cinode = CIFS_I(d_inode(cfile->dentry));
2610 		spin_unlock(&tcon->open_file_lock);
2611 		free_dentry_path(page);
2612 		*ret_file = find_readable_file(cinode, 0);
2613 		return *ret_file ? 0 : -ENOENT;
2614 	}
2615 
2616 	spin_unlock(&tcon->open_file_lock);
2617 	free_dentry_path(page);
2618 	return -ENOENT;
2619 }
2620 
2621 /*
2622  * Flush data on a file opened in strict cache mode.
2623  */
2624 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2625 		      int datasync)
2626 {
2627 	unsigned int xid;
2628 	int rc = 0;
2629 	struct cifs_tcon *tcon;
2630 	struct TCP_Server_Info *server;
2631 	struct cifsFileInfo *smbfile = file->private_data;
2632 	struct inode *inode = file_inode(file);
2633 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2634 
2635 	rc = file_write_and_wait_range(file, start, end);
2636 	if (rc) {
2637 		trace_cifs_fsync_err(inode->i_ino, rc);
2638 		return rc;
2639 	}
2640 
2641 	xid = get_xid();
2642 
2643 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2644 		 file, datasync);
2645 
2646 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2647 		rc = cifs_zap_mapping(inode);
2648 		if (rc) {
2649 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2650 			rc = 0; /* don't care about it in fsync */
2651 		}
2652 	}
2653 
2654 	tcon = tlink_tcon(smbfile->tlink);
2655 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2656 		server = tcon->ses->server;
2657 		if (server->ops->flush == NULL) {
2658 			rc = -ENOSYS;
2659 			goto strict_fsync_exit;
2660 		}
2661 
2662 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2663 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2664 			if (smbfile) {
2665 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2666 				cifsFileInfo_put(smbfile);
2667 			} else
2668 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2669 		} else
2670 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2671 	}
2672 
2673 strict_fsync_exit:
2674 	free_xid(xid);
2675 	return rc;
2676 }
2677 
2678 /*
2679  * Flush data on a file not opened in strict cache mode.
2680  */
2681 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2682 {
2683 	unsigned int xid;
2684 	int rc = 0;
2685 	struct cifs_tcon *tcon;
2686 	struct TCP_Server_Info *server;
2687 	struct cifsFileInfo *smbfile = file->private_data;
2688 	struct inode *inode = file_inode(file);
2689 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2690 
2691 	rc = file_write_and_wait_range(file, start, end);
2692 	if (rc) {
2693 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2694 		return rc;
2695 	}
2696 
2697 	xid = get_xid();
2698 
2699 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2700 		 file, datasync);
2701 
2702 	tcon = tlink_tcon(smbfile->tlink);
2703 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2704 		server = tcon->ses->server;
2705 		if (server->ops->flush == NULL) {
2706 			rc = -ENOSYS;
2707 			goto fsync_exit;
2708 		}
2709 
2710 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2711 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2712 			if (smbfile) {
2713 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2714 				cifsFileInfo_put(smbfile);
2715 			} else
2716 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2717 		} else
2718 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2719 	}
2720 
2721 fsync_exit:
2722 	free_xid(xid);
2723 	return rc;
2724 }
2725 
2726 /*
2727  * As the file closes, flush all cached write data for this inode,
2728  * checking for write-behind errors.
2729  */
2730 int cifs_flush(struct file *file, fl_owner_t id)
2731 {
2732 	struct inode *inode = file_inode(file);
2733 	int rc = 0;
2734 
2735 	if (file->f_mode & FMODE_WRITE)
2736 		rc = filemap_write_and_wait(inode->i_mapping);
2737 
2738 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2739 	if (rc) {
2740 		/* get more nuanced writeback errors */
2741 		rc = filemap_check_wb_err(file->f_mapping, 0);
2742 		trace_cifs_flush_err(inode->i_ino, rc);
2743 	}
2744 	return rc;
2745 }
2746 
2747 static ssize_t
2748 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2749 {
2750 	struct file *file = iocb->ki_filp;
2751 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2752 	struct inode *inode = file->f_mapping->host;
2753 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2754 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2755 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2756 	ssize_t rc;
2757 
2758 	rc = netfs_start_io_write(inode);
2759 	if (rc < 0)
2760 		return rc;
2761 
2762 	/*
2763 	 * We need to hold the sem to be sure nobody modifies the lock list
2764 	 * with a brlock that prevents writing.
2765 	 */
2766 	down_read(&cinode->lock_sem);
2767 
2768 	rc = generic_write_checks(iocb, from);
2769 	if (rc <= 0)
2770 		goto out;
2771 
2772 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2773 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2774 				     server->vals->exclusive_lock_type, 0,
2775 				     NULL, CIFS_WRITE_OP))) {
2776 		rc = -EACCES;
2777 		goto out;
2778 	}
2779 
2780 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2781 
2782 out:
2783 	up_read(&cinode->lock_sem);
2784 	netfs_end_io_write(inode);
2785 	if (rc > 0)
2786 		rc = generic_write_sync(iocb, rc);
2787 	return rc;
2788 }
2789 
2790 ssize_t
2791 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2792 {
2793 	struct inode *inode = file_inode(iocb->ki_filp);
2794 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2795 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2796 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2797 						iocb->ki_filp->private_data;
2798 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2799 	ssize_t written;
2800 
2801 	written = cifs_get_writer(cinode);
2802 	if (written)
2803 		return written;
2804 
2805 	if (CIFS_CACHE_WRITE(cinode)) {
2806 		if (cap_unix(tcon->ses) &&
2807 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2808 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2809 			written = netfs_file_write_iter(iocb, from);
2810 			goto out;
2811 		}
2812 		written = cifs_writev(iocb, from);
2813 		goto out;
2814 	}
2815 	/*
2816 	 * For non-oplocked files in strict cache mode we need to write the data
2817 	 * to the server exactly from the pos to pos+len-1 rather than flush all
2818 	 * affected pages because it may cause an error with mandatory locks on
2819 	 * these pages but not on the region from pos to pos+len-1.
2820 	 */
2821 	written = netfs_file_write_iter(iocb, from);
2822 	if (CIFS_CACHE_READ(cinode)) {
2823 		/*
2824 		 * We have read level caching and we have just sent a write
2825 		 * request to the server thus making data in the cache stale.
2826 		 * Zap the cache and set oplock/lease level to NONE to avoid
2827 		 * reading stale data from the cache. All subsequent read
2828 		 * operations will read new data from the server.
2829 		 */
2830 		cifs_zap_mapping(inode);
2831 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2832 			 inode);
2833 		cinode->oplock = 0;
2834 	}
2835 out:
2836 	cifs_put_writer(cinode);
2837 	return written;
2838 }
2839 
2840 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2841 {
2842 	ssize_t rc;
2843 	struct inode *inode = file_inode(iocb->ki_filp);
2844 
2845 	if (iocb->ki_flags & IOCB_DIRECT)
2846 		return netfs_unbuffered_read_iter(iocb, iter);
2847 
2848 	rc = cifs_revalidate_mapping(inode);
2849 	if (rc)
2850 		return rc;
2851 
2852 	return netfs_file_read_iter(iocb, iter);
2853 }
2854 
2855 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2856 {
2857 	struct inode *inode = file_inode(iocb->ki_filp);
2858 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2859 	ssize_t written;
2860 	int rc;
2861 
2862 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2863 		written = netfs_unbuffered_write_iter(iocb, from);
2864 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2865 			cifs_zap_mapping(inode);
2866 			cifs_dbg(FYI,
2867 				 "Set no oplock for inode=%p after a write operation\n",
2868 				 inode);
2869 			cinode->oplock = 0;
2870 		}
2871 		return written;
2872 	}
2873 
2874 	written = cifs_get_writer(cinode);
2875 	if (written)
2876 		return written;
2877 
2878 	written = netfs_file_write_iter(iocb, from);
2879 
2880 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2881 		rc = filemap_fdatawrite(inode->i_mapping);
2882 		if (rc)
2883 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2884 				 rc, inode);
2885 	}
2886 
2887 	cifs_put_writer(cinode);
2888 	return written;
2889 }
2890 
2891 ssize_t
2892 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2893 {
2894 	struct inode *inode = file_inode(iocb->ki_filp);
2895 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2896 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2897 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2898 						iocb->ki_filp->private_data;
2899 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2900 	int rc = -EACCES;
2901 
2902 	/*
2903 	 * In strict cache mode we need to read from the server all the time
2904 	 * if we don't have level II oplock because the server can delay mtime
2905 	 * change - so we can't make a decision about invalidating the inode.
2906 	 * And we can also fail when reading pages if there are mandatory locks
2907 	 * on pages affected by this read but not on the region from pos to
2908 	 * pos+len-1.
2909 	 */
2910 	if (!CIFS_CACHE_READ(cinode))
2911 		return netfs_unbuffered_read_iter(iocb, to);
2912 
2913 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2914 		if (iocb->ki_flags & IOCB_DIRECT)
2915 			return netfs_unbuffered_read_iter(iocb, to);
2916 		return netfs_buffered_read_iter(iocb, to);
2917 	}
2918 
2919 	/*
2920 	 * We need to hold the sem to be sure nobody modifies the lock list
2921 	 * with a brlock that prevents reading.
2922 	 */
2923 	if (iocb->ki_flags & IOCB_DIRECT) {
2924 		rc = netfs_start_io_direct(inode);
2925 		if (rc < 0)
2926 			goto out;
2927 		rc = -EACCES;
2928 		down_read(&cinode->lock_sem);
2929 		if (!cifs_find_lock_conflict(
2930 			    cfile, iocb->ki_pos, iov_iter_count(to),
2931 			    tcon->ses->server->vals->shared_lock_type,
2932 			    0, NULL, CIFS_READ_OP))
2933 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2934 		up_read(&cinode->lock_sem);
2935 		netfs_end_io_direct(inode);
2936 	} else {
2937 		rc = netfs_start_io_read(inode);
2938 		if (rc < 0)
2939 			goto out;
2940 		rc = -EACCES;
2941 		down_read(&cinode->lock_sem);
2942 		if (!cifs_find_lock_conflict(
2943 			    cfile, iocb->ki_pos, iov_iter_count(to),
2944 			    tcon->ses->server->vals->shared_lock_type,
2945 			    0, NULL, CIFS_READ_OP))
2946 			rc = filemap_read(iocb, to, 0);
2947 		up_read(&cinode->lock_sem);
2948 		netfs_end_io_read(inode);
2949 	}
2950 out:
2951 	return rc;
2952 }
2953 
2954 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2955 {
2956 	return netfs_page_mkwrite(vmf, NULL);
2957 }
2958 
2959 static const struct vm_operations_struct cifs_file_vm_ops = {
2960 	.fault = filemap_fault,
2961 	.map_pages = filemap_map_pages,
2962 	.page_mkwrite = cifs_page_mkwrite,
2963 };
2964 
2965 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2966 {
2967 	int xid, rc = 0;
2968 	struct inode *inode = file_inode(file);
2969 
2970 	xid = get_xid();
2971 
2972 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
2973 		rc = cifs_zap_mapping(inode);
2974 	if (!rc)
2975 		rc = generic_file_mmap(file, vma);
2976 	if (!rc)
2977 		vma->vm_ops = &cifs_file_vm_ops;
2978 
2979 	free_xid(xid);
2980 	return rc;
2981 }
2982 
2983 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2984 {
2985 	int rc, xid;
2986 
2987 	xid = get_xid();
2988 
2989 	rc = cifs_revalidate_file(file);
2990 	if (rc)
2991 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
2992 			 rc);
2993 	if (!rc)
2994 		rc = generic_file_mmap(file, vma);
2995 	if (!rc)
2996 		vma->vm_ops = &cifs_file_vm_ops;
2997 
2998 	free_xid(xid);
2999 	return rc;
3000 }
3001 
3002 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3003 {
3004 	struct cifsFileInfo *open_file;
3005 
3006 	spin_lock(&cifs_inode->open_file_lock);
3007 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3008 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3009 			spin_unlock(&cifs_inode->open_file_lock);
3010 			return 1;
3011 		}
3012 	}
3013 	spin_unlock(&cifs_inode->open_file_lock);
3014 	return 0;
3015 }
3016 
3017 /* We do not want to update the file size from the server for inodes
3018    open for write - to avoid races with writepage extending
3019    the file - in the future we could consider allowing
3020    refreshing the inode only on increases in the file size
3021    but this is tricky to do without racing with writebehind
3022    page caching in the current Linux kernel design */
3023 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3024 			    bool from_readdir)
3025 {
3026 	if (!cifsInode)
3027 		return true;
3028 
3029 	if (is_inode_writable(cifsInode) ||
3030 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3031 		/* This inode is open for write at least once */
3032 		struct cifs_sb_info *cifs_sb;
3033 
3034 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3035 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3036 			/* since there is no page cache to corrupt on
3037 			   direct I/O, we can change the size safely */
3038 			return true;
3039 		}
3040 
3041 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3042 			return true;
3043 
3044 		return false;
3045 	} else
3046 		return true;
3047 }
3048 
3049 void cifs_oplock_break(struct work_struct *work)
3050 {
3051 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3052 						  oplock_break);
3053 	struct inode *inode = d_inode(cfile->dentry);
3054 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3055 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3056 	struct cifs_tcon *tcon;
3057 	struct TCP_Server_Info *server;
3058 	struct tcon_link *tlink;
3059 	int rc = 0;
3060 	bool purge_cache = false, oplock_break_cancelled;
3061 	__u64 persistent_fid, volatile_fid;
3062 	__u16 net_fid;
3063 
3064 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
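	/*
	 * Let any writers that are already in flight finish before acting on
	 * the break; cifs_put_writer() clears this bit and wakes us once the
	 * last writer drops out.
	 */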
3065 			TASK_UNINTERRUPTIBLE);
3066 
3067 	tlink = cifs_sb_tlink(cifs_sb);
3068 	if (IS_ERR(tlink))
3069 		goto out;
3070 	tcon = tlink_tcon(tlink);
3071 	server = tcon->ses->server;
3072 
3073 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3074 				      cfile->oplock_epoch, &purge_cache);
3075 
3076 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3077 						cifs_has_mand_locks(cinode)) {
3078 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3079 			 inode);
3080 		cinode->oplock = 0;
3081 	}
3082 
3083 	if (inode && S_ISREG(inode->i_mode)) {
3084 		if (CIFS_CACHE_READ(cinode))
3085 			break_lease(inode, O_RDONLY);
3086 		else
3087 			break_lease(inode, O_WRONLY);
3088 		rc = filemap_fdatawrite(inode->i_mapping);
3089 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3090 			rc = filemap_fdatawait(inode->i_mapping);
3091 			mapping_set_error(inode->i_mapping, rc);
3092 			cifs_zap_mapping(inode);
3093 		}
3094 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3095 		if (CIFS_CACHE_WRITE(cinode))
3096 			goto oplock_break_ack;
3097 	}
3098 
3099 	rc = cifs_push_locks(cfile);
3100 	if (rc)
3101 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3102 
3103 oplock_break_ack:
3104 	/*
3105 	 * When an oplock break is received and there are no active file
3106 	 * handles, but there are cached ones, schedule a deferred close
3107 	 * immediately so that a new open will not use a cached handle.
3108 	 */
3109 
3110 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3111 		cifs_close_deferred_file(cinode);
3112 
3113 	persistent_fid = cfile->fid.persistent_fid;
3114 	volatile_fid = cfile->fid.volatile_fid;
3115 	net_fid = cfile->fid.netfid;
3116 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3117 
3118 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
3119 	/*
3120 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3121 	 * an acknowledgment to be sent when the file has already been closed.
3122 	 */
3123 	spin_lock(&cinode->open_file_lock);
3124 	/* check list empty since this can race with kill_sb calling tree disconnect */
3125 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3126 		spin_unlock(&cinode->open_file_lock);
3127 		rc = server->ops->oplock_response(tcon, persistent_fid,
3128 						  volatile_fid, net_fid, cinode);
3129 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3130 	} else
3131 		spin_unlock(&cinode->open_file_lock);
3132 
3133 	cifs_put_tlink(tlink);
3134 out:
3135 	cifs_done_oplock_break(cinode);
3136 }
3137 
3138 static int cifs_swap_activate(struct swap_info_struct *sis,
3139 			      struct file *swap_file, sector_t *span)
3140 {
3141 	struct cifsFileInfo *cfile = swap_file->private_data;
3142 	struct inode *inode = swap_file->f_mapping->host;
3143 	unsigned long blocks;
3144 	long long isize;
3145 
3146 	cifs_dbg(FYI, "swap activate\n");
3147 
3148 	if (!swap_file->f_mapping->a_ops->swap_rw)
3149 		/* Cannot support swap */
3150 		return -EINVAL;
3151 
3152 	spin_lock(&inode->i_lock);
3153 	blocks = inode->i_blocks;
3154 	isize = inode->i_size;
3155 	spin_unlock(&inode->i_lock);
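	/* i_blocks is in 512-byte units; fewer than isize/512 blocks implies holes */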
3156 	if (blocks*512 < isize) {
3157 		pr_warn("swap activate: swapfile has holes\n");
3158 		return -EINVAL;
3159 	}
3160 	*span = sis->pages;
3161 
3162 	pr_warn_once("Swap support over SMB3 is experimental\n");
3163 
3164 	/*
3165 	 * TODO: consider adding ACL (or documenting how) to prevent other
3166 	 * users (on this or other systems) from reading it
3167 	 */
3168 
3170 	/* TODO: add sk_set_memalloc(inet) or similar */
3171 
3172 	if (cfile)
3173 		cfile->swapfile = true;
3174 	/*
3175 	 * TODO: Since file already open, we can't open with DENY_ALL here
3176 	 * but we could add call to grab a byte range lock to prevent others
3177 	 * from reading or writing the file
3178 	 */
3179 
3180 	sis->flags |= SWP_FS_OPS;
3181 	return add_swap_extent(sis, 0, sis->max, 0);
3182 }
3183 
3184 static void cifs_swap_deactivate(struct file *file)
3185 {
3186 	struct cifsFileInfo *cfile = file->private_data;
3187 
3188 	cifs_dbg(FYI, "swap deactivate\n");
3189 
3190 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3191 
3192 	if (cfile)
3193 		cfile->swapfile = false;
3194 
3195 	/* do we need to unpin (or unlock) the file */
3196 }
3197 
3198 /**
3199  * cifs_swap_rw - SMB3 address space operation for swap I/O
3200  * @iocb: target I/O control block
3201  * @iter: I/O buffer
3202  *
3203  * Perform IO to the swap-file.  This is much like direct IO.
3204  */
3205 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3206 {
3207 	ssize_t ret;
3208 
3209 	if (iov_iter_rw(iter) == READ)
3210 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3211 	else
3212 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3213 	if (ret < 0)
3214 		return ret;
3215 	return 0;
3216 }
3217 
3218 const struct address_space_operations cifs_addr_ops = {
3219 	.read_folio	= netfs_read_folio,
3220 	.readahead	= netfs_readahead,
3221 	.writepages	= netfs_writepages,
3222 	.dirty_folio	= netfs_dirty_folio,
3223 	.release_folio	= netfs_release_folio,
3224 	.direct_IO	= noop_direct_IO,
3225 	.invalidate_folio = netfs_invalidate_folio,
3226 	.migrate_folio	= filemap_migrate_folio,
3227 	/*
3228 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3229 	 * helper if needed
3230 	 */
3231 	.swap_activate	= cifs_swap_activate,
3232 	.swap_deactivate = cifs_swap_deactivate,
3233 	.swap_rw = cifs_swap_rw,
3234 };
3235 
3236 /*
3237  * cifs_readahead requires the server to support a buffer large enough to
3238  * contain the header plus one complete page of data.  Otherwise, we need
3239  * to leave cifs_readahead out of the address space operations.
3240  */
3241 const struct address_space_operations cifs_addr_ops_smallbuf = {
3242 	.read_folio	= netfs_read_folio,
3243 	.writepages	= netfs_writepages,
3244 	.dirty_folio	= netfs_dirty_folio,
3245 	.release_folio	= netfs_release_folio,
3246 	.invalidate_folio = netfs_invalidate_folio,
3247 	.migrate_folio	= filemap_migrate_folio,
3248 };
3249