xref: /linux/fs/smb/client/file.c (revision d53b8e36925256097a08d7cb749198d85cbf9b2b)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/filelock.h>
13 #include <linux/backing-dev.h>
14 #include <linux/stat.h>
15 #include <linux/fcntl.h>
16 #include <linux/pagemap.h>
17 #include <linux/pagevec.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifspdu.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct TCP_Server_Info *server;
53 	struct cifsFileInfo *open_file = req->cfile;
54 	size_t wsize = req->rreq.wsize;
55 	int rc;
56 
57 	if (!wdata->have_xid) {
58 		wdata->xid = get_xid();
59 		wdata->have_xid = true;
60 	}
61 
62 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
63 	wdata->server = server;
64 
65 retry:
66 	if (open_file->invalidHandle) {
67 		rc = cifs_reopen_file(open_file, false);
68 		if (rc < 0) {
69 			if (rc == -EAGAIN)
70 				goto retry;
71 			subreq->error = rc;
72 			return netfs_prepare_write_failed(subreq);
73 		}
74 	}
75 
76 	rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
77 					   &wdata->credits);
78 	if (rc < 0) {
79 		subreq->error = rc;
80 		return netfs_prepare_write_failed(subreq);
81 	}
82 
83 	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
84 	wdata->credits.rreq_debug_index = subreq->debug_index;
85 	wdata->credits.in_flight_check = 1;
86 	trace_smb3_rw_credits(wdata->rreq->debug_id,
87 			      wdata->subreq.debug_index,
88 			      wdata->credits.value,
89 			      server->credits, server->in_flight,
90 			      wdata->credits.value,
91 			      cifs_trace_rw_credits_write_prepare);
92 
93 #ifdef CONFIG_CIFS_SMB_DIRECT
94 	if (server->smbd_conn)
95 		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
96 #endif
97 }
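/*
 * Note: the credits reserved here are either consumed when the subrequest is
 * issued or handed back via add_credits_and_wake_if() on the failure path in
 * cifs_issue_write() and in cifs_free_subrequest().
 */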
98 
99 /*
100  * Issue a subrequest to upload to the server.
101  */
102 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
103 {
104 	struct cifs_io_subrequest *wdata =
105 		container_of(subreq, struct cifs_io_subrequest, subreq);
106 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
107 	int rc;
108 
109 	if (cifs_forced_shutdown(sbi)) {
110 		rc = -EIO;
111 		goto fail;
112 	}
113 
114 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
115 	if (rc)
116 		goto fail;
117 
118 	rc = -EAGAIN;
119 	if (wdata->req->cfile->invalidHandle)
120 		goto fail;
121 
122 	wdata->server->ops->async_writev(wdata);
123 out:
124 	return;
125 
126 fail:
127 	if (rc == -EAGAIN)
128 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
129 	else
130 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
131 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
132 	cifs_write_subrequest_terminated(wdata, rc, false);
133 	goto out;
134 }
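/*
 * On failure, cifs_write_subrequest_terminated() reports the error back to
 * the netfs layer; -EAGAIN (e.g. an invalidated handle) is traced as a retry
 * rather than a hard failure.
 */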
135 
136 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
137 {
138 	cifs_invalidate_cache(wreq->inode, 0);
139 }
140 
141 /*
142  * Split the read up according to how many credits we can get for each piece.
143  * It's okay to sleep here if we need to wait for more credit to become
144  * available.
145  *
146  * We also choose the server and allocate an operation ID to be cleaned up
147  * later.
148  */
149 static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
150 {
151 	struct netfs_io_request *rreq = subreq->rreq;
152 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
153 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
154 	struct TCP_Server_Info *server = req->server;
155 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
156 	size_t rsize = 0;
157 	int rc;
158 
159 	rdata->xid = get_xid();
160 	rdata->have_xid = true;
161 	rdata->server = server;
162 
163 	if (cifs_sb->ctx->rsize == 0)
164 		cifs_sb->ctx->rsize =
165 			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
166 						     cifs_sb->ctx);
167 
169 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize,
170 					   &rdata->credits);
171 	if (rc) {
172 		subreq->error = rc;
173 		return false;
174 	}
175 
176 	rdata->credits.in_flight_check = 1;
177 	rdata->credits.rreq_debug_id = rreq->debug_id;
178 	rdata->credits.rreq_debug_index = subreq->debug_index;
179 
180 	trace_smb3_rw_credits(rdata->rreq->debug_id,
181 			      rdata->subreq.debug_index,
182 			      rdata->credits.value,
183 			      server->credits, server->in_flight, 0,
184 			      cifs_trace_rw_credits_read_submit);
185 
186 	subreq->len = min_t(size_t, subreq->len, rsize);
187 
188 #ifdef CONFIG_CIFS_SMB_DIRECT
189 	if (server->smbd_conn)
190 		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
191 #endif
192 	return true;
193 }
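/*
 * Illustrative example (numbers are hypothetical): if the server grants
 * credits for a 64KiB rsize but subreq->len is 256KiB, the clamp above
 * shrinks this subrequest to 64KiB and the netfs layer issues further
 * subrequests for the remainder.
 */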
194 
195 /*
196  * Issue a read operation on behalf of the netfs helper functions.  We're asked
197  * to make a read of a certain size at a point in the file.  We are permitted
198  * to only read a portion of that, but as long as we read something, the netfs
199  * helper will call us again so that we can issue another read.
200  */
201 static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
202 {
203 	struct netfs_io_request *rreq = subreq->rreq;
204 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
205 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
206 	int rc = 0;
207 
208 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
209 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
210 		 subreq->transferred, subreq->len);
211 
212 	if (req->cfile->invalidHandle) {
213 		do {
214 			rc = cifs_reopen_file(req->cfile, true);
215 		} while (rc == -EAGAIN);
216 		if (rc)
217 			goto out;
218 	}
219 
220 	if (subreq->rreq->origin != NETFS_DIO_READ)
221 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
222 
223 	rc = rdata->server->ops->async_readv(rdata);
224 out:
225 	if (rc)
226 		netfs_subreq_terminated(subreq, rc, false);
227 }
228 
229 /*
230  * Writeback calls this when it finds a folio that needs uploading.  This isn't
231  * called if writeback only has copy-to-cache to deal with.
232  */
233 static void cifs_begin_writeback(struct netfs_io_request *wreq)
234 {
235 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
236 	int ret;
237 
238 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
239 	if (ret) {
240 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
241 		return;
242 	}
243 
244 	wreq->io_streams[0].avail = true;
245 }
246 
247 /*
248  * Initialise a request.
249  */
250 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
251 {
252 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
253 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
254 	struct cifsFileInfo *open_file = NULL;
255 
256 	rreq->rsize = cifs_sb->ctx->rsize;
257 	rreq->wsize = cifs_sb->ctx->wsize;
258 	req->pid = current->tgid; // Note: may be called from a workqueue, so this may not be the opener's pid
259 
260 	if (file) {
261 		open_file = file->private_data;
262 		rreq->netfs_priv = file->private_data;
263 		req->cfile = cifsFileInfo_get(open_file);
264 		req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
265 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
266 			req->pid = req->cfile->pid;
267 	} else if (rreq->origin != NETFS_WRITEBACK) {
268 		WARN_ON_ONCE(1);
269 		return -EIO;
270 	}
271 
272 	return 0;
273 }
274 
275 /*
276  * Completion of a request operation.
277  */
278 static void cifs_rreq_done(struct netfs_io_request *rreq)
279 {
280 	struct timespec64 atime, mtime;
281 	struct inode *inode = rreq->inode;
282 
283 	/* we do not want atime to be less than mtime, it broke some apps */
284 	atime = inode_set_atime_to_ts(inode, current_time(inode));
285 	mtime = inode_get_mtime(inode);
286 	if (timespec64_compare(&atime, &mtime) < 0)
287 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
288 }
289 
290 static void cifs_post_modify(struct inode *inode)
291 {
292 	/* Indication to update ctime and mtime as close is deferred */
293 	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
294 }
295 
296 static void cifs_free_request(struct netfs_io_request *rreq)
297 {
298 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
299 
300 	if (req->cfile)
301 		cifsFileInfo_put(req->cfile);
302 }
303 
304 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
305 {
306 	struct cifs_io_subrequest *rdata =
307 		container_of(subreq, struct cifs_io_subrequest, subreq);
308 	int rc = subreq->error;
309 
310 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
311 #ifdef CONFIG_CIFS_SMB_DIRECT
312 		if (rdata->mr) {
313 			smbd_deregister_mr(rdata->mr);
314 			rdata->mr = NULL;
315 		}
316 #endif
317 	}
318 
319 	if (rdata->credits.value != 0) {
320 		trace_smb3_rw_credits(rdata->rreq->debug_id,
321 				      rdata->subreq.debug_index,
322 				      rdata->credits.value,
323 				      rdata->server ? rdata->server->credits : 0,
324 				      rdata->server ? rdata->server->in_flight : 0,
325 				      -rdata->credits.value,
326 				      cifs_trace_rw_credits_free_subreq);
327 		if (rdata->server)
328 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
329 		else
330 			rdata->credits.value = 0;
331 	}
332 
333 	if (rdata->have_xid)
334 		free_xid(rdata->xid);
335 }
336 
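/*
 * Callback table hooking cifs into the netfs library, which drives all
 * buffered and direct I/O through the request/subrequest hooks above.
 */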
337 const struct netfs_request_ops cifs_req_ops = {
338 	.request_pool		= &cifs_io_request_pool,
339 	.subrequest_pool	= &cifs_io_subrequest_pool,
340 	.init_request		= cifs_init_request,
341 	.free_request		= cifs_free_request,
342 	.free_subrequest	= cifs_free_subrequest,
343 	.clamp_length		= cifs_clamp_length,
344 	.issue_read		= cifs_req_issue_read,
345 	.done			= cifs_rreq_done,
346 	.post_modify		= cifs_post_modify,
347 	.begin_writeback	= cifs_begin_writeback,
348 	.prepare_write		= cifs_prepare_write,
349 	.issue_write		= cifs_issue_write,
350 	.invalidate_cache	= cifs_netfs_invalidate_cache,
351 };
352 
353 /*
354  * Mark all open files on tree connections as invalid, since they
355  * were closed when the session to the server was lost.
356  */
357 void
358 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
359 {
360 	struct cifsFileInfo *open_file = NULL;
361 	struct list_head *tmp;
362 	struct list_head *tmp1;
363 
364 	/* only send once per connect */
365 	spin_lock(&tcon->tc_lock);
366 	if (tcon->need_reconnect)
367 		tcon->status = TID_NEED_RECON;
368 
369 	if (tcon->status != TID_NEED_RECON) {
370 		spin_unlock(&tcon->tc_lock);
371 		return;
372 	}
373 	tcon->status = TID_IN_FILES_INVALIDATE;
374 	spin_unlock(&tcon->tc_lock);
375 
376 	/* list all files open on tree connection and mark them invalid */
377 	spin_lock(&tcon->open_file_lock);
378 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
379 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
380 		open_file->invalidHandle = true;
381 		open_file->oplock_break_cancelled = true;
382 	}
383 	spin_unlock(&tcon->open_file_lock);
384 
385 	invalidate_all_cached_dirs(tcon);
386 	spin_lock(&tcon->tc_lock);
387 	if (tcon->status == TID_IN_FILES_INVALIDATE)
388 		tcon->status = TID_NEED_TCON;
389 	spin_unlock(&tcon->tc_lock);
390 
391 	/*
392 	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
393 	 * to this tcon.
394 	 */
395 }
396 
397 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
398 {
399 	if ((flags & O_ACCMODE) == O_RDONLY)
400 		return GENERIC_READ;
401 	else if ((flags & O_ACCMODE) == O_WRONLY)
402 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
403 	else if ((flags & O_ACCMODE) == O_RDWR) {
404 		/* GENERIC_ALL is too much permission to request; it can
405 		   cause unnecessary access denied errors on create */
406 		/* return GENERIC_ALL; */
407 		return (GENERIC_READ | GENERIC_WRITE);
408 	}
409 
410 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
411 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
412 		FILE_READ_DATA);
413 }
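/*
 * Example (illustrative): an open with O_WRONLY normally maps to
 * GENERIC_WRITE, but with rdwr_for_fscache == 1 we ask for
 * GENERIC_READ | GENERIC_WRITE so the local cache can fill in around
 * partial writes.
 */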
414 
415 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
416 static u32 cifs_posix_convert_flags(unsigned int flags)
417 {
418 	u32 posix_flags = 0;
419 
420 	if ((flags & O_ACCMODE) == O_RDONLY)
421 		posix_flags = SMB_O_RDONLY;
422 	else if ((flags & O_ACCMODE) == O_WRONLY)
423 		posix_flags = SMB_O_WRONLY;
424 	else if ((flags & O_ACCMODE) == O_RDWR)
425 		posix_flags = SMB_O_RDWR;
426 
427 	if (flags & O_CREAT) {
428 		posix_flags |= SMB_O_CREAT;
429 		if (flags & O_EXCL)
430 			posix_flags |= SMB_O_EXCL;
431 	} else if (flags & O_EXCL)
432 		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
433 			 current->comm, current->tgid);
434 
435 	if (flags & O_TRUNC)
436 		posix_flags |= SMB_O_TRUNC;
437 	/* be safe and imply O_SYNC for O_DSYNC */
438 	if (flags & O_DSYNC)
439 		posix_flags |= SMB_O_SYNC;
440 	if (flags & O_DIRECTORY)
441 		posix_flags |= SMB_O_DIRECTORY;
442 	if (flags & O_NOFOLLOW)
443 		posix_flags |= SMB_O_NOFOLLOW;
444 	if (flags & O_DIRECT)
445 		posix_flags |= SMB_O_DIRECT;
446 
447 	return posix_flags;
448 }
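/*
 * Example (illustrative): f_flags of O_WRONLY | O_CREAT | O_EXCL converts
 * to SMB_O_WRONLY | SMB_O_CREAT | SMB_O_EXCL.
 */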
449 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
450 
451 static inline int cifs_get_disposition(unsigned int flags)
452 {
453 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
454 		return FILE_CREATE;
455 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
456 		return FILE_OVERWRITE_IF;
457 	else if ((flags & O_CREAT) == O_CREAT)
458 		return FILE_OPEN_IF;
459 	else if ((flags & O_TRUNC) == O_TRUNC)
460 		return FILE_OVERWRITE;
461 	else
462 		return FILE_OPEN;
463 }
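/*
 * Example (illustrative): open(path, O_WRONLY | O_CREAT | O_TRUNC) maps to
 * FILE_OVERWRITE_IF, while plain O_TRUNC maps to FILE_OVERWRITE (see the
 * mapping table in cifs_nt_open() below).
 */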
464 
465 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
466 int cifs_posix_open(const char *full_path, struct inode **pinode,
467 			struct super_block *sb, int mode, unsigned int f_flags,
468 			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
469 {
470 	int rc;
471 	FILE_UNIX_BASIC_INFO *presp_data;
472 	__u32 posix_flags = 0;
473 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
474 	struct cifs_fattr fattr;
475 	struct tcon_link *tlink;
476 	struct cifs_tcon *tcon;
477 
478 	cifs_dbg(FYI, "posix open %s\n", full_path);
479 
480 	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
481 	if (presp_data == NULL)
482 		return -ENOMEM;
483 
484 	tlink = cifs_sb_tlink(cifs_sb);
485 	if (IS_ERR(tlink)) {
486 		rc = PTR_ERR(tlink);
487 		goto posix_open_ret;
488 	}
489 
490 	tcon = tlink_tcon(tlink);
491 	mode &= ~current_umask();
492 
493 	posix_flags = cifs_posix_convert_flags(f_flags);
494 	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
495 			     poplock, full_path, cifs_sb->local_nls,
496 			     cifs_remap(cifs_sb));
497 	cifs_put_tlink(tlink);
498 
499 	if (rc)
500 		goto posix_open_ret;
501 
502 	if (presp_data->Type == cpu_to_le32(-1))
503 		goto posix_open_ret; /* open ok, caller does qpathinfo */
504 
505 	if (!pinode)
506 		goto posix_open_ret; /* caller does not need info */
507 
508 	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
509 
510 	/* get new inode and set it up */
511 	if (*pinode == NULL) {
512 		cifs_fill_uniqueid(sb, &fattr);
513 		*pinode = cifs_iget(sb, &fattr);
514 		if (!*pinode) {
515 			rc = -ENOMEM;
516 			goto posix_open_ret;
517 		}
518 	} else {
519 		cifs_revalidate_mapping(*pinode);
520 		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
521 	}
522 
523 posix_open_ret:
524 	kfree(presp_data);
525 	return rc;
526 }
527 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
528 
529 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
530 			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
531 			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
532 {
533 	int rc;
534 	int desired_access;
535 	int disposition;
536 	int create_options = CREATE_NOT_DIR;
537 	struct TCP_Server_Info *server = tcon->ses->server;
538 	struct cifs_open_parms oparms;
539 	int rdwr_for_fscache = 0;
540 
541 	if (!server->ops->open)
542 		return -ENOSYS;
543 
544 	/* If we're caching, we need to be able to fill in around partial writes. */
545 	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
546 		rdwr_for_fscache = 1;
547 
548 	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
549 
550 /*********************************************************************
551  *  open flag mapping table:
552  *
553  *	POSIX Flag            CIFS Disposition
554  *	----------            ----------------
555  *	O_CREAT               FILE_OPEN_IF
556  *	O_CREAT | O_EXCL      FILE_CREATE
557  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
558  *	O_TRUNC               FILE_OVERWRITE
559  *	none of the above     FILE_OPEN
560  *
561  *	Note that there is no direct match for the FILE_SUPERSEDE
562  *	disposition (ie create whether or not the file exists);
563  *	O_CREAT | O_TRUNC is similar, but truncates an existing file
564  *	rather than replacing it with a new file (created with the
565  *	attributes / metadata passed in on open) as FILE_SUPERSEDE does.
566  *
567  *	O_SYNC is a reasonable match to the CIFS writethrough flag and
568  *	the read/write flags match reasonably.  O_LARGEFILE is
569  *	irrelevant because largefile support is always used by this
570  *	client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
571  *	O_NOFOLLOW and O_NONBLOCK need further investigation.
572  *********************************************************************/
573 
574 	disposition = cifs_get_disposition(f_flags);
575 
576 	/* BB pass O_SYNC flag through on file attributes .. BB */
577 
578 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
579 	if (f_flags & O_SYNC)
580 		create_options |= CREATE_WRITE_THROUGH;
581 
582 	if (f_flags & O_DIRECT)
583 		create_options |= CREATE_NO_BUFFER;
584 
585 retry_open:
586 	oparms = (struct cifs_open_parms) {
587 		.tcon = tcon,
588 		.cifs_sb = cifs_sb,
589 		.desired_access = desired_access,
590 		.create_options = cifs_create_options(cifs_sb, create_options),
591 		.disposition = disposition,
592 		.path = full_path,
593 		.fid = fid,
594 	};
595 
596 	rc = server->ops->open(xid, &oparms, oplock, buf);
597 	if (rc) {
598 		if (rc == -EACCES && rdwr_for_fscache == 1) {
599 			desired_access = cifs_convert_flags(f_flags, 0);
600 			rdwr_for_fscache = 2;
601 			goto retry_open;
602 		}
603 		return rc;
604 	}
605 	if (rdwr_for_fscache == 2)
606 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
607 
608 	/* TODO: Add support for calling posix query info but with passing in fid */
609 	if (tcon->unix_ext)
610 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
611 					      xid);
612 	else
613 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
614 					 xid, fid);
615 
616 	if (rc) {
617 		server->ops->close(xid, tcon, fid);
618 		if (rc == -ESTALE)
619 			rc = -EOPENSTALE;
620 	}
621 
622 	return rc;
623 }
624 
625 static bool
626 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
627 {
628 	struct cifs_fid_locks *cur;
629 	bool has_locks = false;
630 
631 	down_read(&cinode->lock_sem);
632 	list_for_each_entry(cur, &cinode->llist, llist) {
633 		if (!list_empty(&cur->locks)) {
634 			has_locks = true;
635 			break;
636 		}
637 	}
638 	up_read(&cinode->lock_sem);
639 	return has_locks;
640 }
641 
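/*
 * Take a rw_semaphore for writing by polling with down_write_trylock()
 * every 10ms instead of sleeping on the rwsem wait queue.
 */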
642 void
643 cifs_down_write(struct rw_semaphore *sem)
644 {
645 	while (!down_write_trylock(sem))
646 		msleep(10);
647 }
648 
649 static void cifsFileInfo_put_work(struct work_struct *work);
650 void serverclose_work(struct work_struct *work);
651 
652 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
653 				       struct tcon_link *tlink, __u32 oplock,
654 				       const char *symlink_target)
655 {
656 	struct dentry *dentry = file_dentry(file);
657 	struct inode *inode = d_inode(dentry);
658 	struct cifsInodeInfo *cinode = CIFS_I(inode);
659 	struct cifsFileInfo *cfile;
660 	struct cifs_fid_locks *fdlocks;
661 	struct cifs_tcon *tcon = tlink_tcon(tlink);
662 	struct TCP_Server_Info *server = tcon->ses->server;
663 
664 	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
665 	if (cfile == NULL)
666 		return cfile;
667 
668 	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
669 	if (!fdlocks) {
670 		kfree(cfile);
671 		return NULL;
672 	}
673 
674 	if (symlink_target) {
675 		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
676 		if (!cfile->symlink_target) {
677 			kfree(fdlocks);
678 			kfree(cfile);
679 			return NULL;
680 		}
681 	}
682 
683 	INIT_LIST_HEAD(&fdlocks->locks);
684 	fdlocks->cfile = cfile;
685 	cfile->llist = fdlocks;
686 
687 	cfile->count = 1;
688 	cfile->pid = current->tgid;
689 	cfile->uid = current_fsuid();
690 	cfile->dentry = dget(dentry);
691 	cfile->f_flags = file->f_flags;
692 	cfile->invalidHandle = false;
693 	cfile->deferred_close_scheduled = false;
694 	cfile->tlink = cifs_get_tlink(tlink);
695 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
696 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
697 	INIT_WORK(&cfile->serverclose, serverclose_work);
698 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
699 	mutex_init(&cfile->fh_mutex);
700 	spin_lock_init(&cfile->file_info_lock);
701 
702 	cifs_sb_active(inode->i_sb);
703 
704 	/*
705 	 * If the server returned a read oplock and we have mandatory brlocks,
706 	 * set oplock level to None.
707 	 */
708 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
709 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
710 		oplock = 0;
711 	}
712 
713 	cifs_down_write(&cinode->lock_sem);
714 	list_add(&fdlocks->llist, &cinode->llist);
715 	up_write(&cinode->lock_sem);
716 
717 	spin_lock(&tcon->open_file_lock);
718 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
719 		oplock = fid->pending_open->oplock;
720 	list_del(&fid->pending_open->olist);
721 
722 	fid->purge_cache = false;
723 	server->ops->set_fid(cfile, fid, oplock);
724 
725 	list_add(&cfile->tlist, &tcon->openFileList);
726 	atomic_inc(&tcon->num_local_opens);
727 
728 	/* if this is a readable file instance, put it first in the list */
729 	spin_lock(&cinode->open_file_lock);
730 	if (file->f_mode & FMODE_READ)
731 		list_add(&cfile->flist, &cinode->openFileList);
732 	else
733 		list_add_tail(&cfile->flist, &cinode->openFileList);
734 	spin_unlock(&cinode->open_file_lock);
735 	spin_unlock(&tcon->open_file_lock);
736 
737 	if (fid->purge_cache)
738 		cifs_zap_mapping(inode);
739 
740 	file->private_data = cfile;
741 	return cfile;
742 }
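/*
 * Note: the cifsFileInfo returned above starts with count == 1; the
 * reference is dropped via cifsFileInfo_put()/_cifsFileInfo_put(), which
 * also close the handle on the server once the last reference is gone.
 */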
743 
744 struct cifsFileInfo *
745 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
746 {
747 	spin_lock(&cifs_file->file_info_lock);
748 	cifsFileInfo_get_locked(cifs_file);
749 	spin_unlock(&cifs_file->file_info_lock);
750 	return cifs_file;
751 }
752 
753 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
754 {
755 	struct inode *inode = d_inode(cifs_file->dentry);
756 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
757 	struct cifsLockInfo *li, *tmp;
758 	struct super_block *sb = inode->i_sb;
759 
760 	/*
761 	 * Delete any outstanding lock records. We'll lose them when the file
762 	 * is closed anyway.
763 	 */
764 	cifs_down_write(&cifsi->lock_sem);
765 	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
766 		list_del(&li->llist);
767 		cifs_del_lock_waiters(li);
768 		kfree(li);
769 	}
770 	list_del(&cifs_file->llist->llist);
771 	kfree(cifs_file->llist);
772 	up_write(&cifsi->lock_sem);
773 
774 	cifs_put_tlink(cifs_file->tlink);
775 	dput(cifs_file->dentry);
776 	cifs_sb_deactive(sb);
777 	kfree(cifs_file->symlink_target);
778 	kfree(cifs_file);
779 }
780 
781 static void cifsFileInfo_put_work(struct work_struct *work)
782 {
783 	struct cifsFileInfo *cifs_file = container_of(work,
784 			struct cifsFileInfo, put);
785 
786 	cifsFileInfo_put_final(cifs_file);
787 }
788 
789 void serverclose_work(struct work_struct *work)
790 {
791 	struct cifsFileInfo *cifs_file = container_of(work,
792 			struct cifsFileInfo, serverclose);
793 
794 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
795 
796 	struct TCP_Server_Info *server = tcon->ses->server;
797 	int rc = 0;
798 	int retries = 0;
799 	int MAX_RETRIES = 4;
800 
801 	do {
802 		if (server->ops->close_getattr)
803 			rc = server->ops->close_getattr(0, tcon, cifs_file);
804 		else if (server->ops->close)
805 			rc = server->ops->close(0, tcon, &cifs_file->fid);
806 
807 		if (rc == -EBUSY || rc == -EAGAIN) {
808 			retries++;
809 			msleep(250);
810 		}
811 	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));
813 
814 	if (retries == MAX_RETRIES)
815 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
816 
817 	if (cifs_file->offload)
818 		queue_work(fileinfo_put_wq, &cifs_file->put);
819 	else
820 		cifsFileInfo_put_final(cifs_file);
821 }
822 
823 /**
824  * cifsFileInfo_put - release a reference of file priv data
825  *
826  * Always allows waiting for any running oplock break handler. See _cifsFileInfo_put().
827  *
828  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
829  */
830 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
831 {
832 	_cifsFileInfo_put(cifs_file, true, true);
833 }
834 
835 /**
836  * _cifsFileInfo_put - release a reference of file priv data
837  *
838  * This may involve closing the filehandle @cifs_file out on the
839  * server. Must be called without holding tcon->open_file_lock,
840  * cinode->open_file_lock and cifs_file->file_info_lock.
841  *
842  * If @wait_for_oplock_handler is true and we are releasing the last
843  * reference, wait for any running oplock break handler of the file
844  * and cancel any pending one.
845  *
846  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
847  * @wait_oplock_handler: must be false if called from oplock_break_handler
848  * @offload:	if true, offload the final release to a workqueue
849  *		(the close and oplock break paths pass false)
850  */
851 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
852 		       bool wait_oplock_handler, bool offload)
853 {
854 	struct inode *inode = d_inode(cifs_file->dentry);
855 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
856 	struct TCP_Server_Info *server = tcon->ses->server;
857 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
858 	struct super_block *sb = inode->i_sb;
859 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
860 	struct cifs_fid fid = {};
861 	struct cifs_pending_open open;
862 	bool oplock_break_cancelled;
863 	bool serverclose_offloaded = false;
864 
865 	spin_lock(&tcon->open_file_lock);
866 	spin_lock(&cifsi->open_file_lock);
867 	spin_lock(&cifs_file->file_info_lock);
868 
869 	cifs_file->offload = offload;
870 	if (--cifs_file->count > 0) {
871 		spin_unlock(&cifs_file->file_info_lock);
872 		spin_unlock(&cifsi->open_file_lock);
873 		spin_unlock(&tcon->open_file_lock);
874 		return;
875 	}
876 	spin_unlock(&cifs_file->file_info_lock);
877 
878 	if (server->ops->get_lease_key)
879 		server->ops->get_lease_key(inode, &fid);
880 
881 	/* store open in pending opens to make sure we don't miss lease break */
882 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
883 
884 	/* remove it from the lists */
885 	list_del(&cifs_file->flist);
886 	list_del(&cifs_file->tlist);
887 	atomic_dec(&tcon->num_local_opens);
888 
889 	if (list_empty(&cifsi->openFileList)) {
890 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
891 			 d_inode(cifs_file->dentry));
892 		/*
893 		 * In strict cache mode we need to invalidate the mapping on the
894 		 * last close, because otherwise we may get an error when we open
895 		 * this file again and are granted at least a level II oplock.
896 		 */
897 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
898 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
899 		cifs_set_oplock_level(cifsi, 0);
900 	}
901 
902 	spin_unlock(&cifsi->open_file_lock);
903 	spin_unlock(&tcon->open_file_lock);
904 
905 	oplock_break_cancelled = wait_oplock_handler ?
906 		cancel_work_sync(&cifs_file->oplock_break) : false;
907 
908 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
909 		struct TCP_Server_Info *server = tcon->ses->server;
910 		unsigned int xid;
911 		int rc = 0;
912 
913 		xid = get_xid();
914 		if (server->ops->close_getattr)
915 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
916 		else if (server->ops->close)
917 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
918 		_free_xid(xid);
919 
920 		if (rc == -EBUSY || rc == -EAGAIN) {
921 			// Server close failed, hence offloading it as an async op
922 			queue_work(serverclose_wq, &cifs_file->serverclose);
923 			serverclose_offloaded = true;
924 		}
925 	}
926 
927 	if (oplock_break_cancelled)
928 		cifs_done_oplock_break(cifsi);
929 
930 	cifs_del_pending_open(&open);
931 	// If the server close has been offloaded to the workqueue (on
932 	// failure), it will handle offloading the put as well. If not,
933 	// we need to handle offloading the put here.
934 	// we need to handle offloading put here.
935 	if (!serverclose_offloaded) {
936 		if (offload)
937 			queue_work(fileinfo_put_wq, &cifs_file->put);
938 		else
939 			cifsFileInfo_put_final(cifs_file);
940 	}
941 }
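/*
 * Put-path summary: dropping the last reference unhooks the handle from the
 * per-tcon and per-inode lists, records a pending open so a lease break is
 * not missed, sends the SMB close (offloading it to serverclose_wq on
 * -EBUSY/-EAGAIN), and frees the structure either inline or via
 * fileinfo_put_wq.
 */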
942 
943 int cifs_open(struct inode *inode, struct file *file)
945 {
946 	int rc = -EACCES;
947 	unsigned int xid;
948 	__u32 oplock;
949 	struct cifs_sb_info *cifs_sb;
950 	struct TCP_Server_Info *server;
951 	struct cifs_tcon *tcon;
952 	struct tcon_link *tlink;
953 	struct cifsFileInfo *cfile = NULL;
954 	void *page;
955 	const char *full_path;
956 	bool posix_open_ok = false;
957 	struct cifs_fid fid = {};
958 	struct cifs_pending_open open;
959 	struct cifs_open_info_data data = {};
960 
961 	xid = get_xid();
962 
963 	cifs_sb = CIFS_SB(inode->i_sb);
964 	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
965 		free_xid(xid);
966 		return -EIO;
967 	}
968 
969 	tlink = cifs_sb_tlink(cifs_sb);
970 	if (IS_ERR(tlink)) {
971 		free_xid(xid);
972 		return PTR_ERR(tlink);
973 	}
974 	tcon = tlink_tcon(tlink);
975 	server = tcon->ses->server;
976 
977 	page = alloc_dentry_path();
978 	full_path = build_path_from_dentry(file_dentry(file), page);
979 	if (IS_ERR(full_path)) {
980 		rc = PTR_ERR(full_path);
981 		goto out;
982 	}
983 
984 	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
985 		 inode, file->f_flags, full_path);
986 
987 	if (file->f_flags & O_DIRECT &&
988 	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
989 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
990 			file->f_op = &cifs_file_direct_nobrl_ops;
991 		else
992 			file->f_op = &cifs_file_direct_ops;
993 	}
994 
995 	/* Get the cached handle as SMB2 close is deferred */
996 	rc = cifs_get_readable_path(tcon, full_path, &cfile);
997 	if (rc == 0) {
998 		if (file->f_flags == cfile->f_flags) {
999 			file->private_data = cfile;
1000 			spin_lock(&CIFS_I(inode)->deferred_lock);
1001 			cifs_del_deferred_close(cfile);
1002 			spin_unlock(&CIFS_I(inode)->deferred_lock);
1003 			goto use_cache;
1004 		} else {
1005 			_cifsFileInfo_put(cfile, true, false);
1006 		}
1007 	}
1008 
1009 	if (server->oplocks)
1010 		oplock = REQ_OPLOCK;
1011 	else
1012 		oplock = 0;
1013 
1014 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1015 	if (!tcon->broken_posix_open && tcon->unix_ext &&
1016 	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1017 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1018 		/* can not refresh inode info since size could be stale */
1019 		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
1020 				cifs_sb->ctx->file_mode /* ignored */,
1021 				file->f_flags, &oplock, &fid.netfid, xid);
1022 		if (rc == 0) {
1023 			cifs_dbg(FYI, "posix open succeeded\n");
1024 			posix_open_ok = true;
1025 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1026 			if (tcon->ses->serverNOS)
1027 				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
1028 					 tcon->ses->ip_addr,
1029 					 tcon->ses->serverNOS);
1030 			tcon->broken_posix_open = true;
1031 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
1032 			 (rc != -EOPNOTSUPP)) /* path not found or net err */
1033 			goto out;
1034 		/*
1035 		 * Else fallthrough to retry open the old way on network i/o
1036 		 * or DFS errors.
1037 		 */
1038 	}
1039 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1040 
1041 	if (server->ops->get_lease_key)
1042 		server->ops->get_lease_key(inode, &fid);
1043 
1044 	cifs_add_pending_open(&fid, tlink, &open);
1045 
1046 	if (!posix_open_ok) {
1047 		if (server->ops->get_lease_key)
1048 			server->ops->get_lease_key(inode, &fid);
1049 
1050 		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1051 				  xid, &data);
1052 		if (rc) {
1053 			cifs_del_pending_open(&open);
1054 			goto out;
1055 		}
1056 	}
1057 
1058 	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1059 	if (cfile == NULL) {
1060 		if (server->ops->close)
1061 			server->ops->close(xid, tcon, &fid);
1062 		cifs_del_pending_open(&open);
1063 		rc = -ENOMEM;
1064 		goto out;
1065 	}
1066 
1067 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1068 	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1069 		/*
1070 		 * Time to set the mode, which we could not set earlier due to
1071 		 * problems creating new read-only files.
1072 		 */
1073 		struct cifs_unix_set_info_args args = {
1074 			.mode	= inode->i_mode,
1075 			.uid	= INVALID_UID, /* no change */
1076 			.gid	= INVALID_GID, /* no change */
1077 			.ctime	= NO_CHANGE_64,
1078 			.atime	= NO_CHANGE_64,
1079 			.mtime	= NO_CHANGE_64,
1080 			.device	= 0,
1081 		};
1082 		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1083 				       cfile->pid);
1084 	}
1085 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1086 
1087 use_cache:
1088 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1089 			   file->f_mode & FMODE_WRITE);
1090 	if (!(file->f_flags & O_DIRECT))
1091 		goto out;
1092 	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1093 		goto out;
1094 	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1095 
1096 out:
1097 	free_dentry_path(page);
1098 	free_xid(xid);
1099 	cifs_put_tlink(tlink);
1100 	cifs_free_open_info(&data);
1101 	return rc;
1102 }
1103 
1104 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1105 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1106 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1107 
1108 /*
1109  * Try to reacquire byte-range locks that were released when the
1110  * session to the server was lost.
1111  */
1112 static int
1113 cifs_relock_file(struct cifsFileInfo *cfile)
1114 {
1115 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1116 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1117 	int rc = 0;
1118 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1119 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1120 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1121 
1122 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1123 	if (cinode->can_cache_brlcks) {
1124 		/* can cache locks - no need to relock */
1125 		up_read(&cinode->lock_sem);
1126 		return rc;
1127 	}
1128 
1129 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1130 	if (cap_unix(tcon->ses) &&
1131 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1132 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1133 		rc = cifs_push_posix_locks(cfile);
1134 	else
1135 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1136 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1137 
1138 	up_read(&cinode->lock_sem);
1139 	return rc;
1140 }
1141 
1142 static int
1143 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1144 {
1145 	int rc = -EACCES;
1146 	unsigned int xid;
1147 	__u32 oplock;
1148 	struct cifs_sb_info *cifs_sb;
1149 	struct cifs_tcon *tcon;
1150 	struct TCP_Server_Info *server;
1151 	struct cifsInodeInfo *cinode;
1152 	struct inode *inode;
1153 	void *page;
1154 	const char *full_path;
1155 	int desired_access;
1156 	int disposition = FILE_OPEN;
1157 	int create_options = CREATE_NOT_DIR;
1158 	struct cifs_open_parms oparms;
1159 	int rdwr_for_fscache = 0;
1160 
1161 	xid = get_xid();
1162 	mutex_lock(&cfile->fh_mutex);
1163 	if (!cfile->invalidHandle) {
1164 		mutex_unlock(&cfile->fh_mutex);
1165 		free_xid(xid);
1166 		return 0;
1167 	}
1168 
1169 	inode = d_inode(cfile->dentry);
1170 	cifs_sb = CIFS_SB(inode->i_sb);
1171 	tcon = tlink_tcon(cfile->tlink);
1172 	server = tcon->ses->server;
1173 
1174 	/*
1175 	 * We cannot grab the rename sem here, because various ops (including
1176 	 * some that already hold the rename sem) can end up causing writepage
1177 	 * to be called, and if the server was down we may end up here; we can
1178 	 * never tell whether the caller already holds the rename_sem.
1179 	 */
1180 	page = alloc_dentry_path();
1181 	full_path = build_path_from_dentry(cfile->dentry, page);
1182 	if (IS_ERR(full_path)) {
1183 		mutex_unlock(&cfile->fh_mutex);
1184 		free_dentry_path(page);
1185 		free_xid(xid);
1186 		return PTR_ERR(full_path);
1187 	}
1188 
1189 	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1190 		 inode, cfile->f_flags, full_path);
1191 
1192 	if (tcon->ses->server->oplocks)
1193 		oplock = REQ_OPLOCK;
1194 	else
1195 		oplock = 0;
1196 
1197 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1198 	if (tcon->unix_ext && cap_unix(tcon->ses) &&
1199 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1200 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1201 		/*
1202 		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1203 		 * original open. Must mask them off for a reopen.
1204 		 */
1205 		unsigned int oflags = cfile->f_flags &
1206 						~(O_CREAT | O_EXCL | O_TRUNC);
1207 
1208 		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
1209 				     cifs_sb->ctx->file_mode /* ignored */,
1210 				     oflags, &oplock, &cfile->fid.netfid, xid);
1211 		if (rc == 0) {
1212 			cifs_dbg(FYI, "posix reopen succeeded\n");
1213 			oparms.reconnect = true;
1214 			goto reopen_success;
1215 		}
1216 		/*
1217 		 * Fall through to retry opening the old way on errors; in the
1218 		 * reconnect path especially, it is important to retry hard.
1219 		 */
1220 	}
1221 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1222 
1223 	/* If we're caching, we need to be able to fill in around partial writes. */
1224 	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1225 		rdwr_for_fscache = 1;
1226 
1227 	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
1228 
1229 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
1230 	if (cfile->f_flags & O_SYNC)
1231 		create_options |= CREATE_WRITE_THROUGH;
1232 
1233 	if (cfile->f_flags & O_DIRECT)
1234 		create_options |= CREATE_NO_BUFFER;
1235 
1236 	if (server->ops->get_lease_key)
1237 		server->ops->get_lease_key(inode, &cfile->fid);
1238 
1239 retry_open:
1240 	oparms = (struct cifs_open_parms) {
1241 		.tcon = tcon,
1242 		.cifs_sb = cifs_sb,
1243 		.desired_access = desired_access,
1244 		.create_options = cifs_create_options(cifs_sb, create_options),
1245 		.disposition = disposition,
1246 		.path = full_path,
1247 		.fid = &cfile->fid,
1248 		.reconnect = true,
1249 	};
1250 
1251 	/*
1252 	 * We cannot refresh the inode by passing in a file_info buf to be
1253 	 * returned by ops->open and then calling get_inode_info with that buf,
1254 	 * since the file might have write-behind data that needs to be flushed
1255 	 * and the server's version of the file size can be stale. If we knew
1256 	 * for sure that the inode was not dirty locally, we could do this.
1257 	 */
1258 	rc = server->ops->open(xid, &oparms, &oplock, NULL);
1259 	if (rc == -ENOENT && oparms.reconnect == false) {
1260 		/* durable handle timeout is expired - open the file again */
1261 		rc = server->ops->open(xid, &oparms, &oplock, NULL);
1262 		/* indicate that we need to relock the file */
1263 		oparms.reconnect = true;
1264 	}
1265 	if (rc == -EACCES && rdwr_for_fscache == 1) {
1266 		desired_access = cifs_convert_flags(cfile->f_flags, 0);
1267 		rdwr_for_fscache = 2;
1268 		goto retry_open;
1269 	}
1270 
1271 	if (rc) {
1272 		mutex_unlock(&cfile->fh_mutex);
1273 		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1274 		cifs_dbg(FYI, "oplock: %d\n", oplock);
1275 		goto reopen_error_exit;
1276 	}
1277 
1278 	if (rdwr_for_fscache == 2)
1279 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1280 
1281 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1282 reopen_success:
1283 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1284 	cfile->invalidHandle = false;
1285 	mutex_unlock(&cfile->fh_mutex);
1286 	cinode = CIFS_I(inode);
1287 
1288 	if (can_flush) {
1289 		rc = filemap_write_and_wait(inode->i_mapping);
1290 		if (!is_interrupt_error(rc))
1291 			mapping_set_error(inode->i_mapping, rc);
1292 
1293 		if (tcon->posix_extensions) {
1294 			rc = smb311_posix_get_inode_info(&inode, full_path,
1295 							 NULL, inode->i_sb, xid);
1296 		} else if (tcon->unix_ext) {
1297 			rc = cifs_get_inode_info_unix(&inode, full_path,
1298 						      inode->i_sb, xid);
1299 		} else {
1300 			rc = cifs_get_inode_info(&inode, full_path, NULL,
1301 						 inode->i_sb, xid, NULL);
1302 		}
1303 	}
1304 	/*
1305 	 * Else we are already writing out data to the server and could
1306 	 * deadlock if we tried to flush it; and since we do not know whether
1307 	 * we have data that would invalidate the current end of file on the
1308 	 * server, we cannot go to the server for new inode info.
1309 	 */
1310 
1311 	/*
1312 	 * If the server returned a read oplock and we have mandatory brlocks,
1313 	 * set oplock level to None.
1314 	 */
1315 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1316 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1317 		oplock = 0;
1318 	}
1319 
1320 	server->ops->set_fid(cfile, &cfile->fid, oplock);
1321 	if (oparms.reconnect)
1322 		cifs_relock_file(cfile);
1323 
1324 reopen_error_exit:
1325 	free_dentry_path(page);
1326 	free_xid(xid);
1327 	return rc;
1328 }
1329 
1330 void smb2_deferred_work_close(struct work_struct *work)
1331 {
1332 	struct cifsFileInfo *cfile = container_of(work,
1333 			struct cifsFileInfo, deferred.work);
1334 
1335 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1336 	cifs_del_deferred_close(cfile);
1337 	cfile->deferred_close_scheduled = false;
1338 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1339 	_cifsFileInfo_put(cfile, true, false);
1340 }
1341 
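/*
 * A close may be deferred only when closetimeo is set, a lease is held and
 * cached for read (RH or RHW), the caller allocated a cifs_deferred_close,
 * and CIFS_INO_CLOSE_ON_LOCK is not set on the inode.
 */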
1342 static bool
1343 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1344 {
1345 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1346 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1347 
1348 	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1349 			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
1350 			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
1351 			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
1352 
1353 }
1354 
1355 int cifs_close(struct inode *inode, struct file *file)
1356 {
1357 	struct cifsFileInfo *cfile;
1358 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1359 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1360 	struct cifs_deferred_close *dclose;
1361 
1362 	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1363 
1364 	if (file->private_data != NULL) {
1365 		cfile = file->private_data;
1366 		file->private_data = NULL;
1367 		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
1368 		if ((cfile->status_file_deleted == false) &&
1369 		    (smb2_can_defer_close(inode, dclose))) {
1370 			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
1371 				inode_set_mtime_to_ts(inode,
1372 						      inode_set_ctime_current(inode));
1373 			}
1374 			spin_lock(&cinode->deferred_lock);
1375 			cifs_add_deferred_close(cfile, dclose);
1376 			if (cfile->deferred_close_scheduled &&
1377 			    delayed_work_pending(&cfile->deferred)) {
1378 				/*
1379 				 * mod_delayed_work() returns false when it queues new work (none
1380 				 * was pending); take an extra reference then to avoid a use-after-free.
1381 				 */
1382 				if (!mod_delayed_work(deferredclose_wq,
1383 						&cfile->deferred, cifs_sb->ctx->closetimeo))
1384 					cifsFileInfo_get(cfile);
1385 			} else {
1386 				/* Deferred close for files */
1387 				queue_delayed_work(deferredclose_wq,
1388 						&cfile->deferred, cifs_sb->ctx->closetimeo);
1389 				cfile->deferred_close_scheduled = true;
1390 				spin_unlock(&cinode->deferred_lock);
1391 				return 0;
1392 			}
1393 			spin_unlock(&cinode->deferred_lock);
1394 			_cifsFileInfo_put(cfile, true, false);
1395 		} else {
1396 			_cifsFileInfo_put(cfile, true, false);
1397 			kfree(dclose);
1398 		}
1399 	}
1400 
1401 	/* return code from the ->release op is always ignored */
1402 	return 0;
1403 }
1404 
1405 void
1406 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1407 {
1408 	struct cifsFileInfo *open_file, *tmp;
1409 	struct list_head tmp_list;
1410 
1411 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1412 		return;
1413 
1414 	tcon->need_reopen_files = false;
1415 
1416 	cifs_dbg(FYI, "Reopen persistent handles\n");
1417 	INIT_LIST_HEAD(&tmp_list);
1418 
1419 	/* list all files open on tree connection, reopen resilient handles  */
1420 	spin_lock(&tcon->open_file_lock);
1421 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1422 		if (!open_file->invalidHandle)
1423 			continue;
1424 		cifsFileInfo_get(open_file);
1425 		list_add_tail(&open_file->rlist, &tmp_list);
1426 	}
1427 	spin_unlock(&tcon->open_file_lock);
1428 
1429 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1430 		if (cifs_reopen_file(open_file, false /* do not flush */))
1431 			tcon->need_reopen_files = true;
1432 		list_del_init(&open_file->rlist);
1433 		cifsFileInfo_put(open_file);
1434 	}
1435 }
1436 
1437 int cifs_closedir(struct inode *inode, struct file *file)
1438 {
1439 	int rc = 0;
1440 	unsigned int xid;
1441 	struct cifsFileInfo *cfile = file->private_data;
1442 	struct cifs_tcon *tcon;
1443 	struct TCP_Server_Info *server;
1444 	char *buf;
1445 
1446 	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1447 
1448 	if (cfile == NULL)
1449 		return rc;
1450 
1451 	xid = get_xid();
1452 	tcon = tlink_tcon(cfile->tlink);
1453 	server = tcon->ses->server;
1454 
1455 	cifs_dbg(FYI, "Freeing private data in close dir\n");
1456 	spin_lock(&cfile->file_info_lock);
1457 	if (server->ops->dir_needs_close(cfile)) {
1458 		cfile->invalidHandle = true;
1459 		spin_unlock(&cfile->file_info_lock);
1460 		if (server->ops->close_dir)
1461 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1462 		else
1463 			rc = -ENOSYS;
1464 		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
1465 		/* not much we can do if it fails anyway, ignore rc */
1466 		rc = 0;
1467 	} else
1468 		spin_unlock(&cfile->file_info_lock);
1469 
1470 	buf = cfile->srch_inf.ntwrk_buf_start;
1471 	if (buf) {
1472 		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
1473 		cfile->srch_inf.ntwrk_buf_start = NULL;
1474 		if (cfile->srch_inf.smallBuf)
1475 			cifs_small_buf_release(buf);
1476 		else
1477 			cifs_buf_release(buf);
1478 	}
1479 
1480 	cifs_put_tlink(cfile->tlink);
1481 	kfree(file->private_data);
1482 	file->private_data = NULL;
1483 	/* BB can we lock the filestruct while this is going on? */
1484 	free_xid(xid);
1485 	return rc;
1486 }
1487 
1488 static struct cifsLockInfo *
1489 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1490 {
1491 	struct cifsLockInfo *lock =
1492 		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
1493 	if (!lock)
1494 		return lock;
1495 	lock->offset = offset;
1496 	lock->length = length;
1497 	lock->type = type;
1498 	lock->pid = current->tgid;
1499 	lock->flags = flags;
1500 	INIT_LIST_HEAD(&lock->blist);
1501 	init_waitqueue_head(&lock->block_q);
1502 	return lock;
1503 }
1504 
1505 void
1506 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1507 {
1508 	struct cifsLockInfo *li, *tmp;
1509 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1510 		list_del_init(&li->blist);
1511 		wake_up(&li->block_q);
1512 	}
1513 }
1514 
1515 #define CIFS_LOCK_OP	0
1516 #define CIFS_READ_OP	1
1517 #define CIFS_WRITE_OP	2
1518 
1519 /* @rw_check: CIFS_LOCK_OP (0) - lock op, CIFS_READ_OP (1) - read, CIFS_WRITE_OP (2) - write */
1520 static bool
1521 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
1522 			    __u64 length, __u8 type, __u16 flags,
1523 			    struct cifsFileInfo *cfile,
1524 			    struct cifsLockInfo **conf_lock, int rw_check)
1525 {
1526 	struct cifsLockInfo *li;
1527 	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
1528 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1529 
1530 	list_for_each_entry(li, &fdlocks->locks, llist) {
1531 		if (offset + length <= li->offset ||
1532 		    offset >= li->offset + li->length)
1533 			continue;
1534 		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1535 		    server->ops->compare_fids(cfile, cur_cfile)) {
1536 			/* shared lock prevents write op through the same fid */
1537 			if (!(li->type & server->vals->shared_lock_type) ||
1538 			    rw_check != CIFS_WRITE_OP)
1539 				continue;
1540 		}
1541 		if ((type & server->vals->shared_lock_type) &&
1542 		    ((server->ops->compare_fids(cfile, cur_cfile) &&
1543 		     current->tgid == li->pid) || type == li->type))
1544 			continue;
1545 		if (rw_check == CIFS_LOCK_OP &&
1546 		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1547 		    server->ops->compare_fids(cfile, cur_cfile))
1548 			continue;
1549 		if (conf_lock)
1550 			*conf_lock = li;
1551 		return true;
1552 	}
1553 	return false;
1554 }
1555 
1556 bool
1557 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1558 			__u8 type, __u16 flags,
1559 			struct cifsLockInfo **conf_lock, int rw_check)
1560 {
1561 	bool rc = false;
1562 	struct cifs_fid_locks *cur;
1563 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1564 
1565 	list_for_each_entry(cur, &cinode->llist, llist) {
1566 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1567 						 flags, cfile, conf_lock,
1568 						 rw_check);
1569 		if (rc)
1570 			break;
1571 	}
1572 
1573 	return rc;
1574 }
1575 
1576 /*
1577  * Check if there is another lock that prevents us from setting the lock
1578  * (mandatory style). If such a lock exists, update the flock structure with
1579  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1580  * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
1581  * ask the server, or 1 otherwise.
1582  */
1583 static int
1584 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1585 	       __u8 type, struct file_lock *flock)
1586 {
1587 	int rc = 0;
1588 	struct cifsLockInfo *conf_lock;
1589 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1590 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1591 	bool exist;
1592 
1593 	down_read(&cinode->lock_sem);
1594 
1595 	exist = cifs_find_lock_conflict(cfile, offset, length, type,
1596 					flock->c.flc_flags, &conf_lock,
1597 					CIFS_LOCK_OP);
1598 	if (exist) {
1599 		flock->fl_start = conf_lock->offset;
1600 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1601 		flock->c.flc_pid = conf_lock->pid;
1602 		if (conf_lock->type & server->vals->shared_lock_type)
1603 			flock->c.flc_type = F_RDLCK;
1604 		else
1605 			flock->c.flc_type = F_WRLCK;
1606 	} else if (!cinode->can_cache_brlcks)
1607 		rc = 1;
1608 	else
1609 		flock->c.flc_type = F_UNLCK;
1610 
1611 	up_read(&cinode->lock_sem);
1612 	return rc;
1613 }
1614 
1615 static void
1616 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1617 {
1618 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1619 	cifs_down_write(&cinode->lock_sem);
1620 	list_add_tail(&lock->llist, &cfile->llist->locks);
1621 	up_write(&cinode->lock_sem);
1622 }
1623 
1624 /*
1625  * Set the byte-range lock (mandatory style). Returns:
1626  * 1) 0, if we set the lock and don't need to request to the server;
1627  * 2) 1, if no locks prevent us but we need to request to the server;
1628  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1629  */
1630 static int
1631 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1632 		 bool wait)
1633 {
1634 	struct cifsLockInfo *conf_lock;
1635 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1636 	bool exist;
1637 	int rc = 0;
1638 
1639 try_again:
1640 	exist = false;
1641 	cifs_down_write(&cinode->lock_sem);
1642 
1643 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1644 					lock->type, lock->flags, &conf_lock,
1645 					CIFS_LOCK_OP);
1646 	if (!exist && cinode->can_cache_brlcks) {
1647 		list_add_tail(&lock->llist, &cfile->llist->locks);
1648 		up_write(&cinode->lock_sem);
1649 		return rc;
1650 	}
1651 
1652 	if (!exist)
1653 		rc = 1;
1654 	else if (!wait)
1655 		rc = -EACCES;
1656 	else {
1657 		list_add_tail(&lock->blist, &conf_lock->blist);
1658 		up_write(&cinode->lock_sem);
1659 		rc = wait_event_interruptible(lock->block_q,
1660 					(lock->blist.prev == &lock->blist) &&
1661 					(lock->blist.next == &lock->blist));
1662 		if (!rc)
1663 			goto try_again;
1664 		cifs_down_write(&cinode->lock_sem);
1665 		list_del_init(&lock->blist);
1666 	}
1667 
1668 	up_write(&cinode->lock_sem);
1669 	return rc;
1670 }
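/*
 * Note: a blocked waiter above sleeps on lock->block_q after linking itself
 * into the conflicting lock's blist; cifs_del_lock_waiters() unlinks and
 * wakes it when the conflicting lock goes away, which is what the
 * empty-list condition in the wait_event_interruptible() call detects.
 */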
1671 
1672 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1673 /*
1674  * Check if there is another lock that prevents us from setting the lock
1675  * (posix style). If such a lock exists, update the flock structure with
1676  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1677  * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
1678  * ask the server, or 1 otherwise.
1679  */
1680 static int
1681 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1682 {
1683 	int rc = 0;
1684 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1685 	unsigned char saved_type = flock->c.flc_type;
1686 
1687 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1688 		return 1;
1689 
1690 	down_read(&cinode->lock_sem);
1691 	posix_test_lock(file, flock);
1692 
1693 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1694 		flock->c.flc_type = saved_type;
1695 		rc = 1;
1696 	}
1697 
1698 	up_read(&cinode->lock_sem);
1699 	return rc;
1700 }
1701 
1702 /*
1703  * Set the byte-range lock (posix style). Returns:
1704  * 1) <0, if an error occurs while setting the lock;
1705  * 2) 0, if we set the lock and don't need to request to the server;
1706  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1707  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1708  */
1709 static int
1710 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1711 {
1712 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1713 	int rc = FILE_LOCK_DEFERRED + 1;
1714 
1715 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1716 		return rc;
1717 
1718 	cifs_down_write(&cinode->lock_sem);
1719 	if (!cinode->can_cache_brlcks) {
1720 		up_write(&cinode->lock_sem);
1721 		return rc;
1722 	}
1723 
1724 	rc = posix_lock_file(file, flock, NULL);
1725 	up_write(&cinode->lock_sem);
1726 	return rc;
1727 }
1728 
1729 int
1730 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1731 {
1732 	unsigned int xid;
1733 	int rc = 0, stored_rc;
1734 	struct cifsLockInfo *li, *tmp;
1735 	struct cifs_tcon *tcon;
1736 	unsigned int num, max_num, max_buf;
1737 	LOCKING_ANDX_RANGE *buf, *cur;
1738 	static const int types[] = {
1739 		LOCKING_ANDX_LARGE_FILES,
1740 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1741 	};
1742 	int i;
1743 
1744 	xid = get_xid();
1745 	tcon = tlink_tcon(cfile->tlink);
1746 
1747 	/*
1748 	 * Accessing maxBuf is racy with cifs_reconnect - need to store the
1749 	 * value and check it before using it.
1750 	 */
1751 	max_buf = tcon->ses->server->maxBuf;
1752 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1753 		free_xid(xid);
1754 		return -EINVAL;
1755 	}
1756 
1757 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1758 		     PAGE_SIZE);
1759 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1760 			PAGE_SIZE);
1761 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1762 						sizeof(LOCKING_ANDX_RANGE);
1763 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1764 	if (!buf) {
1765 		free_xid(xid);
1766 		return -ENOMEM;
1767 	}
1768 
1769 	for (i = 0; i < 2; i++) {
1770 		cur = buf;
1771 		num = 0;
1772 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1773 			if (li->type != types[i])
1774 				continue;
1775 			cur->Pid = cpu_to_le16(li->pid);
1776 			cur->LengthLow = cpu_to_le32((u32)li->length);
1777 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1778 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1779 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1780 			if (++num == max_num) {
1781 				stored_rc = cifs_lockv(xid, tcon,
1782 						       cfile->fid.netfid,
1783 						       (__u8)li->type, 0, num,
1784 						       buf);
1785 				if (stored_rc)
1786 					rc = stored_rc;
1787 				cur = buf;
1788 				num = 0;
1789 			} else
1790 				cur++;
1791 		}
1792 
1793 		if (num) {
1794 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1795 					       (__u8)types[i], 0, num, buf);
1796 			if (stored_rc)
1797 				rc = stored_rc;
1798 		}
1799 	}
1800 
1801 	kfree(buf);
1802 	free_xid(xid);
1803 	return rc;
1804 }
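
/*
 * A minimal userspace sketch of the buffer-sizing arithmetic above, using
 * assumed stand-in sizes (the real ones come from sizeof(struct smb_hdr)
 * and sizeof(LOCKING_ANDX_RANGE) in the wire-format headers).  It computes
 * how many lock ranges fit into one LOCKING_ANDX request once the usable
 * buffer is capped at a page, mirroring the min_t() logic above.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096u
#define EXAMPLE_HDR_SIZE	32u	/* assumed stand-in for sizeof(struct smb_hdr) */
#define EXAMPLE_RANGE_SIZE	20u	/* assumed stand-in for sizeof(LOCKING_ANDX_RANGE) */

int main(void)
{
	unsigned int max_buf = 16644;	/* hypothetical server maxBuf */
	unsigned int max_num;

	if (max_buf < EXAMPLE_HDR_SIZE + EXAMPLE_RANGE_SIZE)
		return 1;	/* too small - the -EINVAL path above */

	/* cap the usable payload at one page */
	max_buf -= EXAMPLE_HDR_SIZE;
	if (max_buf > EXAMPLE_PAGE_SIZE)
		max_buf = EXAMPLE_PAGE_SIZE;

	max_num = (max_buf - EXAMPLE_HDR_SIZE) / EXAMPLE_RANGE_SIZE;
	printf("%u lock ranges per LOCKING_ANDX request\n", max_num);
	return 0;
}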
1805 
1806 static __u32
1807 hash_lockowner(fl_owner_t owner)
1808 {
1809 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1810 }
1811 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1812 
1813 struct lock_to_push {
1814 	struct list_head llist;
1815 	__u64 offset;
1816 	__u64 length;
1817 	__u32 pid;
1818 	__u16 netfid;
1819 	__u8 type;
1820 };
1821 
1822 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1823 static int
1824 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1825 {
1826 	struct inode *inode = d_inode(cfile->dentry);
1827 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1828 	struct file_lock *flock;
1829 	struct file_lock_context *flctx = locks_inode_context(inode);
1830 	unsigned int count = 0, i;
1831 	int rc = 0, xid, type;
1832 	struct list_head locks_to_send, *el;
1833 	struct lock_to_push *lck, *tmp;
1834 	__u64 length;
1835 
1836 	xid = get_xid();
1837 
1838 	if (!flctx)
1839 		goto out;
1840 
1841 	spin_lock(&flctx->flc_lock);
1842 	list_for_each(el, &flctx->flc_posix) {
1843 		count++;
1844 	}
1845 	spin_unlock(&flctx->flc_lock);
1846 
1847 	INIT_LIST_HEAD(&locks_to_send);
1848 
1849 	/*
1850 	 * Allocating count locks is enough because no FL_POSIX locks can be
1851 	 * added to the list while we are holding cinode->lock_sem, which
1852 	 * protects the locking operations of this inode.
1853 	 */
1854 	for (i = 0; i < count; i++) {
1855 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1856 		if (!lck) {
1857 			rc = -ENOMEM;
1858 			goto err_out;
1859 		}
1860 		list_add_tail(&lck->llist, &locks_to_send);
1861 	}
1862 
1863 	el = locks_to_send.next;
1864 	spin_lock(&flctx->flc_lock);
1865 	for_each_file_lock(flock, &flctx->flc_posix) {
1866 		unsigned char ftype = flock->c.flc_type;
1867 
1868 		if (el == &locks_to_send) {
1869 			/*
1870 			 * The list ended. We don't have enough allocated
1871 			 * structures - something is really wrong.
1872 			 */
1873 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1874 			break;
1875 		}
1876 		length = cifs_flock_len(flock);
1877 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1878 			type = CIFS_RDLCK;
1879 		else
1880 			type = CIFS_WRLCK;
1881 		lck = list_entry(el, struct lock_to_push, llist);
1882 		lck->pid = hash_lockowner(flock->c.flc_owner);
1883 		lck->netfid = cfile->fid.netfid;
1884 		lck->length = length;
1885 		lck->type = type;
1886 		lck->offset = flock->fl_start;
1887 	}
1888 	spin_unlock(&flctx->flc_lock);
1889 
1890 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1891 		int stored_rc;
1892 
1893 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1894 					     lck->offset, lck->length, NULL,
1895 					     lck->type, 0);
1896 		if (stored_rc)
1897 			rc = stored_rc;
1898 		list_del(&lck->llist);
1899 		kfree(lck);
1900 	}
1901 
1902 out:
1903 	free_xid(xid);
1904 	return rc;
1905 err_out:
1906 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1907 		list_del(&lck->llist);
1908 		kfree(lck);
1909 	}
1910 	goto out;
1911 }
1912 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1913 
1914 static int
1915 cifs_push_locks(struct cifsFileInfo *cfile)
1916 {
1917 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1918 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1919 	int rc = 0;
1920 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1921 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1922 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1923 
1924 	/* we are going to update can_cache_brlcks here - need a write access */
1925 	cifs_down_write(&cinode->lock_sem);
1926 	if (!cinode->can_cache_brlcks) {
1927 		up_write(&cinode->lock_sem);
1928 		return rc;
1929 	}
1930 
1931 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1932 	if (cap_unix(tcon->ses) &&
1933 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1934 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1935 		rc = cifs_push_posix_locks(cfile);
1936 	else
1937 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1938 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1939 
1940 	cinode->can_cache_brlcks = false;
1941 	up_write(&cinode->lock_sem);
1942 	return rc;
1943 }
1944 
1945 static void
1946 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1947 		bool *wait_flag, struct TCP_Server_Info *server)
1948 {
1949 	if (flock->c.flc_flags & FL_POSIX)
1950 		cifs_dbg(FYI, "Posix\n");
1951 	if (flock->c.flc_flags & FL_FLOCK)
1952 		cifs_dbg(FYI, "Flock\n");
1953 	if (flock->c.flc_flags & FL_SLEEP) {
1954 		cifs_dbg(FYI, "Blocking lock\n");
1955 		*wait_flag = true;
1956 	}
1957 	if (flock->c.flc_flags & FL_ACCESS)
1958 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1959 	if (flock->c.flc_flags & FL_LEASE)
1960 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1961 	if (flock->c.flc_flags &
1962 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1963 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1964 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1965 		         flock->c.flc_flags);
1966 
1967 	*type = server->vals->large_lock_type;
1968 	if (lock_is_write(flock)) {
1969 		cifs_dbg(FYI, "F_WRLCK\n");
1970 		*type |= server->vals->exclusive_lock_type;
1971 		*lock = 1;
1972 	} else if (lock_is_unlock(flock)) {
1973 		cifs_dbg(FYI, "F_UNLCK\n");
1974 		*type |= server->vals->unlock_lock_type;
1975 		*unlock = 1;
1976 		/* Check if unlock includes more than one lock range */
1977 	} else if (lock_is_read(flock)) {
1978 		cifs_dbg(FYI, "F_RDLCK\n");
1979 		*type |= server->vals->shared_lock_type;
1980 		*lock = 1;
1981 	} else if (flock->c.flc_type == F_EXLCK) {
1982 		cifs_dbg(FYI, "F_EXLCK\n");
1983 		*type |= server->vals->exclusive_lock_type;
1984 		*lock = 1;
1985 	} else if (flock->c.flc_type == F_SHLCK) {
1986 		cifs_dbg(FYI, "F_SHLCK\n");
1987 		*type |= server->vals->shared_lock_type;
1988 		*lock = 1;
1989 	} else
1990 		cifs_dbg(FYI, "Unknown type of lock\n");
1991 }
1992 
1993 static int
1994 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1995 	   bool wait_flag, bool posix_lck, unsigned int xid)
1996 {
1997 	int rc = 0;
1998 	__u64 length = cifs_flock_len(flock);
1999 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2000 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2001 	struct TCP_Server_Info *server = tcon->ses->server;
2002 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2003 	__u16 netfid = cfile->fid.netfid;
2004 
2005 	if (posix_lck) {
2006 		int posix_lock_type;
2007 
2008 		rc = cifs_posix_lock_test(file, flock);
2009 		if (!rc)
2010 			return rc;
2011 
2012 		if (type & server->vals->shared_lock_type)
2013 			posix_lock_type = CIFS_RDLCK;
2014 		else
2015 			posix_lock_type = CIFS_WRLCK;
2016 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2017 				      hash_lockowner(flock->c.flc_owner),
2018 				      flock->fl_start, length, flock,
2019 				      posix_lock_type, wait_flag);
2020 		return rc;
2021 	}
2022 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2023 
2024 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2025 	if (!rc)
2026 		return rc;
2027 
2028 	/* BB we could chain these into one lock request BB */
2029 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2030 				    1, 0, false);
2031 	if (rc == 0) {
2032 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2033 					    type, 0, 1, false);
2034 		flock->c.flc_type = F_UNLCK;
2035 		if (rc != 0)
2036 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2037 				 rc);
2038 		return 0;
2039 	}
2040 
2041 	if (type & server->vals->shared_lock_type) {
2042 		flock->c.flc_type = F_WRLCK;
2043 		return 0;
2044 	}
2045 
2046 	type &= ~server->vals->exclusive_lock_type;
2047 
2048 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2049 				    type | server->vals->shared_lock_type,
2050 				    1, 0, false);
2051 	if (rc == 0) {
2052 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2053 			type | server->vals->shared_lock_type, 0, 1, false);
2054 		flock->c.flc_type = F_RDLCK;
2055 		if (rc != 0)
2056 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2057 				 rc);
2058 	} else
2059 		flock->c.flc_type = F_WRLCK;
2060 
2061 	return 0;
2062 }
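
/*
 * A hedged userspace illustration of the lock-test semantics that
 * cifs_getlk() implements for F_GETLK: the call either reports F_UNLCK
 * when the range is free or fills the structure with a conflicting lock.
 * The mount-point path below is an assumption for the example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* would an exclusive lock succeed? */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 4096,
	};
	int fd = open("/mnt/cifs/testfile", O_RDWR);	/* assumed CIFS mount */

	if (fd < 0)
		return 1;
	if (fcntl(fd, F_GETLK, &fl) < 0)
		return 1;
	if (fl.l_type == F_UNLCK)
		printf("range 0-4095 is free\n");
	else
		printf("range is held by pid %d\n", (int)fl.l_pid);
	close(fd);
	return 0;
}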
2063 
2064 void
2065 cifs_move_llist(struct list_head *source, struct list_head *dest)
2066 {
2067 	struct list_head *li, *tmp;
2068 	list_for_each_safe(li, tmp, source)
2069 		list_move(li, dest);
2070 }
2071 
2072 void
2073 cifs_free_llist(struct list_head *llist)
2074 {
2075 	struct cifsLockInfo *li, *tmp;
2076 	list_for_each_entry_safe(li, tmp, llist, llist) {
2077 		cifs_del_lock_waiters(li);
2078 		list_del(&li->llist);
2079 		kfree(li);
2080 	}
2081 }
2082 
2083 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2084 int
2085 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2086 		  unsigned int xid)
2087 {
2088 	int rc = 0, stored_rc;
2089 	static const int types[] = {
2090 		LOCKING_ANDX_LARGE_FILES,
2091 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2092 	};
2093 	unsigned int i;
2094 	unsigned int max_num, num, max_buf;
2095 	LOCKING_ANDX_RANGE *buf, *cur;
2096 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2097 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2098 	struct cifsLockInfo *li, *tmp;
2099 	__u64 length = cifs_flock_len(flock);
2100 	struct list_head tmp_llist;
2101 
2102 	INIT_LIST_HEAD(&tmp_llist);
2103 
2104 	/*
2105 	 * Accessing maxBuf is racy with cifs_reconnect - need to store the
2106 	 * value and check it before using it.
2107 	 */
2108 	max_buf = tcon->ses->server->maxBuf;
2109 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2110 		return -EINVAL;
2111 
2112 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2113 		     PAGE_SIZE);
2114 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2115 			PAGE_SIZE);
2116 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2117 						sizeof(LOCKING_ANDX_RANGE);
2118 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2119 	if (!buf)
2120 		return -ENOMEM;
2121 
2122 	cifs_down_write(&cinode->lock_sem);
2123 	for (i = 0; i < 2; i++) {
2124 		cur = buf;
2125 		num = 0;
2126 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2127 			if (flock->fl_start > li->offset ||
2128 			    (flock->fl_start + length) <
2129 			    (li->offset + li->length))
2130 				continue;
2131 			if (current->tgid != li->pid)
2132 				continue;
2133 			if (types[i] != li->type)
2134 				continue;
2135 			if (cinode->can_cache_brlcks) {
2136 				/*
2137 				 * We can cache brlock requests - simply remove
2138 				 * a lock from the file's list.
2139 				 */
2140 				list_del(&li->llist);
2141 				cifs_del_lock_waiters(li);
2142 				kfree(li);
2143 				continue;
2144 			}
2145 			cur->Pid = cpu_to_le16(li->pid);
2146 			cur->LengthLow = cpu_to_le32((u32)li->length);
2147 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2148 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2149 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2150 			/*
2151 			 * We need to save the lock here so that we can add it back to
2152 			 * the file's list if the unlock range request fails on
2153 			 * the server.
2154 			 */
2155 			list_move(&li->llist, &tmp_llist);
2156 			if (++num == max_num) {
2157 				stored_rc = cifs_lockv(xid, tcon,
2158 						       cfile->fid.netfid,
2159 						       li->type, num, 0, buf);
2160 				if (stored_rc) {
2161 					/*
2162 					 * We failed on the unlock range
2163 					 * request - add all locks from the tmp
2164 					 * list to the head of the file's list.
2165 					 */
2166 					cifs_move_llist(&tmp_llist,
2167 							&cfile->llist->locks);
2168 					rc = stored_rc;
2169 				} else
2170 					/*
2171 					 * The unlock range request succeeded -
2172 					 * free the tmp list.
2173 					 */
2174 					cifs_free_llist(&tmp_llist);
2175 				cur = buf;
2176 				num = 0;
2177 			} else
2178 				cur++;
2179 		}
2180 		if (num) {
2181 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2182 					       types[i], num, 0, buf);
2183 			if (stored_rc) {
2184 				cifs_move_llist(&tmp_llist,
2185 						&cfile->llist->locks);
2186 				rc = stored_rc;
2187 			} else
2188 				cifs_free_llist(&tmp_llist);
2189 		}
2190 	}
2191 
2192 	up_write(&cinode->lock_sem);
2193 	kfree(buf);
2194 	return rc;
2195 }
2196 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2197 
2198 static int
2199 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2200 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2201 	   unsigned int xid)
2202 {
2203 	int rc = 0;
2204 	__u64 length = cifs_flock_len(flock);
2205 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2206 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2207 	struct TCP_Server_Info *server = tcon->ses->server;
2208 	struct inode *inode = d_inode(cfile->dentry);
2209 
2210 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2211 	if (posix_lck) {
2212 		int posix_lock_type;
2213 
2214 		rc = cifs_posix_lock_set(file, flock);
2215 		if (rc <= FILE_LOCK_DEFERRED)
2216 			return rc;
2217 
2218 		if (type & server->vals->shared_lock_type)
2219 			posix_lock_type = CIFS_RDLCK;
2220 		else
2221 			posix_lock_type = CIFS_WRLCK;
2222 
2223 		if (unlock == 1)
2224 			posix_lock_type = CIFS_UNLCK;
2225 
2226 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2227 				      hash_lockowner(flock->c.flc_owner),
2228 				      flock->fl_start, length,
2229 				      NULL, posix_lock_type, wait_flag);
2230 		goto out;
2231 	}
2232 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2233 	if (lock) {
2234 		struct cifsLockInfo *lock;
2235 
2236 		lock = cifs_lock_init(flock->fl_start, length, type,
2237 				      flock->c.flc_flags);
2238 		if (!lock)
2239 			return -ENOMEM;
2240 
2241 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2242 		if (rc < 0) {
2243 			kfree(lock);
2244 			return rc;
2245 		}
2246 		if (!rc)
2247 			goto out;
2248 
2249 		/*
2250 		 * A Windows 7 server can delay breaking a lease from read to None
2251 		 * if we set a byte-range lock on a file - break it explicitly
2252 		 * before sending the lock to the server to be sure the next
2253 		 * read won't conflict with non-overlapping locks due to
2254 		 * page reading.
2255 		 */
2256 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2257 					CIFS_CACHE_READ(CIFS_I(inode))) {
2258 			cifs_zap_mapping(inode);
2259 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2260 				 inode);
2261 			CIFS_I(inode)->oplock = 0;
2262 		}
2263 
2264 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2265 					    type, 1, 0, wait_flag);
2266 		if (rc) {
2267 			kfree(lock);
2268 			return rc;
2269 		}
2270 
2271 		cifs_lock_add(cfile, lock);
2272 	} else if (unlock)
2273 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2274 
2275 out:
2276 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2277 		/*
2278 		 * If this is a request to remove all locks because we
2279 		 * are closing the file, it doesn't matter if the
2280 		 * unlocking failed as both cifs.ko and the SMB server
2281 		 * remove the lock on file close
2282 		 */
2283 		if (rc) {
2284 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2285 			if (!(flock->c.flc_flags & FL_CLOSE))
2286 				return rc;
2287 		}
2288 		rc = locks_lock_file_wait(file, flock);
2289 	}
2290 	return rc;
2291 }
2292 
2293 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2294 {
2295 	int rc, xid;
2296 	int lock = 0, unlock = 0;
2297 	bool wait_flag = false;
2298 	bool posix_lck = false;
2299 	struct cifs_sb_info *cifs_sb;
2300 	struct cifs_tcon *tcon;
2301 	struct cifsFileInfo *cfile;
2302 	__u32 type;
2303 
2304 	xid = get_xid();
2305 
2306 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2307 		rc = -ENOLCK;
2308 		free_xid(xid);
2309 		return rc;
2310 	}
2311 
2312 	cfile = (struct cifsFileInfo *)file->private_data;
2313 	tcon = tlink_tcon(cfile->tlink);
2314 
2315 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2316 			tcon->ses->server);
2317 	cifs_sb = CIFS_FILE_SB(file);
2318 
2319 	if (cap_unix(tcon->ses) &&
2320 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2321 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2322 		posix_lck = true;
2323 
2324 	if (!lock && !unlock) {
2325 		/*
2326 		 * If this is neither a lock nor an unlock request, there is
2327 		 * nothing to do since we do not know what it is.
2328 		 */
2329 		rc = -EOPNOTSUPP;
2330 		free_xid(xid);
2331 		return rc;
2332 	}
2333 
2334 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2335 			xid);
2336 	free_xid(xid);
2337 	return rc;
2340 }
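
/*
 * A short userspace sketch of the flock() path handled above: whole-file
 * advisory locks, where LOCK_NB corresponds to wait_flag == false.  The
 * path below is an assumption for the example.
 */
#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/cifs/shared.dat", O_RDWR);	/* assumed CIFS mount */

	if (fd < 0)
		return 1;
	if (flock(fd, LOCK_EX | LOCK_NB) < 0)	/* non-blocking exclusive lock */
		return 1;			/* EWOULDBLOCK if contended */
	/* ... the file is exclusively locked here ... */
	flock(fd, LOCK_UN);
	close(fd);
	return 0;
}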
2341 
2342 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2343 {
2344 	int rc, xid;
2345 	int lock = 0, unlock = 0;
2346 	bool wait_flag = false;
2347 	bool posix_lck = false;
2348 	struct cifs_sb_info *cifs_sb;
2349 	struct cifs_tcon *tcon;
2350 	struct cifsFileInfo *cfile;
2351 	__u32 type;
2352 
2353 	rc = -EACCES;
2354 	xid = get_xid();
2355 
2356 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2357 		 flock->c.flc_flags, flock->c.flc_type,
2358 		 (long long)flock->fl_start,
2359 		 (long long)flock->fl_end);
2360 
2361 	cfile = (struct cifsFileInfo *)file->private_data;
2362 	tcon = tlink_tcon(cfile->tlink);
2363 
2364 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2365 			tcon->ses->server);
2366 	cifs_sb = CIFS_FILE_SB(file);
2367 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2368 
2369 	if (cap_unix(tcon->ses) &&
2370 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2371 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2372 		posix_lck = true;
2373 	/*
2374 	 * BB add code here to normalize offset and length to account for
2375 	 * negative length, which we cannot accept over the wire.
2376 	 */
2377 	if (IS_GETLK(cmd)) {
2378 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2379 		free_xid(xid);
2380 		return rc;
2381 	}
2382 
2383 	if (!lock && !unlock) {
2384 		/*
2385 		 * If this is neither a lock nor an unlock request, there is
2386 		 * nothing to do since we do not know what it is.
2387 		 */
2388 		free_xid(xid);
2389 		return -EOPNOTSUPP;
2390 	}
2391 
2392 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2393 			xid);
2394 	free_xid(xid);
2395 	return rc;
2396 }
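
/*
 * A companion sketch for the fcntl() byte-range path: F_SETLKW corresponds
 * to the FL_SLEEP/wait_flag case above and blocks until the range becomes
 * free.  The path and record size are assumptions for illustration.
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 512,	/* lock only the first record */
	};
	int fd = open("/mnt/cifs/records.db", O_RDWR);	/* assumed CIFS mount */

	if (fd < 0)
		return 1;
	if (fcntl(fd, F_SETLKW, &fl) < 0)	/* blocks until the lock is granted */
		return 1;
	/* ... update the locked record ... */
	fl.l_type = F_UNLCK;
	fcntl(fd, F_SETLK, &fl);
	close(fd);
	return 0;
}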
2397 
2398 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2399 				      bool was_async)
2400 {
2401 	struct netfs_io_request *wreq = wdata->rreq;
2402 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2403 	loff_t wrend;
2404 
2405 	if (result > 0) {
2406 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2407 
2408 		if (wrend > ictx->zero_point &&
2409 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2410 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2411 			ictx->zero_point = wrend;
2412 		if (wrend > ictx->remote_i_size)
2413 			netfs_resize_file(ictx, wrend, true);
2414 	}
2415 
2416 	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2417 }
2418 
2419 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2420 					bool fsuid_only)
2421 {
2422 	struct cifsFileInfo *open_file = NULL;
2423 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2424 
2425 	/* only filter by fsuid on multiuser mounts */
2426 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2427 		fsuid_only = false;
2428 
2429 	spin_lock(&cifs_inode->open_file_lock);
2430 	/* we could simply get the first list entry since write-only entries
2431 	   are always at the end of the list, but since the first entry might
2432 	   have a close pending, we go through the whole list */
2433 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2434 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2435 			continue;
2436 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2437 			if ((!open_file->invalidHandle)) {
2438 				/* found a good file */
2439 				/* lock it so it will not be closed on us */
2440 				cifsFileInfo_get(open_file);
2441 				spin_unlock(&cifs_inode->open_file_lock);
2442 				return open_file;
2443 			} /* else might as well continue, and look for
2444 			     another, or simply have the caller reopen it
2445 			     again rather than trying to fix this handle */
2446 		} else /* write only file */
2447 			break; /* write only files are last so must be done */
2448 	}
2449 	spin_unlock(&cifs_inode->open_file_lock);
2450 	return NULL;
2451 }
2452 
2453 /* Return -EBADF if no handle is found, and a general rc otherwise */
2454 int
2455 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2456 		       struct cifsFileInfo **ret_file)
2457 {
2458 	struct cifsFileInfo *open_file, *inv_file = NULL;
2459 	struct cifs_sb_info *cifs_sb;
2460 	bool any_available = false;
2461 	int rc = -EBADF;
2462 	unsigned int refind = 0;
2463 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2464 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2465 	*ret_file = NULL;
2466 
2467 	/*
2468 	 * Having a null inode here (because mapping->host was set to zero by
2469 	 * the VFS or MM) should not happen, but we had reports of an oops (due
2470 	 * to it being zero) during stress test cases, so we need to check for it
2471 	 */
2472 
2473 	if (cifs_inode == NULL) {
2474 		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
2475 		dump_stack();
2476 		return rc;
2477 	}
2478 
2479 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2480 
2481 	/* only filter by fsuid on multiuser mounts */
2482 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2483 		fsuid_only = false;
2484 
2485 	spin_lock(&cifs_inode->open_file_lock);
2486 refind_writable:
2487 	if (refind > MAX_REOPEN_ATT) {
2488 		spin_unlock(&cifs_inode->open_file_lock);
2489 		return rc;
2490 	}
2491 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2492 		if (!any_available && open_file->pid != current->tgid)
2493 			continue;
2494 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2495 			continue;
2496 		if (with_delete && !(open_file->fid.access & DELETE))
2497 			continue;
2498 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2499 			if (!open_file->invalidHandle) {
2500 				/* found a good writable file */
2501 				cifsFileInfo_get(open_file);
2502 				spin_unlock(&cifs_inode->open_file_lock);
2503 				*ret_file = open_file;
2504 				return 0;
2505 			} else {
2506 				if (!inv_file)
2507 					inv_file = open_file;
2508 			}
2509 		}
2510 	}
2511 	/* couldn't find usable FH with same pid, try any available */
2512 	if (!any_available) {
2513 		any_available = true;
2514 		goto refind_writable;
2515 	}
2516 
2517 	if (inv_file) {
2518 		any_available = false;
2519 		cifsFileInfo_get(inv_file);
2520 	}
2521 
2522 	spin_unlock(&cifs_inode->open_file_lock);
2523 
2524 	if (inv_file) {
2525 		rc = cifs_reopen_file(inv_file, false);
2526 		if (!rc) {
2527 			*ret_file = inv_file;
2528 			return 0;
2529 		}
2530 
2531 		spin_lock(&cifs_inode->open_file_lock);
2532 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2533 		spin_unlock(&cifs_inode->open_file_lock);
2534 		cifsFileInfo_put(inv_file);
2535 		++refind;
2536 		inv_file = NULL;
2537 		spin_lock(&cifs_inode->open_file_lock);
2538 		goto refind_writable;
2539 	}
2540 
2541 	return rc;
2542 }
2543 
2544 struct cifsFileInfo *
2545 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2546 {
2547 	struct cifsFileInfo *cfile;
2548 	int rc;
2549 
2550 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2551 	if (rc)
2552 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2553 
2554 	return cfile;
2555 }
2556 
2557 int
2558 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2559 		       int flags,
2560 		       struct cifsFileInfo **ret_file)
2561 {
2562 	struct cifsFileInfo *cfile;
2563 	void *page = alloc_dentry_path();
2564 
2565 	*ret_file = NULL;
2566 
2567 	spin_lock(&tcon->open_file_lock);
2568 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2569 		struct cifsInodeInfo *cinode;
2570 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2571 		if (IS_ERR(full_path)) {
2572 			spin_unlock(&tcon->open_file_lock);
2573 			free_dentry_path(page);
2574 			return PTR_ERR(full_path);
2575 		}
2576 		if (strcmp(full_path, name))
2577 			continue;
2578 
2579 		cinode = CIFS_I(d_inode(cfile->dentry));
2580 		spin_unlock(&tcon->open_file_lock);
2581 		free_dentry_path(page);
2582 		return cifs_get_writable_file(cinode, flags, ret_file);
2583 	}
2584 
2585 	spin_unlock(&tcon->open_file_lock);
2586 	free_dentry_path(page);
2587 	return -ENOENT;
2588 }
2589 
2590 int
2591 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2592 		       struct cifsFileInfo **ret_file)
2593 {
2594 	struct cifsFileInfo *cfile;
2595 	void *page = alloc_dentry_path();
2596 
2597 	*ret_file = NULL;
2598 
2599 	spin_lock(&tcon->open_file_lock);
2600 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2601 		struct cifsInodeInfo *cinode;
2602 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2603 		if (IS_ERR(full_path)) {
2604 			spin_unlock(&tcon->open_file_lock);
2605 			free_dentry_path(page);
2606 			return PTR_ERR(full_path);
2607 		}
2608 		if (strcmp(full_path, name))
2609 			continue;
2610 
2611 		cinode = CIFS_I(d_inode(cfile->dentry));
2612 		spin_unlock(&tcon->open_file_lock);
2613 		free_dentry_path(page);
2614 		*ret_file = find_readable_file(cinode, 0);
2615 		return *ret_file ? 0 : -ENOENT;
2616 	}
2617 
2618 	spin_unlock(&tcon->open_file_lock);
2619 	free_dentry_path(page);
2620 	return -ENOENT;
2621 }
2622 
2623 /*
2624  * Flush data on a file opened with strict cache semantics.
2625  */
2626 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2627 		      int datasync)
2628 {
2629 	unsigned int xid;
2630 	int rc = 0;
2631 	struct cifs_tcon *tcon;
2632 	struct TCP_Server_Info *server;
2633 	struct cifsFileInfo *smbfile = file->private_data;
2634 	struct inode *inode = file_inode(file);
2635 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2636 
2637 	rc = file_write_and_wait_range(file, start, end);
2638 	if (rc) {
2639 		trace_cifs_fsync_err(inode->i_ino, rc);
2640 		return rc;
2641 	}
2642 
2643 	xid = get_xid();
2644 
2645 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2646 		 file, datasync);
2647 
2648 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2649 		rc = cifs_zap_mapping(inode);
2650 		if (rc) {
2651 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2652 			rc = 0; /* don't care about it in fsync */
2653 		}
2654 	}
2655 
2656 	tcon = tlink_tcon(smbfile->tlink);
2657 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2658 		server = tcon->ses->server;
2659 		if (server->ops->flush == NULL) {
2660 			rc = -ENOSYS;
2661 			goto strict_fsync_exit;
2662 		}
2663 
2664 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2665 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2666 			if (smbfile) {
2667 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2668 				cifsFileInfo_put(smbfile);
2669 			} else
2670 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2671 		} else
2672 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2673 	}
2674 
2675 strict_fsync_exit:
2676 	free_xid(xid);
2677 	return rc;
2678 }
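
/*
 * A minimal userspace counterpart of the fsync path above: write dirty
 * data, then fsync() so the client flushes cached writes and, on strict
 * mounts, issues a server-side flush too.  The path is an assumption.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static const char msg[] = "durable after fsync\n";
	int fd = open("/mnt/cifs/journal.log",
		      O_WRONLY | O_CREAT | O_APPEND, 0644);	/* assumed mount */

	if (fd < 0)
		return 1;
	if (write(fd, msg, sizeof(msg) - 1) != (ssize_t)(sizeof(msg) - 1))
		return 1;
	if (fsync(fd) < 0)	/* reaches cifs_strict_fsync() on strict mounts */
		return 1;
	close(fd);
	return 0;
}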
2679 
2680 /*
2681  * Flush data on a file without strict cache semantics.
2682  */
2683 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2684 {
2685 	unsigned int xid;
2686 	int rc = 0;
2687 	struct cifs_tcon *tcon;
2688 	struct TCP_Server_Info *server;
2689 	struct cifsFileInfo *smbfile = file->private_data;
2690 	struct inode *inode = file_inode(file);
2691 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2692 
2693 	rc = file_write_and_wait_range(file, start, end);
2694 	if (rc) {
2695 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2696 		return rc;
2697 	}
2698 
2699 	xid = get_xid();
2700 
2701 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2702 		 file, datasync);
2703 
2704 	tcon = tlink_tcon(smbfile->tlink);
2705 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2706 		server = tcon->ses->server;
2707 		if (server->ops->flush == NULL) {
2708 			rc = -ENOSYS;
2709 			goto fsync_exit;
2710 		}
2711 
2712 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2713 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2714 			if (smbfile) {
2715 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2716 				cifsFileInfo_put(smbfile);
2717 			} else
2718 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2719 		} else
2720 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2721 	}
2722 
2723 fsync_exit:
2724 	free_xid(xid);
2725 	return rc;
2726 }
2727 
2728 /*
2729  * As the file closes, flush all cached write data for this inode,
2730  * checking for write-behind errors.
2731  */
2732 int cifs_flush(struct file *file, fl_owner_t id)
2733 {
2734 	struct inode *inode = file_inode(file);
2735 	int rc = 0;
2736 
2737 	if (file->f_mode & FMODE_WRITE)
2738 		rc = filemap_write_and_wait(inode->i_mapping);
2739 
2740 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2741 	if (rc) {
2742 		/* get more nuanced writeback errors */
2743 		rc = filemap_check_wb_err(file->f_mapping, 0);
2744 		trace_cifs_flush_err(inode->i_ino, rc);
2745 	}
2746 	return rc;
2747 }
2748 
2749 static ssize_t
2750 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2751 {
2752 	struct file *file = iocb->ki_filp;
2753 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2754 	struct inode *inode = file->f_mapping->host;
2755 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2756 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2757 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2758 	ssize_t rc;
2759 
2760 	rc = netfs_start_io_write(inode);
2761 	if (rc < 0)
2762 		return rc;
2763 
2764 	/*
2765 	 * We need to hold the sem to be sure nobody modifies the lock list
2766 	 * with a brlock that prevents writing.
2767 	 */
2768 	down_read(&cinode->lock_sem);
2769 
2770 	rc = generic_write_checks(iocb, from);
2771 	if (rc <= 0)
2772 		goto out;
2773 
2774 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2775 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2776 				     server->vals->exclusive_lock_type, 0,
2777 				     NULL, CIFS_WRITE_OP))) {
2778 		rc = -EACCES;
2779 		goto out;
2780 	}
2781 
2782 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2783 
2784 out:
2785 	up_read(&cinode->lock_sem);
2786 	netfs_end_io_write(inode);
2787 	if (rc > 0)
2788 		rc = generic_write_sync(iocb, rc);
2789 	return rc;
2790 }
2791 
2792 ssize_t
2793 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2794 {
2795 	struct inode *inode = file_inode(iocb->ki_filp);
2796 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2797 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2798 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2799 						iocb->ki_filp->private_data;
2800 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2801 	ssize_t written;
2802 
2803 	written = cifs_get_writer(cinode);
2804 	if (written)
2805 		return written;
2806 
2807 	if (CIFS_CACHE_WRITE(cinode)) {
2808 		if (cap_unix(tcon->ses) &&
2809 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2810 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2811 			written = netfs_file_write_iter(iocb, from);
2812 			goto out;
2813 		}
2814 		written = cifs_writev(iocb, from);
2815 		goto out;
2816 	}
2817 	/*
2818 	 * For non-oplocked files in strict cache mode we need to write the data
2819 	 * to the server exactly from pos to pos+len-1 rather than flush all
2820 	 * affected pages, because it may cause an error with mandatory locks on
2821 	 * these pages but not on the region from pos to pos+len-1.
2822 	 */
2823 	written = netfs_file_write_iter(iocb, from);
2824 	if (CIFS_CACHE_READ(cinode)) {
2825 		/*
2826 		 * We have read level caching and we have just sent a write
2827 		 * request to the server thus making data in the cache stale.
2828 		 * Zap the cache and set oplock/lease level to NONE to avoid
2829 		 * reading stale data from the cache. All subsequent read
2830 		 * operations will read new data from the server.
2831 		 */
2832 		cifs_zap_mapping(inode);
2833 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2834 			 inode);
2835 		cinode->oplock = 0;
2836 	}
2837 out:
2838 	cifs_put_writer(cinode);
2839 	return written;
2840 }
2841 
2842 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2843 {
2844 	ssize_t rc;
2845 	struct inode *inode = file_inode(iocb->ki_filp);
2846 
2847 	if (iocb->ki_flags & IOCB_DIRECT)
2848 		return netfs_unbuffered_read_iter(iocb, iter);
2849 
2850 	rc = cifs_revalidate_mapping(inode);
2851 	if (rc)
2852 		return rc;
2853 
2854 	return netfs_file_read_iter(iocb, iter);
2855 }
2856 
2857 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2858 {
2859 	struct inode *inode = file_inode(iocb->ki_filp);
2860 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2861 	ssize_t written;
2862 	int rc;
2863 
2864 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2865 		written = netfs_unbuffered_write_iter(iocb, from);
2866 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2867 			cifs_zap_mapping(inode);
2868 			cifs_dbg(FYI,
2869 				 "Set no oplock for inode=%p after a write operation\n",
2870 				 inode);
2871 			cinode->oplock = 0;
2872 		}
2873 		return written;
2874 	}
2875 
2876 	written = cifs_get_writer(cinode);
2877 	if (written)
2878 		return written;
2879 
2880 	written = netfs_file_write_iter(iocb, from);
2881 
2882 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2883 		rc = filemap_fdatawrite(inode->i_mapping);
2884 		if (rc)
2885 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2886 				 rc, inode);
2887 	}
2888 
2889 	cifs_put_writer(cinode);
2890 	return written;
2891 }
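
/*
 * A sketch of the O_DIRECT branch above from userspace: direct writes
 * bypass the page cache (the netfs_unbuffered_write_iter() call), so the
 * buffer and length should be block-aligned.  Path and alignment are
 * assumptions for the example.
 */
#define _GNU_SOURCE	/* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/mnt/cifs/direct.dat",
		      O_WRONLY | O_CREAT | O_DIRECT, 0644);	/* assumed mount */

	if (fd < 0)
		return 1;
	if (posix_memalign(&buf, 4096, 4096))	/* O_DIRECT wants aligned I/O */
		return 1;
	memset(buf, 'x', 4096);
	if (write(fd, buf, 4096) != 4096)	/* sent straight to the server */
		return 1;
	free(buf);
	close(fd);
	return 0;
}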
2892 
2893 ssize_t
2894 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2895 {
2896 	struct inode *inode = file_inode(iocb->ki_filp);
2897 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2898 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2899 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2900 						iocb->ki_filp->private_data;
2901 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2902 	int rc = -EACCES;
2903 
2904 	/*
2905 	 * In strict cache mode we need to read from the server all the time
2906 	 * if we don't have a level II oplock because the server can delay mtime
2907 	 * changes - so we can't make a decision about invalidating the inode.
2908 	 * We can also fail with page reading if there are mandatory locks
2909 	 * on pages affected by this read but not on the region from pos to
2910 	 * pos+len-1.
2911 	 */
2912 	if (!CIFS_CACHE_READ(cinode))
2913 		return netfs_unbuffered_read_iter(iocb, to);
2914 
2915 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2916 		if (iocb->ki_flags & IOCB_DIRECT)
2917 			return netfs_unbuffered_read_iter(iocb, to);
2918 		return netfs_buffered_read_iter(iocb, to);
2919 	}
2920 
2921 	/*
2922 	 * We need to hold the sem to be sure nobody modifies the lock list
2923 	 * with a brlock that prevents reading.
2924 	 */
2925 	if (iocb->ki_flags & IOCB_DIRECT) {
2926 		rc = netfs_start_io_direct(inode);
2927 		if (rc < 0)
2928 			goto out;
2929 		rc = -EACCES;
2930 		down_read(&cinode->lock_sem);
2931 		if (!cifs_find_lock_conflict(
2932 			    cfile, iocb->ki_pos, iov_iter_count(to),
2933 			    tcon->ses->server->vals->shared_lock_type,
2934 			    0, NULL, CIFS_READ_OP))
2935 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2936 		up_read(&cinode->lock_sem);
2937 		netfs_end_io_direct(inode);
2938 	} else {
2939 		rc = netfs_start_io_read(inode);
2940 		if (rc < 0)
2941 			goto out;
2942 		rc = -EACCES;
2943 		down_read(&cinode->lock_sem);
2944 		if (!cifs_find_lock_conflict(
2945 			    cfile, iocb->ki_pos, iov_iter_count(to),
2946 			    tcon->ses->server->vals->shared_lock_type,
2947 			    0, NULL, CIFS_READ_OP))
2948 			rc = filemap_read(iocb, to, 0);
2949 		up_read(&cinode->lock_sem);
2950 		netfs_end_io_read(inode);
2951 	}
2952 out:
2953 	return rc;
2954 }
2955 
2956 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2957 {
2958 	return netfs_page_mkwrite(vmf, NULL);
2959 }
2960 
2961 static const struct vm_operations_struct cifs_file_vm_ops = {
2962 	.fault = filemap_fault,
2963 	.map_pages = filemap_map_pages,
2964 	.page_mkwrite = cifs_page_mkwrite,
2965 };
2966 
2967 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2968 {
2969 	int xid, rc = 0;
2970 	struct inode *inode = file_inode(file);
2971 
2972 	xid = get_xid();
2973 
2974 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
2975 		rc = cifs_zap_mapping(inode);
2976 	if (!rc)
2977 		rc = generic_file_mmap(file, vma);
2978 	if (!rc)
2979 		vma->vm_ops = &cifs_file_vm_ops;
2980 
2981 	free_xid(xid);
2982 	return rc;
2983 }
2984 
2985 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2986 {
2987 	int rc, xid;
2988 
2989 	xid = get_xid();
2990 
2991 	rc = cifs_revalidate_file(file);
2992 	if (rc)
2993 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
2994 			 rc);
2995 	if (!rc)
2996 		rc = generic_file_mmap(file, vma);
2997 	if (!rc)
2998 		vma->vm_ops = &cifs_file_vm_ops;
2999 
3000 	free_xid(xid);
3001 	return rc;
3002 }
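
/*
 * A userspace sketch of the mmap path: the first store into a MAP_SHARED
 * mapping goes through cifs_page_mkwrite() before the page is dirtied, and
 * msync() writes it back.  The file name and its size (at least one page)
 * are assumptions.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char *p;
	int fd = open("/mnt/cifs/mapped.bin", O_RDWR);	/* assumed >= 4 KiB file */

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 'A';			/* first store triggers ->page_mkwrite */
	msync(p, 4096, MS_SYNC);	/* push the dirty page back */
	munmap(p, 4096);
	close(fd);
	return 0;
}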
3003 
3004 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3005 {
3006 	struct cifsFileInfo *open_file;
3007 
3008 	spin_lock(&cifs_inode->open_file_lock);
3009 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3010 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3011 			spin_unlock(&cifs_inode->open_file_lock);
3012 			return 1;
3013 		}
3014 	}
3015 	spin_unlock(&cifs_inode->open_file_lock);
3016 	return 0;
3017 }
3018 
3019 /* We do not want to update the file size from the server for inodes
3020    open for write, to avoid races with writepage extending
3021    the file. In the future we could consider allowing
3022    refreshing the inode only on increases in the file size,
3023    but this is tricky to do without racing with write-behind
3024    page caching in the current Linux kernel design. */
3025 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3026 			    bool from_readdir)
3027 {
3028 	if (!cifsInode)
3029 		return true;
3030 
3031 	if (is_inode_writable(cifsInode) ||
3032 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3033 		/* This inode is open for write at least once */
3034 		struct cifs_sb_info *cifs_sb;
3035 
3036 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3037 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3038 			/* since there is no page cache to corrupt on
3039 			   direct I/O, we can change the size safely */
3040 			return true;
3041 		}
3042 
3043 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3044 			return true;
3045 
3046 		return false;
3047 	} else
3048 		return true;
3049 }
3050 
3051 void cifs_oplock_break(struct work_struct *work)
3052 {
3053 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3054 						  oplock_break);
3055 	struct inode *inode = d_inode(cfile->dentry);
3056 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3057 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3058 	struct cifs_tcon *tcon;
3059 	struct TCP_Server_Info *server;
3060 	struct tcon_link *tlink;
3061 	int rc = 0;
3062 	bool purge_cache = false, oplock_break_cancelled;
3063 	__u64 persistent_fid, volatile_fid;
3064 	__u16 net_fid;
3065 
3066 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3067 			TASK_UNINTERRUPTIBLE);
3068 
3069 	tlink = cifs_sb_tlink(cifs_sb);
3070 	if (IS_ERR(tlink))
3071 		goto out;
3072 	tcon = tlink_tcon(tlink);
3073 	server = tcon->ses->server;
3074 
3075 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3076 				      cfile->oplock_epoch, &purge_cache);
3077 
3078 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3079 						cifs_has_mand_locks(cinode)) {
3080 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3081 			 inode);
3082 		cinode->oplock = 0;
3083 	}
3084 
3085 	if (inode && S_ISREG(inode->i_mode)) {
3086 		if (CIFS_CACHE_READ(cinode))
3087 			break_lease(inode, O_RDONLY);
3088 		else
3089 			break_lease(inode, O_WRONLY);
3090 		rc = filemap_fdatawrite(inode->i_mapping);
3091 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3092 			rc = filemap_fdatawait(inode->i_mapping);
3093 			mapping_set_error(inode->i_mapping, rc);
3094 			cifs_zap_mapping(inode);
3095 		}
3096 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3097 		if (CIFS_CACHE_WRITE(cinode))
3098 			goto oplock_break_ack;
3099 	}
3100 
3101 	rc = cifs_push_locks(cfile);
3102 	if (rc)
3103 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3104 
3105 oplock_break_ack:
3106 	/*
3107 	 * When an oplock break is received and there are no active
3108 	 * file handles, only cached ones, schedule the deferred close
3109 	 * immediately so that a new open will not use the cached handle.
3110 	 */
3111 
3112 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3113 		cifs_close_deferred_file(cinode);
3114 
3115 	persistent_fid = cfile->fid.persistent_fid;
3116 	volatile_fid = cfile->fid.volatile_fid;
3117 	net_fid = cfile->fid.netfid;
3118 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3119 
3120 	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
3121 	/*
3122 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3123 	 * an acknowledgment to be sent when the file has already been closed.
3124 	 */
3125 	spin_lock(&cinode->open_file_lock);
3126 	/* check list empty since this can race with kill_sb calling tree disconnect */
3127 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3128 		spin_unlock(&cinode->open_file_lock);
3129 		rc = server->ops->oplock_response(tcon, persistent_fid,
3130 						  volatile_fid, net_fid, cinode);
3131 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3132 	} else
3133 		spin_unlock(&cinode->open_file_lock);
3134 
3135 	cifs_put_tlink(tlink);
3136 out:
3137 	cifs_done_oplock_break(cinode);
3138 }
3139 
3140 static int cifs_swap_activate(struct swap_info_struct *sis,
3141 			      struct file *swap_file, sector_t *span)
3142 {
3143 	struct cifsFileInfo *cfile = swap_file->private_data;
3144 	struct inode *inode = swap_file->f_mapping->host;
3145 	unsigned long blocks;
3146 	long long isize;
3147 
3148 	cifs_dbg(FYI, "swap activate\n");
3149 
3150 	if (!swap_file->f_mapping->a_ops->swap_rw)
3151 		/* Cannot support swap */
3152 		return -EINVAL;
3153 
3154 	spin_lock(&inode->i_lock);
3155 	blocks = inode->i_blocks;
3156 	isize = inode->i_size;
3157 	spin_unlock(&inode->i_lock);
3158 	if (blocks * 512 < isize) {
3159 		pr_warn("swap activate: swapfile has holes\n");
3160 		return -EINVAL;
3161 	}
3162 	*span = sis->pages;
3163 
3164 	pr_warn_once("Swap support over SMB3 is experimental\n");
3165 
3166 	/*
3167 	 * TODO: consider adding ACL (or documenting how) to prevent other
3168 	 * users (on this or other systems) from reading it
3169 	 */
3170 
3172 	/* TODO: add sk_set_memalloc(inet) or similar */
3173 
3174 	if (cfile)
3175 		cfile->swapfile = true;
3176 	/*
3177 	 * TODO: Since file already open, we can't open with DENY_ALL here
3178 	 * but we could add call to grab a byte range lock to prevent others
3179 	 * from reading or writing the file
3180 	 */
3181 
3182 	sis->flags |= SWP_FS_OPS;
3183 	return add_swap_extent(sis, 0, sis->max, 0);
3184 }
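
/*
 * The holes check above, restated as a hedged userspace sketch: a file has
 * holes when its allocated blocks (512-byte units in st_blocks) cover less
 * than its apparent size, which disqualifies it as a swapfile.  The path
 * is an assumption.
 */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/mnt/cifs/swapfile", &st) < 0)	/* assumed path */
		return 1;
	if ((long long)st.st_blocks * 512 < (long long)st.st_size)
		printf("file has holes - not usable for swap\n");
	else
		printf("fully allocated - candidate swapfile\n");
	return 0;
}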
3185 
3186 static void cifs_swap_deactivate(struct file *file)
3187 {
3188 	struct cifsFileInfo *cfile = file->private_data;
3189 
3190 	cifs_dbg(FYI, "swap deactivate\n");
3191 
3192 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3193 
3194 	if (cfile)
3195 		cfile->swapfile = false;
3196 
3197 	/* do we need to unpin (or unlock) the file? */
3198 }
3199 
3200 /**
3201  * cifs_swap_rw - SMB3 address space operation for swap I/O
3202  * @iocb: target I/O control block
3203  * @iter: I/O buffer
3204  *
3205  * Perform IO to the swap-file.  This is much like direct IO.
3206  */
3207 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3208 {
3209 	ssize_t ret;
3210 
3211 	if (iov_iter_rw(iter) == READ)
3212 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3213 	else
3214 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3215 	if (ret < 0)
3216 		return ret;
3217 	return 0;
3218 }
3219 
3220 const struct address_space_operations cifs_addr_ops = {
3221 	.read_folio	= netfs_read_folio,
3222 	.readahead	= netfs_readahead,
3223 	.writepages	= netfs_writepages,
3224 	.dirty_folio	= netfs_dirty_folio,
3225 	.release_folio	= netfs_release_folio,
3226 	.direct_IO	= noop_direct_IO,
3227 	.invalidate_folio = netfs_invalidate_folio,
3228 	.migrate_folio	= filemap_migrate_folio,
3229 	/*
3230 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3231 	 * helper if needed
3232 	 */
3233 	.swap_activate	= cifs_swap_activate,
3234 	.swap_deactivate = cifs_swap_deactivate,
3235 	.swap_rw = cifs_swap_rw,
3236 };
3237 
3238 /*
3239  * Readahead requires the server to support a buffer large enough to
3240  * contain the header plus one complete page of data.  Otherwise, we need
3241  * to leave the readahead op out of the address space operations.
3242  */
3243 const struct address_space_operations cifs_addr_ops_smallbuf = {
3244 	.read_folio	= netfs_read_folio,
3245 	.writepages	= netfs_writepages,
3246 	.dirty_folio	= netfs_dirty_folio,
3247 	.release_folio	= netfs_release_folio,
3248 	.invalidate_folio = netfs_invalidate_folio,
3249 	.migrate_folio	= filemap_migrate_folio,
3250 };
3251