// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
#include <trace/events/netfs.h>

static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);

/*
 * Prepare a subrequest to upload to the server.  We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
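/*
 * (Descriptive note, not part of the protocol definition: as used below,
 * wait_mtu_credits() waits until the server can grant credits for a payload
 * of up to wsize bytes, stores the granted ceiling in stream->sreq_max_len
 * and the credit handle in wdata->credits; credits that end up unused are
 * returned via add_credits_and_wake_if().)
 */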
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	size_t wsize = req->rreq.wsize;
	int rc;

	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

retry:
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
}

/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	if (cifs_forced_shutdown(sbi)) {
		rc = -EIO;
		goto fail;
	}

	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc, false);
	goto out;
}

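/* Invalidate the locally cached (fscache) copy of the inode's data. */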
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}

/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = req->server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}
	rdata->server = server;

	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
						     cifs_sb->ctx);

	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	rreq->io_streams[0].sreq_max_len = size;

	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
	return 0;
}

/*
 * Issue a read operation on behalf of the netfs helper functions.  We're asked
 * to make a read of a certain size at a point in the file.  We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
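/*
 * (Note: if the granted credits only cover part of the range, async_readv()
 * issues a short read; the netfs core sees subreq->transferred < subreq->len
 * and resubmits the remainder, so no retry loop is needed here.)
 */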
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = req->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	if (subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	netfs_read_subreq_terminated(subreq, rc, false);
}

/*
 * Writeback calls this when it finds a folio that needs uploading.  This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}

/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;
	req->pid = current->tgid; // Ummm...  This may be a workqueue

	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		req->cfile = cifsFileInfo_get(open_file);
		req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
			req->pid = req->cfile->pid;
	} else if (rreq->origin != NETFS_WRITEBACK) {
		WARN_ON_ONCE(1);
		return -EIO;
	}

	return 0;
}

/*
 * Completion of a request operation.
 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
{
	struct timespec64 atime, mtime;
	struct inode *inode = rreq->inode;

	/* we do not want atime to be less than mtime; that broke some apps */
	atime = inode_set_atime_to_ts(inode, current_time(inode));
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&atime, &mtime))
		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}

static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}

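/*
 * Clean up a completed subrequest: deregister any RDMA memory registration,
 * return unused credits to the server's pool and release the xid.
 */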
static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *rdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	int rc = subreq->error;

	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
		if (rdata->mr) {
			smbd_deregister_mr(rdata->mr);
			rdata->mr = NULL;
		}
#endif
	}

	if (rdata->credits.value != 0) {
		trace_smb3_rw_credits(rdata->rreq->debug_id,
				      rdata->subreq.debug_index,
				      rdata->credits.value,
				      rdata->server ? rdata->server->credits : 0,
				      rdata->server ? rdata->server->in_flight : 0,
				      -rdata->credits.value,
				      cifs_trace_rw_credits_free_subreq);
		if (rdata->server)
			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
		else
			rdata->credits.value = 0;
	}

	if (rdata->have_xid)
		free_xid(rdata->xid);
}

const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};

/*
 * Mark all open files on tree connections as invalid, since they were
 * closed when the session to the server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}

static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can cause
		   unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

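/* Map POSIX open flags to an SMB create disposition (see the table in cifs_nt_open()). */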
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that no POSIX flag maps directly to the disposition
 *	FILE_SUPERSEDE (i.e. create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates the existing file
 *	rather than creating a new one as FILE_SUPERSEDE does
 *	(FILE_SUPERSEDE uses the attributes / metadata passed in on the
 *	open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag, and
 *	the read/write flags match reasonably.  O_LARGEFILE is irrelevant
 *	because largefile support is always used by this client.  Flags
 *	O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC, O_NOFOLLOW and
 *	O_NONBLOCK need further investigation.
 *********************************************************************/

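	/*
	 * (Worked example, for illustration only: an open with
	 * O_WRONLY | O_CREAT | O_TRUNC maps to desired_access = GENERIC_WRITE
	 * (or GENERIC_READ | GENERIC_WRITE when priming fscache) and
	 * disposition = FILE_OVERWRITE_IF.)
	 */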
	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}

static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

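/*
 * Take a rw_semaphore for writing by polling with msleep() rather than
 * blocking in the semaphore's wait queue (presumably to sidestep ordering
 * issues on lock_sem; see the callers).
 */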
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);

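/*
 * Build the cifsFileInfo for a freshly opened handle: take references on
 * the dentry, tlink and superblock, publish the open on the tcon and inode
 * lists, and apply the oplock granted by the server (or one recorded by a
 * racing lease break in fid->pending_open).
 */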
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if this is a readable file instance, put it first in the list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

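/* Take an extra reference on an open file handle; paired with cifsFileInfo_put(). */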
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

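/*
 * Work item that retries a server-side close which previously failed with
 * EBUSY/EAGAIN, then hands the final put back to the normal path.
 */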
void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);

	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);

	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
	);

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference to file private data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference to file private data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	if true, defer the final put to a workqueue
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on the
		 * last close because it may cause an error when we open this
		 * file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// If the serverclose has been offloaded to the workqueue (on failure),
	// it will handle offloading the put as well. If the serverclose was
	// not offloaded, we need to handle offloading the put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	rc = cifs_get_readable_path(tcon, full_path, &cfile);
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		} else {
			_cifsFileInfo_put(cfile, true, false);
		}
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fall through and retry the open the old way on network
		 * I/O or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Cannot grab the rename sem here: various ops, including some that
	 * already hold the rename sem, can end up causing writepage to get
	 * called, and if the server was down that means we end up here. We
	 * can never tell whether the caller already holds the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * Fall through and retry the open the old way on errors;
		 * especially in the reconnect path it is important to retry hard.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Otherwise we are already writing data out to the server and could
	 * deadlock if we tried to flush it. Since we do not know whether we
	 * have data that would invalidate the current end of file on the
	 * server, we cannot go to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

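/* Delayed-work handler that performs a previously deferred close of a handle. */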
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

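/*
 * A close may be deferred only when a close timeout is configured, we hold
 * a lease with at least read+handle caching, and CIFS_INO_CLOSE_ON_LOCK has
 * not been set on the inode (it is set when a byte-range lock is taken).
 */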
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));

}

int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, increase the ref count to avoid a use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

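/* Allocate and initialise a byte-range lock record owned by the current tgid. */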
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
 * send a request to the server, or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
 * send a request to the server, or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->c.flc_type;

	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
		flock->c.flc_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) <0, if the error occurs while setting the lock;
 * 2) 0, if we set the lock and don't need to request to the server;
 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
 * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = FILE_LOCK_DEFERRED + 1;

	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return rc;

	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	return rc;
}

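/*
 * Resend all cached mandatory byte-range locks for @cfile to the server,
 * packing as many LOCKING_ANDX_RANGE entries per request as maxBuf allows;
 * shared and exclusive locks are sent in separate passes.
 */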
1726 int
1727 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1728 {
1729 	unsigned int xid;
1730 	int rc = 0, stored_rc;
1731 	struct cifsLockInfo *li, *tmp;
1732 	struct cifs_tcon *tcon;
1733 	unsigned int num, max_num, max_buf;
1734 	LOCKING_ANDX_RANGE *buf, *cur;
1735 	static const int types[] = {
1736 		LOCKING_ANDX_LARGE_FILES,
1737 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1738 	};
1739 	int i;
1740 
1741 	xid = get_xid();
1742 	tcon = tlink_tcon(cfile->tlink);
1743 
1744 	/*
1745 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1746 	 * and check it before using.
1747 	 */
1748 	max_buf = tcon->ses->server->maxBuf;
1749 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1750 		free_xid(xid);
1751 		return -EINVAL;
1752 	}
1753 
1754 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1755 		     PAGE_SIZE);
1756 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1757 			PAGE_SIZE);
1758 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1759 						sizeof(LOCKING_ANDX_RANGE);
1760 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1761 	if (!buf) {
1762 		free_xid(xid);
1763 		return -ENOMEM;
1764 	}
1765 
1766 	for (i = 0; i < 2; i++) {
1767 		cur = buf;
1768 		num = 0;
1769 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1770 			if (li->type != types[i])
1771 				continue;
1772 			cur->Pid = cpu_to_le16(li->pid);
1773 			cur->LengthLow = cpu_to_le32((u32)li->length);
1774 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1775 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1776 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1777 			if (++num == max_num) {
1778 				stored_rc = cifs_lockv(xid, tcon,
1779 						       cfile->fid.netfid,
1780 						       (__u8)li->type, 0, num,
1781 						       buf);
1782 				if (stored_rc)
1783 					rc = stored_rc;
1784 				cur = buf;
1785 				num = 0;
1786 			} else
1787 				cur++;
1788 		}
1789 
1790 		if (num) {
1791 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1792 					       (__u8)types[i], 0, num, buf);
1793 			if (stored_rc)
1794 				rc = stored_rc;
1795 		}
1796 	}
1797 
1798 	kfree(buf);
1799 	free_xid(xid);
1800 	return rc;
1801 }
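
/*
 * Worked example for the sizing above (hypothetical numbers): with a
 * negotiated maxBuf of 16644, sizeof(struct smb_hdr) == 32 and
 * sizeof(LOCKING_ANDX_RANGE) == 20, the clamp gives
 * max_buf = min(16644 - 32, 4096) = 4096 on a 4K-page system, so
 * max_num = (4096 - 32) / 20 = 203 ranges per LOCKING_ANDX request.
 * The exact structure sizes depend on the build; the point is that the
 * batch size is derived from the negotiated buffer, never hard-coded.
 */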
1802 
1803 static __u32
1804 hash_lockowner(fl_owner_t owner)
1805 {
1806 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1807 }
1808 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1809 
1810 struct lock_to_push {
1811 	struct list_head llist;
1812 	__u64 offset;
1813 	__u64 length;
1814 	__u32 pid;
1815 	__u16 netfid;
1816 	__u8 type;
1817 };
1818 
1819 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1820 static int
1821 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1822 {
1823 	struct inode *inode = d_inode(cfile->dentry);
1824 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1825 	struct file_lock *flock;
1826 	struct file_lock_context *flctx = locks_inode_context(inode);
1827 	unsigned int count = 0, i;
1828 	int rc = 0, xid, type;
1829 	struct list_head locks_to_send, *el;
1830 	struct lock_to_push *lck, *tmp;
1831 	__u64 length;
1832 
1833 	xid = get_xid();
1834 
1835 	if (!flctx)
1836 		goto out;
1837 
1838 	spin_lock(&flctx->flc_lock);
1839 	list_for_each(el, &flctx->flc_posix) {
1840 		count++;
1841 	}
1842 	spin_unlock(&flctx->flc_lock);
1843 
1844 	INIT_LIST_HEAD(&locks_to_send);
1845 
1846 	/*
1847 	 * Allocating count locks is enough because no FL_POSIX locks can be
1848 	 * added to the list while we are holding cinode->lock_sem that
1849 	 * protects locking operations of this inode.
1850 	 */
1851 	for (i = 0; i < count; i++) {
1852 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1853 		if (!lck) {
1854 			rc = -ENOMEM;
1855 			goto err_out;
1856 		}
1857 		list_add_tail(&lck->llist, &locks_to_send);
1858 	}
1859 
1860 	el = locks_to_send.next;
1861 	spin_lock(&flctx->flc_lock);
1862 	for_each_file_lock(flock, &flctx->flc_posix) {
1863 		unsigned char ftype = flock->c.flc_type;
1864 
1865 		if (el == &locks_to_send) {
1866 			/*
1867 			 * The list ended. We don't have enough allocated
1868 			 * structures - something is really wrong.
1869 			 */
1870 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1871 			break;
1872 		}
1873 		length = cifs_flock_len(flock);
1874 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1875 			type = CIFS_RDLCK;
1876 		else
1877 			type = CIFS_WRLCK;
1878 		lck = list_entry(el, struct lock_to_push, llist);
1879 		lck->pid = hash_lockowner(flock->c.flc_owner);
1880 		lck->netfid = cfile->fid.netfid;
1881 		lck->length = length;
1882 		lck->type = type;
1883 		lck->offset = flock->fl_start;
1884 	}
1885 	spin_unlock(&flctx->flc_lock);
1886 
1887 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1888 		int stored_rc;
1889 
1890 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1891 					     lck->offset, lck->length, NULL,
1892 					     lck->type, 0);
1893 		if (stored_rc)
1894 			rc = stored_rc;
1895 		list_del(&lck->llist);
1896 		kfree(lck);
1897 	}
1898 
1899 out:
1900 	free_xid(xid);
1901 	return rc;
1902 err_out:
1903 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1904 		list_del(&lck->llist);
1905 		kfree(lck);
1906 	}
1907 	goto out;
1908 }
1909 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1910 
1911 static int
1912 cifs_push_locks(struct cifsFileInfo *cfile)
1913 {
1914 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1915 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1916 	int rc = 0;
1917 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1918 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1919 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1920 
1921 	/* we are going to update can_cache_brlcks here - need a write access */
1922 	cifs_down_write(&cinode->lock_sem);
1923 	if (!cinode->can_cache_brlcks) {
1924 		up_write(&cinode->lock_sem);
1925 		return rc;
1926 	}
1927 
1928 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1929 	if (cap_unix(tcon->ses) &&
1930 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1931 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1932 		rc = cifs_push_posix_locks(cfile);
1933 	else
1934 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1935 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1936 
1937 	cinode->can_cache_brlcks = false;
1938 	up_write(&cinode->lock_sem);
1939 	return rc;
1940 }
1941 
1942 static void
1943 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1944 		bool *wait_flag, struct TCP_Server_Info *server)
1945 {
1946 	if (flock->c.flc_flags & FL_POSIX)
1947 		cifs_dbg(FYI, "Posix\n");
1948 	if (flock->c.flc_flags & FL_FLOCK)
1949 		cifs_dbg(FYI, "Flock\n");
1950 	if (flock->c.flc_flags & FL_SLEEP) {
1951 		cifs_dbg(FYI, "Blocking lock\n");
1952 		*wait_flag = true;
1953 	}
1954 	if (flock->c.flc_flags & FL_ACCESS)
1955 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1956 	if (flock->c.flc_flags & FL_LEASE)
1957 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1958 	if (flock->c.flc_flags &
1959 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1960 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1961 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1962 		         flock->c.flc_flags);
1963 
1964 	*type = server->vals->large_lock_type;
1965 	if (lock_is_write(flock)) {
1966 		cifs_dbg(FYI, "F_WRLCK\n");
1967 		*type |= server->vals->exclusive_lock_type;
1968 		*lock = 1;
1969 	} else if (lock_is_unlock(flock)) {
1970 		cifs_dbg(FYI, "F_UNLCK\n");
1971 		*type |= server->vals->unlock_lock_type;
1972 		*unlock = 1;
1973 		/* Check if unlock includes more than one lock range */
1974 	} else if (lock_is_read(flock)) {
1975 		cifs_dbg(FYI, "F_RDLCK\n");
1976 		*type |= server->vals->shared_lock_type;
1977 		*lock = 1;
1978 	} else if (flock->c.flc_type == F_EXLCK) {
1979 		cifs_dbg(FYI, "F_EXLCK\n");
1980 		*type |= server->vals->exclusive_lock_type;
1981 		*lock = 1;
1982 	} else if (flock->c.flc_type == F_SHLCK) {
1983 		cifs_dbg(FYI, "F_SHLCK\n");
1984 		*type |= server->vals->shared_lock_type;
1985 		*lock = 1;
1986 	} else
1987 		cifs_dbg(FYI, "Unknown type of lock\n");
1988 }
1989 
1990 static int
1991 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1992 	   bool wait_flag, bool posix_lck, unsigned int xid)
1993 {
1994 	int rc = 0;
1995 	__u64 length = cifs_flock_len(flock);
1996 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1997 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1998 	struct TCP_Server_Info *server = tcon->ses->server;
1999 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2000 	__u16 netfid = cfile->fid.netfid;
2001 
2002 	if (posix_lck) {
2003 		int posix_lock_type;
2004 
2005 		rc = cifs_posix_lock_test(file, flock);
2006 		if (!rc)
2007 			return rc;
2008 
2009 		if (type & server->vals->shared_lock_type)
2010 			posix_lock_type = CIFS_RDLCK;
2011 		else
2012 			posix_lock_type = CIFS_WRLCK;
2013 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2014 				      hash_lockowner(flock->c.flc_owner),
2015 				      flock->fl_start, length, flock,
2016 				      posix_lock_type, wait_flag);
2017 		return rc;
2018 	}
2019 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2020 
2021 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2022 	if (!rc)
2023 		return rc;
2024 
2025 	/* BB we could chain these into one lock request BB */
2026 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2027 				    1, 0, false);
2028 	if (rc == 0) {
2029 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2030 					    type, 0, 1, false);
2031 		flock->c.flc_type = F_UNLCK;
2032 		if (rc != 0)
2033 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2034 				 rc);
2035 		return 0;
2036 	}
2037 
2038 	if (type & server->vals->shared_lock_type) {
2039 		flock->c.flc_type = F_WRLCK;
2040 		return 0;
2041 	}
2042 
2043 	type &= ~server->vals->exclusive_lock_type;
2044 
2045 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2046 				    type | server->vals->shared_lock_type,
2047 				    1, 0, false);
2048 	if (rc == 0) {
2049 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2050 			type | server->vals->shared_lock_type, 0, 1, false);
2051 		flock->c.flc_type = F_RDLCK;
2052 		if (rc != 0)
2053 			cifs_dbg(VFS, "Error %d unlocking previously locked range during lock test\n",
2054 				 rc);
2055 	} else
2056 		flock->c.flc_type = F_WRLCK;
2057 
2058 	return 0;
2059 }
2060 
2061 void
2062 cifs_move_llist(struct list_head *source, struct list_head *dest)
2063 {
2064 	struct list_head *li, *tmp;
2065 	list_for_each_safe(li, tmp, source)
2066 		list_move(li, dest);
2067 }
2068 
2069 void
2070 cifs_free_llist(struct list_head *llist)
2071 {
2072 	struct cifsLockInfo *li, *tmp;
2073 	list_for_each_entry_safe(li, tmp, llist, llist) {
2074 		cifs_del_lock_waiters(li);
2075 		list_del(&li->llist);
2076 		kfree(li);
2077 	}
2078 }
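
/*
 * Illustrative sketch (not part of the driver) of the tmp-list pattern the
 * two helpers above support, as used by cifs_unlock_range() below: locks are
 * staged on a private list so that a failed server request can restore them.
 * The send_unlock() call is hypothetical.
 */
#if 0
LIST_HEAD(tmp_llist);

list_move(&li->llist, &tmp_llist);	/* stage the lock for unlocking */
if (send_unlock(cfile))			/* hypothetical server request */
	cifs_move_llist(&tmp_llist, &cfile->llist->locks);	/* restore */
else
	cifs_free_llist(&tmp_llist);	/* server accepted - drop them */
#endif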
2079 
2080 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2081 int
2082 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2083 		  unsigned int xid)
2084 {
2085 	int rc = 0, stored_rc;
2086 	static const int types[] = {
2087 		LOCKING_ANDX_LARGE_FILES,
2088 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2089 	};
2090 	unsigned int i;
2091 	unsigned int max_num, num, max_buf;
2092 	LOCKING_ANDX_RANGE *buf, *cur;
2093 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2094 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2095 	struct cifsLockInfo *li, *tmp;
2096 	__u64 length = cifs_flock_len(flock);
2097 	struct list_head tmp_llist;
2098 
2099 	INIT_LIST_HEAD(&tmp_llist);
2100 
2101 	/*
2102 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2103 	 * and check it before using.
2104 	 */
2105 	max_buf = tcon->ses->server->maxBuf;
2106 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2107 		return -EINVAL;
2108 
2109 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2110 		     PAGE_SIZE);
2111 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2112 			PAGE_SIZE);
2113 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2114 						sizeof(LOCKING_ANDX_RANGE);
2115 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2116 	if (!buf)
2117 		return -ENOMEM;
2118 
2119 	cifs_down_write(&cinode->lock_sem);
2120 	for (i = 0; i < 2; i++) {
2121 		cur = buf;
2122 		num = 0;
2123 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2124 			if (flock->fl_start > li->offset ||
2125 			    (flock->fl_start + length) <
2126 			    (li->offset + li->length))
2127 				continue;
2128 			if (current->tgid != li->pid)
2129 				continue;
2130 			if (types[i] != li->type)
2131 				continue;
2132 			if (cinode->can_cache_brlcks) {
2133 				/*
2134 				 * We can cache brlock requests - simply remove
2135 				 * a lock from the file's list.
2136 				 */
2137 				list_del(&li->llist);
2138 				cifs_del_lock_waiters(li);
2139 				kfree(li);
2140 				continue;
2141 			}
2142 			cur->Pid = cpu_to_le16(li->pid);
2143 			cur->LengthLow = cpu_to_le32((u32)li->length);
2144 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2145 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2146 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2147 			/*
2148 			 * We need to save the lock here so that we can add it
2149 			 * back to the file's list if the unlock range request
2150 			 * fails on the server.
2151 			 */
2152 			list_move(&li->llist, &tmp_llist);
2153 			if (++num == max_num) {
2154 				stored_rc = cifs_lockv(xid, tcon,
2155 						       cfile->fid.netfid,
2156 						       li->type, num, 0, buf);
2157 				if (stored_rc) {
2158 					/*
2159 					 * We failed on the unlock range
2160 					 * request - add all locks from the tmp
2161 					 * list to the head of the file's list.
2162 					 */
2163 					cifs_move_llist(&tmp_llist,
2164 							&cfile->llist->locks);
2165 					rc = stored_rc;
2166 				} else
2167 					/*
2168 					 * The unlock range request succeeded -
2169 					 * free the tmp list.
2170 					 */
2171 					cifs_free_llist(&tmp_llist);
2172 				cur = buf;
2173 				num = 0;
2174 			} else
2175 				cur++;
2176 		}
2177 		if (num) {
2178 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2179 					       types[i], num, 0, buf);
2180 			if (stored_rc) {
2181 				cifs_move_llist(&tmp_llist,
2182 						&cfile->llist->locks);
2183 				rc = stored_rc;
2184 			} else
2185 				cifs_free_llist(&tmp_llist);
2186 		}
2187 	}
2188 
2189 	up_write(&cinode->lock_sem);
2190 	kfree(buf);
2191 	return rc;
2192 }
2193 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2194 
2195 static int
2196 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2197 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2198 	   unsigned int xid)
2199 {
2200 	int rc = 0;
2201 	__u64 length = cifs_flock_len(flock);
2202 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2203 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2204 	struct TCP_Server_Info *server = tcon->ses->server;
2205 	struct inode *inode = d_inode(cfile->dentry);
2206 
2207 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2208 	if (posix_lck) {
2209 		int posix_lock_type;
2210 
2211 		rc = cifs_posix_lock_set(file, flock);
2212 		if (rc <= FILE_LOCK_DEFERRED)
2213 			return rc;
2214 
2215 		if (type & server->vals->shared_lock_type)
2216 			posix_lock_type = CIFS_RDLCK;
2217 		else
2218 			posix_lock_type = CIFS_WRLCK;
2219 
2220 		if (unlock == 1)
2221 			posix_lock_type = CIFS_UNLCK;
2222 
2223 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2224 				      hash_lockowner(flock->c.flc_owner),
2225 				      flock->fl_start, length,
2226 				      NULL, posix_lock_type, wait_flag);
2227 		goto out;
2228 	}
2229 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2230 	if (lock) {
2231 		struct cifsLockInfo *lock;
2232 
2233 		lock = cifs_lock_init(flock->fl_start, length, type,
2234 				      flock->c.flc_flags);
2235 		if (!lock)
2236 			return -ENOMEM;
2237 
2238 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2239 		if (rc < 0) {
2240 			kfree(lock);
2241 			return rc;
2242 		}
2243 		if (!rc)
2244 			goto out;
2245 
2246 		/*
2247 		 * Windows 7 server can delay breaking lease from read to None
2248 		 * if we set a byte-range lock on a file - break it explicitly
2249 		 * before sending the lock to the server to be sure the next
2250 		 * read won't conflict with non-overlapping locks due to
2251 		 * page reading.
2252 		 */
2253 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2254 					CIFS_CACHE_READ(CIFS_I(inode))) {
2255 			cifs_zap_mapping(inode);
2256 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2257 				 inode);
2258 			CIFS_I(inode)->oplock = 0;
2259 		}
2260 
2261 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2262 					    type, 1, 0, wait_flag);
2263 		if (rc) {
2264 			kfree(lock);
2265 			return rc;
2266 		}
2267 
2268 		cifs_lock_add(cfile, lock);
2269 	} else if (unlock)
2270 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2271 
2272 out:
2273 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2274 		/*
2275 		 * If this is a request to remove all locks because we
2276 		 * are closing the file, it doesn't matter if the
2277 		 * unlocking failed as both cifs.ko and the SMB server
2278 		 * remove the lock on file close
2279 		 */
2280 		if (rc) {
2281 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2282 			if (!(flock->c.flc_flags & FL_CLOSE))
2283 				return rc;
2284 		}
2285 		rc = locks_lock_file_wait(file, flock);
2286 	}
2287 	return rc;
2288 }
2289 
2290 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2291 {
2292 	int rc, xid;
2293 	int lock = 0, unlock = 0;
2294 	bool wait_flag = false;
2295 	bool posix_lck = false;
2296 	struct cifs_sb_info *cifs_sb;
2297 	struct cifs_tcon *tcon;
2298 	struct cifsFileInfo *cfile;
2299 	__u32 type;
2300 
2301 	xid = get_xid();
2302 
2303 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2304 		rc = -ENOLCK;
2305 		free_xid(xid);
2306 		return rc;
2307 	}
2308 
2309 	cfile = (struct cifsFileInfo *)file->private_data;
2310 	tcon = tlink_tcon(cfile->tlink);
2311 
2312 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2313 			tcon->ses->server);
2314 	cifs_sb = CIFS_FILE_SB(file);
2315 
2316 	if (cap_unix(tcon->ses) &&
2317 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2318 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2319 		posix_lck = true;
2320 
2321 	if (!lock && !unlock) {
2322 		/*
2323 		 * if this is neither a lock nor an unlock request, there is
2324 		 * nothing to do since we do not know what was requested
2325 		 */
2326 		rc = -EOPNOTSUPP;
2327 		free_xid(xid);
2328 		return rc;
2329 	}
2330 
2331 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2332 			xid);
2333 	free_xid(xid);
2334 	return rc;
2337 }
2338 
2339 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2340 {
2341 	int rc, xid;
2342 	int lock = 0, unlock = 0;
2343 	bool wait_flag = false;
2344 	bool posix_lck = false;
2345 	struct cifs_sb_info *cifs_sb;
2346 	struct cifs_tcon *tcon;
2347 	struct cifsFileInfo *cfile;
2348 	__u32 type;
2349 
2350 	rc = -EACCES;
2351 	xid = get_xid();
2352 
2353 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2354 		 flock->c.flc_flags, flock->c.flc_type,
2355 		 (long long)flock->fl_start,
2356 		 (long long)flock->fl_end);
2357 
2358 	cfile = (struct cifsFileInfo *)file->private_data;
2359 	tcon = tlink_tcon(cfile->tlink);
2360 
2361 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2362 			tcon->ses->server);
2363 	cifs_sb = CIFS_FILE_SB(file);
2364 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2365 
2366 	if (cap_unix(tcon->ses) &&
2367 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2368 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2369 		posix_lck = true;
2370 	/*
2371 	 * BB add code here to normalize offset and length to account for
2372 	 * negative length which we cannot accept over the wire.
2373 	 */
2374 	if (IS_GETLK(cmd)) {
2375 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2376 		free_xid(xid);
2377 		return rc;
2378 	}
2379 
2380 	if (!lock && !unlock) {
2381 		/*
2382 		 * if this is neither a lock nor an unlock request, there is
2383 		 * nothing to do since we do not know what was requested
2384 		 */
2385 		free_xid(xid);
2386 		return -EOPNOTSUPP;
2387 	}
2388 
2389 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2390 			xid);
2391 	free_xid(xid);
2392 	return rc;
2393 }
2394 
2395 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2396 				      bool was_async)
2397 {
2398 	struct netfs_io_request *wreq = wdata->rreq;
2399 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2400 	loff_t wrend;
2401 
2402 	if (result > 0) {
2403 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2404 
2405 		if (wrend > ictx->zero_point &&
2406 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2407 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2408 			ictx->zero_point = wrend;
2409 		if (wrend > ictx->remote_i_size)
2410 			netfs_resize_file(ictx, wrend, true);
2411 	}
2412 
2413 	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2414 }
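
/*
 * Worked example for the bookkeeping above (hypothetical numbers): a
 * subrequest starting at 0x10000 that had already transferred 0x2000 bytes
 * and now completes another result = 0x1000 bytes ends at wrend = 0x13000.
 * For an unbuffered or direct write, zero_point is pushed out to 0x13000 if
 * it was below that, and remote_i_size likewise grows via
 * netfs_resize_file().
 */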
2415 
2416 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2417 					bool fsuid_only)
2418 {
2419 	struct cifsFileInfo *open_file = NULL;
2420 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2421 
2422 	/* only filter by fsuid on multiuser mounts */
2423 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2424 		fsuid_only = false;
2425 
2426 	spin_lock(&cifs_inode->open_file_lock);
2427 	/* We could simply return the first list entry, since write-only
2428 	   entries are always at the end of the list; but as the first entry
2429 	   might have a close pending, we walk the whole list. */
2430 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2431 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2432 			continue;
2433 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2434 			if ((!open_file->invalidHandle)) {
2435 				/* found a good file */
2436 				/* lock it so it will not be closed on us */
2437 				cifsFileInfo_get(open_file);
2438 				spin_unlock(&cifs_inode->open_file_lock);
2439 				return open_file;
2440 			} /* else might as well continue, and look for
2441 			     another, or simply have the caller reopen it
2442 			     again rather than trying to fix this handle */
2443 		} else /* write only file */
2444 			break; /* write only files are last so must be done */
2445 	}
2446 	spin_unlock(&cifs_inode->open_file_lock);
2447 	return NULL;
2448 }
2449 
2450 /* Return 0 on success, -EBADF if no handle is found, or the reopen error otherwise */
2451 int
2452 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2453 		       struct cifsFileInfo **ret_file)
2454 {
2455 	struct cifsFileInfo *open_file, *inv_file = NULL;
2456 	struct cifs_sb_info *cifs_sb;
2457 	bool any_available = false;
2458 	int rc = -EBADF;
2459 	unsigned int refind = 0;
2460 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2461 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2462 	*ret_file = NULL;
2463 
2464 	/*
2465 	 * Having a null inode here (because mapping->host was set to zero by
2466 	 * the VFS or MM) should not happen but we had reports of an oops (due
2467 	 * to it being zero) during stress testcases, so we need to check for it
2468 	 */
2469 
2470 	if (cifs_inode == NULL) {
2471 		cifs_dbg(VFS, "Null inode passed to %s\n", __func__);
2472 		dump_stack();
2473 		return rc;
2474 	}
2475 
2476 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2477 
2478 	/* only filter by fsuid on multiuser mounts */
2479 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2480 		fsuid_only = false;
2481 
2482 	spin_lock(&cifs_inode->open_file_lock);
2483 refind_writable:
2484 	if (refind > MAX_REOPEN_ATT) {
2485 		spin_unlock(&cifs_inode->open_file_lock);
2486 		return rc;
2487 	}
2488 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2489 		if (!any_available && open_file->pid != current->tgid)
2490 			continue;
2491 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2492 			continue;
2493 		if (with_delete && !(open_file->fid.access & DELETE))
2494 			continue;
2495 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2496 			if (!open_file->invalidHandle) {
2497 				/* found a good writable file */
2498 				cifsFileInfo_get(open_file);
2499 				spin_unlock(&cifs_inode->open_file_lock);
2500 				*ret_file = open_file;
2501 				return 0;
2502 			} else {
2503 				if (!inv_file)
2504 					inv_file = open_file;
2505 			}
2506 		}
2507 	}
2508 	/* couldn't find a usable FH with the same pid, try any available */
2509 	if (!any_available) {
2510 		any_available = true;
2511 		goto refind_writable;
2512 	}
2513 
2514 	if (inv_file) {
2515 		any_available = false;
2516 		cifsFileInfo_get(inv_file);
2517 	}
2518 
2519 	spin_unlock(&cifs_inode->open_file_lock);
2520 
2521 	if (inv_file) {
2522 		rc = cifs_reopen_file(inv_file, false);
2523 		if (!rc) {
2524 			*ret_file = inv_file;
2525 			return 0;
2526 		}
2527 
2528 		spin_lock(&cifs_inode->open_file_lock);
2529 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2530 		spin_unlock(&cifs_inode->open_file_lock);
2531 		cifsFileInfo_put(inv_file);
2532 		++refind;
2533 		inv_file = NULL;
2534 		spin_lock(&cifs_inode->open_file_lock);
2535 		goto refind_writable;
2536 	}
2537 
2538 	return rc;
2539 }
2540 
2541 struct cifsFileInfo *
2542 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2543 {
2544 	struct cifsFileInfo *cfile;
2545 	int rc;
2546 
2547 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2548 	if (rc)
2549 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2550 
2551 	return cfile;
2552 }
2553 
2554 int
2555 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2556 		       int flags,
2557 		       struct cifsFileInfo **ret_file)
2558 {
2559 	struct cifsFileInfo *cfile;
2560 	void *page = alloc_dentry_path();
2561 
2562 	*ret_file = NULL;
2563 
2564 	spin_lock(&tcon->open_file_lock);
2565 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2566 		struct cifsInodeInfo *cinode;
2567 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2568 		if (IS_ERR(full_path)) {
2569 			spin_unlock(&tcon->open_file_lock);
2570 			free_dentry_path(page);
2571 			return PTR_ERR(full_path);
2572 		}
2573 		if (strcmp(full_path, name))
2574 			continue;
2575 
2576 		cinode = CIFS_I(d_inode(cfile->dentry));
2577 		spin_unlock(&tcon->open_file_lock);
2578 		free_dentry_path(page);
2579 		return cifs_get_writable_file(cinode, flags, ret_file);
2580 	}
2581 
2582 	spin_unlock(&tcon->open_file_lock);
2583 	free_dentry_path(page);
2584 	return -ENOENT;
2585 }
2586 
2587 int
2588 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2589 		       struct cifsFileInfo **ret_file)
2590 {
2591 	struct cifsFileInfo *cfile;
2592 	void *page = alloc_dentry_path();
2593 
2594 	*ret_file = NULL;
2595 
2596 	spin_lock(&tcon->open_file_lock);
2597 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2598 		struct cifsInodeInfo *cinode;
2599 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2600 		if (IS_ERR(full_path)) {
2601 			spin_unlock(&tcon->open_file_lock);
2602 			free_dentry_path(page);
2603 			return PTR_ERR(full_path);
2604 		}
2605 		if (strcmp(full_path, name))
2606 			continue;
2607 
2608 		cinode = CIFS_I(d_inode(cfile->dentry));
2609 		spin_unlock(&tcon->open_file_lock);
2610 		free_dentry_path(page);
2611 		*ret_file = find_readable_file(cinode, 0);
2612 		return *ret_file ? 0 : -ENOENT;
2613 	}
2614 
2615 	spin_unlock(&tcon->open_file_lock);
2616 	free_dentry_path(page);
2617 	return -ENOENT;
2618 }
2619 
2620 /*
2621  * Flush data on a strict file.
2622  */
2623 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2624 		      int datasync)
2625 {
2626 	unsigned int xid;
2627 	int rc = 0;
2628 	struct cifs_tcon *tcon;
2629 	struct TCP_Server_Info *server;
2630 	struct cifsFileInfo *smbfile = file->private_data;
2631 	struct inode *inode = file_inode(file);
2632 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2633 
2634 	rc = file_write_and_wait_range(file, start, end);
2635 	if (rc) {
2636 		trace_cifs_fsync_err(inode->i_ino, rc);
2637 		return rc;
2638 	}
2639 
2640 	xid = get_xid();
2641 
2642 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2643 		 file, datasync);
2644 
2645 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2646 		rc = cifs_zap_mapping(inode);
2647 		if (rc) {
2648 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2649 			rc = 0; /* don't care about it in fsync */
2650 		}
2651 	}
2652 
2653 	tcon = tlink_tcon(smbfile->tlink);
2654 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2655 		server = tcon->ses->server;
2656 		if (server->ops->flush == NULL) {
2657 			rc = -ENOSYS;
2658 			goto strict_fsync_exit;
2659 		}
2660 
2661 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2662 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2663 			if (smbfile) {
2664 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2665 				cifsFileInfo_put(smbfile);
2666 			} else
2667 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2668 		} else
2669 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2670 	}
2671 
2672 strict_fsync_exit:
2673 	free_xid(xid);
2674 	return rc;
2675 }
2676 
2677 /*
2678  * Flush data on a non-strict file.
2679  */
2680 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2681 {
2682 	unsigned int xid;
2683 	int rc = 0;
2684 	struct cifs_tcon *tcon;
2685 	struct TCP_Server_Info *server;
2686 	struct cifsFileInfo *smbfile = file->private_data;
2687 	struct inode *inode = file_inode(file);
2688 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2689 
2690 	rc = file_write_and_wait_range(file, start, end);
2691 	if (rc) {
2692 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2693 		return rc;
2694 	}
2695 
2696 	xid = get_xid();
2697 
2698 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2699 		 file, datasync);
2700 
2701 	tcon = tlink_tcon(smbfile->tlink);
2702 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2703 		server = tcon->ses->server;
2704 		if (server->ops->flush == NULL) {
2705 			rc = -ENOSYS;
2706 			goto fsync_exit;
2707 		}
2708 
2709 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2710 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2711 			if (smbfile) {
2712 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2713 				cifsFileInfo_put(smbfile);
2714 			} else
2715 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2716 		} else
2717 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2718 	}
2719 
2720 fsync_exit:
2721 	free_xid(xid);
2722 	return rc;
2723 }
2724 
2725 /*
2726  * As the file closes, flush all cached write data for this inode,
2727  * checking for write-behind errors.
2728  */
2729 int cifs_flush(struct file *file, fl_owner_t id)
2730 {
2731 	struct inode *inode = file_inode(file);
2732 	int rc = 0;
2733 
2734 	if (file->f_mode & FMODE_WRITE)
2735 		rc = filemap_write_and_wait(inode->i_mapping);
2736 
2737 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2738 	if (rc) {
2739 		/* get more nuanced writeback errors */
2740 		rc = filemap_check_wb_err(file->f_mapping, 0);
2741 		trace_cifs_flush_err(inode->i_ino, rc);
2742 	}
2743 	return rc;
2744 }
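
/*
 * Note on the error re-check above: filemap_check_wb_err() samples the
 * mapping's errseq_t, and a "since" cookie of 0 reports any writeback error
 * recorded on the mapping rather than only errors newer than a previously
 * sampled cookie, e.g. (sketch):
 *
 *	errseq_t since = filemap_sample_wb_err(file->f_mapping);
 *	...
 *	err = filemap_check_wb_err(file->f_mapping, since);
 *
 * cifs_flush() deliberately passes 0 instead of a sampled cookie.
 */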
2745 
2746 static ssize_t
2747 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2748 {
2749 	struct file *file = iocb->ki_filp;
2750 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2751 	struct inode *inode = file->f_mapping->host;
2752 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2753 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2754 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2755 	ssize_t rc;
2756 
2757 	rc = netfs_start_io_write(inode);
2758 	if (rc < 0)
2759 		return rc;
2760 
2761 	/*
2762 	 * We need to hold the sem to be sure nobody modifies the lock list
2763 	 * with a brlock that prevents writing.
2764 	 */
2765 	down_read(&cinode->lock_sem);
2766 
2767 	rc = generic_write_checks(iocb, from);
2768 	if (rc <= 0)
2769 		goto out;
2770 
2771 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2772 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2773 				     server->vals->exclusive_lock_type, 0,
2774 				     NULL, CIFS_WRITE_OP))) {
2775 		rc = -EACCES;
2776 		goto out;
2777 	}
2778 
2779 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2780 
2781 out:
2782 	up_read(&cinode->lock_sem);
2783 	netfs_end_io_write(inode);
2784 	if (rc > 0)
2785 		rc = generic_write_sync(iocb, rc);
2786 	return rc;
2787 }
2788 
2789 ssize_t
2790 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2791 {
2792 	struct inode *inode = file_inode(iocb->ki_filp);
2793 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2794 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2795 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2796 						iocb->ki_filp->private_data;
2797 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2798 	ssize_t written;
2799 
2800 	written = cifs_get_writer(cinode);
2801 	if (written)
2802 		return written;
2803 
2804 	if (CIFS_CACHE_WRITE(cinode)) {
2805 		if (cap_unix(tcon->ses) &&
2806 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2807 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2808 			written = netfs_file_write_iter(iocb, from);
2809 			goto out;
2810 		}
2811 		written = cifs_writev(iocb, from);
2812 		goto out;
2813 	}
2814 	/*
2815 	 * For non-oplocked files in strict cache mode we need to write the data
2816 	 * to the server exactly from pos to pos+len-1 rather than flush all
2817 	 * affected pages because it may cause an error with mandatory locks on
2818 	 * these pages but not on the region from pos to pos+len-1.
2819 	 */
2820 	written = netfs_file_write_iter(iocb, from);
2821 	if (CIFS_CACHE_READ(cinode)) {
2822 		/*
2823 		 * We have read level caching and we have just sent a write
2824 		 * request to the server thus making data in the cache stale.
2825 		 * Zap the cache and set oplock/lease level to NONE to avoid
2826 		 * reading stale data from the cache. All subsequent read
2827 		 * operations will read new data from the server.
2828 		 */
2829 		cifs_zap_mapping(inode);
2830 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2831 			 inode);
2832 		cinode->oplock = 0;
2833 	}
2834 out:
2835 	cifs_put_writer(cinode);
2836 	return written;
2837 }
2838 
2839 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2840 {
2841 	ssize_t rc;
2842 	struct inode *inode = file_inode(iocb->ki_filp);
2843 
2844 	if (iocb->ki_flags & IOCB_DIRECT)
2845 		return netfs_unbuffered_read_iter(iocb, iter);
2846 
2847 	rc = cifs_revalidate_mapping(inode);
2848 	if (rc)
2849 		return rc;
2850 
2851 	return netfs_file_read_iter(iocb, iter);
2852 }
2853 
2854 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2855 {
2856 	struct inode *inode = file_inode(iocb->ki_filp);
2857 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2858 	ssize_t written;
2859 	int rc;
2860 
2861 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2862 		written = netfs_unbuffered_write_iter(iocb, from);
2863 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2864 			cifs_zap_mapping(inode);
2865 			cifs_dbg(FYI,
2866 				 "Set no oplock for inode=%p after a write operation\n",
2867 				 inode);
2868 			cinode->oplock = 0;
2869 		}
2870 		return written;
2871 	}
2872 
2873 	written = cifs_get_writer(cinode);
2874 	if (written)
2875 		return written;
2876 
2877 	written = netfs_file_write_iter(iocb, from);
2878 
2879 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2880 		rc = filemap_fdatawrite(inode->i_mapping);
2881 		if (rc)
2882 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2883 				 rc, inode);
2884 	}
2885 
2886 	cifs_put_writer(cinode);
2887 	return written;
2888 }
2889 
2890 ssize_t
2891 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2892 {
2893 	struct inode *inode = file_inode(iocb->ki_filp);
2894 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2895 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2896 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2897 						iocb->ki_filp->private_data;
2898 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2899 	int rc = -EACCES;
2900 
2901 	/*
2902 	 * In strict cache mode we need to read from the server all the time
2903 	 * if we don't have level II oplock because the server can delay mtime
2904 	 * change - so we can't make a decision about invalidating the inode.
2905 	 * We can also fail with page reading if there are mandatory locks
2906 	 * on pages affected by this read but not on the region from pos to
2907 	 * pos+len-1.
2908 	 */
2909 	if (!CIFS_CACHE_READ(cinode))
2910 		return netfs_unbuffered_read_iter(iocb, to);
2911 
2912 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2913 		if (iocb->ki_flags & IOCB_DIRECT)
2914 			return netfs_unbuffered_read_iter(iocb, to);
2915 		return netfs_buffered_read_iter(iocb, to);
2916 	}
2917 
2918 	/*
2919 	 * We need to hold the sem to be sure nobody modifies the lock list
2920 	 * with a brlock that prevents reading.
2921 	 */
2922 	if (iocb->ki_flags & IOCB_DIRECT) {
2923 		rc = netfs_start_io_direct(inode);
2924 		if (rc < 0)
2925 			goto out;
2926 		rc = -EACCES;
2927 		down_read(&cinode->lock_sem);
2928 		if (!cifs_find_lock_conflict(
2929 			    cfile, iocb->ki_pos, iov_iter_count(to),
2930 			    tcon->ses->server->vals->shared_lock_type,
2931 			    0, NULL, CIFS_READ_OP))
2932 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2933 		up_read(&cinode->lock_sem);
2934 		netfs_end_io_direct(inode);
2935 	} else {
2936 		rc = netfs_start_io_read(inode);
2937 		if (rc < 0)
2938 			goto out;
2939 		rc = -EACCES;
2940 		down_read(&cinode->lock_sem);
2941 		if (!cifs_find_lock_conflict(
2942 			    cfile, iocb->ki_pos, iov_iter_count(to),
2943 			    tcon->ses->server->vals->shared_lock_type,
2944 			    0, NULL, CIFS_READ_OP))
2945 			rc = filemap_read(iocb, to, 0);
2946 		up_read(&cinode->lock_sem);
2947 		netfs_end_io_read(inode);
2948 	}
2949 out:
2950 	return rc;
2951 }
2952 
2953 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2954 {
2955 	return netfs_page_mkwrite(vmf, NULL);
2956 }
2957 
2958 static const struct vm_operations_struct cifs_file_vm_ops = {
2959 	.fault = filemap_fault,
2960 	.map_pages = filemap_map_pages,
2961 	.page_mkwrite = cifs_page_mkwrite,
2962 };
2963 
2964 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2965 {
2966 	int xid, rc = 0;
2967 	struct inode *inode = file_inode(file);
2968 
2969 	xid = get_xid();
2970 
2971 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
2972 		rc = cifs_zap_mapping(inode);
2973 	if (!rc)
2974 		rc = generic_file_mmap(file, vma);
2975 	if (!rc)
2976 		vma->vm_ops = &cifs_file_vm_ops;
2977 
2978 	free_xid(xid);
2979 	return rc;
2980 }
2981 
2982 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2983 {
2984 	int rc, xid;
2985 
2986 	xid = get_xid();
2987 
2988 	rc = cifs_revalidate_file(file);
2989 	if (rc)
2990 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
2991 			 rc);
2992 	if (!rc)
2993 		rc = generic_file_mmap(file, vma);
2994 	if (!rc)
2995 		vma->vm_ops = &cifs_file_vm_ops;
2996 
2997 	free_xid(xid);
2998 	return rc;
2999 }
3000 
3001 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3002 {
3003 	struct cifsFileInfo *open_file;
3004 
3005 	spin_lock(&cifs_inode->open_file_lock);
3006 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3007 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3008 			spin_unlock(&cifs_inode->open_file_lock);
3009 			return 1;
3010 		}
3011 	}
3012 	spin_unlock(&cifs_inode->open_file_lock);
3013 	return 0;
3014 }
3015 
3016 /* We do not want to update the file size from the server for inodes
3017    open for write - to avoid races with writepage extending the file.
3018    In the future we could consider allowing refreshing the inode only
3019    on increases in the file size, but this is tricky to do without
3020    racing with writebehind page caching in the current Linux kernel
3021    design. */
3022 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3023 			    bool from_readdir)
3024 {
3025 	if (!cifsInode)
3026 		return true;
3027 
3028 	if (is_inode_writable(cifsInode) ||
3029 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3030 		/* This inode is open for write at least once */
3031 		struct cifs_sb_info *cifs_sb;
3032 
3033 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3034 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3035 			/* since there is no page cache to corrupt on direct
3036 			   I/O, we can change the size safely */
3037 			return true;
3038 		}
3039 
3040 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3041 			return true;
3042 
3043 		return false;
3044 	} else
3045 		return true;
3046 }
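
/*
 * Illustrative sketch (not part of this file): the typical caller pattern,
 * e.g. attribute revalidation deciding whether a server-reported EOF may be
 * applied.  The new_eof variable is hypothetical.
 */
#if 0
if (is_size_safe_to_change(cifsInode, new_eof, false))
	i_size_write(&cifsInode->netfs.inode, new_eof);
#endif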
3047 
3048 void cifs_oplock_break(struct work_struct *work)
3049 {
3050 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3051 						  oplock_break);
3052 	struct inode *inode = d_inode(cfile->dentry);
3053 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3054 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3055 	struct cifs_tcon *tcon;
3056 	struct TCP_Server_Info *server;
3057 	struct tcon_link *tlink;
3058 	int rc = 0;
3059 	bool purge_cache = false, oplock_break_cancelled;
3060 	__u64 persistent_fid, volatile_fid;
3061 	__u16 net_fid;
3062 
3063 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3064 			TASK_UNINTERRUPTIBLE);
3065 
3066 	tlink = cifs_sb_tlink(cifs_sb);
3067 	if (IS_ERR(tlink))
3068 		goto out;
3069 	tcon = tlink_tcon(tlink);
3070 	server = tcon->ses->server;
3071 
3072 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3073 				      cfile->oplock_epoch, &purge_cache);
3074 
3075 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3076 						cifs_has_mand_locks(cinode)) {
3077 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3078 			 inode);
3079 		cinode->oplock = 0;
3080 	}
3081 
3082 	if (inode && S_ISREG(inode->i_mode)) {
3083 		if (CIFS_CACHE_READ(cinode))
3084 			break_lease(inode, O_RDONLY);
3085 		else
3086 			break_lease(inode, O_WRONLY);
3087 		rc = filemap_fdatawrite(inode->i_mapping);
3088 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3089 			rc = filemap_fdatawait(inode->i_mapping);
3090 			mapping_set_error(inode->i_mapping, rc);
3091 			cifs_zap_mapping(inode);
3092 		}
3093 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3094 		if (CIFS_CACHE_WRITE(cinode))
3095 			goto oplock_break_ack;
3096 	}
3097 
3098 	rc = cifs_push_locks(cfile);
3099 	if (rc)
3100 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3101 
3102 oplock_break_ack:
3103 	/*
3104 	 * When an oplock break is received and there are no active file
3105 	 * handles, only cached ones, schedule the deferred close immediately
3106 	 * so that a new open will not use a cached handle.
3107 	 */
3108 
3109 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3110 		cifs_close_deferred_file(cinode);
3111 
3112 	persistent_fid = cfile->fid.persistent_fid;
3113 	volatile_fid = cfile->fid.volatile_fid;
3114 	net_fid = cfile->fid.netfid;
3115 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3116 
3117 	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
3118 	/*
3119 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3120 	 * an acknowledgment to be sent when the file has already been closed.
3121 	 */
3122 	spin_lock(&cinode->open_file_lock);
3123 	/* check list empty since can race with kill_sb calling tree disconnect */
3124 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3125 		spin_unlock(&cinode->open_file_lock);
3126 		rc = server->ops->oplock_response(tcon, persistent_fid,
3127 						  volatile_fid, net_fid, cinode);
3128 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3129 	} else
3130 		spin_unlock(&cinode->open_file_lock);
3131 
3132 	cifs_put_tlink(tlink);
3133 out:
3134 	cifs_done_oplock_break(cinode);
3135 }
3136 
3137 static int cifs_swap_activate(struct swap_info_struct *sis,
3138 			      struct file *swap_file, sector_t *span)
3139 {
3140 	struct cifsFileInfo *cfile = swap_file->private_data;
3141 	struct inode *inode = swap_file->f_mapping->host;
3142 	unsigned long blocks;
3143 	long long isize;
3144 
3145 	cifs_dbg(FYI, "swap activate\n");
3146 
3147 	if (!swap_file->f_mapping->a_ops->swap_rw)
3148 		/* Cannot support swap */
3149 		return -EINVAL;
3150 
3151 	spin_lock(&inode->i_lock);
3152 	blocks = inode->i_blocks;
3153 	isize = inode->i_size;
3154 	spin_unlock(&inode->i_lock);
3155 	if (blocks * 512 < isize) {
3156 		pr_warn("swap activate: swapfile has holes\n");
3157 		return -EINVAL;
3158 	}
3159 	*span = sis->pages;
3160 
3161 	pr_warn_once("Swap support over SMB3 is experimental\n");
3162 
3163 	/*
3164 	 * TODO: consider adding ACL (or documenting how) to prevent other
3165 	 * users (on this or other systems) from reading it
3166 	 */
3167 
3169 	/* TODO: add sk_set_memalloc(inet) or similar */
3170 
3171 	if (cfile)
3172 		cfile->swapfile = true;
3173 	/*
3174 	 * TODO: Since file already open, we can't open with DENY_ALL here
3175 	 * but we could add call to grab a byte range lock to prevent others
3176 	 * from reading or writing the file
3177 	 */
3178 
3179 	sis->flags |= SWP_FS_OPS;
3180 	return add_swap_extent(sis, 0, sis->max, 0);
3181 }
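
/*
 * Worked example for the hole check above: i_blocks counts 512-byte
 * sectors, so a fully allocated 1 MiB swapfile must show at least
 * 2048 blocks (2048 * 512 == 1048576).  Fewer blocks than that means
 * part of the file is sparse, which swap I/O cannot tolerate.
 */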
3182 
3183 static void cifs_swap_deactivate(struct file *file)
3184 {
3185 	struct cifsFileInfo *cfile = file->private_data;
3186 
3187 	cifs_dbg(FYI, "swap deactivate\n");
3188 
3189 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3190 
3191 	if (cfile)
3192 		cfile->swapfile = false;
3193 
3194 	/* do we need to unpin (or unlock) the file */
3195 }
3196 
3197 /**
3198  * cifs_swap_rw - SMB3 address space operation for swap I/O
3199  * @iocb: target I/O control block
3200  * @iter: I/O buffer
3201  *
3202  * Perform IO to the swap-file.  This is much like direct IO.
3203  */
3204 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3205 {
3206 	ssize_t ret;
3207 
3208 	if (iov_iter_rw(iter) == READ)
3209 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3210 	else
3211 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3212 	if (ret < 0)
3213 		return ret;
3214 	return 0;
3215 }
3216 
3217 const struct address_space_operations cifs_addr_ops = {
3218 	.read_folio	= netfs_read_folio,
3219 	.readahead	= netfs_readahead,
3220 	.writepages	= netfs_writepages,
3221 	.dirty_folio	= netfs_dirty_folio,
3222 	.release_folio	= netfs_release_folio,
3223 	.direct_IO	= noop_direct_IO,
3224 	.invalidate_folio = netfs_invalidate_folio,
3225 	.migrate_folio	= filemap_migrate_folio,
3226 	/*
3227 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3228 	 * helper if needed
3229 	 */
3230 	.swap_activate	= cifs_swap_activate,
3231 	.swap_deactivate = cifs_swap_deactivate,
3232 	.swap_rw = cifs_swap_rw,
3233 };
3234 
3235 /*
3236  * Readahead requires the server to support a buffer large enough to
3237  * contain the header plus one complete page of data.  Otherwise, we need
3238  * to leave readahead out of the address space operations.
3239  */
3240 const struct address_space_operations cifs_addr_ops_smallbuf = {
3241 	.read_folio	= netfs_read_folio,
3242 	.writepages	= netfs_writepages,
3243 	.dirty_folio	= netfs_dirty_folio,
3244 	.release_folio	= netfs_release_folio,
3245 	.invalidate_folio = netfs_invalidate_folio,
3246 	.migrate_folio	= filemap_migrate_folio,
3247 };
3248