xref: /linux/fs/smb/client/file.c (revision fcab107abe1ab5be9dbe874baa722372da8f4f73)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/filelock.h>
13 #include <linux/backing-dev.h>
14 #include <linux/stat.h>
15 #include <linux/fcntl.h>
16 #include <linux/pagemap.h>
17 #include <linux/pagevec.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifspdu.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
53 	struct TCP_Server_Info *server;
54 	struct cifsFileInfo *open_file = req->cfile;
55 	size_t wsize = req->rreq.wsize;
56 	int rc;
57 
58 	if (!wdata->have_xid) {
59 		wdata->xid = get_xid();
60 		wdata->have_xid = true;
61 	}
62 
63 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
64 	wdata->server = server;
65 
66 retry:
67 	if (open_file->invalidHandle) {
68 		rc = cifs_reopen_file(open_file, false);
69 		if (rc < 0) {
70 			if (rc == -EAGAIN)
71 				goto retry;
72 			subreq->error = rc;
73 			return netfs_prepare_write_failed(subreq);
74 		}
75 	}
76 
77 	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
78 					   &wdata->credits);
79 	if (rc < 0) {
80 		subreq->error = rc;
81 		return netfs_prepare_write_failed(subreq);
82 	}
83 
84 	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
85 	wdata->credits.rreq_debug_index = subreq->debug_index;
86 	wdata->credits.in_flight_check = 1;
87 	trace_smb3_rw_credits(wdata->rreq->debug_id,
88 			      wdata->subreq.debug_index,
89 			      wdata->credits.value,
90 			      server->credits, server->in_flight,
91 			      wdata->credits.value,
92 			      cifs_trace_rw_credits_write_prepare);
93 
94 #ifdef CONFIG_CIFS_SMB_DIRECT
95 	if (server->smbd_conn)
96 		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
97 #endif
98 }
99 
100 /*
101  * Issue a subrequest to upload to the server.
102  */
103 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
104 {
105 	struct cifs_io_subrequest *wdata =
106 		container_of(subreq, struct cifs_io_subrequest, subreq);
107 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
108 	int rc;
109 
110 	if (cifs_forced_shutdown(sbi)) {
111 		rc = -EIO;
112 		goto fail;
113 	}
114 
115 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
116 	if (rc)
117 		goto fail;
118 
119 	rc = -EAGAIN;
120 	if (wdata->req->cfile->invalidHandle)
121 		goto fail;
122 
123 	wdata->server->ops->async_writev(wdata);
124 out:
125 	return;
126 
127 fail:
128 	if (rc == -EAGAIN)
129 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
130 	else
131 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
132 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
133 	cifs_write_subrequest_terminated(wdata, rc);
134 	goto out;
135 }
136 
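/* netfs hook: invalidate the locally cached copy of this request's inode data */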
137 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
138 {
139 	cifs_invalidate_cache(wreq->inode, 0);
140 }
141 
142 /*
143  * Negotiate the size of a read operation on behalf of the netfs library.
144  */
145 static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
146 {
147 	struct netfs_io_request *rreq = subreq->rreq;
148 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
149 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
150 	struct TCP_Server_Info *server;
151 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
152 	size_t size;
153 	int rc = 0;
154 
155 	if (!rdata->have_xid) {
156 		rdata->xid = get_xid();
157 		rdata->have_xid = true;
158 	}
159 
160 	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
161 	rdata->server = server;
162 
163 	if (cifs_sb->ctx->rsize == 0) {
164 		cifs_negotiate_rsize(server, cifs_sb->ctx,
165 				     tlink_tcon(req->cfile->tlink));
166 	}
167 
168 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
169 					   &size, &rdata->credits);
170 	if (rc)
171 		return rc;
172 
173 	rreq->io_streams[0].sreq_max_len = size;
174 
175 	rdata->credits.in_flight_check = 1;
176 	rdata->credits.rreq_debug_id = rreq->debug_id;
177 	rdata->credits.rreq_debug_index = subreq->debug_index;
178 
179 	trace_smb3_rw_credits(rdata->rreq->debug_id,
180 			      rdata->subreq.debug_index,
181 			      rdata->credits.value,
182 			      server->credits, server->in_flight, 0,
183 			      cifs_trace_rw_credits_read_submit);
184 
185 #ifdef CONFIG_CIFS_SMB_DIRECT
186 	if (server->smbd_conn)
187 		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
188 #endif
189 	return 0;
190 }
191 
192 /*
193  * Issue a read operation on behalf of the netfs helper functions.  We're asked
194  * to make a read of a certain size at a point in the file.  We are permitted
195  * to only read a portion of that, but as long as we read something, the netfs
196  * helper will call us again so that we can issue another read.
197  */
198 static void cifs_issue_read(struct netfs_io_subrequest *subreq)
199 {
200 	struct netfs_io_request *rreq = subreq->rreq;
201 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
202 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
203 	struct TCP_Server_Info *server = rdata->server;
204 	int rc = 0;
205 
206 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
207 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
208 		 subreq->transferred, subreq->len);
209 
210 	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
211 	if (rc)
212 		goto failed;
213 
214 	if (req->cfile->invalidHandle) {
215 		do {
216 			rc = cifs_reopen_file(req->cfile, true);
217 		} while (rc == -EAGAIN);
218 		if (rc)
219 			goto failed;
220 	}
221 
222 	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
223 	    subreq->rreq->origin != NETFS_DIO_READ)
224 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
225 
226 	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
227 	rc = rdata->server->ops->async_readv(rdata);
228 	if (rc)
229 		goto failed;
230 	return;
231 
232 failed:
233 	subreq->error = rc;
234 	netfs_read_subreq_terminated(subreq);
235 }
236 
237 /*
238  * Writeback calls this when it finds a folio that needs uploading.  This isn't
239  * called if writeback only has copy-to-cache to deal with.
240  */
241 static void cifs_begin_writeback(struct netfs_io_request *wreq)
242 {
243 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
244 	int ret;
245 
246 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
247 	if (ret) {
248 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
249 		return;
250 	}
251 
252 	wreq->io_streams[0].avail = true;
253 }
254 
255 /*
256  * Initialise a request.
257  */
258 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
259 {
260 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
261 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
262 	struct cifsFileInfo *open_file = NULL;
263 
264 	rreq->rsize = cifs_sb->ctx->rsize;
265 	rreq->wsize = cifs_sb->ctx->wsize;
266 	req->pid = current->tgid; /* NB: may run from a workqueue, so this may not be the issuing task */
267 
268 	if (file) {
269 		open_file = file->private_data;
270 		rreq->netfs_priv = file->private_data;
271 		req->cfile = cifsFileInfo_get(open_file);
272 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
273 			req->pid = req->cfile->pid;
274 	} else if (rreq->origin != NETFS_WRITEBACK) {
275 		WARN_ON_ONCE(1);
276 		return -EIO;
277 	}
278 
279 	return 0;
280 }
281 
282 /*
283  * Completion of a request operation.
284  */
285 static void cifs_rreq_done(struct netfs_io_request *rreq)
286 {
287 	struct timespec64 atime, mtime;
288 	struct inode *inode = rreq->inode;
289 
290 	/* we do not want atime to be less than mtime, it broke some apps */
291 	atime = inode_set_atime_to_ts(inode, current_time(inode));
292 	mtime = inode_get_mtime(inode);
293 	if (timespec64_compare(&atime, &mtime) < 0)
294 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
295 }
296 
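/* netfs hook: tear down a completed request and drop our file reference */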
297 static void cifs_free_request(struct netfs_io_request *rreq)
298 {
299 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
300 
301 	if (req->cfile)
302 		cifsFileInfo_put(req->cfile);
303 }
304 
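/*
 * netfs hook: clean up a completed subrequest; deregister any SMB Direct
 * memory registration, return unused credits and free the xid.
 */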
305 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
306 {
307 	struct cifs_io_subrequest *rdata =
308 		container_of(subreq, struct cifs_io_subrequest, subreq);
309 	int rc = subreq->error;
310 
311 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
312 #ifdef CONFIG_CIFS_SMB_DIRECT
313 		if (rdata->mr) {
314 			smbd_deregister_mr(rdata->mr);
315 			rdata->mr = NULL;
316 		}
317 #endif
318 	}
319 
320 	if (rdata->credits.value != 0) {
321 		trace_smb3_rw_credits(rdata->rreq->debug_id,
322 				      rdata->subreq.debug_index,
323 				      rdata->credits.value,
324 				      rdata->server ? rdata->server->credits : 0,
325 				      rdata->server ? rdata->server->in_flight : 0,
326 				      -rdata->credits.value,
327 				      cifs_trace_rw_credits_free_subreq);
328 		if (rdata->server)
329 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
330 		else
331 			rdata->credits.value = 0;
332 	}
333 
334 	if (rdata->have_xid)
335 		free_xid(rdata->xid);
336 }
337 
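/* Operations vector wiring the CIFS client into the netfs library */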
338 const struct netfs_request_ops cifs_req_ops = {
339 	.request_pool		= &cifs_io_request_pool,
340 	.subrequest_pool	= &cifs_io_subrequest_pool,
341 	.init_request		= cifs_init_request,
342 	.free_request		= cifs_free_request,
343 	.free_subrequest	= cifs_free_subrequest,
344 	.prepare_read		= cifs_prepare_read,
345 	.issue_read		= cifs_issue_read,
346 	.done			= cifs_rreq_done,
347 	.begin_writeback	= cifs_begin_writeback,
348 	.prepare_write		= cifs_prepare_write,
349 	.issue_write		= cifs_issue_write,
350 	.invalidate_cache	= cifs_netfs_invalidate_cache,
351 };
352 
353 /*
354  * Mark all open files on the tree connection as invalid, since they
355  * were closed when the session to the server was lost.
356  */
357 void
358 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
359 {
360 	struct cifsFileInfo *open_file = NULL;
361 	struct list_head *tmp;
362 	struct list_head *tmp1;
363 
364 	/* only send once per connect */
365 	spin_lock(&tcon->tc_lock);
366 	if (tcon->need_reconnect)
367 		tcon->status = TID_NEED_RECON;
368 
369 	if (tcon->status != TID_NEED_RECON) {
370 		spin_unlock(&tcon->tc_lock);
371 		return;
372 	}
373 	tcon->status = TID_IN_FILES_INVALIDATE;
374 	spin_unlock(&tcon->tc_lock);
375 
376 	/* list all files open on tree connection and mark them invalid */
377 	spin_lock(&tcon->open_file_lock);
378 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
379 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
380 		open_file->invalidHandle = true;
381 		open_file->oplock_break_cancelled = true;
382 	}
383 	spin_unlock(&tcon->open_file_lock);
384 
385 	invalidate_all_cached_dirs(tcon);
386 	spin_lock(&tcon->tc_lock);
387 	if (tcon->status == TID_IN_FILES_INVALIDATE)
388 		tcon->status = TID_NEED_TCON;
389 	spin_unlock(&tcon->tc_lock);
390 
391 	/*
392 	 * BB Add call to evict_inodes(sb) for all superblocks mounted
393 	 * to this tcon.
394 	 */
395 }
396 
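/*
 * Convert POSIX open flags into the NT desired-access bits requested on open.
 * With rdwr_for_fscache == 1, an O_WRONLY open also requests GENERIC_READ so
 * that the local cache can be filled in around partial writes.
 */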
397 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
398 {
399 	if ((flags & O_ACCMODE) == O_RDONLY)
400 		return GENERIC_READ;
401 	else if ((flags & O_ACCMODE) == O_WRONLY)
402 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
403 	else if ((flags & O_ACCMODE) == O_RDWR) {
404 		/* GENERIC_ALL is too much permission to request; it
405 		   can cause unnecessary access-denied errors on create */
406 		/* return GENERIC_ALL; */
407 		return (GENERIC_READ | GENERIC_WRITE);
408 	}
409 
410 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
411 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
412 		FILE_READ_DATA);
413 }
414 
415 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
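/* Map POSIX open flags to the SMB_O_* flags used by the POSIX open call */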
416 static u32 cifs_posix_convert_flags(unsigned int flags)
417 {
418 	u32 posix_flags = 0;
419 
420 	if ((flags & O_ACCMODE) == O_RDONLY)
421 		posix_flags = SMB_O_RDONLY;
422 	else if ((flags & O_ACCMODE) == O_WRONLY)
423 		posix_flags = SMB_O_WRONLY;
424 	else if ((flags & O_ACCMODE) == O_RDWR)
425 		posix_flags = SMB_O_RDWR;
426 
427 	if (flags & O_CREAT) {
428 		posix_flags |= SMB_O_CREAT;
429 		if (flags & O_EXCL)
430 			posix_flags |= SMB_O_EXCL;
431 	} else if (flags & O_EXCL)
432 		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
433 			 current->comm, current->tgid);
434 
435 	if (flags & O_TRUNC)
436 		posix_flags |= SMB_O_TRUNC;
437 	/* be safe and imply O_SYNC for O_DSYNC */
438 	if (flags & O_DSYNC)
439 		posix_flags |= SMB_O_SYNC;
440 	if (flags & O_DIRECTORY)
441 		posix_flags |= SMB_O_DIRECTORY;
442 	if (flags & O_NOFOLLOW)
443 		posix_flags |= SMB_O_NOFOLLOW;
444 	if (flags & O_DIRECT)
445 		posix_flags |= SMB_O_DIRECT;
446 
447 	return posix_flags;
448 }
449 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
450 
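/* Map O_CREAT/O_EXCL/O_TRUNC combinations to an NT create disposition */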
451 static inline int cifs_get_disposition(unsigned int flags)
452 {
453 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
454 		return FILE_CREATE;
455 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
456 		return FILE_OVERWRITE_IF;
457 	else if ((flags & O_CREAT) == O_CREAT)
458 		return FILE_OPEN_IF;
459 	else if ((flags & O_TRUNC) == O_TRUNC)
460 		return FILE_OVERWRITE;
461 	else
462 		return FILE_OPEN;
463 }
464 
465 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
466 int cifs_posix_open(const char *full_path, struct inode **pinode,
467 			struct super_block *sb, int mode, unsigned int f_flags,
468 			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
469 {
470 	int rc;
471 	FILE_UNIX_BASIC_INFO *presp_data;
472 	__u32 posix_flags = 0;
473 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
474 	struct cifs_fattr fattr;
475 	struct tcon_link *tlink;
476 	struct cifs_tcon *tcon;
477 
478 	cifs_dbg(FYI, "posix open %s\n", full_path);
479 
480 	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
481 	if (presp_data == NULL)
482 		return -ENOMEM;
483 
484 	tlink = cifs_sb_tlink(cifs_sb);
485 	if (IS_ERR(tlink)) {
486 		rc = PTR_ERR(tlink);
487 		goto posix_open_ret;
488 	}
489 
490 	tcon = tlink_tcon(tlink);
491 	mode &= ~current_umask();
492 
493 	posix_flags = cifs_posix_convert_flags(f_flags);
494 	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
495 			     poplock, full_path, cifs_sb->local_nls,
496 			     cifs_remap(cifs_sb));
497 	cifs_put_tlink(tlink);
498 
499 	if (rc)
500 		goto posix_open_ret;
501 
502 	if (presp_data->Type == cpu_to_le32(-1))
503 		goto posix_open_ret; /* open ok, caller does qpathinfo */
504 
505 	if (!pinode)
506 		goto posix_open_ret; /* caller does not need info */
507 
508 	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
509 
510 	/* get new inode and set it up */
511 	if (*pinode == NULL) {
512 		cifs_fill_uniqueid(sb, &fattr);
513 		*pinode = cifs_iget(sb, &fattr);
514 		if (!*pinode) {
515 			rc = -ENOMEM;
516 			goto posix_open_ret;
517 		}
518 	} else {
519 		cifs_revalidate_mapping(*pinode);
520 		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
521 	}
522 
523 posix_open_ret:
524 	kfree(presp_data);
525 	return rc;
526 }
527 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
528 
529 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
530 			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
531 			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
532 {
533 	int rc;
534 	int desired_access;
535 	int disposition;
536 	int create_options = CREATE_NOT_DIR;
537 	struct TCP_Server_Info *server = tcon->ses->server;
538 	struct cifs_open_parms oparms;
539 	int rdwr_for_fscache = 0;
540 
541 	if (!server->ops->open)
542 		return -ENOSYS;
543 
544 	/* If we're caching, we need to be able to fill in around partial writes. */
545 	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
546 		rdwr_for_fscache = 1;
547 
548 	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
549 
550 /*********************************************************************
551  *  open flag mapping table:
552  *
553  *	POSIX Flag            CIFS Disposition
554  *	----------            ----------------
555  *	O_CREAT               FILE_OPEN_IF
556  *	O_CREAT | O_EXCL      FILE_CREATE
557  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
558  *	O_TRUNC               FILE_OVERWRITE
559  *	none of the above     FILE_OPEN
560  *
561  *	Note that there is no direct match for the disposition
562  *	FILE_SUPERSEDE (ie create whether or not the file exists);
563  *	O_CREAT | O_TRUNC is similar, but truncates the existing
564  *	file rather than creating a new file as FILE_SUPERSEDE does
565  *	(which uses the attributes / metadata passed in on the open call).
566  *
567  *	O_SYNC is a reasonable match to the CIFS writethrough flag
568  *	and the read/write flags match reasonably.  O_LARGEFILE
569  *	is irrelevant because largefile support is always used
570  *	by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
571  *	O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
572  *********************************************************************/
573 
574 	disposition = cifs_get_disposition(f_flags);
575 
576 	/* BB pass O_SYNC flag through on file attributes .. BB */
577 
578 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
579 	if (f_flags & O_SYNC)
580 		create_options |= CREATE_WRITE_THROUGH;
581 
582 	if (f_flags & O_DIRECT)
583 		create_options |= CREATE_NO_BUFFER;
584 
585 retry_open:
586 	oparms = (struct cifs_open_parms) {
587 		.tcon = tcon,
588 		.cifs_sb = cifs_sb,
589 		.desired_access = desired_access,
590 		.create_options = cifs_create_options(cifs_sb, create_options),
591 		.disposition = disposition,
592 		.path = full_path,
593 		.fid = fid,
594 	};
595 
596 	rc = server->ops->open(xid, &oparms, oplock, buf);
597 	if (rc) {
598 		if (rc == -EACCES && rdwr_for_fscache == 1) {
599 			desired_access = cifs_convert_flags(f_flags, 0);
600 			rdwr_for_fscache = 2;
601 			goto retry_open;
602 		}
603 		return rc;
604 	}
605 	if (rdwr_for_fscache == 2)
606 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
607 
608 	/* TODO: Add support for calling posix query info, passing in the fid */
609 	if (tcon->unix_ext)
610 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
611 					      xid);
612 	else
613 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
614 					 xid, fid);
615 
616 	if (rc) {
617 		server->ops->close(xid, tcon, fid);
618 		if (rc == -ESTALE)
619 			rc = -EOPENSTALE;
620 	}
621 
622 	return rc;
623 }
624 
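/*
 * Return true if any byte-range locks are held on the inode through any of
 * its open file handles.
 */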
625 static bool
626 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
627 {
628 	struct cifs_fid_locks *cur;
629 	bool has_locks = false;
630 
631 	down_read(&cinode->lock_sem);
632 	list_for_each_entry(cur, &cinode->llist, llist) {
633 		if (!list_empty(&cur->locks)) {
634 			has_locks = true;
635 			break;
636 		}
637 	}
638 	up_read(&cinode->lock_sem);
639 	return has_locks;
640 }
641 
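/* Take @sem for writing, polling every 10ms rather than blocking */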
642 void
643 cifs_down_write(struct rw_semaphore *sem)
644 {
645 	while (!down_write_trylock(sem))
646 		msleep(10);
647 }
648 
649 static void cifsFileInfo_put_work(struct work_struct *work);
650 void serverclose_work(struct work_struct *work);
651 
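/*
 * Allocate and initialise a cifsFileInfo for a newly opened file, link it
 * into the per-tcon and per-inode open-file lists, and apply the oplock or
 * lease state granted by the server.
 */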
652 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
653 				       struct tcon_link *tlink, __u32 oplock,
654 				       const char *symlink_target)
655 {
656 	struct dentry *dentry = file_dentry(file);
657 	struct inode *inode = d_inode(dentry);
658 	struct cifsInodeInfo *cinode = CIFS_I(inode);
659 	struct cifsFileInfo *cfile;
660 	struct cifs_fid_locks *fdlocks;
661 	struct cifs_tcon *tcon = tlink_tcon(tlink);
662 	struct TCP_Server_Info *server = tcon->ses->server;
663 
664 	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
665 	if (cfile == NULL)
666 		return cfile;
667 
668 	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
669 	if (!fdlocks) {
670 		kfree(cfile);
671 		return NULL;
672 	}
673 
674 	if (symlink_target) {
675 		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
676 		if (!cfile->symlink_target) {
677 			kfree(fdlocks);
678 			kfree(cfile);
679 			return NULL;
680 		}
681 	}
682 
683 	INIT_LIST_HEAD(&fdlocks->locks);
684 	fdlocks->cfile = cfile;
685 	cfile->llist = fdlocks;
686 
687 	cfile->count = 1;
688 	cfile->pid = current->tgid;
689 	cfile->uid = current_fsuid();
690 	cfile->dentry = dget(dentry);
691 	cfile->f_flags = file->f_flags;
692 	cfile->invalidHandle = false;
693 	cfile->deferred_close_scheduled = false;
694 	cfile->tlink = cifs_get_tlink(tlink);
695 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
696 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
697 	INIT_WORK(&cfile->serverclose, serverclose_work);
698 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
699 	mutex_init(&cfile->fh_mutex);
700 	spin_lock_init(&cfile->file_info_lock);
701 
702 	cifs_sb_active(inode->i_sb);
703 
704 	/*
705 	 * If the server returned a read oplock and we have mandatory brlocks,
706 	 * set oplock level to None.
707 	 */
708 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
709 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
710 		oplock = 0;
711 	}
712 
713 	cifs_down_write(&cinode->lock_sem);
714 	list_add(&fdlocks->llist, &cinode->llist);
715 	up_write(&cinode->lock_sem);
716 
717 	spin_lock(&tcon->open_file_lock);
718 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
719 		oplock = fid->pending_open->oplock;
720 	list_del(&fid->pending_open->olist);
721 
722 	fid->purge_cache = false;
723 	server->ops->set_fid(cfile, fid, oplock);
724 
725 	list_add(&cfile->tlist, &tcon->openFileList);
726 	atomic_inc(&tcon->num_local_opens);
727 
728 	/* if it is a readable file instance, put it first in the list */
729 	spin_lock(&cinode->open_file_lock);
730 	if (file->f_mode & FMODE_READ)
731 		list_add(&cfile->flist, &cinode->openFileList);
732 	else
733 		list_add_tail(&cfile->flist, &cinode->openFileList);
734 	spin_unlock(&cinode->open_file_lock);
735 	spin_unlock(&tcon->open_file_lock);
736 
737 	if (fid->purge_cache)
738 		cifs_zap_mapping(inode);
739 
740 	file->private_data = cfile;
741 	return cfile;
742 }
743 
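/* Take an extra reference on the file private data; drop it with cifsFileInfo_put() */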
744 struct cifsFileInfo *
745 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
746 {
747 	spin_lock(&cifs_file->file_info_lock);
748 	cifsFileInfo_get_locked(cifs_file);
749 	spin_unlock(&cifs_file->file_info_lock);
750 	return cifs_file;
751 }
752 
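/*
 * Final teardown once the last reference is gone: free any remaining lock
 * records and drop the tlink, dentry and superblock references.
 */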
753 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
754 {
755 	struct inode *inode = d_inode(cifs_file->dentry);
756 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
757 	struct cifsLockInfo *li, *tmp;
758 	struct super_block *sb = inode->i_sb;
759 
760 	/*
761 	 * Delete any outstanding lock records. We'll lose them when the file
762 	 * is closed anyway.
763 	 */
764 	cifs_down_write(&cifsi->lock_sem);
765 	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
766 		list_del(&li->llist);
767 		cifs_del_lock_waiters(li);
768 		kfree(li);
769 	}
770 	list_del(&cifs_file->llist->llist);
771 	kfree(cifs_file->llist);
772 	up_write(&cifsi->lock_sem);
773 
774 	cifs_put_tlink(cifs_file->tlink);
775 	dput(cifs_file->dentry);
776 	cifs_sb_deactive(sb);
777 	kfree(cifs_file->symlink_target);
778 	kfree(cifs_file);
779 }
780 
781 static void cifsFileInfo_put_work(struct work_struct *work)
782 {
783 	struct cifsFileInfo *cifs_file = container_of(work,
784 			struct cifsFileInfo, put);
785 
786 	cifsFileInfo_put_final(cifs_file);
787 }
788 
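/*
 * Work item used when a server close failed with -EBUSY or -EAGAIN: retry
 * the close a few times before giving up, then release the file info.
 */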
789 void serverclose_work(struct work_struct *work)
790 {
791 	struct cifsFileInfo *cifs_file = container_of(work,
792 			struct cifsFileInfo, serverclose);
793 
794 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
795 
796 	struct TCP_Server_Info *server = tcon->ses->server;
797 	int rc = 0;
798 	int retries = 0;
799 	int MAX_RETRIES = 4;
800 
801 	do {
802 		if (server->ops->close_getattr)
803 			rc = server->ops->close_getattr(0, tcon, cifs_file);
804 		else if (server->ops->close)
805 			rc = server->ops->close(0, tcon, &cifs_file->fid);
806 
807 		if (rc == -EBUSY || rc == -EAGAIN) {
808 			retries++;
809 			msleep(250);
810 		}
811 	} while ((rc == -EBUSY || rc == -EAGAIN) &&
812 		 (retries < MAX_RETRIES));
813 
814 	if (retries == MAX_RETRIES)
815 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
816 
817 	if (cifs_file->offload)
818 		queue_work(fileinfo_put_wq, &cifs_file->put);
819 	else
820 		cifsFileInfo_put_final(cifs_file);
821 }
822 
823 /**
824  * cifsFileInfo_put - release a reference to file priv data
825  *
826  * Always potentially wait for oplock handler. See _cifsFileInfo_put().
827  *
828  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
829  */
830 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
831 {
832 	_cifsFileInfo_put(cifs_file, true, true);
833 }
834 
835 /**
836  * _cifsFileInfo_put - release a reference to file priv data
837  *
838  * This may involve closing the filehandle @cifs_file out on the
839  * server. Must be called without holding tcon->open_file_lock,
840  * cinode->open_file_lock and cifs_file->file_info_lock.
841  *
842  * If @wait_for_oplock_handler is true and we are releasing the last
843  * reference, wait for any running oplock break handler of the file
844  * and cancel any pending one.
845  *
846  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
847  * @wait_oplock_handler: must be false if called from oplock_break_handler
848  * @offload:	if true, offload the final put to a workqueue (false on the close and oplock break paths)
849  *
850  */
851 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
852 		       bool wait_oplock_handler, bool offload)
853 {
854 	struct inode *inode = d_inode(cifs_file->dentry);
855 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
856 	struct TCP_Server_Info *server = tcon->ses->server;
857 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
858 	struct super_block *sb = inode->i_sb;
859 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
860 	struct cifs_fid fid = {};
861 	struct cifs_pending_open open;
862 	bool oplock_break_cancelled;
863 	bool serverclose_offloaded = false;
864 
865 	spin_lock(&tcon->open_file_lock);
866 	spin_lock(&cifsi->open_file_lock);
867 	spin_lock(&cifs_file->file_info_lock);
868 
869 	cifs_file->offload = offload;
870 	if (--cifs_file->count > 0) {
871 		spin_unlock(&cifs_file->file_info_lock);
872 		spin_unlock(&cifsi->open_file_lock);
873 		spin_unlock(&tcon->open_file_lock);
874 		return;
875 	}
876 	spin_unlock(&cifs_file->file_info_lock);
877 
878 	if (server->ops->get_lease_key)
879 		server->ops->get_lease_key(inode, &fid);
880 
881 	/* store open in pending opens to make sure we don't miss lease break */
882 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
883 
884 	/* remove it from the lists */
885 	list_del(&cifs_file->flist);
886 	list_del(&cifs_file->tlist);
887 	atomic_dec(&tcon->num_local_opens);
888 
889 	if (list_empty(&cifsi->openFileList)) {
890 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
891 			 d_inode(cifs_file->dentry));
892 		/*
893 		 * In strict cache mode we need to invalidate the mapping on the
894 		 * last close because it may cause an error when we open this
895 		 * file again and get at least a level II oplock.
896 		 */
897 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
898 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
899 		cifs_set_oplock_level(cifsi, 0);
900 	}
901 
902 	spin_unlock(&cifsi->open_file_lock);
903 	spin_unlock(&tcon->open_file_lock);
904 
905 	oplock_break_cancelled = wait_oplock_handler ?
906 		cancel_work_sync(&cifs_file->oplock_break) : false;
907 
908 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
909 		struct TCP_Server_Info *server = tcon->ses->server;
910 		unsigned int xid;
911 		int rc = 0;
912 
913 		xid = get_xid();
914 		if (server->ops->close_getattr)
915 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
916 		else if (server->ops->close)
917 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
918 		_free_xid(xid);
919 
920 		if (rc == -EBUSY || rc == -EAGAIN) {
921 			// Server close failed, hence offloading it as an async op
922 			queue_work(serverclose_wq, &cifs_file->serverclose);
923 			serverclose_offloaded = true;
924 		}
925 	}
926 
927 	if (oplock_break_cancelled)
928 		cifs_done_oplock_break(cifsi);
929 
930 	cifs_del_pending_open(&open);
931 
932 	/* If serverclose has been offloaded to the workqueue (on failure),
933 	 * it will handle offloading the put as well. If serverclose was not
934 	 * offloaded, we need to handle offloading the put here. */
935 	if (!serverclose_offloaded) {
936 		if (offload)
937 			queue_work(fileinfo_put_wq, &cifs_file->put);
938 		else
939 			cifsFileInfo_put_final(cifs_file);
940 	}
941 }
942 
943 int cifs_open(struct inode *inode, struct file *file)
944 
945 {
946 	int rc = -EACCES;
947 	unsigned int xid;
948 	__u32 oplock;
949 	struct cifs_sb_info *cifs_sb;
950 	struct TCP_Server_Info *server;
951 	struct cifs_tcon *tcon;
952 	struct tcon_link *tlink;
953 	struct cifsFileInfo *cfile = NULL;
954 	void *page;
955 	const char *full_path;
956 	bool posix_open_ok = false;
957 	struct cifs_fid fid = {};
958 	struct cifs_pending_open open;
959 	struct cifs_open_info_data data = {};
960 
961 	xid = get_xid();
962 
963 	cifs_sb = CIFS_SB(inode->i_sb);
964 	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
965 		free_xid(xid);
966 		return -EIO;
967 	}
968 
969 	tlink = cifs_sb_tlink(cifs_sb);
970 	if (IS_ERR(tlink)) {
971 		free_xid(xid);
972 		return PTR_ERR(tlink);
973 	}
974 	tcon = tlink_tcon(tlink);
975 	server = tcon->ses->server;
976 
977 	page = alloc_dentry_path();
978 	full_path = build_path_from_dentry(file_dentry(file), page);
979 	if (IS_ERR(full_path)) {
980 		rc = PTR_ERR(full_path);
981 		goto out;
982 	}
983 
984 	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
985 		 inode, file->f_flags, full_path);
986 
987 	if (file->f_flags & O_DIRECT &&
988 	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
989 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
990 			file->f_op = &cifs_file_direct_nobrl_ops;
991 		else
992 			file->f_op = &cifs_file_direct_ops;
993 	}
994 
995 	/* Get the cached handle as SMB2 close is deferred */
996 	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
997 		rc = cifs_get_writable_path(tcon, full_path, FIND_WR_FSUID_ONLY, &cfile);
998 	} else {
999 		rc = cifs_get_readable_path(tcon, full_path, &cfile);
1000 	}
1001 	if (rc == 0) {
1002 		unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
1003 		unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
1004 
1005 		if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) &&
1006 		    (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) {
1007 			file->private_data = cfile;
1008 			spin_lock(&CIFS_I(inode)->deferred_lock);
1009 			cifs_del_deferred_close(cfile);
1010 			spin_unlock(&CIFS_I(inode)->deferred_lock);
1011 			goto use_cache;
1012 		}
1013 		_cifsFileInfo_put(cfile, true, false);
1014 	} else {
1015 		/* hard link on the deferred close file */
1016 		rc = cifs_get_hardlink_path(tcon, inode, file);
1017 		if (rc)
1018 			cifs_close_deferred_file(CIFS_I(inode));
1019 	}
1020 
1021 	if (server->oplocks)
1022 		oplock = REQ_OPLOCK;
1023 	else
1024 		oplock = 0;
1025 
1026 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1027 	if (!tcon->broken_posix_open && tcon->unix_ext &&
1028 	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1029 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1030 		/* can not refresh inode info since size could be stale */
1031 		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
1032 				cifs_sb->ctx->file_mode /* ignored */,
1033 				file->f_flags, &oplock, &fid.netfid, xid);
1034 		if (rc == 0) {
1035 			cifs_dbg(FYI, "posix open succeeded\n");
1036 			posix_open_ok = true;
1037 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1038 			if (tcon->ses->serverNOS)
1039 				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
1040 					 tcon->ses->ip_addr,
1041 					 tcon->ses->serverNOS);
1042 			tcon->broken_posix_open = true;
1043 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
1044 			 (rc != -EOPNOTSUPP)) /* path not found or net err */
1045 			goto out;
1046 		/*
1047 		 * Else fallthrough to retry open the old way on network i/o
1048 		 * or DFS errors.
1049 		 */
1050 	}
1051 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1052 
1053 	if (server->ops->get_lease_key)
1054 		server->ops->get_lease_key(inode, &fid);
1055 
1056 	cifs_add_pending_open(&fid, tlink, &open);
1057 
1058 	if (!posix_open_ok) {
1059 		if (server->ops->get_lease_key)
1060 			server->ops->get_lease_key(inode, &fid);
1061 
1062 		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1063 				  xid, &data);
1064 		if (rc) {
1065 			cifs_del_pending_open(&open);
1066 			goto out;
1067 		}
1068 	}
1069 
1070 	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1071 	if (cfile == NULL) {
1072 		if (server->ops->close)
1073 			server->ops->close(xid, tcon, &fid);
1074 		cifs_del_pending_open(&open);
1075 		rc = -ENOMEM;
1076 		goto out;
1077 	}
1078 
1079 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1080 	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1081 		/*
1082 		 * Time to set mode which we can not set earlier due to
1083 		 * problems creating new read-only files.
1084 		 */
1085 		struct cifs_unix_set_info_args args = {
1086 			.mode	= inode->i_mode,
1087 			.uid	= INVALID_UID, /* no change */
1088 			.gid	= INVALID_GID, /* no change */
1089 			.ctime	= NO_CHANGE_64,
1090 			.atime	= NO_CHANGE_64,
1091 			.mtime	= NO_CHANGE_64,
1092 			.device	= 0,
1093 		};
1094 		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1095 				       cfile->pid);
1096 	}
1097 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1098 
1099 use_cache:
1100 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1101 			   file->f_mode & FMODE_WRITE);
1102 	if (!(file->f_flags & O_DIRECT))
1103 		goto out;
1104 	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1105 		goto out;
1106 	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1107 
1108 out:
1109 	free_dentry_path(page);
1110 	free_xid(xid);
1111 	cifs_put_tlink(tlink);
1112 	cifs_free_open_info(&data);
1113 	return rc;
1114 }
1115 
1116 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1117 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1118 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1119 
1120 /*
1121  * Try to reacquire byte range locks that were released when session
1122  * to server was lost.
1123  */
1124 static int
1125 cifs_relock_file(struct cifsFileInfo *cfile)
1126 {
1127 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1128 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1129 	int rc = 0;
1130 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1131 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1132 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1133 
1134 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1135 	if (cinode->can_cache_brlcks) {
1136 		/* can cache locks - no need to relock */
1137 		up_read(&cinode->lock_sem);
1138 		return rc;
1139 	}
1140 
1141 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1142 	if (cap_unix(tcon->ses) &&
1143 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1144 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1145 		rc = cifs_push_posix_locks(cfile);
1146 	else
1147 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1148 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1149 
1150 	up_read(&cinode->lock_sem);
1151 	return rc;
1152 }
1153 
1154 static int
1155 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1156 {
1157 	int rc = -EACCES;
1158 	unsigned int xid;
1159 	__u32 oplock;
1160 	struct cifs_sb_info *cifs_sb;
1161 	struct cifs_tcon *tcon;
1162 	struct TCP_Server_Info *server;
1163 	struct cifsInodeInfo *cinode;
1164 	struct inode *inode;
1165 	void *page;
1166 	const char *full_path;
1167 	int desired_access;
1168 	int disposition = FILE_OPEN;
1169 	int create_options = CREATE_NOT_DIR;
1170 	struct cifs_open_parms oparms;
1171 	int rdwr_for_fscache = 0;
1172 
1173 	xid = get_xid();
1174 	mutex_lock(&cfile->fh_mutex);
1175 	if (!cfile->invalidHandle) {
1176 		mutex_unlock(&cfile->fh_mutex);
1177 		free_xid(xid);
1178 		return 0;
1179 	}
1180 
1181 	inode = d_inode(cfile->dentry);
1182 	cifs_sb = CIFS_SB(inode->i_sb);
1183 	tcon = tlink_tcon(cfile->tlink);
1184 	server = tcon->ses->server;
1185 
1186 	/*
1187 	 * Cannot grab the rename sem here, because various ops, including
1188 	 * those that already hold the rename sem, can end up causing writepage
1189 	 * to get called, and if the server was down that means we end up here;
1190 	 * we can never tell if the caller already holds the rename_sem.
1191 	 */
1192 	page = alloc_dentry_path();
1193 	full_path = build_path_from_dentry(cfile->dentry, page);
1194 	if (IS_ERR(full_path)) {
1195 		mutex_unlock(&cfile->fh_mutex);
1196 		free_dentry_path(page);
1197 		free_xid(xid);
1198 		return PTR_ERR(full_path);
1199 	}
1200 
1201 	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1202 		 inode, cfile->f_flags, full_path);
1203 
1204 	if (tcon->ses->server->oplocks)
1205 		oplock = REQ_OPLOCK;
1206 	else
1207 		oplock = 0;
1208 
1209 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1210 	if (tcon->unix_ext && cap_unix(tcon->ses) &&
1211 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1212 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1213 		/*
1214 		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1215 		 * original open. Must mask them off for a reopen.
1216 		 */
1217 		unsigned int oflags = cfile->f_flags &
1218 						~(O_CREAT | O_EXCL | O_TRUNC);
1219 
1220 		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
1221 				     cifs_sb->ctx->file_mode /* ignored */,
1222 				     oflags, &oplock, &cfile->fid.netfid, xid);
1223 		if (rc == 0) {
1224 			cifs_dbg(FYI, "posix reopen succeeded\n");
1225 			oparms.reconnect = true;
1226 			goto reopen_success;
1227 		}
1228 		/*
1229 		 * Fall through to retry the open the old way on errors; in the
1230 		 * reconnect path especially, it is important to retry hard.
1231 		 */
1232 	}
1233 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1234 
1235 	/* If we're caching, we need to be able to fill in around partial writes. */
1236 	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1237 		rdwr_for_fscache = 1;
1238 
1239 	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
1240 
1241 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
1242 	if (cfile->f_flags & O_SYNC)
1243 		create_options |= CREATE_WRITE_THROUGH;
1244 
1245 	if (cfile->f_flags & O_DIRECT)
1246 		create_options |= CREATE_NO_BUFFER;
1247 
1248 	if (server->ops->get_lease_key)
1249 		server->ops->get_lease_key(inode, &cfile->fid);
1250 
1251 retry_open:
1252 	oparms = (struct cifs_open_parms) {
1253 		.tcon = tcon,
1254 		.cifs_sb = cifs_sb,
1255 		.desired_access = desired_access,
1256 		.create_options = cifs_create_options(cifs_sb, create_options),
1257 		.disposition = disposition,
1258 		.path = full_path,
1259 		.fid = &cfile->fid,
1260 		.reconnect = true,
1261 	};
1262 
1263 	/*
1264 	 * Can not refresh inode by passing in file_info buf to be returned by
1265 	 * ops->open and then calling get_inode_info with returned buf since
1266 	 * file might have write behind data that needs to be flushed and server
1267 	 * version of file size can be stale. If we knew for sure that inode was
1268 	 * not dirty locally we could do this.
1269 	 */
1270 	rc = server->ops->open(xid, &oparms, &oplock, NULL);
1271 	if (rc == -ENOENT && oparms.reconnect == false) {
1272 		/* durable handle timeout is expired - open the file again */
1273 		rc = server->ops->open(xid, &oparms, &oplock, NULL);
1274 		/* indicate that we need to relock the file */
1275 		oparms.reconnect = true;
1276 	}
1277 	if (rc == -EACCES && rdwr_for_fscache == 1) {
1278 		desired_access = cifs_convert_flags(cfile->f_flags, 0);
1279 		rdwr_for_fscache = 2;
1280 		goto retry_open;
1281 	}
1282 
1283 	if (rc) {
1284 		mutex_unlock(&cfile->fh_mutex);
1285 		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1286 		cifs_dbg(FYI, "oplock: %d\n", oplock);
1287 		goto reopen_error_exit;
1288 	}
1289 
1290 	if (rdwr_for_fscache == 2)
1291 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1292 
1293 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1294 reopen_success:
1295 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1296 	cfile->invalidHandle = false;
1297 	mutex_unlock(&cfile->fh_mutex);
1298 	cinode = CIFS_I(inode);
1299 
1300 	if (can_flush) {
1301 		rc = filemap_write_and_wait(inode->i_mapping);
1302 		if (!is_interrupt_error(rc))
1303 			mapping_set_error(inode->i_mapping, rc);
1304 
1305 		if (tcon->posix_extensions) {
1306 			rc = smb311_posix_get_inode_info(&inode, full_path,
1307 							 NULL, inode->i_sb, xid);
1308 		} else if (tcon->unix_ext) {
1309 			rc = cifs_get_inode_info_unix(&inode, full_path,
1310 						      inode->i_sb, xid);
1311 		} else {
1312 			rc = cifs_get_inode_info(&inode, full_path, NULL,
1313 						 inode->i_sb, xid, NULL);
1314 		}
1315 	}
1316 	/*
1317 	 * Else we are writing out data to server already and could deadlock if
1318 	 * we tried to flush data, and since we do not know if we have data that
1319 	 * would invalidate the current end of file on the server we can not go
1320 	 * to the server to get the new inode info.
1321 	 */
1322 
1323 	/*
1324 	 * If the server returned a read oplock and we have mandatory brlocks,
1325 	 * set oplock level to None.
1326 	 */
1327 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1328 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1329 		oplock = 0;
1330 	}
1331 
1332 	server->ops->set_fid(cfile, &cfile->fid, oplock);
1333 	if (oparms.reconnect)
1334 		cifs_relock_file(cfile);
1335 
1336 reopen_error_exit:
1337 	free_dentry_path(page);
1338 	free_xid(xid);
1339 	return rc;
1340 }
1341 
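/* Work item that performs the real close once the deferred-close timeout fires */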
1342 void smb2_deferred_work_close(struct work_struct *work)
1343 {
1344 	struct cifsFileInfo *cfile = container_of(work,
1345 			struct cifsFileInfo, deferred.work);
1346 
1347 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1348 	cifs_del_deferred_close(cfile);
1349 	cfile->deferred_close_scheduled = false;
1350 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1351 	_cifsFileInfo_put(cfile, true, false);
1352 }
1353 
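/*
 * A close can be deferred if a close timeout is configured, we hold at least
 * a read+handle caching lease, and no lock was taken on the file.
 */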
1354 static bool
1355 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1356 {
1357 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1358 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1359 
1360 	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1361 			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
1362 			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
1363 			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
1364 
1365 }
1366 
1367 int cifs_close(struct inode *inode, struct file *file)
1368 {
1369 	struct cifsFileInfo *cfile;
1370 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1371 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1372 	struct cifs_deferred_close *dclose;
1373 
1374 	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1375 
1376 	if (file->private_data != NULL) {
1377 		cfile = file->private_data;
1378 		file->private_data = NULL;
1379 		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
1380 		if ((cfile->status_file_deleted == false) &&
1381 		    (smb2_can_defer_close(inode, dclose))) {
1382 			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
1383 				inode_set_mtime_to_ts(inode,
1384 						      inode_set_ctime_current(inode));
1385 			}
1386 			spin_lock(&cinode->deferred_lock);
1387 			cifs_add_deferred_close(cfile, dclose);
1388 			if (cfile->deferred_close_scheduled &&
1389 			    delayed_work_pending(&cfile->deferred)) {
1390 				/*
1391 				 * If there is no pending work, mod_delayed_work queues new work.
1392 				 * So, increase the ref count to avoid a use-after-free.
1393 				 */
1394 				if (!mod_delayed_work(deferredclose_wq,
1395 						&cfile->deferred, cifs_sb->ctx->closetimeo))
1396 					cifsFileInfo_get(cfile);
1397 			} else {
1398 				/* Deferred close for files */
1399 				queue_delayed_work(deferredclose_wq,
1400 						&cfile->deferred, cifs_sb->ctx->closetimeo);
1401 				cfile->deferred_close_scheduled = true;
1402 				spin_unlock(&cinode->deferred_lock);
1403 				return 0;
1404 			}
1405 			spin_unlock(&cinode->deferred_lock);
1406 			_cifsFileInfo_put(cfile, true, false);
1407 		} else {
1408 			_cifsFileInfo_put(cfile, true, false);
1409 			kfree(dclose);
1410 		}
1411 	}
1412 
1413 	/* return code from the ->release op is always ignored */
1414 	return 0;
1415 }
1416 
1417 void
1418 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1419 {
1420 	struct cifsFileInfo *open_file, *tmp;
1421 	LIST_HEAD(tmp_list);
1422 
1423 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1424 		return;
1425 
1426 	tcon->need_reopen_files = false;
1427 
1428 	/* list all files open on tree connection, reopen persistent handles */
1429 
1430 	/* list all files open on tree connection, reopen resilient handles  */
1431 	spin_lock(&tcon->open_file_lock);
1432 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1433 		if (!open_file->invalidHandle)
1434 			continue;
1435 		cifsFileInfo_get(open_file);
1436 		list_add_tail(&open_file->rlist, &tmp_list);
1437 	}
1438 	spin_unlock(&tcon->open_file_lock);
1439 
1440 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1441 		if (cifs_reopen_file(open_file, false /* do not flush */))
1442 			tcon->need_reopen_files = true;
1443 		list_del_init(&open_file->rlist);
1444 		cifsFileInfo_put(open_file);
1445 	}
1446 }
1447 
1448 int cifs_closedir(struct inode *inode, struct file *file)
1449 {
1450 	int rc = 0;
1451 	unsigned int xid;
1452 	struct cifsFileInfo *cfile = file->private_data;
1453 	struct cifs_tcon *tcon;
1454 	struct TCP_Server_Info *server;
1455 	char *buf;
1456 
1457 	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1458 
1459 	if (cfile == NULL)
1460 		return rc;
1461 
1462 	xid = get_xid();
1463 	tcon = tlink_tcon(cfile->tlink);
1464 	server = tcon->ses->server;
1465 
1466 	cifs_dbg(FYI, "Freeing private data in close dir\n");
1467 	spin_lock(&cfile->file_info_lock);
1468 	if (server->ops->dir_needs_close(cfile)) {
1469 		cfile->invalidHandle = true;
1470 		spin_unlock(&cfile->file_info_lock);
1471 		if (server->ops->close_dir)
1472 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1473 		else
1474 			rc = -ENOSYS;
1475 		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
1476 		/* not much we can do if it fails anyway, ignore rc */
1477 		rc = 0;
1478 	} else
1479 		spin_unlock(&cfile->file_info_lock);
1480 
1481 	buf = cfile->srch_inf.ntwrk_buf_start;
1482 	if (buf) {
1483 		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
1484 		cfile->srch_inf.ntwrk_buf_start = NULL;
1485 		if (cfile->srch_inf.smallBuf)
1486 			cifs_small_buf_release(buf);
1487 		else
1488 			cifs_buf_release(buf);
1489 	}
1490 
1491 	cifs_put_tlink(cfile->tlink);
1492 	kfree(file->private_data);
1493 	file->private_data = NULL;
1494 	/* BB can we lock the filestruct while this is going on? */
1495 	free_xid(xid);
1496 	return rc;
1497 }
1498 
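/* Allocate and initialise a byte-range lock record for the current task */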
1499 static struct cifsLockInfo *
1500 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1501 {
1502 	struct cifsLockInfo *lock =
1503 		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
1504 	if (!lock)
1505 		return lock;
1506 	lock->offset = offset;
1507 	lock->length = length;
1508 	lock->type = type;
1509 	lock->pid = current->tgid;
1510 	lock->flags = flags;
1511 	INIT_LIST_HEAD(&lock->blist);
1512 	init_waitqueue_head(&lock->block_q);
1513 	return lock;
1514 }
1515 
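/* Wake up any lock requests queued waiting behind @lock */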
1516 void
1517 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1518 {
1519 	struct cifsLockInfo *li, *tmp;
1520 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1521 		list_del_init(&li->blist);
1522 		wake_up(&li->block_q);
1523 	}
1524 }
1525 
1526 #define CIFS_LOCK_OP	0
1527 #define CIFS_READ_OP	1
1528 #define CIFS_WRITE_OP	2
1529 
1530 /* @rw_check : 0 - no op, 1 - read, 2 - write */
1531 static bool
1532 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
1533 			    __u64 length, __u8 type, __u16 flags,
1534 			    struct cifsFileInfo *cfile,
1535 			    struct cifsLockInfo **conf_lock, int rw_check)
1536 {
1537 	struct cifsLockInfo *li;
1538 	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
1539 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1540 
1541 	list_for_each_entry(li, &fdlocks->locks, llist) {
1542 		if (offset + length <= li->offset ||
1543 		    offset >= li->offset + li->length)
1544 			continue;
1545 		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1546 		    server->ops->compare_fids(cfile, cur_cfile)) {
1547 			/* shared lock prevents write op through the same fid */
1548 			if (!(li->type & server->vals->shared_lock_type) ||
1549 			    rw_check != CIFS_WRITE_OP)
1550 				continue;
1551 		}
1552 		if ((type & server->vals->shared_lock_type) &&
1553 		    ((server->ops->compare_fids(cfile, cur_cfile) &&
1554 		     current->tgid == li->pid) || type == li->type))
1555 			continue;
1556 		if (rw_check == CIFS_LOCK_OP &&
1557 		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1558 		    server->ops->compare_fids(cfile, cur_cfile))
1559 			continue;
1560 		if (conf_lock)
1561 			*conf_lock = li;
1562 		return true;
1563 	}
1564 	return false;
1565 }
1566 
1567 bool
1568 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1569 			__u8 type, __u16 flags,
1570 			struct cifsLockInfo **conf_lock, int rw_check)
1571 {
1572 	bool rc = false;
1573 	struct cifs_fid_locks *cur;
1574 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1575 
1576 	list_for_each_entry(cur, &cinode->llist, llist) {
1577 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1578 						 flags, cfile, conf_lock,
1579 						 rw_check);
1580 		if (rc)
1581 			break;
1582 	}
1583 
1584 	return rc;
1585 }
1586 
1587 /*
1588  * Check if there is another lock that prevents us from setting the lock
1589  * (mandatory style). If such a lock exists, update the flock structure with
1590  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1591  * brlocks or leave it the same if we can't. Returns 0 if we don't need to
1592  * send a request to the server, or 1 otherwise.
1593  */
1594 static int
1595 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1596 	       __u8 type, struct file_lock *flock)
1597 {
1598 	int rc = 0;
1599 	struct cifsLockInfo *conf_lock;
1600 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1601 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1602 	bool exist;
1603 
1604 	down_read(&cinode->lock_sem);
1605 
1606 	exist = cifs_find_lock_conflict(cfile, offset, length, type,
1607 					flock->c.flc_flags, &conf_lock,
1608 					CIFS_LOCK_OP);
1609 	if (exist) {
1610 		flock->fl_start = conf_lock->offset;
1611 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1612 		flock->c.flc_pid = conf_lock->pid;
1613 		if (conf_lock->type & server->vals->shared_lock_type)
1614 			flock->c.flc_type = F_RDLCK;
1615 		else
1616 			flock->c.flc_type = F_WRLCK;
1617 	} else if (!cinode->can_cache_brlcks)
1618 		rc = 1;
1619 	else
1620 		flock->c.flc_type = F_UNLCK;
1621 
1622 	up_read(&cinode->lock_sem);
1623 	return rc;
1624 }
1625 
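/* Append @lock to the file handle's lock list under lock_sem */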
1626 static void
1627 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1628 {
1629 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1630 	cifs_down_write(&cinode->lock_sem);
1631 	list_add_tail(&lock->llist, &cfile->llist->locks);
1632 	up_write(&cinode->lock_sem);
1633 }
1634 
1635 /*
1636  * Set the byte-range lock (mandatory style). Returns:
1637  * 1) 0, if we set the lock and don't need to send a request to the server;
1638  * 2) 1, if no locks prevent us but we need to send a request to the server;
1639  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1640  */
1641 static int
1642 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1643 		 bool wait)
1644 {
1645 	struct cifsLockInfo *conf_lock;
1646 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1647 	bool exist;
1648 	int rc = 0;
1649 
1650 try_again:
1651 	exist = false;
1652 	cifs_down_write(&cinode->lock_sem);
1653 
1654 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1655 					lock->type, lock->flags, &conf_lock,
1656 					CIFS_LOCK_OP);
1657 	if (!exist && cinode->can_cache_brlcks) {
1658 		list_add_tail(&lock->llist, &cfile->llist->locks);
1659 		up_write(&cinode->lock_sem);
1660 		return rc;
1661 	}
1662 
1663 	if (!exist)
1664 		rc = 1;
1665 	else if (!wait)
1666 		rc = -EACCES;
1667 	else {
1668 		list_add_tail(&lock->blist, &conf_lock->blist);
1669 		up_write(&cinode->lock_sem);
1670 		rc = wait_event_interruptible(lock->block_q,
1671 					(lock->blist.prev == &lock->blist) &&
1672 					(lock->blist.next == &lock->blist));
1673 		if (!rc)
1674 			goto try_again;
1675 		cifs_down_write(&cinode->lock_sem);
1676 		list_del_init(&lock->blist);
1677 	}
1678 
1679 	up_write(&cinode->lock_sem);
1680 	return rc;
1681 }
1682 
1683 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1684 /*
1685  * Check if there is another lock that prevents us from setting the lock
1686  * (posix style). If such a lock exists, update the flock structure with
1687  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1688  * brlocks or leave it the same if we can't. Returns 0 if we don't need to
1689  * send a request to the server, or 1 otherwise.
1690  */
1691 static int
1692 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1693 {
1694 	int rc = 0;
1695 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1696 	unsigned char saved_type = flock->c.flc_type;
1697 
1698 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1699 		return 1;
1700 
1701 	down_read(&cinode->lock_sem);
1702 	posix_test_lock(file, flock);
1703 
1704 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1705 		flock->c.flc_type = saved_type;
1706 		rc = 1;
1707 	}
1708 
1709 	up_read(&cinode->lock_sem);
1710 	return rc;
1711 }
1712 
1713 /*
1714  * Set the byte-range lock (posix style). Returns:
1715  * 1) <0, if an error occurs while setting the lock;
1716  * 2) 0, if we set the lock and don't need to request to the server;
1717  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1718  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1719  */
1720 static int
1721 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1722 {
1723 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1724 	int rc = FILE_LOCK_DEFERRED + 1;
1725 
1726 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1727 		return rc;
1728 
1729 	cifs_down_write(&cinode->lock_sem);
1730 	if (!cinode->can_cache_brlcks) {
1731 		up_write(&cinode->lock_sem);
1732 		return rc;
1733 	}
1734 
1735 	rc = posix_lock_file(file, flock, NULL);
1736 	up_write(&cinode->lock_sem);
1737 	return rc;
1738 }
1739 
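/*
 * Push all cached mandatory byte-range locks for @cfile to the server.
 * Locks are sent in batches of LOCKING_ANDX_RANGE entries sized to fit
 * the server's maxBuf (capped at PAGE_SIZE), in two passes: one for
 * exclusive locks and one for shared locks.
 */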
1740 int
1741 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1742 {
1743 	unsigned int xid;
1744 	int rc = 0, stored_rc;
1745 	struct cifsLockInfo *li, *tmp;
1746 	struct cifs_tcon *tcon;
1747 	unsigned int num, max_num, max_buf;
1748 	LOCKING_ANDX_RANGE *buf, *cur;
1749 	static const int types[] = {
1750 		LOCKING_ANDX_LARGE_FILES,
1751 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1752 	};
1753 	int i;
1754 
1755 	xid = get_xid();
1756 	tcon = tlink_tcon(cfile->tlink);
1757 
1758 	/*
1759 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1760 	 * and check it before using.
1761 	 */
1762 	max_buf = tcon->ses->server->maxBuf;
1763 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1764 		free_xid(xid);
1765 		return -EINVAL;
1766 	}
1767 
1768 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1769 		     PAGE_SIZE);
1770 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1771 			PAGE_SIZE);
1772 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1773 						sizeof(LOCKING_ANDX_RANGE);
1774 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1775 	if (!buf) {
1776 		free_xid(xid);
1777 		return -ENOMEM;
1778 	}
1779 
1780 	for (i = 0; i < 2; i++) {
1781 		cur = buf;
1782 		num = 0;
1783 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1784 			if (li->type != types[i])
1785 				continue;
1786 			cur->Pid = cpu_to_le16(li->pid);
1787 			cur->LengthLow = cpu_to_le32((u32)li->length);
1788 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1789 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1790 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1791 			if (++num == max_num) {
1792 				stored_rc = cifs_lockv(xid, tcon,
1793 						       cfile->fid.netfid,
1794 						       (__u8)li->type, 0, num,
1795 						       buf);
1796 				if (stored_rc)
1797 					rc = stored_rc;
1798 				cur = buf;
1799 				num = 0;
1800 			} else
1801 				cur++;
1802 		}
1803 
1804 		if (num) {
1805 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1806 					       (__u8)types[i], 0, num, buf);
1807 			if (stored_rc)
1808 				rc = stored_rc;
1809 		}
1810 	}
1811 
1812 	kfree(buf);
1813 	free_xid(xid);
1814 	return rc;
1815 }
1816 
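/*
 * Reduce an fl_owner_t to a 32-bit value that can be sent to the server
 * as the lock owner pid, mixed with cifs_lock_secret so that raw pointer
 * values are not leaked onto the wire.
 */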
1817 static __u32
1818 hash_lockowner(fl_owner_t owner)
1819 {
1820 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1821 }
1822 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1823 
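/*
 * Snapshot of a posix lock, taken under flc_lock, so that the actual
 * lock request can be sent to the server after the spinlock is dropped.
 */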
1824 struct lock_to_push {
1825 	struct list_head llist;
1826 	__u64 offset;
1827 	__u64 length;
1828 	__u32 pid;
1829 	__u16 netfid;
1830 	__u8 type;
1831 };
1832 
1833 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
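/*
 * Push all cached posix byte-range locks for @cfile to the server.
 * CIFSSMBPosixLock() can sleep, so the locks are first counted and
 * preallocated, then copied out of the inode's lock list under flc_lock,
 * and only sent to the server once the spinlock has been dropped.
 */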
1834 static int
1835 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1836 {
1837 	struct inode *inode = d_inode(cfile->dentry);
1838 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1839 	struct file_lock *flock;
1840 	struct file_lock_context *flctx = locks_inode_context(inode);
1841 	unsigned int count = 0, i;
1842 	int rc = 0, xid, type;
1843 	struct list_head locks_to_send, *el;
1844 	struct lock_to_push *lck, *tmp;
1845 	__u64 length;
1846 
1847 	xid = get_xid();
1848 
1849 	if (!flctx)
1850 		goto out;
1851 
1852 	spin_lock(&flctx->flc_lock);
1853 	list_for_each(el, &flctx->flc_posix) {
1854 		count++;
1855 	}
1856 	spin_unlock(&flctx->flc_lock);
1857 
1858 	INIT_LIST_HEAD(&locks_to_send);
1859 
1860 	/*
1861 	 * Allocating count locks is enough because no FL_POSIX locks can be
1862 	 * added to the list while we hold cinode->lock_sem, which protects
1863 	 * the locking operations on this inode.
1864 	 */
1865 	for (i = 0; i < count; i++) {
1866 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1867 		if (!lck) {
1868 			rc = -ENOMEM;
1869 			goto err_out;
1870 		}
1871 		list_add_tail(&lck->llist, &locks_to_send);
1872 	}
1873 
1874 	el = locks_to_send.next;
1875 	spin_lock(&flctx->flc_lock);
1876 	for_each_file_lock(flock, &flctx->flc_posix) {
1877 		unsigned char ftype = flock->c.flc_type;
1878 
1879 		if (el == &locks_to_send) {
1880 			/*
1881 			 * The list ended. We don't have enough allocated
1882 			 * structures - something is really wrong.
1883 			 */
1884 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1885 			break;
1886 		}
1887 		length = cifs_flock_len(flock);
1888 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1889 			type = CIFS_RDLCK;
1890 		else
1891 			type = CIFS_WRLCK;
1892 		lck = list_entry(el, struct lock_to_push, llist);
1893 		lck->pid = hash_lockowner(flock->c.flc_owner);
1894 		lck->netfid = cfile->fid.netfid;
1895 		lck->length = length;
1896 		lck->type = type;
1897 		lck->offset = flock->fl_start;
1898 	}
1899 	spin_unlock(&flctx->flc_lock);
1900 
1901 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1902 		int stored_rc;
1903 
1904 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1905 					     lck->offset, lck->length, NULL,
1906 					     lck->type, 0);
1907 		if (stored_rc)
1908 			rc = stored_rc;
1909 		list_del(&lck->llist);
1910 		kfree(lck);
1911 	}
1912 
1913 out:
1914 	free_xid(xid);
1915 	return rc;
1916 err_out:
1917 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1918 		list_del(&lck->llist);
1919 		kfree(lck);
1920 	}
1921 	goto out;
1922 }
1923 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1924 
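/*
 * Push all locks cached on @cfile to the server and stop caching any
 * further byte-range locks on this inode. Posix locks are used if the
 * unix extensions allow it; otherwise the locks are sent mandatory style.
 */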
1925 static int
1926 cifs_push_locks(struct cifsFileInfo *cfile)
1927 {
1928 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1929 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1930 	int rc = 0;
1931 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1932 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1933 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1934 
1935 	/* we are going to update can_cache_brlcks here - need a write access */
1936 	cifs_down_write(&cinode->lock_sem);
1937 	if (!cinode->can_cache_brlcks) {
1938 		up_write(&cinode->lock_sem);
1939 		return rc;
1940 	}
1941 
1942 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1943 	if (cap_unix(tcon->ses) &&
1944 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1945 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1946 		rc = cifs_push_posix_locks(cfile);
1947 	else
1948 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1949 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1950 
1951 	cinode->can_cache_brlcks = false;
1952 	up_write(&cinode->lock_sem);
1953 	return rc;
1954 }
1955 
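/*
 * Translate a VFS file_lock into the server's lock @type and decide
 * whether the request is a lock or an unlock and whether the caller
 * should wait for a blocking lock.
 */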
1956 static void
1957 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1958 		bool *wait_flag, struct TCP_Server_Info *server)
1959 {
1960 	if (flock->c.flc_flags & FL_POSIX)
1961 		cifs_dbg(FYI, "Posix\n");
1962 	if (flock->c.flc_flags & FL_FLOCK)
1963 		cifs_dbg(FYI, "Flock\n");
1964 	if (flock->c.flc_flags & FL_SLEEP) {
1965 		cifs_dbg(FYI, "Blocking lock\n");
1966 		*wait_flag = true;
1967 	}
1968 	if (flock->c.flc_flags & FL_ACCESS)
1969 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1970 	if (flock->c.flc_flags & FL_LEASE)
1971 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1972 	if (flock->c.flc_flags &
1973 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1974 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1975 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1976 		         flock->c.flc_flags);
1977 
1978 	*type = server->vals->large_lock_type;
1979 	if (lock_is_write(flock)) {
1980 		cifs_dbg(FYI, "F_WRLCK\n");
1981 		*type |= server->vals->exclusive_lock_type;
1982 		*lock = 1;
1983 	} else if (lock_is_unlock(flock)) {
1984 		cifs_dbg(FYI, "F_UNLCK\n");
1985 		*type |= server->vals->unlock_lock_type;
1986 		*unlock = 1;
1987 		/* Check if unlock includes more than one lock range */
1988 	} else if (lock_is_read(flock)) {
1989 		cifs_dbg(FYI, "F_RDLCK\n");
1990 		*type |= server->vals->shared_lock_type;
1991 		*lock = 1;
1992 	} else if (flock->c.flc_type == F_EXLCK) {
1993 		cifs_dbg(FYI, "F_EXLCK\n");
1994 		*type |= server->vals->exclusive_lock_type;
1995 		*lock = 1;
1996 	} else if (flock->c.flc_type == F_SHLCK) {
1997 		cifs_dbg(FYI, "F_SHLCK\n");
1998 		*type |= server->vals->shared_lock_type;
1999 		*lock = 1;
2000 	} else
2001 		cifs_dbg(FYI, "Unknown type of lock\n");
2002 }
2003 
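/*
 * Handle an F_GETLK-style request: first test against the locally cached
 * locks, then probe the server by trying to set (and immediately unset)
 * the lock. On return, flock describes the result: F_UNLCK if the range
 * could be locked, otherwise the type of lock that would conflict.
 *
 * A minimal calling sketch (hypothetical values, error handling elided):
 *
 *	// flock->fl_start/fl_end describe the range to be tested
 *	rc = cifs_getlk(file, flock, type, false, posix_lck, xid);
 *	// on return flock->c.flc_type is F_UNLCK, F_RDLCK or F_WRLCK
 */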
2004 static int
2005 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
2006 	   bool wait_flag, bool posix_lck, unsigned int xid)
2007 {
2008 	int rc = 0;
2009 	__u64 length = cifs_flock_len(flock);
2010 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2011 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2012 	struct TCP_Server_Info *server = tcon->ses->server;
2013 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2014 	__u16 netfid = cfile->fid.netfid;
2015 
2016 	if (posix_lck) {
2017 		int posix_lock_type;
2018 
2019 		rc = cifs_posix_lock_test(file, flock);
2020 		if (!rc)
2021 			return rc;
2022 
2023 		if (type & server->vals->shared_lock_type)
2024 			posix_lock_type = CIFS_RDLCK;
2025 		else
2026 			posix_lock_type = CIFS_WRLCK;
2027 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2028 				      hash_lockowner(flock->c.flc_owner),
2029 				      flock->fl_start, length, flock,
2030 				      posix_lock_type, wait_flag);
2031 		return rc;
2032 	}
2033 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2034 
2035 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2036 	if (!rc)
2037 		return rc;
2038 
2039 	/* BB we could chain these into one lock request BB */
2040 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2041 				    1, 0, false);
2042 	if (rc == 0) {
2043 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2044 					    type, 0, 1, false);
2045 		flock->c.flc_type = F_UNLCK;
2046 		if (rc != 0)
2047 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2048 				 rc);
2049 		return 0;
2050 	}
2051 
2052 	if (type & server->vals->shared_lock_type) {
2053 		flock->c.flc_type = F_WRLCK;
2054 		return 0;
2055 	}
2056 
2057 	type &= ~server->vals->exclusive_lock_type;
2058 
2059 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2060 				    type | server->vals->shared_lock_type,
2061 				    1, 0, false);
2062 	if (rc == 0) {
2063 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2064 			type | server->vals->shared_lock_type, 0, 1, false);
2065 		flock->c.flc_type = F_RDLCK;
2066 		if (rc != 0)
2067 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2068 				 rc);
2069 	} else
2070 		flock->c.flc_type = F_WRLCK;
2071 
2072 	return 0;
2073 }
2074 
2075 void
2076 cifs_move_llist(struct list_head *source, struct list_head *dest)
2077 {
2078 	struct list_head *li, *tmp;
2079 	list_for_each_safe(li, tmp, source)
2080 		list_move(li, dest);
2081 }
2082 
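/*
 * Return -EINVAL if the inode behind @file already has an open handle
 * that was opened with exactly the same f_flags, and 0 otherwise.
 */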
2083 int
2084 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2085 				struct file *file)
2086 {
2087 	struct cifsFileInfo *open_file = NULL;
2088 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2089 	int rc = 0;
2090 
2091 	spin_lock(&tcon->open_file_lock);
2092 	spin_lock(&cinode->open_file_lock);
2093 
2094 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2095 		if (file->f_flags == open_file->f_flags) {
2096 			rc = -EINVAL;
2097 			break;
2098 		}
2099 	}
2100 
2101 	spin_unlock(&cinode->open_file_lock);
2102 	spin_unlock(&tcon->open_file_lock);
2103 	return rc;
2104 }
2105 
2106 void
2107 cifs_free_llist(struct list_head *llist)
2108 {
2109 	struct cifsLockInfo *li, *tmp;
2110 	list_for_each_entry_safe(li, tmp, llist, llist) {
2111 		cifs_del_lock_waiters(li);
2112 		list_del(&li->llist);
2113 		kfree(li);
2114 	}
2115 }
2116 
2117 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2118 int
2119 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2120 		  unsigned int xid)
2121 {
2122 	int rc = 0, stored_rc;
2123 	static const int types[] = {
2124 		LOCKING_ANDX_LARGE_FILES,
2125 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2126 	};
2127 	unsigned int i;
2128 	unsigned int max_num, num, max_buf;
2129 	LOCKING_ANDX_RANGE *buf, *cur;
2130 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2131 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2132 	struct cifsLockInfo *li, *tmp;
2133 	__u64 length = cifs_flock_len(flock);
2134 	LIST_HEAD(tmp_llist);
2135 
2136 	/*
2137 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2138 	 * and check it before using.
2139 	 */
2140 	max_buf = tcon->ses->server->maxBuf;
2141 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2142 		return -EINVAL;
2143 
2144 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2145 		     PAGE_SIZE);
2146 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2147 			PAGE_SIZE);
2148 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2149 						sizeof(LOCKING_ANDX_RANGE);
2150 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2151 	if (!buf)
2152 		return -ENOMEM;
2153 
2154 	cifs_down_write(&cinode->lock_sem);
2155 	for (i = 0; i < 2; i++) {
2156 		cur = buf;
2157 		num = 0;
2158 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2159 			if (flock->fl_start > li->offset ||
2160 			    (flock->fl_start + length) <
2161 			    (li->offset + li->length))
2162 				continue;
2163 			if (current->tgid != li->pid)
2164 				continue;
2165 			if (types[i] != li->type)
2166 				continue;
2167 			if (cinode->can_cache_brlcks) {
2168 				/*
2169 				 * We can cache brlock requests - simply remove
2170 				 * a lock from the file's list.
2171 				 */
2172 				list_del(&li->llist);
2173 				cifs_del_lock_waiters(li);
2174 				kfree(li);
2175 				continue;
2176 			}
2177 			cur->Pid = cpu_to_le16(li->pid);
2178 			cur->LengthLow = cpu_to_le32((u32)li->length);
2179 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2180 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2181 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2182 			/*
2183 			 * We need to save a lock here to let us add it again to
2184 			 * the file's list if the unlock range request fails on
2185 			 * the server.
2186 			 */
2187 			list_move(&li->llist, &tmp_llist);
2188 			if (++num == max_num) {
2189 				stored_rc = cifs_lockv(xid, tcon,
2190 						       cfile->fid.netfid,
2191 						       li->type, num, 0, buf);
2192 				if (stored_rc) {
2193 					/*
2194 					 * We failed on the unlock range
2195 					 * request - add all locks from the tmp
2196 					 * list to the head of the file's list.
2197 					 */
2198 					cifs_move_llist(&tmp_llist,
2199 							&cfile->llist->locks);
2200 					rc = stored_rc;
2201 				} else
2202 					/*
2203 					 * The unlock range request succeeded -
2204 					 * free the tmp list.
2205 					 */
2206 					cifs_free_llist(&tmp_llist);
2207 				cur = buf;
2208 				num = 0;
2209 			} else
2210 				cur++;
2211 		}
2212 		if (num) {
2213 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2214 					       types[i], num, 0, buf);
2215 			if (stored_rc) {
2216 				cifs_move_llist(&tmp_llist,
2217 						&cfile->llist->locks);
2218 				rc = stored_rc;
2219 			} else
2220 				cifs_free_llist(&tmp_llist);
2221 		}
2222 	}
2223 
2224 	up_write(&cinode->lock_sem);
2225 	kfree(buf);
2226 	return rc;
2227 }
2228 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2229 
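/*
 * Apply a lock or unlock request: posix style via CIFSSMBPosixLock()
 * when the unix extensions allow it, otherwise mandatory style via the
 * server's mand_lock/mand_unlock_range operations, keeping the local
 * lock list in sync. For FL_POSIX/FL_FLOCK requests the VFS lock state
 * is updated as well via locks_lock_file_wait().
 */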
2230 static int
2231 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2232 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2233 	   unsigned int xid)
2234 {
2235 	int rc = 0;
2236 	__u64 length = cifs_flock_len(flock);
2237 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2238 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2239 	struct TCP_Server_Info *server = tcon->ses->server;
2240 	struct inode *inode = d_inode(cfile->dentry);
2241 
2242 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2243 	if (posix_lck) {
2244 		int posix_lock_type;
2245 
2246 		rc = cifs_posix_lock_set(file, flock);
2247 		if (rc <= FILE_LOCK_DEFERRED)
2248 			return rc;
2249 
2250 		if (type & server->vals->shared_lock_type)
2251 			posix_lock_type = CIFS_RDLCK;
2252 		else
2253 			posix_lock_type = CIFS_WRLCK;
2254 
2255 		if (unlock == 1)
2256 			posix_lock_type = CIFS_UNLCK;
2257 
2258 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2259 				      hash_lockowner(flock->c.flc_owner),
2260 				      flock->fl_start, length,
2261 				      NULL, posix_lock_type, wait_flag);
2262 		goto out;
2263 	}
2264 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2265 	if (lock) {
2266 		struct cifsLockInfo *lock;
2267 
2268 		lock = cifs_lock_init(flock->fl_start, length, type,
2269 				      flock->c.flc_flags);
2270 		if (!lock)
2271 			return -ENOMEM;
2272 
2273 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2274 		if (rc < 0) {
2275 			kfree(lock);
2276 			return rc;
2277 		}
2278 		if (!rc)
2279 			goto out;
2280 
2281 		/*
2282 		 * Windows 7 server can delay breaking lease from read to None
2283 		 * if we set a byte-range lock on a file - break it explicitly
2284 		 * before sending the lock to the server to be sure the next
2285 		 * read won't conflict with non-overlapping locks due to
2286 		 * page reading.
2287 		 */
2288 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2289 					CIFS_CACHE_READ(CIFS_I(inode))) {
2290 			cifs_zap_mapping(inode);
2291 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2292 				 inode);
2293 			CIFS_I(inode)->oplock = 0;
2294 		}
2295 
2296 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2297 					    type, 1, 0, wait_flag);
2298 		if (rc) {
2299 			kfree(lock);
2300 			return rc;
2301 		}
2302 
2303 		cifs_lock_add(cfile, lock);
2304 	} else if (unlock)
2305 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2306 
2307 out:
2308 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2309 		/*
2310 		 * If this is a request to remove all locks because we
2311 		 * are closing the file, it doesn't matter if the
2312 		 * unlocking failed as both cifs.ko and the SMB server
2313 		 * remove the lock on file close
2314 		 */
2315 		if (rc) {
2316 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2317 			if (!(flock->c.flc_flags & FL_CLOSE))
2318 				return rc;
2319 		}
2320 		rc = locks_lock_file_wait(file, flock);
2321 	}
2322 	return rc;
2323 }
2324 
2325 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2326 {
2327 	int rc, xid;
2328 	int lock = 0, unlock = 0;
2329 	bool wait_flag = false;
2330 	bool posix_lck = false;
2331 	struct cifs_sb_info *cifs_sb;
2332 	struct cifs_tcon *tcon;
2333 	struct cifsFileInfo *cfile;
2334 	__u32 type;
2335 
2336 	xid = get_xid();
2337 
2338 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2339 		rc = -ENOLCK;
2340 		free_xid(xid);
2341 		return rc;
2342 	}
2343 
2344 	cfile = (struct cifsFileInfo *)file->private_data;
2345 	tcon = tlink_tcon(cfile->tlink);
2346 
2347 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2348 			tcon->ses->server);
2349 	cifs_sb = CIFS_FILE_SB(file);
2350 
2351 	if (cap_unix(tcon->ses) &&
2352 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2353 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2354 		posix_lck = true;
2355 
2356 	if (!lock && !unlock) {
2357 		/*
2358 		 * If this is neither a lock nor an unlock request, we do not
2359 		 * know what it is - nothing to do.
2360 		 */
2361 		rc = -EOPNOTSUPP;
2362 		free_xid(xid);
2363 		return rc;
2364 	}
2365 
2366 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2367 			xid);
2368 	free_xid(xid);
2369 	return rc;
2372 }
2373 
2374 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2375 {
2376 	int rc, xid;
2377 	int lock = 0, unlock = 0;
2378 	bool wait_flag = false;
2379 	bool posix_lck = false;
2380 	struct cifs_sb_info *cifs_sb;
2381 	struct cifs_tcon *tcon;
2382 	struct cifsFileInfo *cfile;
2383 	__u32 type;
2384 
2385 	rc = -EACCES;
2386 	xid = get_xid();
2387 
2388 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2389 		 flock->c.flc_flags, flock->c.flc_type,
2390 		 (long long)flock->fl_start,
2391 		 (long long)flock->fl_end);
2392 
2393 	cfile = (struct cifsFileInfo *)file->private_data;
2394 	tcon = tlink_tcon(cfile->tlink);
2395 
2396 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2397 			tcon->ses->server);
2398 	cifs_sb = CIFS_FILE_SB(file);
2399 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2400 
2401 	if (cap_unix(tcon->ses) &&
2402 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2403 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2404 		posix_lck = true;
2405 	/*
2406 	 * BB add code here to normalize offset and length to account for
2407 	 * negative length which we can not accept over the wire.
2408 	 */
2409 	if (IS_GETLK(cmd)) {
2410 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2411 		free_xid(xid);
2412 		return rc;
2413 	}
2414 
2415 	if (!lock && !unlock) {
2416 		/*
2417 		 * If this is neither a lock nor an unlock request, we do not
2418 		 * know what it is - nothing to do.
2419 		 */
2420 		free_xid(xid);
2421 		return -EOPNOTSUPP;
2422 	}
2423 
2424 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2425 			xid);
2426 	free_xid(xid);
2427 	return rc;
2428 }
2429 
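/*
 * Completion of a write subrequest. On success, advance the netfs zero
 * point (for unbuffered/DIO writes) and the cached remote file size to
 * cover the newly written range before passing the result to netfslib.
 */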
2430 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
2431 {
2432 	struct netfs_io_request *wreq = wdata->rreq;
2433 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2434 	loff_t wrend;
2435 
2436 	if (result > 0) {
2437 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2438 
2439 		if (wrend > ictx->zero_point &&
2440 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2441 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2442 			ictx->zero_point = wrend;
2443 		if (wrend > ictx->remote_i_size)
2444 			netfs_resize_file(ictx, wrend, true);
2445 	}
2446 
2447 	netfs_write_subrequest_terminated(&wdata->subreq, result);
2448 }
2449 
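/*
 * Walk the inode's open file list looking for a handle opened for read
 * whose server handle is still valid, and take a reference on it.
 * Returns NULL if none is found. A sketch of the expected calling
 * pattern (error handling elided):
 *
 *	open_file = find_readable_file(cinode, false);
 *	if (open_file) {
 *		// ... use the handle ...
 *		cifsFileInfo_put(open_file);
 *	}
 */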
2450 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2451 					bool fsuid_only)
2452 {
2453 	struct cifsFileInfo *open_file = NULL;
2454 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2455 
2456 	/* only filter by fsuid on multiuser mounts */
2457 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2458 		fsuid_only = false;
2459 
2460 	spin_lock(&cifs_inode->open_file_lock);
2461 	/* we could simply get the first_list_entry since write-only entries
2462 	   are always at the end of the list but since the first entry might
2463 	   have a close pending, we go through the whole list */
2464 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2465 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2466 			continue;
2467 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2468 			if ((!open_file->invalidHandle)) {
2469 				/* found a good file */
2470 				/* lock it so it will not be closed on us */
2471 				cifsFileInfo_get(open_file);
2472 				spin_unlock(&cifs_inode->open_file_lock);
2473 				return open_file;
2474 			} /* else might as well continue, and look for
2475 			     another, or simply have the caller reopen it
2476 			     again rather than trying to fix this handle */
2477 		} else /* write only file */
2478 			break; /* write only files are last so must be done */
2479 	}
2480 	spin_unlock(&cifs_inode->open_file_lock);
2481 	return NULL;
2482 }
2483 
2484 /* Return -EBADF if no handle is found and general rc otherwise */
2485 int
2486 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2487 		       struct cifsFileInfo **ret_file)
2488 {
2489 	struct cifsFileInfo *open_file, *inv_file = NULL;
2490 	struct cifs_sb_info *cifs_sb;
2491 	bool any_available = false;
2492 	int rc = -EBADF;
2493 	unsigned int refind = 0;
2494 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2495 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2496 	*ret_file = NULL;
2497 
2498 	/*
2499 	 * Having a null inode here (because mapping->host was set to zero by
2500 	 * the VFS or MM) should not happen but we had reports of an oops (due
2501 	 * to it being zero) during stress test cases, so we need to check for it.
2502 	 */
2503 
2504 	if (cifs_inode == NULL) {
2505 		cifs_dbg(VFS, "Null inode passed to %s\n", __func__);
2506 		dump_stack();
2507 		return rc;
2508 	}
2509 
2510 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2511 
2512 	/* only filter by fsuid on multiuser mounts */
2513 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2514 		fsuid_only = false;
2515 
2516 	spin_lock(&cifs_inode->open_file_lock);
2517 refind_writable:
2518 	if (refind > MAX_REOPEN_ATT) {
2519 		spin_unlock(&cifs_inode->open_file_lock);
2520 		return rc;
2521 	}
2522 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2523 		if (!any_available && open_file->pid != current->tgid)
2524 			continue;
2525 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2526 			continue;
2527 		if (with_delete && !(open_file->fid.access & DELETE))
2528 			continue;
2529 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2530 			if (!open_file->invalidHandle) {
2531 				/* found a good writable file */
2532 				cifsFileInfo_get(open_file);
2533 				spin_unlock(&cifs_inode->open_file_lock);
2534 				*ret_file = open_file;
2535 				return 0;
2536 			} else {
2537 				if (!inv_file)
2538 					inv_file = open_file;
2539 			}
2540 		}
2541 	}
2542 	/* couldn't find usable FH with same pid, try any available */
2543 	if (!any_available) {
2544 		any_available = true;
2545 		goto refind_writable;
2546 	}
2547 
2548 	if (inv_file) {
2549 		any_available = false;
2550 		cifsFileInfo_get(inv_file);
2551 	}
2552 
2553 	spin_unlock(&cifs_inode->open_file_lock);
2554 
2555 	if (inv_file) {
2556 		rc = cifs_reopen_file(inv_file, false);
2557 		if (!rc) {
2558 			*ret_file = inv_file;
2559 			return 0;
2560 		}
2561 
2562 		spin_lock(&cifs_inode->open_file_lock);
2563 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2564 		spin_unlock(&cifs_inode->open_file_lock);
2565 		cifsFileInfo_put(inv_file);
2566 		++refind;
2567 		inv_file = NULL;
2568 		spin_lock(&cifs_inode->open_file_lock);
2569 		goto refind_writable;
2570 	}
2571 
2572 	return rc;
2573 }
2574 
2575 struct cifsFileInfo *
2576 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2577 {
2578 	struct cifsFileInfo *cfile;
2579 	int rc;
2580 
2581 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2582 	if (rc)
2583 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2584 
2585 	return cfile;
2586 }
2587 
2588 int
2589 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2590 		       int flags,
2591 		       struct cifsFileInfo **ret_file)
2592 {
2593 	struct cifsFileInfo *cfile;
2594 	void *page = alloc_dentry_path();
2595 
2596 	*ret_file = NULL;
2597 
2598 	spin_lock(&tcon->open_file_lock);
2599 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2600 		struct cifsInodeInfo *cinode;
2601 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2602 		if (IS_ERR(full_path)) {
2603 			spin_unlock(&tcon->open_file_lock);
2604 			free_dentry_path(page);
2605 			return PTR_ERR(full_path);
2606 		}
2607 		if (strcmp(full_path, name))
2608 			continue;
2609 
2610 		cinode = CIFS_I(d_inode(cfile->dentry));
2611 		spin_unlock(&tcon->open_file_lock);
2612 		free_dentry_path(page);
2613 		return cifs_get_writable_file(cinode, flags, ret_file);
2614 	}
2615 
2616 	spin_unlock(&tcon->open_file_lock);
2617 	free_dentry_path(page);
2618 	return -ENOENT;
2619 }
2620 
2621 int
2622 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2623 		       struct cifsFileInfo **ret_file)
2624 {
2625 	struct cifsFileInfo *cfile;
2626 	void *page = alloc_dentry_path();
2627 
2628 	*ret_file = NULL;
2629 
2630 	spin_lock(&tcon->open_file_lock);
2631 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2632 		struct cifsInodeInfo *cinode;
2633 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2634 		if (IS_ERR(full_path)) {
2635 			spin_unlock(&tcon->open_file_lock);
2636 			free_dentry_path(page);
2637 			return PTR_ERR(full_path);
2638 		}
2639 		if (strcmp(full_path, name))
2640 			continue;
2641 
2642 		cinode = CIFS_I(d_inode(cfile->dentry));
2643 		spin_unlock(&tcon->open_file_lock);
2644 		free_dentry_path(page);
2645 		*ret_file = find_readable_file(cinode, 0);
2646 		return *ret_file ? 0 : -ENOENT;
2647 	}
2648 
2649 	spin_unlock(&tcon->open_file_lock);
2650 	free_dentry_path(page);
2651 	return -ENOENT;
2652 }
2653 
2654 /*
2655  * Flush data on a strict file.
2656  */
2657 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2658 		      int datasync)
2659 {
2660 	unsigned int xid;
2661 	int rc = 0;
2662 	struct cifs_tcon *tcon;
2663 	struct TCP_Server_Info *server;
2664 	struct cifsFileInfo *smbfile = file->private_data;
2665 	struct inode *inode = file_inode(file);
2666 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2667 
2668 	rc = file_write_and_wait_range(file, start, end);
2669 	if (rc) {
2670 		trace_cifs_fsync_err(inode->i_ino, rc);
2671 		return rc;
2672 	}
2673 
2674 	xid = get_xid();
2675 
2676 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2677 		 file, datasync);
2678 
2679 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2680 		rc = cifs_zap_mapping(inode);
2681 		if (rc) {
2682 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2683 			rc = 0; /* don't care about it in fsync */
2684 		}
2685 	}
2686 
2687 	tcon = tlink_tcon(smbfile->tlink);
2688 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2689 		server = tcon->ses->server;
2690 		if (server->ops->flush == NULL) {
2691 			rc = -ENOSYS;
2692 			goto strict_fsync_exit;
2693 		}
2694 
2695 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2696 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2697 			if (smbfile) {
2698 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2699 				cifsFileInfo_put(smbfile);
2700 			} else
2701 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2702 		} else
2703 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2704 	}
2705 
2706 strict_fsync_exit:
2707 	free_xid(xid);
2708 	return rc;
2709 }
2710 
2711 /*
2712  * Flush data on a non-strict file.
2713  */
2714 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2715 {
2716 	unsigned int xid;
2717 	int rc = 0;
2718 	struct cifs_tcon *tcon;
2719 	struct TCP_Server_Info *server;
2720 	struct cifsFileInfo *smbfile = file->private_data;
2721 	struct inode *inode = file_inode(file);
2722 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2723 
2724 	rc = file_write_and_wait_range(file, start, end);
2725 	if (rc) {
2726 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2727 		return rc;
2728 	}
2729 
2730 	xid = get_xid();
2731 
2732 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2733 		 file, datasync);
2734 
2735 	tcon = tlink_tcon(smbfile->tlink);
2736 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2737 		server = tcon->ses->server;
2738 		if (server->ops->flush == NULL) {
2739 			rc = -ENOSYS;
2740 			goto fsync_exit;
2741 		}
2742 
2743 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2744 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2745 			if (smbfile) {
2746 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2747 				cifsFileInfo_put(smbfile);
2748 			} else
2749 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2750 		} else
2751 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2752 	}
2753 
2754 fsync_exit:
2755 	free_xid(xid);
2756 	return rc;
2757 }
2758 
2759 /*
2760  * As the file closes, flush all cached write data for this inode, checking
2761  * for write-behind errors.
2762  */
2763 int cifs_flush(struct file *file, fl_owner_t id)
2764 {
2765 	struct inode *inode = file_inode(file);
2766 	int rc = 0;
2767 
2768 	if (file->f_mode & FMODE_WRITE)
2769 		rc = filemap_write_and_wait(inode->i_mapping);
2770 
2771 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2772 	if (rc) {
2773 		/* get more nuanced writeback errors */
2774 		rc = filemap_check_wb_err(file->f_mapping, 0);
2775 		trace_cifs_flush_err(inode->i_ino, rc);
2776 	}
2777 	return rc;
2778 }
2779 
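/*
 * Buffered write for the strict cache case without posix lock semantics:
 * take lock_sem shared to keep the lock list stable and, on mounts using
 * mandatory lock semantics (CIFS_MOUNT_NOPOSIXBRL), fail with -EACCES if
 * a cached brlock conflicts with the write range.
 */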
2780 static ssize_t
2781 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2782 {
2783 	struct file *file = iocb->ki_filp;
2784 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2785 	struct inode *inode = file->f_mapping->host;
2786 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2787 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2788 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2789 	ssize_t rc;
2790 
2791 	rc = netfs_start_io_write(inode);
2792 	if (rc < 0)
2793 		return rc;
2794 
2795 	/*
2796 	 * We need to hold the sem to be sure nobody modifies lock list
2797 	 * with a brlock that prevents writing.
2798 	 */
2799 	down_read(&cinode->lock_sem);
2800 
2801 	rc = generic_write_checks(iocb, from);
2802 	if (rc <= 0)
2803 		goto out;
2804 
2805 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2806 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2807 				     server->vals->exclusive_lock_type, 0,
2808 				     NULL, CIFS_WRITE_OP))) {
2809 		rc = -EACCES;
2810 		goto out;
2811 	}
2812 
2813 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2814 
2815 out:
2816 	up_read(&cinode->lock_sem);
2817 	netfs_end_io_write(inode);
2818 	if (rc > 0)
2819 		rc = generic_write_sync(iocb, rc);
2820 	return rc;
2821 }
2822 
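/*
 * Write entry point for strict cache mode. With a write oplock/lease the
 * data can be cached; otherwise it is written through to the server and,
 * if we hold read caching, the now-stale pagecache is zapped and the
 * oplock dropped to NONE.
 */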
2823 ssize_t
2824 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2825 {
2826 	struct inode *inode = file_inode(iocb->ki_filp);
2827 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2828 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2829 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2830 						iocb->ki_filp->private_data;
2831 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2832 	ssize_t written;
2833 
2834 	written = cifs_get_writer(cinode);
2835 	if (written)
2836 		return written;
2837 
2838 	if (CIFS_CACHE_WRITE(cinode)) {
2839 		if (cap_unix(tcon->ses) &&
2840 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2841 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2842 			written = netfs_file_write_iter(iocb, from);
2843 			goto out;
2844 		}
2845 		written = cifs_writev(iocb, from);
2846 		goto out;
2847 	}
2848 	/*
2849 	 * For non-oplocked files in strict cache mode we need to write the data
2850 	 * to the server exactly from pos to pos+len-1 rather than flushing all
2851 	 * affected pages because it may cause an error with mandatory locks on
2852 	 * these pages but not on the region from pos to pos+len-1.
2853 	 */
2854 	written = netfs_file_write_iter(iocb, from);
2855 	if (CIFS_CACHE_READ(cinode)) {
2856 		/*
2857 		 * We have read level caching and we have just sent a write
2858 		 * request to the server thus making data in the cache stale.
2859 		 * Zap the cache and set oplock/lease level to NONE to avoid
2860 		 * reading stale data from the cache. All subsequent read
2861 		 * operations will read new data from the server.
2862 		 */
2863 		cifs_zap_mapping(inode);
2864 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2865 			 inode);
2866 		cinode->oplock = 0;
2867 	}
2868 out:
2869 	cifs_put_writer(cinode);
2870 	return written;
2871 }
2872 
2873 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2874 {
2875 	ssize_t rc;
2876 	struct inode *inode = file_inode(iocb->ki_filp);
2877 
2878 	if (iocb->ki_flags & IOCB_DIRECT)
2879 		return netfs_unbuffered_read_iter(iocb, iter);
2880 
2881 	rc = cifs_revalidate_mapping(inode);
2882 	if (rc)
2883 		return rc;
2884 
2885 	return netfs_file_read_iter(iocb, iter);
2886 }
2887 
2888 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2889 {
2890 	struct inode *inode = file_inode(iocb->ki_filp);
2891 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2892 	ssize_t written;
2893 	int rc;
2894 
2895 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2896 		written = netfs_unbuffered_write_iter(iocb, from);
2897 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2898 			cifs_zap_mapping(inode);
2899 			cifs_dbg(FYI,
2900 				 "Set no oplock for inode=%p after a write operation\n",
2901 				 inode);
2902 			cinode->oplock = 0;
2903 		}
2904 		return written;
2905 	}
2906 
2907 	written = cifs_get_writer(cinode);
2908 	if (written)
2909 		return written;
2910 
2911 	written = netfs_file_write_iter(iocb, from);
2912 
2913 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2914 		rc = filemap_fdatawrite(inode->i_mapping);
2915 		if (rc)
2916 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2917 				 rc, inode);
2918 	}
2919 
2920 	cifs_put_writer(cinode);
2921 	return written;
2922 }
2923 
2924 ssize_t
2925 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2926 {
2927 	struct inode *inode = file_inode(iocb->ki_filp);
2928 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2929 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2930 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2931 						iocb->ki_filp->private_data;
2932 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2933 	int rc = -EACCES;
2934 
2935 	/*
2936 	 * In strict cache mode we need to read from the server all the time
2937 	 * if we don't have level II oplock because the server can delay mtime
2938 	 * change - so we can't make a decision about invalidating the inode.
2939 	 * And we can also fail with page reading if there are mandatory locks
2940 	 * on pages affected by this read but not on the region from pos to
2941 	 * pos+len-1.
2942 	 */
2943 	if (!CIFS_CACHE_READ(cinode))
2944 		return netfs_unbuffered_read_iter(iocb, to);
2945 
2946 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2947 		if (iocb->ki_flags & IOCB_DIRECT)
2948 			return netfs_unbuffered_read_iter(iocb, to);
2949 		return netfs_buffered_read_iter(iocb, to);
2950 	}
2951 
2952 	/*
2953 	 * We need to hold the sem to be sure nobody modifies lock list
2954 	 * with a brlock that prevents reading.
2955 	 */
2956 	if (iocb->ki_flags & IOCB_DIRECT) {
2957 		rc = netfs_start_io_direct(inode);
2958 		if (rc < 0)
2959 			goto out;
2960 		rc = -EACCES;
2961 		down_read(&cinode->lock_sem);
2962 		if (!cifs_find_lock_conflict(
2963 			    cfile, iocb->ki_pos, iov_iter_count(to),
2964 			    tcon->ses->server->vals->shared_lock_type,
2965 			    0, NULL, CIFS_READ_OP))
2966 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2967 		up_read(&cinode->lock_sem);
2968 		netfs_end_io_direct(inode);
2969 	} else {
2970 		rc = netfs_start_io_read(inode);
2971 		if (rc < 0)
2972 			goto out;
2973 		rc = -EACCES;
2974 		down_read(&cinode->lock_sem);
2975 		if (!cifs_find_lock_conflict(
2976 			    cfile, iocb->ki_pos, iov_iter_count(to),
2977 			    tcon->ses->server->vals->shared_lock_type,
2978 			    0, NULL, CIFS_READ_OP))
2979 			rc = filemap_read(iocb, to, 0);
2980 		up_read(&cinode->lock_sem);
2981 		netfs_end_io_read(inode);
2982 	}
2983 out:
2984 	return rc;
2985 }
2986 
2987 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2988 {
2989 	return netfs_page_mkwrite(vmf, NULL);
2990 }
2991 
2992 static const struct vm_operations_struct cifs_file_vm_ops = {
2993 	.fault = filemap_fault,
2994 	.map_pages = filemap_map_pages,
2995 	.page_mkwrite = cifs_page_mkwrite,
2996 };
2997 
2998 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2999 {
3000 	int xid, rc = 0;
3001 	struct inode *inode = file_inode(file);
3002 
3003 	xid = get_xid();
3004 
3005 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3006 		rc = cifs_zap_mapping(inode);
3007 	if (!rc)
3008 		rc = generic_file_mmap(file, vma);
3009 	if (!rc)
3010 		vma->vm_ops = &cifs_file_vm_ops;
3011 
3012 	free_xid(xid);
3013 	return rc;
3014 }
3015 
3016 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3017 {
3018 	int rc, xid;
3019 
3020 	xid = get_xid();
3021 
3022 	rc = cifs_revalidate_file(file);
3023 	if (rc)
3024 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3025 			 rc);
3026 	if (!rc)
3027 		rc = generic_file_mmap(file, vma);
3028 	if (!rc)
3029 		vma->vm_ops = &cifs_file_vm_ops;
3030 
3031 	free_xid(xid);
3032 	return rc;
3033 }
3034 
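/* Return 1 if any open handle on the inode was opened for writing. */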
3035 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3036 {
3037 	struct cifsFileInfo *open_file;
3038 
3039 	spin_lock(&cifs_inode->open_file_lock);
3040 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3041 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3042 			spin_unlock(&cifs_inode->open_file_lock);
3043 			return 1;
3044 		}
3045 	}
3046 	spin_unlock(&cifs_inode->open_file_lock);
3047 	return 0;
3048 }
3049 
3050 /*
3051  * We do not want to update the file size from the server for inodes open for
3052  * write, to avoid races with writepage extending the file. Refreshing only on
3053  * size increases could be allowed in the future, but this is tricky to do
3054  * without racing with writebehind page caching in the current kernel design.
3055  */
3056 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3057 			    bool from_readdir)
3058 {
3059 	if (!cifsInode)
3060 		return true;
3061 
3062 	if (is_inode_writable(cifsInode) ||
3063 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3064 		/* This inode is open for write at least once */
3065 		struct cifs_sb_info *cifs_sb;
3066 
3067 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3068 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3069 			/* since no page cache to corrupt on directio,
3070 			   we can change the size safely */
3071 			return true;
3072 		}
3073 
3074 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3075 			return true;
3076 
3077 		return false;
3078 	} else
3079 		return true;
3080 }
3081 
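/*
 * Work item run when the server breaks an oplock/lease: wait for pending
 * writers, downgrade the cached oplock state, flush (and possibly zap)
 * the pagecache, push cached byte-range locks to the server and then
 * acknowledge the break unless the file has already been closed.
 */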
3082 void cifs_oplock_break(struct work_struct *work)
3083 {
3084 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3085 						  oplock_break);
3086 	struct inode *inode = d_inode(cfile->dentry);
3087 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3088 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3089 	struct cifs_tcon *tcon;
3090 	struct TCP_Server_Info *server;
3091 	struct tcon_link *tlink;
3092 	int rc = 0;
3093 	bool purge_cache = false, oplock_break_cancelled;
3094 	__u64 persistent_fid, volatile_fid;
3095 	__u16 net_fid;
3096 
3097 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3098 			TASK_UNINTERRUPTIBLE);
3099 
3100 	tlink = cifs_sb_tlink(cifs_sb);
3101 	if (IS_ERR(tlink))
3102 		goto out;
3103 	tcon = tlink_tcon(tlink);
3104 	server = tcon->ses->server;
3105 
3106 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3107 				      cfile->oplock_epoch, &purge_cache);
3108 
3109 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3110 						cifs_has_mand_locks(cinode)) {
3111 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3112 			 inode);
3113 		cinode->oplock = 0;
3114 	}
3115 
3116 	if (S_ISREG(inode->i_mode)) {
3117 		if (CIFS_CACHE_READ(cinode))
3118 			break_lease(inode, O_RDONLY);
3119 		else
3120 			break_lease(inode, O_WRONLY);
3121 		rc = filemap_fdatawrite(inode->i_mapping);
3122 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3123 			rc = filemap_fdatawait(inode->i_mapping);
3124 			mapping_set_error(inode->i_mapping, rc);
3125 			cifs_zap_mapping(inode);
3126 		}
3127 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3128 		if (CIFS_CACHE_WRITE(cinode))
3129 			goto oplock_break_ack;
3130 	}
3131 
3132 	rc = cifs_push_locks(cfile);
3133 	if (rc)
3134 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3135 
3136 oplock_break_ack:
3137 	/*
3138 	 * When an oplock break is received and there are no active file
3139 	 * handles, only cached ones, schedule the deferred close immediately
3140 	 * so that a new open will not use the cached handle.
3141 	 */
3142 
3143 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3144 		cifs_close_deferred_file(cinode);
3145 
3146 	persistent_fid = cfile->fid.persistent_fid;
3147 	volatile_fid = cfile->fid.volatile_fid;
3148 	net_fid = cfile->fid.netfid;
3149 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3150 
3151 	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
3152 	/*
3153 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3154 	 * an acknowledgment to be sent when the file has already been closed.
3155 	 */
3156 	spin_lock(&cinode->open_file_lock);
3157 	/* check list empty since can race with kill_sb calling tree disconnect */
3158 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3159 		spin_unlock(&cinode->open_file_lock);
3160 		rc = server->ops->oplock_response(tcon, persistent_fid,
3161 						  volatile_fid, net_fid, cinode);
3162 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3163 	} else
3164 		spin_unlock(&cinode->open_file_lock);
3165 
3166 	cifs_put_tlink(tlink);
3167 out:
3168 	cifs_done_oplock_break(cinode);
3169 }
3170 
3171 static int cifs_swap_activate(struct swap_info_struct *sis,
3172 			      struct file *swap_file, sector_t *span)
3173 {
3174 	struct cifsFileInfo *cfile = swap_file->private_data;
3175 	struct inode *inode = swap_file->f_mapping->host;
3176 	unsigned long blocks;
3177 	long long isize;
3178 
3179 	cifs_dbg(FYI, "swap activate\n");
3180 
3181 	if (!swap_file->f_mapping->a_ops->swap_rw)
3182 		/* Cannot support swap */
3183 		return -EINVAL;
3184 
3185 	spin_lock(&inode->i_lock);
3186 	blocks = inode->i_blocks;
3187 	isize = inode->i_size;
3188 	spin_unlock(&inode->i_lock);
3189 	if (blocks*512 < isize) {
3190 		pr_warn("swap activate: swapfile has holes\n");
3191 		return -EINVAL;
3192 	}
3193 	*span = sis->pages;
3194 
3195 	pr_warn_once("Swap support over SMB3 is experimental\n");
3196 
3197 	/*
3198 	 * TODO: consider adding ACL (or documenting how) to prevent other
3199 	 * users (on this or other systems) from reading it
3200 	 */
3201 
3203 	/* TODO: add sk_set_memalloc(inet) or similar */
3204 
3205 	if (cfile)
3206 		cfile->swapfile = true;
3207 	/*
3208 	 * TODO: Since file already open, we can't open with DENY_ALL here
3209 	 * but we could add call to grab a byte range lock to prevent others
3210 	 * from reading or writing the file
3211 	 */
3212 
3213 	sis->flags |= SWP_FS_OPS;
3214 	return add_swap_extent(sis, 0, sis->max, 0);
3215 }
3216 
3217 static void cifs_swap_deactivate(struct file *file)
3218 {
3219 	struct cifsFileInfo *cfile = file->private_data;
3220 
3221 	cifs_dbg(FYI, "swap deactivate\n");
3222 
3223 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3224 
3225 	if (cfile)
3226 		cfile->swapfile = false;
3227 
3228 	/* do we need to unpin (or unlock) the file */
3229 }
3230 
3231 /**
3232  * cifs_swap_rw - SMB3 address space operation for swap I/O
3233  * @iocb: target I/O control block
3234  * @iter: I/O buffer
3235  *
3236  * Perform IO to the swap-file.  This is much like direct IO.
3237  */
3238 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3239 {
3240 	ssize_t ret;
3241 
3242 	if (iov_iter_rw(iter) == READ)
3243 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3244 	else
3245 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3246 	if (ret < 0)
3247 		return ret;
3248 	return 0;
3249 }
3250 
3251 const struct address_space_operations cifs_addr_ops = {
3252 	.read_folio	= netfs_read_folio,
3253 	.readahead	= netfs_readahead,
3254 	.writepages	= netfs_writepages,
3255 	.dirty_folio	= netfs_dirty_folio,
3256 	.release_folio	= netfs_release_folio,
3257 	.direct_IO	= noop_direct_IO,
3258 	.invalidate_folio = netfs_invalidate_folio,
3259 	.migrate_folio	= filemap_migrate_folio,
3260 	/*
3261 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3262 	 * helper if needed
3263 	 */
3264 	.swap_activate	= cifs_swap_activate,
3265 	.swap_deactivate = cifs_swap_deactivate,
3266 	.swap_rw = cifs_swap_rw,
3267 };
3268 
3269 /*
3270  * cifs_readahead requires the server to support a buffer large enough to
3271  * contain the header plus one complete page of data.  Otherwise, we need
3272  * to leave cifs_readahead out of the address space operations.
3273  */
3274 const struct address_space_operations cifs_addr_ops_smallbuf = {
3275 	.read_folio	= netfs_read_folio,
3276 	.writepages	= netfs_writepages,
3277 	.dirty_folio	= netfs_dirty_folio,
3278 	.release_folio	= netfs_release_folio,
3279 	.invalidate_folio = netfs_invalidate_folio,
3280 	.migrate_folio	= filemap_migrate_folio,
3281 };
3282