xref: /linux/fs/smb/client/file.c (revision 3186a8e55ae3428ec1e06af09075e20885376e4e)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/filelock.h>
13 #include <linux/backing-dev.h>
14 #include <linux/stat.h>
15 #include <linux/fcntl.h>
16 #include <linux/pagemap.h>
17 #include <linux/pagevec.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifspdu.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
53 	struct TCP_Server_Info *server;
54 	struct cifsFileInfo *open_file = req->cfile;
55 	size_t wsize = req->rreq.wsize;
56 	int rc;
57 
58 	if (!wdata->have_xid) {
59 		wdata->xid = get_xid();
60 		wdata->have_xid = true;
61 	}
62 
63 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
64 	wdata->server = server;
65 
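	/*
	 * The handle may have been invalidated (e.g. by a reconnect); reopen
	 * it before requesting credits, retrying while the reopen itself
	 * returns -EAGAIN.
	 */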
66 retry:
67 	if (open_file->invalidHandle) {
68 		rc = cifs_reopen_file(open_file, false);
69 		if (rc < 0) {
70 			if (rc == -EAGAIN)
71 				goto retry;
72 			subreq->error = rc;
73 			return netfs_prepare_write_failed(subreq);
74 		}
75 	}
76 
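	/*
	 * Wait for send credits; this also caps stream->sreq_max_len at the
	 * largest single write the server will currently accept.
	 */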
77 	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
78 					   &wdata->credits);
79 	if (rc < 0) {
80 		subreq->error = rc;
81 		return netfs_prepare_write_failed(subreq);
82 	}
83 
84 	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
85 	wdata->credits.rreq_debug_index = subreq->debug_index;
86 	wdata->credits.in_flight_check = 1;
87 	trace_smb3_rw_credits(wdata->rreq->debug_id,
88 			      wdata->subreq.debug_index,
89 			      wdata->credits.value,
90 			      server->credits, server->in_flight,
91 			      wdata->credits.value,
92 			      cifs_trace_rw_credits_write_prepare);
93 
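	/* With smbdirect, the RDMA fast-registration depth bounds the segment count. */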
94 #ifdef CONFIG_CIFS_SMB_DIRECT
95 	if (server->smbd_conn)
96 		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
97 #endif
98 }
99 
100 /*
101  * Issue a subrequest to upload to the server.
102  */
103 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
104 {
105 	struct cifs_io_subrequest *wdata =
106 		container_of(subreq, struct cifs_io_subrequest, subreq);
107 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
108 	int rc;
109 
110 	if (cifs_forced_shutdown(sbi)) {
111 		rc = -EIO;
112 		goto fail;
113 	}
114 
115 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
116 	if (rc)
117 		goto fail;
118 
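	/*
	 * If the handle was invalidated after the credits were obtained, fail
	 * with -EAGAIN so the netfs layer retries the subrequest (the reopen
	 * happens in cifs_prepare_write()).
	 */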
119 	rc = -EAGAIN;
120 	if (wdata->req->cfile->invalidHandle)
121 		goto fail;
122 
123 	wdata->server->ops->async_writev(wdata);
124 out:
125 	return;
126 
127 fail:
128 	if (rc == -EAGAIN)
129 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
130 	else
131 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
132 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
133 	cifs_write_subrequest_terminated(wdata, rc, false);
134 	goto out;
135 }
136 
137 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
138 {
139 	cifs_invalidate_cache(wreq->inode, 0);
140 }
141 
142 /*
143  * Negotiate the size of a read operation on behalf of the netfs library.
144  */
145 static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
146 {
147 	struct netfs_io_request *rreq = subreq->rreq;
148 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
149 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
150 	struct TCP_Server_Info *server;
151 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
152 	size_t size;
153 	int rc = 0;
154 
155 	if (!rdata->have_xid) {
156 		rdata->xid = get_xid();
157 		rdata->have_xid = true;
158 	}
159 
160 	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
161 	rdata->server = server;
162 
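	/* Recompute the preferred read size for this tcon and channel. */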
163 	cifs_negotiate_rsize(server, cifs_sb->ctx,
164 			     tlink_tcon(req->cfile->tlink));
165 
166 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
167 					   &size, &rdata->credits);
168 	if (rc)
169 		return rc;
170 
171 	rreq->io_streams[0].sreq_max_len = size;
172 
173 	rdata->credits.in_flight_check = 1;
174 	rdata->credits.rreq_debug_id = rreq->debug_id;
175 	rdata->credits.rreq_debug_index = subreq->debug_index;
176 
177 	trace_smb3_rw_credits(rdata->rreq->debug_id,
178 			      rdata->subreq.debug_index,
179 			      rdata->credits.value,
180 			      server->credits, server->in_flight, 0,
181 			      cifs_trace_rw_credits_read_submit);
182 
183 #ifdef CONFIG_CIFS_SMB_DIRECT
184 	if (server->smbd_conn)
185 		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
186 #endif
187 	return 0;
188 }
189 
190 /*
191  * Issue a read operation on behalf of the netfs helper functions.  We're asked
192  * to make a read of a certain size at a point in the file.  We are permitted
193  * to only read a portion of that, but as long as we read something, the netfs
194  * helper will call us again so that we can issue another read.
195  */
196 static void cifs_issue_read(struct netfs_io_subrequest *subreq)
197 {
198 	struct netfs_io_request *rreq = subreq->rreq;
199 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
200 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
201 	struct TCP_Server_Info *server = rdata->server;
202 	int rc = 0;
203 
204 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
205 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
206 		 subreq->transferred, subreq->len);
207 
208 	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
209 	if (rc)
210 		goto failed;
211 
212 	if (req->cfile->invalidHandle) {
213 		do {
214 			rc = cifs_reopen_file(req->cfile, true);
215 		} while (rc == -EAGAIN);
216 		if (rc)
217 			goto failed;
218 	}
219 
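	/* For buffered reads, let netfs clear the tail of a short read. */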
220 	if (subreq->rreq->origin != NETFS_DIO_READ)
221 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
222 
223 	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
224 	rc = rdata->server->ops->async_readv(rdata);
225 	if (rc)
226 		goto failed;
227 	return;
228 
229 failed:
230 	subreq->error = rc;
231 	netfs_read_subreq_terminated(subreq);
232 }
233 
234 /*
235  * Writeback calls this when it finds a folio that needs uploading.  This isn't
236  * called if writeback only has copy-to-cache to deal with.
237  */
238 static void cifs_begin_writeback(struct netfs_io_request *wreq)
239 {
240 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
241 	int ret;
242 
243 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
244 	if (ret) {
245 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
246 		return;
247 	}
248 
249 	wreq->io_streams[0].avail = true;
250 }
251 
252 /*
253  * Initialise a request.
254  */
255 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
256 {
257 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
258 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
259 	struct cifsFileInfo *open_file = NULL;
260 
261 	rreq->rsize = cifs_sb->ctx->rsize;
262 	rreq->wsize = cifs_sb->ctx->wsize;
263 	req->pid = current->tgid; // Ummm...  This may be a workqueue
264 
265 	if (file) {
266 		open_file = file->private_data;
267 		rreq->netfs_priv = file->private_data;
268 		req->cfile = cifsFileInfo_get(open_file);
269 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
270 			req->pid = req->cfile->pid;
271 	} else if (rreq->origin != NETFS_WRITEBACK) {
272 		WARN_ON_ONCE(1);
273 		return -EIO;
274 	}
275 
276 	return 0;
277 }
278 
279 /*
280  * Completion of a request operation.
281  */
282 static void cifs_rreq_done(struct netfs_io_request *rreq)
283 {
284 	struct timespec64 atime, mtime;
285 	struct inode *inode = rreq->inode;
286 
287 	/* we do not want atime to be less than mtime; it broke some apps */
288 	atime = inode_set_atime_to_ts(inode, current_time(inode));
289 	mtime = inode_get_mtime(inode);
290 	if (timespec64_compare(&atime, &mtime) < 0)
291 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
292 }
293 
294 static void cifs_free_request(struct netfs_io_request *rreq)
295 {
296 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
297 
298 	if (req->cfile)
299 		cifsFileInfo_put(req->cfile);
300 }
301 
302 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
303 {
304 	struct cifs_io_subrequest *rdata =
305 		container_of(subreq, struct cifs_io_subrequest, subreq);
306 	int rc = subreq->error;
307 
308 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
309 #ifdef CONFIG_CIFS_SMB_DIRECT
310 		if (rdata->mr) {
311 			smbd_deregister_mr(rdata->mr);
312 			rdata->mr = NULL;
313 		}
314 #endif
315 	}
316 
317 	if (rdata->credits.value != 0) {
318 		trace_smb3_rw_credits(rdata->rreq->debug_id,
319 				      rdata->subreq.debug_index,
320 				      rdata->credits.value,
321 				      rdata->server ? rdata->server->credits : 0,
322 				      rdata->server ? rdata->server->in_flight : 0,
323 				      -rdata->credits.value,
324 				      cifs_trace_rw_credits_free_subreq);
325 		if (rdata->server)
326 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
327 		else
328 			rdata->credits.value = 0;
329 	}
330 
331 	if (rdata->have_xid)
332 		free_xid(rdata->xid);
333 }
334 
335 const struct netfs_request_ops cifs_req_ops = {
336 	.request_pool		= &cifs_io_request_pool,
337 	.subrequest_pool	= &cifs_io_subrequest_pool,
338 	.init_request		= cifs_init_request,
339 	.free_request		= cifs_free_request,
340 	.free_subrequest	= cifs_free_subrequest,
341 	.prepare_read		= cifs_prepare_read,
342 	.issue_read		= cifs_issue_read,
343 	.done			= cifs_rreq_done,
344 	.begin_writeback	= cifs_begin_writeback,
345 	.prepare_write		= cifs_prepare_write,
346 	.issue_write		= cifs_issue_write,
347 	.invalidate_cache	= cifs_netfs_invalidate_cache,
348 };
349 
350 /*
351  * Mark all open files on the tree connection as invalid, since they
352  * were closed when the session to the server was lost.
353  */
354 void
355 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
356 {
357 	struct cifsFileInfo *open_file = NULL;
358 	struct list_head *tmp;
359 	struct list_head *tmp1;
360 
361 	/* only send once per connect */
362 	spin_lock(&tcon->tc_lock);
363 	if (tcon->need_reconnect)
364 		tcon->status = TID_NEED_RECON;
365 
366 	if (tcon->status != TID_NEED_RECON) {
367 		spin_unlock(&tcon->tc_lock);
368 		return;
369 	}
370 	tcon->status = TID_IN_FILES_INVALIDATE;
371 	spin_unlock(&tcon->tc_lock);
372 
373 	/* list all files open on tree connection and mark them invalid */
374 	spin_lock(&tcon->open_file_lock);
375 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
376 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
377 		open_file->invalidHandle = true;
378 		open_file->oplock_break_cancelled = true;
379 	}
380 	spin_unlock(&tcon->open_file_lock);
381 
382 	invalidate_all_cached_dirs(tcon);
383 	spin_lock(&tcon->tc_lock);
384 	if (tcon->status == TID_IN_FILES_INVALIDATE)
385 		tcon->status = TID_NEED_TCON;
386 	spin_unlock(&tcon->tc_lock);
387 
388 	/*
389 	 * BB Add call to evict_inodes(sb) for all superblocks mounted
390 	 * to this tcon.
391 	 */
392 }
393 
394 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
395 {
396 	if ((flags & O_ACCMODE) == O_RDONLY)
397 		return GENERIC_READ;
398 	else if ((flags & O_ACCMODE) == O_WRONLY)
399 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
400 	else if ((flags & O_ACCMODE) == O_RDWR) {
401 		/* GENERIC_ALL is too much permission to request; it can
402 		   cause unnecessary access-denied errors on create */
403 		/* return GENERIC_ALL; */
404 		return (GENERIC_READ | GENERIC_WRITE);
405 	}
406 
407 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
408 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
409 		FILE_READ_DATA);
410 }
411 
412 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
413 static u32 cifs_posix_convert_flags(unsigned int flags)
414 {
415 	u32 posix_flags = 0;
416 
417 	if ((flags & O_ACCMODE) == O_RDONLY)
418 		posix_flags = SMB_O_RDONLY;
419 	else if ((flags & O_ACCMODE) == O_WRONLY)
420 		posix_flags = SMB_O_WRONLY;
421 	else if ((flags & O_ACCMODE) == O_RDWR)
422 		posix_flags = SMB_O_RDWR;
423 
424 	if (flags & O_CREAT) {
425 		posix_flags |= SMB_O_CREAT;
426 		if (flags & O_EXCL)
427 			posix_flags |= SMB_O_EXCL;
428 	} else if (flags & O_EXCL)
429 		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
430 			 current->comm, current->tgid);
431 
432 	if (flags & O_TRUNC)
433 		posix_flags |= SMB_O_TRUNC;
434 	/* be safe and imply O_SYNC for O_DSYNC */
435 	if (flags & O_DSYNC)
436 		posix_flags |= SMB_O_SYNC;
437 	if (flags & O_DIRECTORY)
438 		posix_flags |= SMB_O_DIRECTORY;
439 	if (flags & O_NOFOLLOW)
440 		posix_flags |= SMB_O_NOFOLLOW;
441 	if (flags & O_DIRECT)
442 		posix_flags |= SMB_O_DIRECT;
443 
444 	return posix_flags;
445 }
446 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
447 
448 static inline int cifs_get_disposition(unsigned int flags)
449 {
450 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
451 		return FILE_CREATE;
452 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
453 		return FILE_OVERWRITE_IF;
454 	else if ((flags & O_CREAT) == O_CREAT)
455 		return FILE_OPEN_IF;
456 	else if ((flags & O_TRUNC) == O_TRUNC)
457 		return FILE_OVERWRITE;
458 	else
459 		return FILE_OPEN;
460 }
461 
462 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
463 int cifs_posix_open(const char *full_path, struct inode **pinode,
464 			struct super_block *sb, int mode, unsigned int f_flags,
465 			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
466 {
467 	int rc;
468 	FILE_UNIX_BASIC_INFO *presp_data;
469 	__u32 posix_flags = 0;
470 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
471 	struct cifs_fattr fattr;
472 	struct tcon_link *tlink;
473 	struct cifs_tcon *tcon;
474 
475 	cifs_dbg(FYI, "posix open %s\n", full_path);
476 
477 	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
478 	if (presp_data == NULL)
479 		return -ENOMEM;
480 
481 	tlink = cifs_sb_tlink(cifs_sb);
482 	if (IS_ERR(tlink)) {
483 		rc = PTR_ERR(tlink);
484 		goto posix_open_ret;
485 	}
486 
487 	tcon = tlink_tcon(tlink);
488 	mode &= ~current_umask();
489 
490 	posix_flags = cifs_posix_convert_flags(f_flags);
491 	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
492 			     poplock, full_path, cifs_sb->local_nls,
493 			     cifs_remap(cifs_sb));
494 	cifs_put_tlink(tlink);
495 
496 	if (rc)
497 		goto posix_open_ret;
498 
499 	if (presp_data->Type == cpu_to_le32(-1))
500 		goto posix_open_ret; /* open ok, caller does qpathinfo */
501 
502 	if (!pinode)
503 		goto posix_open_ret; /* caller does not need info */
504 
505 	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
506 
507 	/* get new inode and set it up */
508 	if (*pinode == NULL) {
509 		cifs_fill_uniqueid(sb, &fattr);
510 		*pinode = cifs_iget(sb, &fattr);
511 		if (!*pinode) {
512 			rc = -ENOMEM;
513 			goto posix_open_ret;
514 		}
515 	} else {
516 		cifs_revalidate_mapping(*pinode);
517 		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
518 	}
519 
520 posix_open_ret:
521 	kfree(presp_data);
522 	return rc;
523 }
524 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
525 
526 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
527 			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
528 			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
529 {
530 	int rc;
531 	int desired_access;
532 	int disposition;
533 	int create_options = CREATE_NOT_DIR;
534 	struct TCP_Server_Info *server = tcon->ses->server;
535 	struct cifs_open_parms oparms;
536 	int rdwr_for_fscache = 0;
537 
538 	if (!server->ops->open)
539 		return -ENOSYS;
540 
541 	/* If we're caching, we need to be able to fill in around partial writes. */
542 	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
543 		rdwr_for_fscache = 1;
544 
545 	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
546 
547 /*********************************************************************
548  *  open flag mapping table:
549  *
550  *	POSIX Flag            CIFS Disposition
551  *	----------            ----------------
552  *	O_CREAT               FILE_OPEN_IF
553  *	O_CREAT | O_EXCL      FILE_CREATE
554  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
555  *	O_TRUNC               FILE_OVERWRITE
556  *	none of the above     FILE_OPEN
557  *
558  *	Note that there is no direct POSIX match for the disposition
559  *	FILE_SUPERSEDE (i.e. create whether or not the file exists).
560  *	O_CREAT | O_TRUNC is similar, but it truncates the existing
561  *	file rather than creating a new file as FILE_SUPERSEDE does
562  *	(which uses the attributes / metadata passed in on the open call).
563  *?
564  *?  O_SYNC is a reasonable match to CIFS writethrough flag
565  *?  and the read write flags match reasonably.  O_LARGEFILE
566  *?  is irrelevant because largefile support is always used
567  *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
568  *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
569  *********************************************************************/
570 
571 	disposition = cifs_get_disposition(f_flags);
572 
573 	/* BB pass O_SYNC flag through on file attributes .. BB */
574 
575 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
576 	if (f_flags & O_SYNC)
577 		create_options |= CREATE_WRITE_THROUGH;
578 
579 	if (f_flags & O_DIRECT)
580 		create_options |= CREATE_NO_BUFFER;
581 
582 retry_open:
583 	oparms = (struct cifs_open_parms) {
584 		.tcon = tcon,
585 		.cifs_sb = cifs_sb,
586 		.desired_access = desired_access,
587 		.create_options = cifs_create_options(cifs_sb, create_options),
588 		.disposition = disposition,
589 		.path = full_path,
590 		.fid = fid,
591 	};
592 
593 	rc = server->ops->open(xid, &oparms, oplock, buf);
594 	if (rc) {
595 		if (rc == -EACCES && rdwr_for_fscache == 1) {
596 			desired_access = cifs_convert_flags(f_flags, 0);
597 			rdwr_for_fscache = 2;
598 			goto retry_open;
599 		}
600 		return rc;
601 	}
602 	if (rdwr_for_fscache == 2)
603 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
604 
605 	/* TODO: Add support for calling posix query info but with passing in fid */
606 	if (tcon->unix_ext)
607 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
608 					      xid);
609 	else
610 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
611 					 xid, fid);
612 
613 	if (rc) {
614 		server->ops->close(xid, tcon, fid);
615 		if (rc == -ESTALE)
616 			rc = -EOPENSTALE;
617 	}
618 
619 	return rc;
620 }
621 
622 static bool
623 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
624 {
625 	struct cifs_fid_locks *cur;
626 	bool has_locks = false;
627 
628 	down_read(&cinode->lock_sem);
629 	list_for_each_entry(cur, &cinode->llist, llist) {
630 		if (!list_empty(&cur->locks)) {
631 			has_locks = true;
632 			break;
633 		}
634 	}
635 	up_read(&cinode->lock_sem);
636 	return has_locks;
637 }
638 
639 void
640 cifs_down_write(struct rw_semaphore *sem)
641 {
642 	while (!down_write_trylock(sem))
643 		msleep(10);
644 }
645 
646 static void cifsFileInfo_put_work(struct work_struct *work);
647 void serverclose_work(struct work_struct *work);
648 
649 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
650 				       struct tcon_link *tlink, __u32 oplock,
651 				       const char *symlink_target)
652 {
653 	struct dentry *dentry = file_dentry(file);
654 	struct inode *inode = d_inode(dentry);
655 	struct cifsInodeInfo *cinode = CIFS_I(inode);
656 	struct cifsFileInfo *cfile;
657 	struct cifs_fid_locks *fdlocks;
658 	struct cifs_tcon *tcon = tlink_tcon(tlink);
659 	struct TCP_Server_Info *server = tcon->ses->server;
660 
661 	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
662 	if (cfile == NULL)
663 		return cfile;
664 
665 	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
666 	if (!fdlocks) {
667 		kfree(cfile);
668 		return NULL;
669 	}
670 
671 	if (symlink_target) {
672 		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
673 		if (!cfile->symlink_target) {
674 			kfree(fdlocks);
675 			kfree(cfile);
676 			return NULL;
677 		}
678 	}
679 
680 	INIT_LIST_HEAD(&fdlocks->locks);
681 	fdlocks->cfile = cfile;
682 	cfile->llist = fdlocks;
683 
684 	cfile->count = 1;
685 	cfile->pid = current->tgid;
686 	cfile->uid = current_fsuid();
687 	cfile->dentry = dget(dentry);
688 	cfile->f_flags = file->f_flags;
689 	cfile->invalidHandle = false;
690 	cfile->deferred_close_scheduled = false;
691 	cfile->tlink = cifs_get_tlink(tlink);
692 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
693 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
694 	INIT_WORK(&cfile->serverclose, serverclose_work);
695 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
696 	mutex_init(&cfile->fh_mutex);
697 	spin_lock_init(&cfile->file_info_lock);
698 
699 	cifs_sb_active(inode->i_sb);
700 
701 	/*
702 	 * If the server returned a read oplock and we have mandatory brlocks,
703 	 * set oplock level to None.
704 	 */
705 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
706 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
707 		oplock = 0;
708 	}
709 
710 	cifs_down_write(&cinode->lock_sem);
711 	list_add(&fdlocks->llist, &cinode->llist);
712 	up_write(&cinode->lock_sem);
713 
714 	spin_lock(&tcon->open_file_lock);
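	/*
	 * A lease break may have arrived while this open was pending; if so,
	 * the pending_open entry carries the updated oplock level.
	 */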
715 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
716 		oplock = fid->pending_open->oplock;
717 	list_del(&fid->pending_open->olist);
718 
719 	fid->purge_cache = false;
720 	server->ops->set_fid(cfile, fid, oplock);
721 
722 	list_add(&cfile->tlist, &tcon->openFileList);
723 	atomic_inc(&tcon->num_local_opens);
724 
725 	/* if a readable file instance, put it first in the list */
726 	spin_lock(&cinode->open_file_lock);
727 	if (file->f_mode & FMODE_READ)
728 		list_add(&cfile->flist, &cinode->openFileList);
729 	else
730 		list_add_tail(&cfile->flist, &cinode->openFileList);
731 	spin_unlock(&cinode->open_file_lock);
732 	spin_unlock(&tcon->open_file_lock);
733 
734 	if (fid->purge_cache)
735 		cifs_zap_mapping(inode);
736 
737 	file->private_data = cfile;
738 	return cfile;
739 }
740 
741 struct cifsFileInfo *
742 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
743 {
744 	spin_lock(&cifs_file->file_info_lock);
745 	cifsFileInfo_get_locked(cifs_file);
746 	spin_unlock(&cifs_file->file_info_lock);
747 	return cifs_file;
748 }
749 
750 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
751 {
752 	struct inode *inode = d_inode(cifs_file->dentry);
753 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
754 	struct cifsLockInfo *li, *tmp;
755 	struct super_block *sb = inode->i_sb;
756 
757 	/*
758 	 * Delete any outstanding lock records. We'll lose them when the file
759 	 * is closed anyway.
760 	 */
761 	cifs_down_write(&cifsi->lock_sem);
762 	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
763 		list_del(&li->llist);
764 		cifs_del_lock_waiters(li);
765 		kfree(li);
766 	}
767 	list_del(&cifs_file->llist->llist);
768 	kfree(cifs_file->llist);
769 	up_write(&cifsi->lock_sem);
770 
771 	cifs_put_tlink(cifs_file->tlink);
772 	dput(cifs_file->dentry);
773 	cifs_sb_deactive(sb);
774 	kfree(cifs_file->symlink_target);
775 	kfree(cifs_file);
776 }
777 
778 static void cifsFileInfo_put_work(struct work_struct *work)
779 {
780 	struct cifsFileInfo *cifs_file = container_of(work,
781 			struct cifsFileInfo, put);
782 
783 	cifsFileInfo_put_final(cifs_file);
784 }
785 
786 void serverclose_work(struct work_struct *work)
787 {
788 	struct cifsFileInfo *cifs_file = container_of(work,
789 			struct cifsFileInfo, serverclose);
790 
791 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
792 
793 	struct TCP_Server_Info *server = tcon->ses->server;
794 	int rc = 0;
795 	int retries = 0;
796 	int MAX_RETRIES = 4;
797 
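	/* The server may transiently fail the close; retry a few times. */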
798 	do {
799 		if (server->ops->close_getattr)
800 			rc = server->ops->close_getattr(0, tcon, cifs_file);
801 		else if (server->ops->close)
802 			rc = server->ops->close(0, tcon, &cifs_file->fid);
803 
804 		if (rc == -EBUSY || rc == -EAGAIN) {
805 			retries++;
806 			msleep(250);
807 		}
808 	} while ((rc == -EBUSY || rc == -EAGAIN) &&
809 		 (retries < MAX_RETRIES));
810 
811 	if (retries == MAX_RETRIES)
812 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
813 
814 	if (cifs_file->offload)
815 		queue_work(fileinfo_put_wq, &cifs_file->put);
816 	else
817 		cifsFileInfo_put_final(cifs_file);
818 }
819 
820 /**
821  * cifsFileInfo_put - release a reference of file priv data
822  *
823  * Always potentially waits for the oplock handler; see _cifsFileInfo_put().
824  *
825  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
826  */
827 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
828 {
829 	_cifsFileInfo_put(cifs_file, true, true);
830 }
831 
832 /**
833  * _cifsFileInfo_put - release a reference of file priv data
834  *
835  * This may involve closing the filehandle @cifs_file out on the
836  * server. Must be called without holding tcon->open_file_lock,
837  * cinode->open_file_lock and cifs_file->file_info_lock.
838  *
839  * If @wait_for_oplock_handler is true and we are releasing the last
840  * reference, wait for any running oplock break handler of the file
841  * and cancel any pending one.
842  *
843  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
844  * @wait_oplock_handler: must be false if called from oplock_break_handler
845  * @offload:	if true, offload the final put to a workqueue (the close and oplock break paths pass false)
846  *
847  */
848 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
849 		       bool wait_oplock_handler, bool offload)
850 {
851 	struct inode *inode = d_inode(cifs_file->dentry);
852 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
853 	struct TCP_Server_Info *server = tcon->ses->server;
854 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
855 	struct super_block *sb = inode->i_sb;
856 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
857 	struct cifs_fid fid = {};
858 	struct cifs_pending_open open;
859 	bool oplock_break_cancelled;
860 	bool serverclose_offloaded = false;
861 
862 	spin_lock(&tcon->open_file_lock);
863 	spin_lock(&cifsi->open_file_lock);
864 	spin_lock(&cifs_file->file_info_lock);
865 
866 	cifs_file->offload = offload;
867 	if (--cifs_file->count > 0) {
868 		spin_unlock(&cifs_file->file_info_lock);
869 		spin_unlock(&cifsi->open_file_lock);
870 		spin_unlock(&tcon->open_file_lock);
871 		return;
872 	}
873 	spin_unlock(&cifs_file->file_info_lock);
874 
875 	if (server->ops->get_lease_key)
876 		server->ops->get_lease_key(inode, &fid);
877 
878 	/* store open in pending opens to make sure we don't miss lease break */
879 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
880 
881 	/* remove it from the lists */
882 	list_del(&cifs_file->flist);
883 	list_del(&cifs_file->tlist);
884 	atomic_dec(&tcon->num_local_opens);
885 
886 	if (list_empty(&cifsi->openFileList)) {
887 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
888 			 d_inode(cifs_file->dentry));
889 		/*
890 		 * In strict cache mode we need to invalidate the mapping on the
891 		 * last close, because stale cached data may cause an error when we
892 		 * open this file again and get at least a level II oplock.
893 		 */
894 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
895 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
896 		cifs_set_oplock_level(cifsi, 0);
897 	}
898 
899 	spin_unlock(&cifsi->open_file_lock);
900 	spin_unlock(&tcon->open_file_lock);
901 
902 	oplock_break_cancelled = wait_oplock_handler ?
903 		cancel_work_sync(&cifs_file->oplock_break) : false;
904 
905 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
906 		struct TCP_Server_Info *server = tcon->ses->server;
907 		unsigned int xid;
908 		int rc = 0;
909 
910 		xid = get_xid();
911 		if (server->ops->close_getattr)
912 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
913 		else if (server->ops->close)
914 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
915 		_free_xid(xid);
916 
917 		if (rc == -EBUSY || rc == -EAGAIN) {
918 			// Server close failed, hence offloading it as an async op
919 			queue_work(serverclose_wq, &cifs_file->serverclose);
920 			serverclose_offloaded = true;
921 		}
922 	}
923 
924 	if (oplock_break_cancelled)
925 		cifs_done_oplock_break(cifsi);
926 
927 	cifs_del_pending_open(&open);
928 
929 	// If serverclose has been offloaded to the wq (on failure), it will
930 	// handle offloading the put as well. If serverclose was not offloaded,
931 	// we need to handle offloading the put here.
932 	if (!serverclose_offloaded) {
933 		if (offload)
934 			queue_work(fileinfo_put_wq, &cifs_file->put);
935 		else
936 			cifsFileInfo_put_final(cifs_file);
937 	}
938 }
939 
940 int cifs_open(struct inode *inode, struct file *file)
941 
942 {
943 	int rc = -EACCES;
944 	unsigned int xid;
945 	__u32 oplock;
946 	struct cifs_sb_info *cifs_sb;
947 	struct TCP_Server_Info *server;
948 	struct cifs_tcon *tcon;
949 	struct tcon_link *tlink;
950 	struct cifsFileInfo *cfile = NULL;
951 	void *page;
952 	const char *full_path;
953 	bool posix_open_ok = false;
954 	struct cifs_fid fid = {};
955 	struct cifs_pending_open open;
956 	struct cifs_open_info_data data = {};
957 
958 	xid = get_xid();
959 
960 	cifs_sb = CIFS_SB(inode->i_sb);
961 	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
962 		free_xid(xid);
963 		return -EIO;
964 	}
965 
966 	tlink = cifs_sb_tlink(cifs_sb);
967 	if (IS_ERR(tlink)) {
968 		free_xid(xid);
969 		return PTR_ERR(tlink);
970 	}
971 	tcon = tlink_tcon(tlink);
972 	server = tcon->ses->server;
973 
974 	page = alloc_dentry_path();
975 	full_path = build_path_from_dentry(file_dentry(file), page);
976 	if (IS_ERR(full_path)) {
977 		rc = PTR_ERR(full_path);
978 		goto out;
979 	}
980 
981 	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
982 		 inode, file->f_flags, full_path);
983 
984 	if (file->f_flags & O_DIRECT &&
985 	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
986 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
987 			file->f_op = &cifs_file_direct_nobrl_ops;
988 		else
989 			file->f_op = &cifs_file_direct_ops;
990 	}
991 
992 	/* Get the cached handle as SMB2 close is deferred */
993 	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
994 		rc = cifs_get_writable_path(tcon, full_path, FIND_WR_FSUID_ONLY, &cfile);
995 	} else {
996 		rc = cifs_get_readable_path(tcon, full_path, &cfile);
997 	}
998 	if (rc == 0) {
999 		if (file->f_flags == cfile->f_flags) {
1000 			file->private_data = cfile;
1001 			spin_lock(&CIFS_I(inode)->deferred_lock);
1002 			cifs_del_deferred_close(cfile);
1003 			spin_unlock(&CIFS_I(inode)->deferred_lock);
1004 			goto use_cache;
1005 		} else {
1006 			_cifsFileInfo_put(cfile, true, false);
1007 		}
1008 	} else {
1009 		/* hard link on the deferred close file */
1010 		rc = cifs_get_hardlink_path(tcon, inode, file);
1011 		if (rc)
1012 			cifs_close_deferred_file(CIFS_I(inode));
1013 	}
1014 
1015 	if (server->oplocks)
1016 		oplock = REQ_OPLOCK;
1017 	else
1018 		oplock = 0;
1019 
1020 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1021 	if (!tcon->broken_posix_open && tcon->unix_ext &&
1022 	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1023 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1024 		/* can not refresh inode info since size could be stale */
1025 		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
1026 				cifs_sb->ctx->file_mode /* ignored */,
1027 				file->f_flags, &oplock, &fid.netfid, xid);
1028 		if (rc == 0) {
1029 			cifs_dbg(FYI, "posix open succeeded\n");
1030 			posix_open_ok = true;
1031 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1032 			if (tcon->ses->serverNOS)
1033 				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
1034 					 tcon->ses->ip_addr,
1035 					 tcon->ses->serverNOS);
1036 			tcon->broken_posix_open = true;
1037 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
1038 			 (rc != -EOPNOTSUPP)) /* path not found or net err */
1039 			goto out;
1040 		/*
1041 		 * Else fall through to retry the open the old way on network
1042 		 * I/O or DFS errors.
1043 		 */
1044 	}
1045 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1046 
1047 	if (server->ops->get_lease_key)
1048 		server->ops->get_lease_key(inode, &fid);
1049 
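	/* Record the pending open so that a racing lease break is not missed. */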
1050 	cifs_add_pending_open(&fid, tlink, &open);
1051 
1052 	if (!posix_open_ok) {
1053 		if (server->ops->get_lease_key)
1054 			server->ops->get_lease_key(inode, &fid);
1055 
1056 		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1057 				  xid, &data);
1058 		if (rc) {
1059 			cifs_del_pending_open(&open);
1060 			goto out;
1061 		}
1062 	}
1063 
1064 	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1065 	if (cfile == NULL) {
1066 		if (server->ops->close)
1067 			server->ops->close(xid, tcon, &fid);
1068 		cifs_del_pending_open(&open);
1069 		rc = -ENOMEM;
1070 		goto out;
1071 	}
1072 
1073 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1074 	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1075 		/*
1076 		 * Time to set mode which we can not set earlier due to
1077 		 * problems creating new read-only files.
1078 		 */
1079 		struct cifs_unix_set_info_args args = {
1080 			.mode	= inode->i_mode,
1081 			.uid	= INVALID_UID, /* no change */
1082 			.gid	= INVALID_GID, /* no change */
1083 			.ctime	= NO_CHANGE_64,
1084 			.atime	= NO_CHANGE_64,
1085 			.mtime	= NO_CHANGE_64,
1086 			.device	= 0,
1087 		};
1088 		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1089 				       cfile->pid);
1090 	}
1091 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1092 
1093 use_cache:
1094 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1095 			   file->f_mode & FMODE_WRITE);
1096 	if (!(file->f_flags & O_DIRECT))
1097 		goto out;
1098 	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1099 		goto out;
1100 	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1101 
1102 out:
1103 	free_dentry_path(page);
1104 	free_xid(xid);
1105 	cifs_put_tlink(tlink);
1106 	cifs_free_open_info(&data);
1107 	return rc;
1108 }
1109 
1110 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1111 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1112 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1113 
1114 /*
1115  * Try to reacquire byte range locks that were released when session
1116  * to server was lost.
1117  */
1118 static int
1119 cifs_relock_file(struct cifsFileInfo *cfile)
1120 {
1121 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1122 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1123 	int rc = 0;
1124 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1125 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1126 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1127 
1128 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1129 	if (cinode->can_cache_brlcks) {
1130 		/* can cache locks - no need to relock */
1131 		up_read(&cinode->lock_sem);
1132 		return rc;
1133 	}
1134 
1135 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1136 	if (cap_unix(tcon->ses) &&
1137 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1138 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1139 		rc = cifs_push_posix_locks(cfile);
1140 	else
1141 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1142 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1143 
1144 	up_read(&cinode->lock_sem);
1145 	return rc;
1146 }
1147 
1148 static int
1149 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1150 {
1151 	int rc = -EACCES;
1152 	unsigned int xid;
1153 	__u32 oplock;
1154 	struct cifs_sb_info *cifs_sb;
1155 	struct cifs_tcon *tcon;
1156 	struct TCP_Server_Info *server;
1157 	struct cifsInodeInfo *cinode;
1158 	struct inode *inode;
1159 	void *page;
1160 	const char *full_path;
1161 	int desired_access;
1162 	int disposition = FILE_OPEN;
1163 	int create_options = CREATE_NOT_DIR;
1164 	struct cifs_open_parms oparms;
1165 	int rdwr_for_fscache = 0;
1166 
1167 	xid = get_xid();
1168 	mutex_lock(&cfile->fh_mutex);
1169 	if (!cfile->invalidHandle) {
1170 		mutex_unlock(&cfile->fh_mutex);
1171 		free_xid(xid);
1172 		return 0;
1173 	}
1174 
1175 	inode = d_inode(cfile->dentry);
1176 	cifs_sb = CIFS_SB(inode->i_sb);
1177 	tcon = tlink_tcon(cfile->tlink);
1178 	server = tcon->ses->server;
1179 
1180 	/*
1181 	 * Cannot grab the rename sem here because various ops, including those
1182 	 * that already hold the rename sem, can end up causing writepage to get
1183 	 * called; if the server was down, that means we end up here, and we can
1184 	 * never tell whether the caller already holds the rename_sem.
1185 	 */
1186 	page = alloc_dentry_path();
1187 	full_path = build_path_from_dentry(cfile->dentry, page);
1188 	if (IS_ERR(full_path)) {
1189 		mutex_unlock(&cfile->fh_mutex);
1190 		free_dentry_path(page);
1191 		free_xid(xid);
1192 		return PTR_ERR(full_path);
1193 	}
1194 
1195 	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1196 		 inode, cfile->f_flags, full_path);
1197 
1198 	if (tcon->ses->server->oplocks)
1199 		oplock = REQ_OPLOCK;
1200 	else
1201 		oplock = 0;
1202 
1203 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1204 	if (tcon->unix_ext && cap_unix(tcon->ses) &&
1205 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1206 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1207 		/*
1208 		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1209 		 * original open. Must mask them off for a reopen.
1210 		 */
1211 		unsigned int oflags = cfile->f_flags &
1212 						~(O_CREAT | O_EXCL | O_TRUNC);
1213 
1214 		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
1215 				     cifs_sb->ctx->file_mode /* ignored */,
1216 				     oflags, &oplock, &cfile->fid.netfid, xid);
1217 		if (rc == 0) {
1218 			cifs_dbg(FYI, "posix reopen succeeded\n");
1219 			oparms.reconnect = true;
1220 			goto reopen_success;
1221 		}
1222 		/*
1223 		 * Fall through to retry the open the old way on errors;
1224 		 * especially in the reconnect path it is important to retry hard.
1225 		 */
1226 	}
1227 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1228 
1229 	/* If we're caching, we need to be able to fill in around partial writes. */
1230 	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1231 		rdwr_for_fscache = 1;
1232 
1233 	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
1234 
1235 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
1236 	if (cfile->f_flags & O_SYNC)
1237 		create_options |= CREATE_WRITE_THROUGH;
1238 
1239 	if (cfile->f_flags & O_DIRECT)
1240 		create_options |= CREATE_NO_BUFFER;
1241 
1242 	if (server->ops->get_lease_key)
1243 		server->ops->get_lease_key(inode, &cfile->fid);
1244 
1245 retry_open:
1246 	oparms = (struct cifs_open_parms) {
1247 		.tcon = tcon,
1248 		.cifs_sb = cifs_sb,
1249 		.desired_access = desired_access,
1250 		.create_options = cifs_create_options(cifs_sb, create_options),
1251 		.disposition = disposition,
1252 		.path = full_path,
1253 		.fid = &cfile->fid,
1254 		.reconnect = true,
1255 	};
1256 
1257 	/*
1258 	 * We cannot refresh the inode by passing in a file_info buf returned by
1259 	 * ops->open and then calling get_inode_info with that buf, since the
1260 	 * file might have write-behind data that needs to be flushed and the
1261 	 * server's version of the file size can be stale. If we knew for sure
1262 	 * that the inode was not dirty locally, we could do this.
1263 	 */
1264 	rc = server->ops->open(xid, &oparms, &oplock, NULL);
1265 	if (rc == -ENOENT && oparms.reconnect == false) {
1266 		/* durable handle timeout is expired - open the file again */
1267 		rc = server->ops->open(xid, &oparms, &oplock, NULL);
1268 		/* indicate that we need to relock the file */
1269 		oparms.reconnect = true;
1270 	}
1271 	if (rc == -EACCES && rdwr_for_fscache == 1) {
1272 		desired_access = cifs_convert_flags(cfile->f_flags, 0);
1273 		rdwr_for_fscache = 2;
1274 		goto retry_open;
1275 	}
1276 
1277 	if (rc) {
1278 		mutex_unlock(&cfile->fh_mutex);
1279 		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1280 		cifs_dbg(FYI, "oplock: %d\n", oplock);
1281 		goto reopen_error_exit;
1282 	}
1283 
1284 	if (rdwr_for_fscache == 2)
1285 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1286 
1287 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1288 reopen_success:
1289 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1290 	cfile->invalidHandle = false;
1291 	mutex_unlock(&cfile->fh_mutex);
1292 	cinode = CIFS_I(inode);
1293 
1294 	if (can_flush) {
1295 		rc = filemap_write_and_wait(inode->i_mapping);
1296 		if (!is_interrupt_error(rc))
1297 			mapping_set_error(inode->i_mapping, rc);
1298 
1299 		if (tcon->posix_extensions) {
1300 			rc = smb311_posix_get_inode_info(&inode, full_path,
1301 							 NULL, inode->i_sb, xid);
1302 		} else if (tcon->unix_ext) {
1303 			rc = cifs_get_inode_info_unix(&inode, full_path,
1304 						      inode->i_sb, xid);
1305 		} else {
1306 			rc = cifs_get_inode_info(&inode, full_path, NULL,
1307 						 inode->i_sb, xid, NULL);
1308 		}
1309 	}
1310 	/*
1311 	 * Else we are already writing out data to the server and could deadlock
1312 	 * if we tried to flush it; and since we do not know whether we have data
1313 	 * that would invalidate the current end of file on the server, we cannot
1314 	 * go to the server to get the new inode info.
1315 	 */
1316 
1317 	/*
1318 	 * If the server returned a read oplock and we have mandatory brlocks,
1319 	 * set oplock level to None.
1320 	 */
1321 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1322 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1323 		oplock = 0;
1324 	}
1325 
1326 	server->ops->set_fid(cfile, &cfile->fid, oplock);
1327 	if (oparms.reconnect)
1328 		cifs_relock_file(cfile);
1329 
1330 reopen_error_exit:
1331 	free_dentry_path(page);
1332 	free_xid(xid);
1333 	return rc;
1334 }
1335 
1336 void smb2_deferred_work_close(struct work_struct *work)
1337 {
1338 	struct cifsFileInfo *cfile = container_of(work,
1339 			struct cifsFileInfo, deferred.work);
1340 
1341 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1342 	cifs_del_deferred_close(cfile);
1343 	cfile->deferred_close_scheduled = false;
1344 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1345 	_cifsFileInfo_put(cfile, true, false);
1346 }
1347 
1348 static bool
1349 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1350 {
1351 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1352 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1353 
1354 	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1355 			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
1356 			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
1357 			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
1358 
1359 }
1360 
1361 int cifs_close(struct inode *inode, struct file *file)
1362 {
1363 	struct cifsFileInfo *cfile;
1364 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1365 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1366 	struct cifs_deferred_close *dclose;
1367 
1368 	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1369 
1370 	if (file->private_data != NULL) {
1371 		cfile = file->private_data;
1372 		file->private_data = NULL;
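		/* Allocated up front; freed below if the close is not deferred. */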
1373 		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
1374 		if ((cfile->status_file_deleted == false) &&
1375 		    (smb2_can_defer_close(inode, dclose))) {
1376 			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
1377 				inode_set_mtime_to_ts(inode,
1378 						      inode_set_ctime_current(inode));
1379 			}
1380 			spin_lock(&cinode->deferred_lock);
1381 			cifs_add_deferred_close(cfile, dclose);
1382 			if (cfile->deferred_close_scheduled &&
1383 			    delayed_work_pending(&cfile->deferred)) {
1384 				/*
1385 				 * If there is no pending work, mod_delayed_work queues new work.
1386 				 * So, increase the ref count to avoid a use-after-free.
1387 				 */
1388 				if (!mod_delayed_work(deferredclose_wq,
1389 						&cfile->deferred, cifs_sb->ctx->closetimeo))
1390 					cifsFileInfo_get(cfile);
1391 			} else {
1392 				/* Deferred close for files */
1393 				queue_delayed_work(deferredclose_wq,
1394 						&cfile->deferred, cifs_sb->ctx->closetimeo);
1395 				cfile->deferred_close_scheduled = true;
1396 				spin_unlock(&cinode->deferred_lock);
1397 				return 0;
1398 			}
1399 			spin_unlock(&cinode->deferred_lock);
1400 			_cifsFileInfo_put(cfile, true, false);
1401 		} else {
1402 			_cifsFileInfo_put(cfile, true, false);
1403 			kfree(dclose);
1404 		}
1405 	}
1406 
1407 	/* return code from the ->release op is always ignored */
1408 	return 0;
1409 }
1410 
1411 void
1412 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1413 {
1414 	struct cifsFileInfo *open_file, *tmp;
1415 	LIST_HEAD(tmp_list);
1416 
1417 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1418 		return;
1419 
1420 	tcon->need_reopen_files = false;
1421 
1422 	cifs_dbg(FYI, "Reopen persistent handles\n");
1423 
1424 	/* list all files open on the tree connection, reopen persistent handles */
1425 	spin_lock(&tcon->open_file_lock);
1426 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1427 		if (!open_file->invalidHandle)
1428 			continue;
1429 		cifsFileInfo_get(open_file);
1430 		list_add_tail(&open_file->rlist, &tmp_list);
1431 	}
1432 	spin_unlock(&tcon->open_file_lock);
1433 
1434 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1435 		if (cifs_reopen_file(open_file, false /* do not flush */))
1436 			tcon->need_reopen_files = true;
1437 		list_del_init(&open_file->rlist);
1438 		cifsFileInfo_put(open_file);
1439 	}
1440 }
1441 
1442 int cifs_closedir(struct inode *inode, struct file *file)
1443 {
1444 	int rc = 0;
1445 	unsigned int xid;
1446 	struct cifsFileInfo *cfile = file->private_data;
1447 	struct cifs_tcon *tcon;
1448 	struct TCP_Server_Info *server;
1449 	char *buf;
1450 
1451 	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1452 
1453 	if (cfile == NULL)
1454 		return rc;
1455 
1456 	xid = get_xid();
1457 	tcon = tlink_tcon(cfile->tlink);
1458 	server = tcon->ses->server;
1459 
1460 	cifs_dbg(FYI, "Freeing private data in close dir\n");
1461 	spin_lock(&cfile->file_info_lock);
1462 	if (server->ops->dir_needs_close(cfile)) {
1463 		cfile->invalidHandle = true;
1464 		spin_unlock(&cfile->file_info_lock);
1465 		if (server->ops->close_dir)
1466 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1467 		else
1468 			rc = -ENOSYS;
1469 		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
1470 		/* not much we can do if it fails anyway, ignore rc */
1471 		rc = 0;
1472 	} else
1473 		spin_unlock(&cfile->file_info_lock);
1474 
1475 	buf = cfile->srch_inf.ntwrk_buf_start;
1476 	if (buf) {
1477 		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
1478 		cfile->srch_inf.ntwrk_buf_start = NULL;
1479 		if (cfile->srch_inf.smallBuf)
1480 			cifs_small_buf_release(buf);
1481 		else
1482 			cifs_buf_release(buf);
1483 	}
1484 
1485 	cifs_put_tlink(cfile->tlink);
1486 	kfree(file->private_data);
1487 	file->private_data = NULL;
1488 	/* BB can we lock the filestruct while this is going on? */
1489 	free_xid(xid);
1490 	return rc;
1491 }
1492 
1493 static struct cifsLockInfo *
1494 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1495 {
1496 	struct cifsLockInfo *lock =
1497 		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
1498 	if (!lock)
1499 		return lock;
1500 	lock->offset = offset;
1501 	lock->length = length;
1502 	lock->type = type;
1503 	lock->pid = current->tgid;
1504 	lock->flags = flags;
1505 	INIT_LIST_HEAD(&lock->blist);
1506 	init_waitqueue_head(&lock->block_q);
1507 	return lock;
1508 }
1509 
1510 void
1511 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1512 {
1513 	struct cifsLockInfo *li, *tmp;
1514 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1515 		list_del_init(&li->blist);
1516 		wake_up(&li->block_q);
1517 	}
1518 }
1519 
1520 #define CIFS_LOCK_OP	0
1521 #define CIFS_READ_OP	1
1522 #define CIFS_WRITE_OP	2
1523 
1524 /* @rw_check : 0 - no op, 1 - read, 2 - write */
1525 static bool
1526 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
1527 			    __u64 length, __u8 type, __u16 flags,
1528 			    struct cifsFileInfo *cfile,
1529 			    struct cifsLockInfo **conf_lock, int rw_check)
1530 {
1531 	struct cifsLockInfo *li;
1532 	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
1533 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1534 
1535 	list_for_each_entry(li, &fdlocks->locks, llist) {
1536 		if (offset + length <= li->offset ||
1537 		    offset >= li->offset + li->length)
1538 			continue;
1539 		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1540 		    server->ops->compare_fids(cfile, cur_cfile)) {
1541 			/* shared lock prevents write op through the same fid */
1542 			if (!(li->type & server->vals->shared_lock_type) ||
1543 			    rw_check != CIFS_WRITE_OP)
1544 				continue;
1545 		}
1546 		if ((type & server->vals->shared_lock_type) &&
1547 		    ((server->ops->compare_fids(cfile, cur_cfile) &&
1548 		     current->tgid == li->pid) || type == li->type))
1549 			continue;
1550 		if (rw_check == CIFS_LOCK_OP &&
1551 		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1552 		    server->ops->compare_fids(cfile, cur_cfile))
1553 			continue;
1554 		if (conf_lock)
1555 			*conf_lock = li;
1556 		return true;
1557 	}
1558 	return false;
1559 }
1560 
1561 bool
1562 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1563 			__u8 type, __u16 flags,
1564 			struct cifsLockInfo **conf_lock, int rw_check)
1565 {
1566 	bool rc = false;
1567 	struct cifs_fid_locks *cur;
1568 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1569 
1570 	list_for_each_entry(cur, &cinode->llist, llist) {
1571 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1572 						 flags, cfile, conf_lock,
1573 						 rw_check);
1574 		if (rc)
1575 			break;
1576 	}
1577 
1578 	return rc;
1579 }
1580 
1581 /*
1582  * Check if there is another lock that prevents us from setting the lock
1583  * (mandatory style). If such a lock exists, update the flock structure with
1584  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1585  * brlocks or leave it the same if we can't. Returns 0 if we don't need to
1586  * request the lock from the server or 1 otherwise.
1587  */
1588 static int
1589 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1590 	       __u8 type, struct file_lock *flock)
1591 {
1592 	int rc = 0;
1593 	struct cifsLockInfo *conf_lock;
1594 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1595 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1596 	bool exist;
1597 
1598 	down_read(&cinode->lock_sem);
1599 
1600 	exist = cifs_find_lock_conflict(cfile, offset, length, type,
1601 					flock->c.flc_flags, &conf_lock,
1602 					CIFS_LOCK_OP);
1603 	if (exist) {
1604 		flock->fl_start = conf_lock->offset;
1605 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1606 		flock->c.flc_pid = conf_lock->pid;
1607 		if (conf_lock->type & server->vals->shared_lock_type)
1608 			flock->c.flc_type = F_RDLCK;
1609 		else
1610 			flock->c.flc_type = F_WRLCK;
1611 	} else if (!cinode->can_cache_brlcks)
1612 		rc = 1;
1613 	else
1614 		flock->c.flc_type = F_UNLCK;
1615 
1616 	up_read(&cinode->lock_sem);
1617 	return rc;
1618 }
1619 
1620 static void
1621 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1622 {
1623 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1624 	cifs_down_write(&cinode->lock_sem);
1625 	list_add_tail(&lock->llist, &cfile->llist->locks);
1626 	up_write(&cinode->lock_sem);
1627 }
1628 
1629 /*
1630  * Set the byte-range lock (mandatory style). Returns:
1631  * 1) 0, if we set the lock and don't need to request it from the server;
1632  * 2) 1, if no locks prevent us but we need to request it from the server;
1633  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1634  */
1635 static int
1636 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1637 		 bool wait)
1638 {
1639 	struct cifsLockInfo *conf_lock;
1640 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1641 	bool exist;
1642 	int rc = 0;
1643 
1644 try_again:
1645 	exist = false;
1646 	cifs_down_write(&cinode->lock_sem);
1647 
1648 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1649 					lock->type, lock->flags, &conf_lock,
1650 					CIFS_LOCK_OP);
1651 	if (!exist && cinode->can_cache_brlcks) {
1652 		list_add_tail(&lock->llist, &cfile->llist->locks);
1653 		up_write(&cinode->lock_sem);
1654 		return rc;
1655 	}
1656 
1657 	if (!exist)
1658 		rc = 1;
1659 	else if (!wait)
1660 		rc = -EACCES;
1661 	else {
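		/*
		 * Queue ourselves on the conflicting lock's block list and
		 * sleep until cifs_del_lock_waiters() unlinks us, then retry.
		 */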
1662 		list_add_tail(&lock->blist, &conf_lock->blist);
1663 		up_write(&cinode->lock_sem);
1664 		rc = wait_event_interruptible(lock->block_q,
1665 					(lock->blist.prev == &lock->blist) &&
1666 					(lock->blist.next == &lock->blist));
1667 		if (!rc)
1668 			goto try_again;
1669 		cifs_down_write(&cinode->lock_sem);
1670 		list_del_init(&lock->blist);
1671 	}
1672 
1673 	up_write(&cinode->lock_sem);
1674 	return rc;
1675 }
1676 
1677 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1678 /*
1679  * Check if there is another lock that prevents us from setting the lock
1680  * (posix style). If such a lock exists, update the flock structure with
1681  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1682  * brlocks or leave it the same if we can't. Returns 0 if we don't need to
1683  * request the lock from the server or 1 otherwise.
1684  */
1685 static int
1686 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1687 {
1688 	int rc = 0;
1689 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1690 	unsigned char saved_type = flock->c.flc_type;
1691 
1692 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1693 		return 1;
1694 
1695 	down_read(&cinode->lock_sem);
1696 	posix_test_lock(file, flock);
1697 
1698 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1699 		flock->c.flc_type = saved_type;
1700 		rc = 1;
1701 	}
1702 
1703 	up_read(&cinode->lock_sem);
1704 	return rc;
1705 }
1706 
1707 /*
1708  * Set the byte-range lock (posix style). Returns:
1709  * 1) <0, if the error occurs while setting the lock;
1710  * 2) 0, if we set the lock and don't need to request to the server;
1711  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1712  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1713  */
1714 static int
1715 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1716 {
1717 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1718 	int rc = FILE_LOCK_DEFERRED + 1;
1719 
1720 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1721 		return rc;
1722 
1723 	cifs_down_write(&cinode->lock_sem);
1724 	if (!cinode->can_cache_brlcks) {
1725 		up_write(&cinode->lock_sem);
1726 		return rc;
1727 	}
1728 
1729 	rc = posix_lock_file(file, flock, NULL);
1730 	up_write(&cinode->lock_sem);
1731 	return rc;
1732 }
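
/*
 * Example (illustrative sketch): a caller only needs to contact the server
 * when cifs_posix_lock_set() returns FILE_LOCK_DEFERRED + 1; anything less
 * than or equal to FILE_LOCK_DEFERRED (an error, success, or a deferred
 * file_lock) is already final. This is exactly the test cifs_setlk()
 * performs below:
 *
 *	rc = cifs_posix_lock_set(file, flock);
 *	if (rc <= FILE_LOCK_DEFERRED)
 *		return rc;
 *	// otherwise fall through and send CIFSSMBPosixLock()
 */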
1733 
1734 int
1735 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1736 {
1737 	unsigned int xid;
1738 	int rc = 0, stored_rc;
1739 	struct cifsLockInfo *li, *tmp;
1740 	struct cifs_tcon *tcon;
1741 	unsigned int num, max_num, max_buf;
1742 	LOCKING_ANDX_RANGE *buf, *cur;
1743 	static const int types[] = {
1744 		LOCKING_ANDX_LARGE_FILES,
1745 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1746 	};
1747 	int i;
1748 
1749 	xid = get_xid();
1750 	tcon = tlink_tcon(cfile->tlink);
1751 
1752 	/*
1753 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1754 	 * and check it before using.
1755 	 */
1756 	max_buf = tcon->ses->server->maxBuf;
1757 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1758 		free_xid(xid);
1759 		return -EINVAL;
1760 	}
1761 
1762 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1763 		     PAGE_SIZE);
1764 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1765 			PAGE_SIZE);
1766 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1767 						sizeof(LOCKING_ANDX_RANGE);
1768 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1769 	if (!buf) {
1770 		free_xid(xid);
1771 		return -ENOMEM;
1772 	}
1773 
1774 	for (i = 0; i < 2; i++) {
1775 		cur = buf;
1776 		num = 0;
1777 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1778 			if (li->type != types[i])
1779 				continue;
1780 			cur->Pid = cpu_to_le16(li->pid);
1781 			cur->LengthLow = cpu_to_le32((u32)li->length);
1782 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1783 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1784 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1785 			if (++num == max_num) {
1786 				stored_rc = cifs_lockv(xid, tcon,
1787 						       cfile->fid.netfid,
1788 						       (__u8)li->type, 0, num,
1789 						       buf);
1790 				if (stored_rc)
1791 					rc = stored_rc;
1792 				cur = buf;
1793 				num = 0;
1794 			} else
1795 				cur++;
1796 		}
1797 
1798 		if (num) {
1799 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1800 					       (__u8)types[i], 0, num, buf);
1801 			if (stored_rc)
1802 				rc = stored_rc;
1803 		}
1804 	}
1805 
1806 	kfree(buf);
1807 	free_xid(xid);
1808 	return rc;
1809 }
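
/*
 * Worked sizing example (illustrative, assuming a hypothetical negotiated
 * maxBuf of 16384 bytes): the number of LOCKING_ANDX_RANGE entries packed
 * into a single SMB_COM_LOCKING_ANDX request is bounded both by the
 * negotiated buffer and by the PAGE_SIZE-limited allocation:
 *
 *	max_buf = min(16384 - sizeof(struct smb_hdr), PAGE_SIZE);
 *	max_num = (max_buf - sizeof(struct smb_hdr)) /
 *					sizeof(LOCKING_ANDX_RANGE);
 *
 * Each of the two passes above (exclusive, then shared) flushes the buffer
 * with cifs_lockv() whenever it fills and once more for any remainder.
 */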
1810 
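/*
 * POSIX byte-range lock owners are kernel pointers, but the legacy POSIX
 * lock request only carries a 32-bit pid field on the wire. Hash the owner
 * pointer and mix in cifs_lock_secret (assumed to be a random value chosen
 * at module init) so that a stable 32-bit identity is sent without leaking
 * a kernel address.
 */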
1811 static __u32
1812 hash_lockowner(fl_owner_t owner)
1813 {
1814 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1815 }
1816 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1817 
1818 struct lock_to_push {
1819 	struct list_head llist;
1820 	__u64 offset;
1821 	__u64 length;
1822 	__u32 pid;
1823 	__u16 netfid;
1824 	__u8 type;
1825 };
1826 
1827 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1828 static int
1829 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1830 {
1831 	struct inode *inode = d_inode(cfile->dentry);
1832 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1833 	struct file_lock *flock;
1834 	struct file_lock_context *flctx = locks_inode_context(inode);
1835 	unsigned int count = 0, i;
1836 	int rc = 0, xid, type;
1837 	struct list_head locks_to_send, *el;
1838 	struct lock_to_push *lck, *tmp;
1839 	__u64 length;
1840 
1841 	xid = get_xid();
1842 
1843 	if (!flctx)
1844 		goto out;
1845 
1846 	spin_lock(&flctx->flc_lock);
1847 	list_for_each(el, &flctx->flc_posix) {
1848 		count++;
1849 	}
1850 	spin_unlock(&flctx->flc_lock);
1851 
1852 	INIT_LIST_HEAD(&locks_to_send);
1853 
1854 	/*
1855 	 * Allocating count locks is enough because no FL_POSIX locks can be
1856 	 * added to the list while we are holding cinode->lock_sem that
1857 	 * protects locking operations of this inode.
1858 	 */
1859 	for (i = 0; i < count; i++) {
1860 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1861 		if (!lck) {
1862 			rc = -ENOMEM;
1863 			goto err_out;
1864 		}
1865 		list_add_tail(&lck->llist, &locks_to_send);
1866 	}
1867 
1868 	el = locks_to_send.next;
1869 	spin_lock(&flctx->flc_lock);
1870 	for_each_file_lock(flock, &flctx->flc_posix) {
1871 		unsigned char ftype = flock->c.flc_type;
1872 
1873 		if (el == &locks_to_send) {
1874 			/*
1875 			 * The list ended. We don't have enough allocated
1876 			 * structures - something is really wrong.
1877 			 */
1878 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1879 			break;
1880 		}
1881 		length = cifs_flock_len(flock);
1882 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1883 			type = CIFS_RDLCK;
1884 		else
1885 			type = CIFS_WRLCK;
1886 		lck = list_entry(el, struct lock_to_push, llist);
1887 		lck->pid = hash_lockowner(flock->c.flc_owner);
1888 		lck->netfid = cfile->fid.netfid;
1889 		lck->length = length;
1890 		lck->type = type;
1891 		lck->offset = flock->fl_start;
1892 	}
1893 	spin_unlock(&flctx->flc_lock);
1894 
1895 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1896 		int stored_rc;
1897 
1898 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1899 					     lck->offset, lck->length, NULL,
1900 					     lck->type, 0);
1901 		if (stored_rc)
1902 			rc = stored_rc;
1903 		list_del(&lck->llist);
1904 		kfree(lck);
1905 	}
1906 
1907 out:
1908 	free_xid(xid);
1909 	return rc;
1910 err_out:
1911 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1912 		list_del(&lck->llist);
1913 		kfree(lck);
1914 	}
1915 	goto out;
1916 }
1917 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1918 
1919 static int
1920 cifs_push_locks(struct cifsFileInfo *cfile)
1921 {
1922 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1923 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1924 	int rc = 0;
1925 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1926 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1927 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1928 
1929 	/* we are going to update can_cache_brlcks here - need write access */
1930 	cifs_down_write(&cinode->lock_sem);
1931 	if (!cinode->can_cache_brlcks) {
1932 		up_write(&cinode->lock_sem);
1933 		return rc;
1934 	}
1935 
1936 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1937 	if (cap_unix(tcon->ses) &&
1938 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1939 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1940 		rc = cifs_push_posix_locks(cfile);
1941 	else
1942 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1943 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1944 
1945 	cinode->can_cache_brlcks = false;
1946 	up_write(&cinode->lock_sem);
1947 	return rc;
1948 }
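
/*
 * Illustrative note: cifs_push_locks() is a one-shot flush - once
 * can_cache_brlcks is cleared under lock_sem, subsequent byte-range locks
 * go straight to the server and this function degrades to a no-op.
 */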
1949 
1950 static void
1951 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1952 		bool *wait_flag, struct TCP_Server_Info *server)
1953 {
1954 	if (flock->c.flc_flags & FL_POSIX)
1955 		cifs_dbg(FYI, "Posix\n");
1956 	if (flock->c.flc_flags & FL_FLOCK)
1957 		cifs_dbg(FYI, "Flock\n");
1958 	if (flock->c.flc_flags & FL_SLEEP) {
1959 		cifs_dbg(FYI, "Blocking lock\n");
1960 		*wait_flag = true;
1961 	}
1962 	if (flock->c.flc_flags & FL_ACCESS)
1963 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1964 	if (flock->c.flc_flags & FL_LEASE)
1965 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1966 	if (flock->c.flc_flags &
1967 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1968 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1969 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1970 		         flock->c.flc_flags);
1971 
1972 	*type = server->vals->large_lock_type;
1973 	if (lock_is_write(flock)) {
1974 		cifs_dbg(FYI, "F_WRLCK\n");
1975 		*type |= server->vals->exclusive_lock_type;
1976 		*lock = 1;
1977 	} else if (lock_is_unlock(flock)) {
1978 		cifs_dbg(FYI, "F_UNLCK\n");
1979 		*type |= server->vals->unlock_lock_type;
1980 		*unlock = 1;
1981 		/* Check if unlock includes more than one lock range */
1982 	} else if (lock_is_read(flock)) {
1983 		cifs_dbg(FYI, "F_RDLCK\n");
1984 		*type |= server->vals->shared_lock_type;
1985 		*lock = 1;
1986 	} else if (flock->c.flc_type == F_EXLCK) {
1987 		cifs_dbg(FYI, "F_EXLCK\n");
1988 		*type |= server->vals->exclusive_lock_type;
1989 		*lock = 1;
1990 	} else if (flock->c.flc_type == F_SHLCK) {
1991 		cifs_dbg(FYI, "F_SHLCK\n");
1992 		*type |= server->vals->shared_lock_type;
1993 		*lock = 1;
1994 	} else
1995 		cifs_dbg(FYI, "Unknown type of lock\n");
1996 }
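
/*
 * Example (illustrative): for a blocking write lock - F_WRLCK with
 * FL_SLEEP set - cifs_read_flock() yields
 *
 *	*type      = large_lock_type | exclusive_lock_type;
 *	*lock      = 1;		// *unlock stays 0 (callers zero it)
 *	*wait_flag = true;
 *
 * which cifs_setlk() then turns into a mandatory byte-range lock request.
 */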
1997 
1998 static int
1999 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
2000 	   bool wait_flag, bool posix_lck, unsigned int xid)
2001 {
2002 	int rc = 0;
2003 	__u64 length = cifs_flock_len(flock);
2004 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2005 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2006 	struct TCP_Server_Info *server = tcon->ses->server;
2007 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2008 	__u16 netfid = cfile->fid.netfid;
2009 
2010 	if (posix_lck) {
2011 		int posix_lock_type;
2012 
2013 		rc = cifs_posix_lock_test(file, flock);
2014 		if (!rc)
2015 			return rc;
2016 
2017 		if (type & server->vals->shared_lock_type)
2018 			posix_lock_type = CIFS_RDLCK;
2019 		else
2020 			posix_lock_type = CIFS_WRLCK;
2021 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2022 				      hash_lockowner(flock->c.flc_owner),
2023 				      flock->fl_start, length, flock,
2024 				      posix_lock_type, wait_flag);
2025 		return rc;
2026 	}
2027 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2028 
2029 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2030 	if (!rc)
2031 		return rc;
2032 
2033 	/* BB we could chain these into one lock request BB */
2034 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2035 				    1, 0, false);
2036 	if (rc == 0) {
2037 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2038 					    type, 0, 1, false);
2039 		flock->c.flc_type = F_UNLCK;
2040 		if (rc != 0)
2041 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2042 				 rc);
2043 		return 0;
2044 	}
2045 
2046 	if (type & server->vals->shared_lock_type) {
2047 		flock->c.flc_type = F_WRLCK;
2048 		return 0;
2049 	}
2050 
2051 	type &= ~server->vals->exclusive_lock_type;
2052 
2053 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2054 				    type | server->vals->shared_lock_type,
2055 				    1, 0, false);
2056 	if (rc == 0) {
2057 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2058 			type | server->vals->shared_lock_type, 0, 1, false);
2059 		flock->c.flc_type = F_RDLCK;
2060 		if (rc != 0)
2061 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2062 				 rc);
2063 	} else
2064 		flock->c.flc_type = F_WRLCK;
2065 
2066 	return 0;
2067 }
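
/*
 * Example (illustrative): how the probe above surfaces as fcntl(F_GETLK)
 * semantics in userspace. Without POSIX lock support, the only way to test
 * a range is to take the mandatory lock and immediately release it:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 4096,
 *	};
 *	fcntl(fd, F_GETLK, &fl);	// reaches cifs_getlk() via cifs_lock()
 *	if (fl.l_type == F_UNLCK)
 *		;			// the range was lockable
 */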
2068 
2069 void
2070 cifs_move_llist(struct list_head *source, struct list_head *dest)
2071 {
2072 	struct list_head *li, *tmp;
2073 	list_for_each_safe(li, tmp, source)
2074 		list_move(li, dest);
2075 }
2076 
2077 int
2078 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2079 				struct file *file)
2080 {
2081 	struct cifsFileInfo *open_file = NULL;
2082 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2083 	int rc = 0;
2084 
2085 	spin_lock(&tcon->open_file_lock);
2086 	spin_lock(&cinode->open_file_lock);
2087 
2088 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2089 		if (file->f_flags == open_file->f_flags) {
2090 			rc = -EINVAL;
2091 			break;
2092 		}
2093 	}
2094 
2095 	spin_unlock(&cinode->open_file_lock);
2096 	spin_unlock(&tcon->open_file_lock);
2097 	return rc;
2098 }
2099 
2100 void
2101 cifs_free_llist(struct list_head *llist)
2102 {
2103 	struct cifsLockInfo *li, *tmp;
2104 	list_for_each_entry_safe(li, tmp, llist, llist) {
2105 		cifs_del_lock_waiters(li);
2106 		list_del(&li->llist);
2107 		kfree(li);
2108 	}
2109 }
2110 
2111 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2112 int
2113 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2114 		  unsigned int xid)
2115 {
2116 	int rc = 0, stored_rc;
2117 	static const int types[] = {
2118 		LOCKING_ANDX_LARGE_FILES,
2119 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2120 	};
2121 	unsigned int i;
2122 	unsigned int max_num, num, max_buf;
2123 	LOCKING_ANDX_RANGE *buf, *cur;
2124 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2125 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2126 	struct cifsLockInfo *li, *tmp;
2127 	__u64 length = cifs_flock_len(flock);
2128 	LIST_HEAD(tmp_llist);
2129 
2130 	/*
2131 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2132 	 * and check it before using.
2133 	 */
2134 	max_buf = tcon->ses->server->maxBuf;
2135 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2136 		return -EINVAL;
2137 
2138 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2139 		     PAGE_SIZE);
2140 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2141 			PAGE_SIZE);
2142 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2143 						sizeof(LOCKING_ANDX_RANGE);
2144 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2145 	if (!buf)
2146 		return -ENOMEM;
2147 
2148 	cifs_down_write(&cinode->lock_sem);
2149 	for (i = 0; i < 2; i++) {
2150 		cur = buf;
2151 		num = 0;
2152 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2153 			if (flock->fl_start > li->offset ||
2154 			    (flock->fl_start + length) <
2155 			    (li->offset + li->length))
2156 				continue;
2157 			if (current->tgid != li->pid)
2158 				continue;
2159 			if (types[i] != li->type)
2160 				continue;
2161 			if (cinode->can_cache_brlcks) {
2162 				/*
2163 				 * We can cache brlock requests - simply remove
2164 				 * a lock from the file's list.
2165 				 */
2166 				list_del(&li->llist);
2167 				cifs_del_lock_waiters(li);
2168 				kfree(li);
2169 				continue;
2170 			}
2171 			cur->Pid = cpu_to_le16(li->pid);
2172 			cur->LengthLow = cpu_to_le32((u32)li->length);
2173 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2174 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2175 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2176 			/*
2177 			 * We need to save a lock here to let us add it again to
2178 			 * the file's list if the unlock range request fails on
2179 			 * the server.
2180 			 */
2181 			list_move(&li->llist, &tmp_llist);
2182 			if (++num == max_num) {
2183 				stored_rc = cifs_lockv(xid, tcon,
2184 						       cfile->fid.netfid,
2185 						       li->type, num, 0, buf);
2186 				if (stored_rc) {
2187 					/*
2188 					 * We failed on the unlock range
2189 					 * request - add all locks from the tmp
2190 					 * list to the head of the file's list.
2191 					 */
2192 					cifs_move_llist(&tmp_llist,
2193 							&cfile->llist->locks);
2194 					rc = stored_rc;
2195 				} else
2196 					/*
2197 					 * The unlock range request succeeded -
2198 					 * free the tmp list.
2199 					 */
2200 					cifs_free_llist(&tmp_llist);
2201 				cur = buf;
2202 				num = 0;
2203 			} else
2204 				cur++;
2205 		}
2206 		if (num) {
2207 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2208 					       types[i], num, 0, buf);
2209 			if (stored_rc) {
2210 				cifs_move_llist(&tmp_llist,
2211 						&cfile->llist->locks);
2212 				rc = stored_rc;
2213 			} else
2214 				cifs_free_llist(&tmp_llist);
2215 		}
2216 	}
2217 
2218 	up_write(&cinode->lock_sem);
2219 	kfree(buf);
2220 	return rc;
2221 }
2222 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
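
/*
 * The tmp_llist handling in cifs_unlock_range() above is a small rollback
 * scheme: locks are moved (not freed) onto a private list while the unlock
 * request is in flight so that a server failure can splice them back.
 * Minimal sketch of the pattern:
 *
 *	list_move(&li->llist, &tmp_llist);	// tentative local removal
 *	stored_rc = cifs_lockv(...);		// unlock on the wire
 *	if (stored_rc)
 *		cifs_move_llist(&tmp_llist, &cfile->llist->locks);  // undo
 *	else
 *		cifs_free_llist(&tmp_llist);	// commit
 */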
2223 
2224 static int
2225 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2226 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2227 	   unsigned int xid)
2228 {
2229 	int rc = 0;
2230 	__u64 length = cifs_flock_len(flock);
2231 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2232 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2233 	struct TCP_Server_Info *server = tcon->ses->server;
2234 	struct inode *inode = d_inode(cfile->dentry);
2235 
2236 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2237 	if (posix_lck) {
2238 		int posix_lock_type;
2239 
2240 		rc = cifs_posix_lock_set(file, flock);
2241 		if (rc <= FILE_LOCK_DEFERRED)
2242 			return rc;
2243 
2244 		if (type & server->vals->shared_lock_type)
2245 			posix_lock_type = CIFS_RDLCK;
2246 		else
2247 			posix_lock_type = CIFS_WRLCK;
2248 
2249 		if (unlock == 1)
2250 			posix_lock_type = CIFS_UNLCK;
2251 
2252 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2253 				      hash_lockowner(flock->c.flc_owner),
2254 				      flock->fl_start, length,
2255 				      NULL, posix_lock_type, wait_flag);
2256 		goto out;
2257 	}
2258 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2259 	if (lock) {
2260 		struct cifsLockInfo *lock;
2261 
2262 		lock = cifs_lock_init(flock->fl_start, length, type,
2263 				      flock->c.flc_flags);
2264 		if (!lock)
2265 			return -ENOMEM;
2266 
2267 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2268 		if (rc < 0) {
2269 			kfree(lock);
2270 			return rc;
2271 		}
2272 		if (!rc)
2273 			goto out;
2274 
2275 		/*
2276 		 * Windows 7 server can delay breaking lease from read to None
2277 		 * if we set a byte-range lock on a file - break it explicitly
2278 		 * before sending the lock to the server to be sure the next
2279 		 * read won't conflict with non-overlapping locks due to
2280 		 * page reading.
2281 		 */
2282 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2283 					CIFS_CACHE_READ(CIFS_I(inode))) {
2284 			cifs_zap_mapping(inode);
2285 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2286 				 inode);
2287 			CIFS_I(inode)->oplock = 0;
2288 		}
2289 
2290 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2291 					    type, 1, 0, wait_flag);
2292 		if (rc) {
2293 			kfree(lock);
2294 			return rc;
2295 		}
2296 
2297 		cifs_lock_add(cfile, lock);
2298 	} else if (unlock)
2299 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2300 
2301 out:
2302 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2303 		/*
2304 		 * If this is a request to remove all locks because we
2305 		 * are closing the file, it doesn't matter if the
2306 		 * unlocking failed as both cifs.ko and the SMB server
2307 		 * remove the lock on file close
2308 		 */
2309 		if (rc) {
2310 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2311 			if (!(flock->c.flc_flags & FL_CLOSE))
2312 				return rc;
2313 		}
2314 		rc = locks_lock_file_wait(file, flock);
2315 	}
2316 	return rc;
2317 }
2318 
2319 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2320 {
2321 	int rc, xid;
2322 	int lock = 0, unlock = 0;
2323 	bool wait_flag = false;
2324 	bool posix_lck = false;
2325 	struct cifs_sb_info *cifs_sb;
2326 	struct cifs_tcon *tcon;
2327 	struct cifsFileInfo *cfile;
2328 	__u32 type;
2329 
2330 	xid = get_xid();
2331 
2332 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2333 		rc = -ENOLCK;
2334 		free_xid(xid);
2335 		return rc;
2336 	}
2337 
2338 	cfile = (struct cifsFileInfo *)file->private_data;
2339 	tcon = tlink_tcon(cfile->tlink);
2340 
2341 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2342 			tcon->ses->server);
2343 	cifs_sb = CIFS_FILE_SB(file);
2344 
2345 	if (cap_unix(tcon->ses) &&
2346 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2347 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2348 		posix_lck = true;
2349 
2350 	if (!lock && !unlock) {
2351 		/*
2352 		 * if this is neither a lock nor an unlock request then there
2353 		 * is nothing to do since we do not know what it is
2354 		 */
2355 		rc = -EOPNOTSUPP;
2356 		free_xid(xid);
2357 		return rc;
2358 	}
2359 
2360 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2361 			xid);
2362 	free_xid(xid);
2363 	return rc;
2366 }
2367 
2368 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2369 {
2370 	int rc, xid;
2371 	int lock = 0, unlock = 0;
2372 	bool wait_flag = false;
2373 	bool posix_lck = false;
2374 	struct cifs_sb_info *cifs_sb;
2375 	struct cifs_tcon *tcon;
2376 	struct cifsFileInfo *cfile;
2377 	__u32 type;
2378 
2379 	rc = -EACCES;
2380 	xid = get_xid();
2381 
2382 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2383 		 flock->c.flc_flags, flock->c.flc_type,
2384 		 (long long)flock->fl_start,
2385 		 (long long)flock->fl_end);
2386 
2387 	cfile = (struct cifsFileInfo *)file->private_data;
2388 	tcon = tlink_tcon(cfile->tlink);
2389 
2390 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2391 			tcon->ses->server);
2392 	cifs_sb = CIFS_FILE_SB(file);
2393 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2394 
2395 	if (cap_unix(tcon->ses) &&
2396 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2397 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2398 		posix_lck = true;
2399 	/*
2400 	 * BB add code here to normalize offset and length to account for
2401 	 * negative length which we can not accept over the wire.
2402 	 */
2403 	if (IS_GETLK(cmd)) {
2404 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2405 		free_xid(xid);
2406 		return rc;
2407 	}
2408 
2409 	if (!lock && !unlock) {
2410 		/*
2411 		 * if this is neither a lock nor an unlock request then there
2412 		 * is nothing to do since we do not know what it is
2413 		 */
2414 		free_xid(xid);
2415 		return -EOPNOTSUPP;
2416 	}
2417 
2418 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2419 			xid);
2420 	free_xid(xid);
2421 	return rc;
2422 }
2423 
2424 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2425 				      bool was_async)
2426 {
2427 	struct netfs_io_request *wreq = wdata->rreq;
2428 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2429 	loff_t wrend;
2430 
2431 	if (result > 0) {
2432 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2433 
2434 		if (wrend > ictx->zero_point &&
2435 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2436 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2437 			ictx->zero_point = wrend;
2438 		if (wrend > ictx->remote_i_size)
2439 			netfs_resize_file(ictx, wrend, true);
2440 	}
2441 
2442 	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2443 }
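
/*
 * Note (illustrative): wrend above is the file position just past the last
 * byte the server acknowledged. Advancing zero_point (assumed to be the
 * netfs watermark beyond which no data is expected on the server) for
 * unbuffered and direct writes keeps later reads from short-circuiting to
 * zeroes, and netfs_resize_file() records the larger remote size.
 */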
2444 
2445 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2446 					bool fsuid_only)
2447 {
2448 	struct cifsFileInfo *open_file = NULL;
2449 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2450 
2451 	/* only filter by fsuid on multiuser mounts */
2452 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2453 		fsuid_only = false;
2454 
2455 	spin_lock(&cifs_inode->open_file_lock);
2456 	/* we could simply get the first_list_entry since write-only entries
2457 	   are always at the end of the list but since the first entry might
2458 	   have a close pending, we go through the whole list */
2459 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2460 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2461 			continue;
2462 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2463 			if (!open_file->invalidHandle) {
2464 				/* found a good file */
2465 				/* lock it so it will not be closed on us */
2466 				cifsFileInfo_get(open_file);
2467 				spin_unlock(&cifs_inode->open_file_lock);
2468 				return open_file;
2469 			} /* else might as well continue, and look for
2470 			     another, or simply have the caller reopen it
2471 			     again rather than trying to fix this handle */
2472 		} else /* write only file */
2473 			break; /* write only files are last so must be done */
2474 	}
2475 	spin_unlock(&cifs_inode->open_file_lock);
2476 	return NULL;
2477 }
2478 
2479 /* Return -EBADF if no handle is found and general rc otherwise */
2480 int
2481 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2482 		       struct cifsFileInfo **ret_file)
2483 {
2484 	struct cifsFileInfo *open_file, *inv_file = NULL;
2485 	struct cifs_sb_info *cifs_sb;
2486 	bool any_available = false;
2487 	int rc = -EBADF;
2488 	unsigned int refind = 0;
2489 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2490 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2491 	*ret_file = NULL;
2492 
2493 	/*
2494 	 * Having a null inode here (because mapping->host was set to zero by
2495 	 * the VFS or MM) should not happen but we had reports of an oops (due
2496 	 * to it being zero) during stress testcases so we need to check for it
2497 	 */
2498 
2499 	if (cifs_inode == NULL) {
2500 		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
2501 		dump_stack();
2502 		return rc;
2503 	}
2504 
2505 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2506 
2507 	/* only filter by fsuid on multiuser mounts */
2508 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2509 		fsuid_only = false;
2510 
2511 	spin_lock(&cifs_inode->open_file_lock);
2512 refind_writable:
2513 	if (refind > MAX_REOPEN_ATT) {
2514 		spin_unlock(&cifs_inode->open_file_lock);
2515 		return rc;
2516 	}
2517 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2518 		if (!any_available && open_file->pid != current->tgid)
2519 			continue;
2520 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2521 			continue;
2522 		if (with_delete && !(open_file->fid.access & DELETE))
2523 			continue;
2524 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2525 			if (!open_file->invalidHandle) {
2526 				/* found a good writable file */
2527 				cifsFileInfo_get(open_file);
2528 				spin_unlock(&cifs_inode->open_file_lock);
2529 				*ret_file = open_file;
2530 				return 0;
2531 			} else {
2532 				if (!inv_file)
2533 					inv_file = open_file;
2534 			}
2535 		}
2536 	}
2537 	/* couldn't find usable FH with same pid, try any available */
2538 	if (!any_available) {
2539 		any_available = true;
2540 		goto refind_writable;
2541 	}
2542 
2543 	if (inv_file) {
2544 		any_available = false;
2545 		cifsFileInfo_get(inv_file);
2546 	}
2547 
2548 	spin_unlock(&cifs_inode->open_file_lock);
2549 
2550 	if (inv_file) {
2551 		rc = cifs_reopen_file(inv_file, false);
2552 		if (!rc) {
2553 			*ret_file = inv_file;
2554 			return 0;
2555 		}
2556 
2557 		spin_lock(&cifs_inode->open_file_lock);
2558 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2559 		spin_unlock(&cifs_inode->open_file_lock);
2560 		cifsFileInfo_put(inv_file);
2561 		++refind;
2562 		inv_file = NULL;
2563 		spin_lock(&cifs_inode->open_file_lock);
2564 		goto refind_writable;
2565 	}
2566 
2567 	return rc;
2568 }
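
/*
 * Search order (illustrative summary): a valid writable handle owned by the
 * calling thread group is preferred, then any valid writable handle, and
 * only then is an invalidated handle revived via cifs_reopen_file(); after
 * MAX_REOPEN_ATT failed revivals the search gives up with -EBADF.
 */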
2569 
2570 struct cifsFileInfo *
2571 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2572 {
2573 	struct cifsFileInfo *cfile;
2574 	int rc;
2575 
2576 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2577 	if (rc)
2578 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2579 
2580 	return cfile;
2581 }
2582 
2583 int
2584 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2585 		       int flags,
2586 		       struct cifsFileInfo **ret_file)
2587 {
2588 	struct cifsFileInfo *cfile;
2589 	void *page = alloc_dentry_path();
2590 
2591 	*ret_file = NULL;
2592 
2593 	spin_lock(&tcon->open_file_lock);
2594 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2595 		struct cifsInodeInfo *cinode;
2596 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2597 		if (IS_ERR(full_path)) {
2598 			spin_unlock(&tcon->open_file_lock);
2599 			free_dentry_path(page);
2600 			return PTR_ERR(full_path);
2601 		}
2602 		if (strcmp(full_path, name))
2603 			continue;
2604 
2605 		cinode = CIFS_I(d_inode(cfile->dentry));
2606 		spin_unlock(&tcon->open_file_lock);
2607 		free_dentry_path(page);
2608 		return cifs_get_writable_file(cinode, flags, ret_file);
2609 	}
2610 
2611 	spin_unlock(&tcon->open_file_lock);
2612 	free_dentry_path(page);
2613 	return -ENOENT;
2614 }
2615 
2616 int
2617 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2618 		       struct cifsFileInfo **ret_file)
2619 {
2620 	struct cifsFileInfo *cfile;
2621 	void *page = alloc_dentry_path();
2622 
2623 	*ret_file = NULL;
2624 
2625 	spin_lock(&tcon->open_file_lock);
2626 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2627 		struct cifsInodeInfo *cinode;
2628 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2629 		if (IS_ERR(full_path)) {
2630 			spin_unlock(&tcon->open_file_lock);
2631 			free_dentry_path(page);
2632 			return PTR_ERR(full_path);
2633 		}
2634 		if (strcmp(full_path, name))
2635 			continue;
2636 
2637 		cinode = CIFS_I(d_inode(cfile->dentry));
2638 		spin_unlock(&tcon->open_file_lock);
2639 		free_dentry_path(page);
2640 		*ret_file = find_readable_file(cinode, 0);
2641 		return *ret_file ? 0 : -ENOENT;
2642 	}
2643 
2644 	spin_unlock(&tcon->open_file_lock);
2645 	free_dentry_path(page);
2646 	return -ENOENT;
2647 }
2648 
2649 /*
2650  * Flush data on a file opened with strict caching.
2651  */
2652 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2653 		      int datasync)
2654 {
2655 	unsigned int xid;
2656 	int rc = 0;
2657 	struct cifs_tcon *tcon;
2658 	struct TCP_Server_Info *server;
2659 	struct cifsFileInfo *smbfile = file->private_data;
2660 	struct inode *inode = file_inode(file);
2661 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2662 
2663 	rc = file_write_and_wait_range(file, start, end);
2664 	if (rc) {
2665 		trace_cifs_fsync_err(inode->i_ino, rc);
2666 		return rc;
2667 	}
2668 
2669 	xid = get_xid();
2670 
2671 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2672 		 file, datasync);
2673 
2674 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2675 		rc = cifs_zap_mapping(inode);
2676 		if (rc) {
2677 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2678 			rc = 0; /* don't care about it in fsync */
2679 		}
2680 	}
2681 
2682 	tcon = tlink_tcon(smbfile->tlink);
2683 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2684 		server = tcon->ses->server;
2685 		if (server->ops->flush == NULL) {
2686 			rc = -ENOSYS;
2687 			goto strict_fsync_exit;
2688 		}
2689 
2690 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2691 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2692 			if (smbfile) {
2693 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2694 				cifsFileInfo_put(smbfile);
2695 			} else
2696 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2697 		} else
2698 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2699 	}
2700 
2701 strict_fsync_exit:
2702 	free_xid(xid);
2703 	return rc;
2704 }
2705 
2706 /*
2707  * Flush data on a non-strict file.
2708  */
2709 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2710 {
2711 	unsigned int xid;
2712 	int rc = 0;
2713 	struct cifs_tcon *tcon;
2714 	struct TCP_Server_Info *server;
2715 	struct cifsFileInfo *smbfile = file->private_data;
2716 	struct inode *inode = file_inode(file);
2717 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2718 
2719 	rc = file_write_and_wait_range(file, start, end);
2720 	if (rc) {
2721 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2722 		return rc;
2723 	}
2724 
2725 	xid = get_xid();
2726 
2727 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2728 		 file, datasync);
2729 
2730 	tcon = tlink_tcon(smbfile->tlink);
2731 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2732 		server = tcon->ses->server;
2733 		if (server->ops->flush == NULL) {
2734 			rc = -ENOSYS;
2735 			goto fsync_exit;
2736 		}
2737 
2738 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2739 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2740 			if (smbfile) {
2741 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2742 				cifsFileInfo_put(smbfile);
2743 			} else
2744 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2745 		} else
2746 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2747 	}
2748 
2749 fsync_exit:
2750 	free_xid(xid);
2751 	return rc;
2752 }
2753 
2754 /*
2755  * As the file closes, flush all cached write data for this inode, checking
2756  * for write-behind errors.
2757  */
2758 int cifs_flush(struct file *file, fl_owner_t id)
2759 {
2760 	struct inode *inode = file_inode(file);
2761 	int rc = 0;
2762 
2763 	if (file->f_mode & FMODE_WRITE)
2764 		rc = filemap_write_and_wait(inode->i_mapping);
2765 
2766 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2767 	if (rc) {
2768 		/* get more nuanced writeback errors */
2769 		rc = filemap_check_wb_err(file->f_mapping, 0);
2770 		trace_cifs_flush_err(inode->i_ino, rc);
2771 	}
2772 	return rc;
2773 }
2774 
2775 static ssize_t
2776 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2777 {
2778 	struct file *file = iocb->ki_filp;
2779 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2780 	struct inode *inode = file->f_mapping->host;
2781 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2782 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2783 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2784 	ssize_t rc;
2785 
2786 	rc = netfs_start_io_write(inode);
2787 	if (rc < 0)
2788 		return rc;
2789 
2790 	/*
2791 	 * We need to hold the sem to be sure nobody modifies the lock list
2792 	 * with a brlock that prevents writing.
2793 	 */
2794 	down_read(&cinode->lock_sem);
2795 
2796 	rc = generic_write_checks(iocb, from);
2797 	if (rc <= 0)
2798 		goto out;
2799 
2800 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2801 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2802 				     server->vals->exclusive_lock_type, 0,
2803 				     NULL, CIFS_WRITE_OP))) {
2804 		rc = -EACCES;
2805 		goto out;
2806 	}
2807 
2808 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2809 
2810 out:
2811 	up_read(&cinode->lock_sem);
2812 	netfs_end_io_write(inode);
2813 	if (rc > 0)
2814 		rc = generic_write_sync(iocb, rc);
2815 	return rc;
2816 }
2817 
2818 ssize_t
2819 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2820 {
2821 	struct inode *inode = file_inode(iocb->ki_filp);
2822 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2823 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2824 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2825 						iocb->ki_filp->private_data;
2826 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2827 	ssize_t written;
2828 
2829 	written = cifs_get_writer(cinode);
2830 	if (written)
2831 		return written;
2832 
2833 	if (CIFS_CACHE_WRITE(cinode)) {
2834 		if (cap_unix(tcon->ses) &&
2835 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2836 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2837 			written = netfs_file_write_iter(iocb, from);
2838 			goto out;
2839 		}
2840 		written = cifs_writev(iocb, from);
2841 		goto out;
2842 	}
2843 	/*
2844 	 * For non-oplocked files in strict cache mode we need to write the data
2845 	 * to the server exactly from the pos to pos+len-1 rather than flush all
2846 	 * affected pages because it may cause an error with mandatory locks on
2847 	 * these pages but not on the region from pos to pos+len-1.
2848 	 */
2849 	written = netfs_file_write_iter(iocb, from);
2850 	if (CIFS_CACHE_READ(cinode)) {
2851 		/*
2852 		 * We have read level caching and we have just sent a write
2853 		 * request to the server thus making data in the cache stale.
2854 		 * Zap the cache and set oplock/lease level to NONE to avoid
2855 		 * reading stale data from the cache. All subsequent read
2856 		 * operations will read new data from the server.
2857 		 */
2858 		cifs_zap_mapping(inode);
2859 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2860 			 inode);
2861 		cinode->oplock = 0;
2862 	}
2863 out:
2864 	cifs_put_writer(cinode);
2865 	return written;
2866 }
2867 
2868 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2869 {
2870 	ssize_t rc;
2871 	struct inode *inode = file_inode(iocb->ki_filp);
2872 
2873 	if (iocb->ki_flags & IOCB_DIRECT)
2874 		return netfs_unbuffered_read_iter(iocb, iter);
2875 
2876 	rc = cifs_revalidate_mapping(inode);
2877 	if (rc)
2878 		return rc;
2879 
2880 	return netfs_file_read_iter(iocb, iter);
2881 }
2882 
2883 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2884 {
2885 	struct inode *inode = file_inode(iocb->ki_filp);
2886 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2887 	ssize_t written;
2888 	int rc;
2889 
2890 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2891 		written = netfs_unbuffered_write_iter(iocb, from);
2892 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2893 			cifs_zap_mapping(inode);
2894 			cifs_dbg(FYI,
2895 				 "Set no oplock for inode=%p after a write operation\n",
2896 				 inode);
2897 			cinode->oplock = 0;
2898 		}
2899 		return written;
2900 	}
2901 
2902 	written = cifs_get_writer(cinode);
2903 	if (written)
2904 		return written;
2905 
2906 	written = netfs_file_write_iter(iocb, from);
2907 
2908 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2909 		rc = filemap_fdatawrite(inode->i_mapping);
2910 		if (rc)
2911 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2912 				 rc, inode);
2913 	}
2914 
2915 	cifs_put_writer(cinode);
2916 	return written;
2917 }
2918 
2919 ssize_t
2920 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2921 {
2922 	struct inode *inode = file_inode(iocb->ki_filp);
2923 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2924 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2925 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2926 						iocb->ki_filp->private_data;
2927 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2928 	int rc = -EACCES;
2929 
2930 	/*
2931 	 * In strict cache mode we need to read from the server all the time
2932 	 * if we don't have level II oplock because the server can delay mtime
2933 	 * change - so we can't make a decision about invalidating the inode.
2934 	 * And we can also fail with page reading if there are mandatory locks
2935 	 * on pages affected by this read but not on the region from pos to
2936 	 * pos+len-1.
2937 	 */
2938 	if (!CIFS_CACHE_READ(cinode))
2939 		return netfs_unbuffered_read_iter(iocb, to);
2940 
2941 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2942 		if (iocb->ki_flags & IOCB_DIRECT)
2943 			return netfs_unbuffered_read_iter(iocb, to);
2944 		return netfs_buffered_read_iter(iocb, to);
2945 	}
2946 
2947 	/*
2948 	 * We need to hold the sem to be sure nobody modifies the lock list
2949 	 * with a brlock that prevents reading.
2950 	 */
2951 	if (iocb->ki_flags & IOCB_DIRECT) {
2952 		rc = netfs_start_io_direct(inode);
2953 		if (rc < 0)
2954 			goto out;
2955 		rc = -EACCES;
2956 		down_read(&cinode->lock_sem);
2957 		if (!cifs_find_lock_conflict(
2958 			    cfile, iocb->ki_pos, iov_iter_count(to),
2959 			    tcon->ses->server->vals->shared_lock_type,
2960 			    0, NULL, CIFS_READ_OP))
2961 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2962 		up_read(&cinode->lock_sem);
2963 		netfs_end_io_direct(inode);
2964 	} else {
2965 		rc = netfs_start_io_read(inode);
2966 		if (rc < 0)
2967 			goto out;
2968 		rc = -EACCES;
2969 		down_read(&cinode->lock_sem);
2970 		if (!cifs_find_lock_conflict(
2971 			    cfile, iocb->ki_pos, iov_iter_count(to),
2972 			    tcon->ses->server->vals->shared_lock_type,
2973 			    0, NULL, CIFS_READ_OP))
2974 			rc = filemap_read(iocb, to, 0);
2975 		up_read(&cinode->lock_sem);
2976 		netfs_end_io_read(inode);
2977 	}
2978 out:
2979 	return rc;
2980 }
2981 
2982 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2983 {
2984 	return netfs_page_mkwrite(vmf, NULL);
2985 }
2986 
2987 static const struct vm_operations_struct cifs_file_vm_ops = {
2988 	.fault = filemap_fault,
2989 	.map_pages = filemap_map_pages,
2990 	.page_mkwrite = cifs_page_mkwrite,
2991 };
2992 
2993 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2994 {
2995 	int xid, rc = 0;
2996 	struct inode *inode = file_inode(file);
2997 
2998 	xid = get_xid();
2999 
3000 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3001 		rc = cifs_zap_mapping(inode);
3002 	if (!rc)
3003 		rc = generic_file_mmap(file, vma);
3004 	if (!rc)
3005 		vma->vm_ops = &cifs_file_vm_ops;
3006 
3007 	free_xid(xid);
3008 	return rc;
3009 }
3010 
3011 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3012 {
3013 	int rc, xid;
3014 
3015 	xid = get_xid();
3016 
3017 	rc = cifs_revalidate_file(file);
3018 	if (rc)
3019 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3020 			 rc);
3021 	if (!rc)
3022 		rc = generic_file_mmap(file, vma);
3023 	if (!rc)
3024 		vma->vm_ops = &cifs_file_vm_ops;
3025 
3026 	free_xid(xid);
3027 	return rc;
3028 }
3029 
3030 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3031 {
3032 	struct cifsFileInfo *open_file;
3033 
3034 	spin_lock(&cifs_inode->open_file_lock);
3035 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3036 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3037 			spin_unlock(&cifs_inode->open_file_lock);
3038 			return 1;
3039 		}
3040 	}
3041 	spin_unlock(&cifs_inode->open_file_lock);
3042 	return 0;
3043 }
3044 
3045 /* We do not want to update the file size from server for inodes
3046    open for write - to avoid races with writepage extending
3047    the file - in the future we could consider allowing
3048    refreshing the inode only on increases in the file size
3049    but this is tricky to do without racing with writebehind
3050    page caching in the current Linux kernel design */
3051 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3052 			    bool from_readdir)
3053 {
3054 	if (!cifsInode)
3055 		return true;
3056 
3057 	if (is_inode_writable(cifsInode) ||
3058 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3059 		/* This inode is open for write at least once */
3060 		struct cifs_sb_info *cifs_sb;
3061 
3062 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3063 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3064 			/* since no page cache to corrupt on directio
3065 			we can change size safely */
3066 			return true;
3067 		}
3068 
3069 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3070 			return true;
3071 
3072 		return false;
3073 	} else
3074 		return true;
3075 }
3076 
3077 void cifs_oplock_break(struct work_struct *work)
3078 {
3079 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3080 						  oplock_break);
3081 	struct inode *inode = d_inode(cfile->dentry);
3082 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3083 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3084 	struct cifs_tcon *tcon;
3085 	struct TCP_Server_Info *server;
3086 	struct tcon_link *tlink;
3087 	int rc = 0;
3088 	bool purge_cache = false, oplock_break_cancelled;
3089 	__u64 persistent_fid, volatile_fid;
3090 	__u16 net_fid;
3091 
3092 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3093 			TASK_UNINTERRUPTIBLE);
3094 
3095 	tlink = cifs_sb_tlink(cifs_sb);
3096 	if (IS_ERR(tlink))
3097 		goto out;
3098 	tcon = tlink_tcon(tlink);
3099 	server = tcon->ses->server;
3100 
3101 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3102 				      cfile->oplock_epoch, &purge_cache);
3103 
3104 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3105 						cifs_has_mand_locks(cinode)) {
3106 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3107 			 inode);
3108 		cinode->oplock = 0;
3109 	}
3110 
3111 	if (S_ISREG(inode->i_mode)) {
3112 		if (CIFS_CACHE_READ(cinode))
3113 			break_lease(inode, O_RDONLY);
3114 		else
3115 			break_lease(inode, O_WRONLY);
3116 		rc = filemap_fdatawrite(inode->i_mapping);
3117 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3118 			rc = filemap_fdatawait(inode->i_mapping);
3119 			mapping_set_error(inode->i_mapping, rc);
3120 			cifs_zap_mapping(inode);
3121 		}
3122 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3123 		if (CIFS_CACHE_WRITE(cinode))
3124 			goto oplock_break_ack;
3125 	}
3126 
3127 	rc = cifs_push_locks(cfile);
3128 	if (rc)
3129 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3130 
3131 oplock_break_ack:
3132 	/*
3133 	 * When an oplock break is received and there are no active file
3134 	 * handles, only cached ones, schedule the deferred close immediately
3135 	 * so that a new open will not use a cached handle.
3136 	 */
3137 
3138 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3139 		cifs_close_deferred_file(cinode);
3140 
3141 	persistent_fid = cfile->fid.persistent_fid;
3142 	volatile_fid = cfile->fid.volatile_fid;
3143 	net_fid = cfile->fid.netfid;
3144 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3145 
3146 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
3147 	/*
3148 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3149 	 * an acknowledgment to be sent when the file has already been closed.
3150 	 */
3151 	spin_lock(&cinode->open_file_lock);
3152 	/* check list empty since can race with kill_sb calling tree disconnect */
3153 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3154 		spin_unlock(&cinode->open_file_lock);
3155 		rc = server->ops->oplock_response(tcon, persistent_fid,
3156 						  volatile_fid, net_fid, cinode);
3157 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3158 	} else
3159 		spin_unlock(&cinode->open_file_lock);
3160 
3161 	cifs_put_tlink(tlink);
3162 out:
3163 	cifs_done_oplock_break(cinode);
3164 }
3165 
3166 static int cifs_swap_activate(struct swap_info_struct *sis,
3167 			      struct file *swap_file, sector_t *span)
3168 {
3169 	struct cifsFileInfo *cfile = swap_file->private_data;
3170 	struct inode *inode = swap_file->f_mapping->host;
3171 	unsigned long blocks;
3172 	long long isize;
3173 
3174 	cifs_dbg(FYI, "swap activate\n");
3175 
3176 	if (!swap_file->f_mapping->a_ops->swap_rw)
3177 		/* Cannot support swap */
3178 		return -EINVAL;
3179 
3180 	spin_lock(&inode->i_lock);
3181 	blocks = inode->i_blocks;
3182 	isize = inode->i_size;
3183 	spin_unlock(&inode->i_lock);
3184 	if (blocks * 512 < isize) {
3185 		pr_warn("swap activate: swapfile has holes\n");
3186 		return -EINVAL;
3187 	}
3188 	*span = sis->pages;
3189 
3190 	pr_warn_once("Swap support over SMB3 is experimental\n");
3191 
3192 	/*
3193 	 * TODO: consider adding ACL (or documenting how) to prevent other
3194 	 * users (on this or other systems) from reading it
3195 	 */
3196 
3198 	/* TODO: add sk_set_memalloc(inet) or similar */
3199 
3200 	if (cfile)
3201 		cfile->swapfile = true;
3202 	/*
3203 	 * TODO: Since file already open, we can't open with DENY_ALL here
3204 	 * but we could add call to grab a byte range lock to prevent others
3205 	 * from reading or writing the file
3206 	 */
3207 
3208 	sis->flags |= SWP_FS_OPS;
3209 	return add_swap_extent(sis, 0, sis->max, 0);
3210 }
3211 
3212 static void cifs_swap_deactivate(struct file *file)
3213 {
3214 	struct cifsFileInfo *cfile = file->private_data;
3215 
3216 	cifs_dbg(FYI, "swap deactivate\n");
3217 
3218 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3219 
3220 	if (cfile)
3221 		cfile->swapfile = false;
3222 
3223 	/* do we need to unpin (or unlock) the file */
3224 }
3225 
3226 /**
3227  * cifs_swap_rw - SMB3 address space operation for swap I/O
3228  * @iocb: target I/O control block
3229  * @iter: I/O buffer
3230  *
3231  * Perform IO to the swap-file.  This is much like direct IO.
3232  */
3233 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3234 {
3235 	ssize_t ret;
3236 
3237 	if (iov_iter_rw(iter) == READ)
3238 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3239 	else
3240 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3241 	if (ret < 0)
3242 		return ret;
3243 	return 0;
3244 }
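
/*
 * Note (illustrative): unlike a normal read_iter/write_iter, ->swap_rw is
 * expected to return 0 on success rather than a byte count, which is why
 * positive returns from the netfs helpers are squashed to 0 above.
 */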
3245 
3246 const struct address_space_operations cifs_addr_ops = {
3247 	.read_folio	= netfs_read_folio,
3248 	.readahead	= netfs_readahead,
3249 	.writepages	= netfs_writepages,
3250 	.dirty_folio	= netfs_dirty_folio,
3251 	.release_folio	= netfs_release_folio,
3252 	.direct_IO	= noop_direct_IO,
3253 	.invalidate_folio = netfs_invalidate_folio,
3254 	.migrate_folio	= filemap_migrate_folio,
3255 	/*
3256 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3257 	 * helper if needed
3258 	 */
3259 	.swap_activate	= cifs_swap_activate,
3260 	.swap_deactivate = cifs_swap_deactivate,
3261 	.swap_rw = cifs_swap_rw,
3262 };
3263 
3264 /*
3265  * Readahead requires the server to support a buffer large enough to
3266  * contain the header plus one complete page of data.  Otherwise, we need
3267  * to leave readahead out of the address space operations.
3268  */
3269 const struct address_space_operations cifs_addr_ops_smallbuf = {
3270 	.read_folio	= netfs_read_folio,
3271 	.writepages	= netfs_writepages,
3272 	.dirty_folio	= netfs_dirty_folio,
3273 	.release_folio	= netfs_release_folio,
3274 	.invalidate_folio = netfs_invalidate_folio,
3275 	.migrate_folio	= filemap_migrate_folio,
3276 };
3277