xref: /linux/fs/smb/client/file.c (revision de008c9ba5684f14e83bcf86cd45fb0e4e6c4d82)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/fs_struct.h>
13 #include <linux/filelock.h>
14 #include <linux/backing-dev.h>
15 #include <linux/stat.h>
16 #include <linux/fcntl.h>
17 #include <linux/pagemap.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifsglob.h"
28 #include "cifsproto.h"
29 #include "smb2proto.h"
30 #include "cifs_unicode.h"
31 #include "cifs_debug.h"
32 #include "cifs_fs_sb.h"
33 #include "fscache.h"
34 #include "smbdirect.h"
35 #include "fs_context.h"
36 #include "cifs_ioctl.h"
37 #include "cached_dir.h"
38 #include <trace/events/netfs.h>
39 
40 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
41 
/*
 * Prepare a subrequest to upload to the server.  We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
	size_t wsize = req->rreq.wsize;
	int rc;

	/* Take an xid for this subrequest if it doesn't have one yet; it is
	 * released in cifs_free_subrequest(). */
	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	/* Each subrequest may go out on a different channel of the session. */
	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

	/* A zero wsize in the mount context means it hasn't been negotiated
	 * with this server yet. */
	if (cifs_sb->ctx->wsize == 0)
		cifs_negotiate_wsize(server, cifs_sb->ctx,
				     tlink_tcon(req->cfile->tlink));

retry:
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			/* -EAGAIN from the reopen means "try again". */
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	/* Reserve credits for up to @wsize bytes; the amount actually granted
	 * caps the stream's maximum subrequest length. */
	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	/* Tag the credits for in-flight accounting/tracing. */
	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	/* smbdirect transfers are additionally limited in segment count by
	 * the fast-registration depth of the RDMA connection. */
	if (server->smbd_conn) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		stream->sreq_max_segs = sp->max_frmr_depth;
	}
#endif
}
107 
/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	if (cifs_forced_shutdown(sbi)) {
		rc = smb_EIO(smb_eio_trace_forced_shutdown);
		goto fail;
	}

	/* Re-fit the credit reservation made in cifs_prepare_write() to the
	 * final size of this subrequest. */
	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	/* The handle went stale since prepare; fail with -EAGAIN (traced as
	 * a retry below rather than a hard failure). */
	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	/* Return the unused credits before reporting the outcome upwards. */
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc);
	goto out;
}
144 
145 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
146 {
147 	cifs_invalidate_cache(wreq->inode, 0);
148 }
149 
/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	/* Take an xid for this subrequest if it doesn't have one yet; it is
	 * released in cifs_free_subrequest(). */
	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}

	/* Each subrequest may go out on a different channel of the session. */
	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
	rdata->server = server;

	/* A zero rsize in the mount context means it hasn't been negotiated
	 * with this server yet. */
	if (cifs_sb->ctx->rsize == 0)
		cifs_negotiate_rsize(server, cifs_sb->ctx,
				     tlink_tcon(req->cfile->tlink));

	/* Reserve credits; @size comes back as the read length the credits
	 * actually cover, which caps the stream's subrequest length. */
	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	rreq->io_streams[0].sreq_max_len = size;

	/* Tag the credits for in-flight accounting/tracing. */
	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	/* smbdirect transfers are additionally limited in segment count by
	 * the fast-registration depth of the RDMA connection. */
	if (server->smbd_conn) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		rreq->io_streams[0].sreq_max_segs = sp->max_frmr_depth;
	}
#endif
	return 0;
}
202 
/*
 * Issue a read operation on behalf of the netfs helper functions.  We're asked
 * to make a read of a certain size at a point in the file.  We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = rdata->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	/* Re-fit the credit reservation made in cifs_prepare_read() to the
	 * final size of this subrequest. */
	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	/* Reopen a stale handle, retrying for as long as the reopen itself
	 * says -EAGAIN. */
	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	/* For buffered reads, ask netfs to clear the folio tail beyond what
	 * the server returns (not wanted for unbuffered/direct I/O). */
	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
	    subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	subreq->error = rc;
	netfs_read_subreq_terminated(subreq);
}
247 
248 /*
249  * Writeback calls this when it finds a folio that needs uploading.  This isn't
250  * called if writeback only has copy-to-cache to deal with.
251  */
252 static void cifs_begin_writeback(struct netfs_io_request *wreq)
253 {
254 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
255 	int ret;
256 
257 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_ANY, &req->cfile);
258 	if (ret) {
259 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
260 		return;
261 	}
262 
263 	wreq->io_streams[0].avail = true;
264 }
265 
266 /*
267  * Initialise a request.
268  */
269 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
270 {
271 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
272 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode);
273 	struct cifsFileInfo *open_file = NULL;
274 
275 	rreq->rsize = cifs_sb->ctx->rsize;
276 	rreq->wsize = cifs_sb->ctx->wsize;
277 	req->pid = current->tgid; // Ummm...  This may be a workqueue
278 
279 	if (file) {
280 		open_file = file->private_data;
281 		rreq->netfs_priv = file->private_data;
282 		req->cfile = cifsFileInfo_get(open_file);
283 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_RWPIDFORWARD)
284 			req->pid = req->cfile->pid;
285 	} else if (rreq->origin != NETFS_WRITEBACK) {
286 		WARN_ON_ONCE(1);
287 		return smb_EIO1(smb_eio_trace_not_netfs_writeback, rreq->origin);
288 	}
289 
290 	return 0;
291 }
292 
293 /*
294  * Completion of a request operation.
295  */
296 static void cifs_rreq_done(struct netfs_io_request *rreq)
297 {
298 	struct timespec64 atime, mtime;
299 	struct inode *inode = rreq->inode;
300 
301 	/* we do not want atime to be less than mtime, it broke some apps */
302 	atime = inode_set_atime_to_ts(inode, current_time(inode));
303 	mtime = inode_get_mtime(inode);
304 	if (timespec64_compare(&atime, &mtime))
305 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
306 }
307 
308 static void cifs_free_request(struct netfs_io_request *rreq)
309 {
310 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
311 
312 	if (req->cfile)
313 		cifsFileInfo_put(req->cfile);
314 }
315 
316 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
317 {
318 	struct cifs_io_subrequest *rdata =
319 		container_of(subreq, struct cifs_io_subrequest, subreq);
320 	int rc = subreq->error;
321 
322 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
323 #ifdef CONFIG_CIFS_SMB_DIRECT
324 		if (rdata->mr) {
325 			smbd_deregister_mr(rdata->mr);
326 			rdata->mr = NULL;
327 		}
328 #endif
329 	}
330 
331 	if (rdata->credits.value != 0) {
332 		trace_smb3_rw_credits(rdata->rreq->debug_id,
333 				      rdata->subreq.debug_index,
334 				      rdata->credits.value,
335 				      rdata->server ? rdata->server->credits : 0,
336 				      rdata->server ? rdata->server->in_flight : 0,
337 				      -rdata->credits.value,
338 				      cifs_trace_rw_credits_free_subreq);
339 		if (rdata->server)
340 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
341 		else
342 			rdata->credits.value = 0;
343 	}
344 
345 	if (rdata->have_xid)
346 		free_xid(rdata->xid);
347 }
348 
/*
 * Hooks connecting the CIFS client into the netfs library.  Requests and
 * subrequests come from the pools below; the functions above provide
 * preparation, issue and teardown for the read and write paths.
 */
const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};
363 
364 /*
365  * Mark as invalid, all open files on tree connections since they
366  * were closed when session to server was lost.
367  */
368 void
369 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
370 {
371 	struct cifsFileInfo *open_file = NULL;
372 	struct list_head *tmp;
373 	struct list_head *tmp1;
374 
375 	/* only send once per connect */
376 	spin_lock(&tcon->tc_lock);
377 	if (tcon->need_reconnect)
378 		tcon->status = TID_NEED_RECON;
379 
380 	if (tcon->status != TID_NEED_RECON) {
381 		spin_unlock(&tcon->tc_lock);
382 		return;
383 	}
384 	tcon->status = TID_IN_FILES_INVALIDATE;
385 	spin_unlock(&tcon->tc_lock);
386 
387 	/* list all files open on tree connection and mark them invalid */
388 	spin_lock(&tcon->open_file_lock);
389 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
390 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
391 		open_file->invalidHandle = true;
392 		open_file->oplock_break_cancelled = true;
393 	}
394 	spin_unlock(&tcon->open_file_lock);
395 
396 	invalidate_all_cached_dirs(tcon);
397 	spin_lock(&tcon->tc_lock);
398 	if (tcon->status == TID_IN_FILES_INVALIDATE)
399 		tcon->status = TID_NEED_TCON;
400 	spin_unlock(&tcon->tc_lock);
401 
402 	/*
403 	 * BB Add call to evict_inodes(sb) for all superblocks mounted
404 	 * to this tcon.
405 	 */
406 }
407 
408 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
409 {
410 	if ((flags & O_ACCMODE) == O_RDONLY)
411 		return GENERIC_READ;
412 	else if ((flags & O_ACCMODE) == O_WRONLY)
413 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
414 	else if ((flags & O_ACCMODE) == O_RDWR) {
415 		/* GENERIC_ALL is too much permission to request
416 		   can cause unnecessary access denied on create */
417 		/* return GENERIC_ALL; */
418 		return (GENERIC_READ | GENERIC_WRITE);
419 	}
420 
421 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
422 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
423 		FILE_READ_DATA);
424 }
425 
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/* Translate VFS open flags into legacy SMB POSIX-extension SMB_O_* flags. */
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		posix_flags = SMB_O_RDONLY;
		break;
	case O_WRONLY:
		posix_flags = SMB_O_WRONLY;
		break;
	case O_RDWR:
		posix_flags = SMB_O_RDWR;
		break;
	default:
		posix_flags = 0;
		break;
	}

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL) {
		/* O_EXCL without O_CREAT has no effect; log and ignore it. */
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);
	}

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
461 
462 static inline int cifs_get_disposition(unsigned int flags)
463 {
464 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
465 		return FILE_CREATE;
466 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
467 		return FILE_OVERWRITE_IF;
468 	else if ((flags & O_CREAT) == O_CREAT)
469 		return FILE_OPEN_IF;
470 	else if ((flags & O_TRUNC) == O_TRUNC)
471 		return FILE_OVERWRITE;
472 	else
473 		return FILE_OPEN;
474 }
475 
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Open/create a file using the legacy SMB1 POSIX extensions.
 *
 * @full_path:	path of the file relative to the share root
 * @pinode:	in/out: pointer to an existing inode, or to a NULL slot to be
 *		filled with a new inode built from the returned attributes;
 *		may itself be NULL if the caller doesn't need the inode
 * @sb:		superblock of the mount
 * @mode:	create mode; the caller's umask is applied below
 * @f_flags:	VFS open flags, converted to SMB_O_* wire flags
 * @poplock:	out: oplock level granted by the server
 * @pnetfid:	out: file handle returned by the server
 * @xid:	transaction id of the enclosing operation
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(const char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc_obj(FILE_UNIX_BASIC_INFO);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* Existing inode: refresh the mapping and its attributes. */
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
539 
/*
 * Open a file on the server with regular NT (non-POSIX) semantics and then
 * fetch/refresh the inode metadata for it.
 *
 * When fscache is active on the inode and the open is write-only, we first
 * try read+write access so the cache can fill in around partial writes
 * (rdwr_for_fscache == 1).  If the server refuses that with -EACCES, we
 * retry with exactly the access the caller asked for (rdwr_for_fscache == 2)
 * and invalidate the cache contents for DIO-style writes.
 *
 * Returns 0 on success; on failure of the post-open metadata query the
 * server handle is closed again and -ESTALE becomes -EOPENSTALE.
 */
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);
	/* BB pass O_SYNC flag through on file attributes .. BB */
	create_options |= cifs_open_create_options(f_flags, create_options);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		/* Couldn't get the widened access for caching — fall back to
		 * the access the caller actually requested. */
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		/* Metadata query failed: don't leak the server handle. */
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}
628 
629 static bool
630 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
631 {
632 	struct cifs_fid_locks *cur;
633 	bool has_locks = false;
634 
635 	down_read(&cinode->lock_sem);
636 	list_for_each_entry(cur, &cinode->llist, llist) {
637 		if (!list_empty(&cur->locks)) {
638 			has_locks = true;
639 			break;
640 		}
641 	}
642 	up_read(&cinode->lock_sem);
643 	return has_locks;
644 }
645 
/*
 * Take the write side of @sem by polling down_write_trylock(), sleeping
 * 10ms between attempts instead of blocking in down_write().
 */
void
cifs_down_write(struct rw_semaphore *sem)
{
	for (;;) {
		if (down_write_trylock(sem))
			return;
		msleep(10);
	}
}
652 
653 static void cifsFileInfo_put_work(struct work_struct *work);
654 void serverclose_work(struct work_struct *work);
655 
/*
 * Build a cifsFileInfo for a handle that has just been opened on the server
 * and wire it into the file, inode and tcon bookkeeping.  Consumes the
 * pending-open record attached to @fid and takes its own references on the
 * dentry and @tlink.  Returns the new structure (also stored in
 * file->private_data) or NULL on allocation failure.
 */
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc_obj(struct cifsFileInfo);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc_obj(struct cifs_fid_locks);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	/* Publish this handle's lock list on the inode. */
	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	/* Prefer an oplock recorded against the pending open, if any. */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	spin_lock(&cinode->open_file_lock);
	fid->purge_cache = false;
	/* NOTE: set_fid() may raise fid->purge_cache; it is checked below. */
	server->ops->set_fid(cfile, fid, oplock);

	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
745 
/*
 * Take an extra reference on an open file's private data, serialised
 * against concurrent put by file_info_lock.  Returns @cifs_file so the
 * call can be chained.
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
754 
/*
 * Final teardown once the last reference is gone and any server-side close
 * has been dealt with: purge the handle's byte-range lock records, then
 * drop the tlink and dentry references and free the structure.
 */
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}
780 
781 static void cifsFileInfo_put_work(struct work_struct *work)
782 {
783 	struct cifsFileInfo *cifs_file = container_of(work,
784 			struct cifsFileInfo, put);
785 
786 	cifsFileInfo_put_final(cifs_file);
787 }
788 
789 void serverclose_work(struct work_struct *work)
790 {
791 	struct cifsFileInfo *cifs_file = container_of(work,
792 			struct cifsFileInfo, serverclose);
793 
794 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
795 
796 	struct TCP_Server_Info *server = tcon->ses->server;
797 	int rc = 0;
798 	int retries = 0;
799 	int MAX_RETRIES = 4;
800 
801 	do {
802 		if (server->ops->close_getattr)
803 			rc = server->ops->close_getattr(0, tcon, cifs_file);
804 		else if (server->ops->close)
805 			rc = server->ops->close(0, tcon, &cifs_file->fid);
806 
807 		if (rc == -EBUSY || rc == -EAGAIN) {
808 			retries++;
809 			msleep(250);
810 		}
811 	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
812 	);
813 
814 	if (retries == MAX_RETRIES)
815 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
816 
817 	if (cifs_file->offload)
818 		queue_work(fileinfo_put_wq, &cifs_file->put);
819 	else
820 		cifsFileInfo_put_final(cifs_file);
821 }
822 
/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 * Dropping the last reference may close the handle on the server and
 * frees the structure, possibly via a workqueue.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}
834 
835 /**
836  * _cifsFileInfo_put - release a reference of file priv data
837  *
838  * This may involve closing the filehandle @cifs_file out on the
839  * server. Must be called without holding tcon->open_file_lock,
840  * cinode->open_file_lock and cifs_file->file_info_lock.
841  *
842  * If @wait_for_oplock_handler is true and we are releasing the last
843  * reference, wait for any running oplock break handler of the file
844  * and cancel any pending one.
845  *
846  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
847  * @wait_oplock_handler: must be false if called from oplock_break_handler
848  * @offload:	not offloaded on close and oplock breaks
849  *
850  */
851 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
852 		       bool wait_oplock_handler, bool offload)
853 {
854 	struct inode *inode = d_inode(cifs_file->dentry);
855 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
856 	struct TCP_Server_Info *server = tcon->ses->server;
857 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
858 	struct super_block *sb = inode->i_sb;
859 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
860 	struct cifs_fid fid = {};
861 	struct cifs_pending_open open;
862 	bool oplock_break_cancelled;
863 	bool serverclose_offloaded = false;
864 
865 	spin_lock(&tcon->open_file_lock);
866 	spin_lock(&cifsi->open_file_lock);
867 	spin_lock(&cifs_file->file_info_lock);
868 
869 	cifs_file->offload = offload;
870 	if (--cifs_file->count > 0) {
871 		spin_unlock(&cifs_file->file_info_lock);
872 		spin_unlock(&cifsi->open_file_lock);
873 		spin_unlock(&tcon->open_file_lock);
874 		return;
875 	}
876 	spin_unlock(&cifs_file->file_info_lock);
877 
878 	if (server->ops->get_lease_key)
879 		server->ops->get_lease_key(inode, &fid);
880 
881 	/* store open in pending opens to make sure we don't miss lease break */
882 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
883 
884 	/* remove it from the lists */
885 	list_del(&cifs_file->flist);
886 	list_del(&cifs_file->tlist);
887 	atomic_dec(&tcon->num_local_opens);
888 
889 	if (list_empty(&cifsi->openFileList)) {
890 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
891 			 d_inode(cifs_file->dentry));
892 		/*
893 		 * In strict cache mode we need invalidate mapping on the last
894 		 * close  because it may cause a error when we open this file
895 		 * again and get at least level II oplock.
896 		 */
897 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_STRICT_IO)
898 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
899 		cifs_set_oplock_level(cifsi, 0);
900 	}
901 
902 	spin_unlock(&cifsi->open_file_lock);
903 	spin_unlock(&tcon->open_file_lock);
904 
905 	oplock_break_cancelled = wait_oplock_handler ?
906 		cancel_work_sync(&cifs_file->oplock_break) : false;
907 
908 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
909 		struct TCP_Server_Info *server = tcon->ses->server;
910 		unsigned int xid;
911 		int rc = 0;
912 
913 		xid = get_xid();
914 		if (server->ops->close_getattr)
915 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
916 		else if (server->ops->close)
917 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
918 		_free_xid(xid);
919 
920 		if (rc == -EBUSY || rc == -EAGAIN) {
921 			// Server close failed, hence offloading it as an async op
922 			queue_work(serverclose_wq, &cifs_file->serverclose);
923 			serverclose_offloaded = true;
924 		}
925 	}
926 
927 	if (oplock_break_cancelled)
928 		cifs_done_oplock_break(cifsi);
929 
930 	cifs_del_pending_open(&open);
931 
932 	// if serverclose has been offloaded to wq (on failure), it will
933 	// handle offloading put as well. If serverclose not offloaded,
934 	// we need to handle offloading put here.
935 	if (!serverclose_offloaded) {
936 		if (offload)
937 			queue_work(fileinfo_put_wq, &cifs_file->put);
938 		else
939 			cifsFileInfo_put_final(cifs_file);
940 	}
941 }
942 
/*
 * Ask the server to flush a file's data to stable storage.
 *
 * If @cfile is a writable handle, flush through it directly; otherwise
 * borrow any writable handle on the inode for the duration of the flush.
 * Returns 0 immediately if the mount disabled server-side flushes.
 */
int cifs_file_flush(const unsigned int xid, struct inode *inode,
		    struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifs_tcon *tcon;
	int rc;

	/* The mount asked us never to issue server-side flushes. */
	if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)
		return 0;

	/* Use the caller's handle if it was opened for writing. */
	if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) {
		tcon = tlink_tcon(cfile->tlink);
		return tcon->ses->server->ops->flush(xid, tcon,
						     &cfile->fid);
	}
	/* Otherwise borrow any writable handle on the inode. */
	rc = cifs_get_writable_file(CIFS_I(inode), FIND_ANY, &cfile);
	if (!rc) {
		tcon = tlink_tcon(cfile->tlink);
		rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid);
		cifsFileInfo_put(cfile);
	} else if (rc == -EBADF) {
		/* No writable handle at all: nothing to flush server-side. */
		rc = 0;
	}
	return rc;
}
968 
/*
 * Truncate a file to zero length (used for O_TRUNC in cifs_open()).
 *
 * Write back and flush pending data first so nothing dirty survives the
 * truncation, then set the server-side file size to 0 through a writable
 * handle (if one exists) and shrink the local view to match.
 */
static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry));
	struct inode *inode = d_inode(dentry);
	struct cifsFileInfo *cfile = NULL;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	int rc;

	rc = filemap_write_and_wait(inode->i_mapping);
	if (is_interrupt_error(rc))
		return -ERESTARTSYS;
	mapping_set_error(inode->i_mapping, rc);

	/* May return NULL if no handle owned by this fsuid is writable. */
	cfile = find_writable_file(cinode, FIND_FSUID_ONLY);
	rc = cifs_file_flush(xid, inode, cfile);
	if (!rc) {
		/*
		 * NOTE(review): with no writable handle only the local state
		 * is shrunk here; presumably the subsequent open's overwrite
		 * disposition truncates on the server — confirm.
		 */
		if (cfile) {
			tcon = tlink_tcon(cfile->tlink);
			server = tcon->ses->server;
			rc = server->ops->set_file_size(xid, tcon,
							cfile, 0, false);
		}
		if (!rc) {
			netfs_resize_file(&cinode->netfs, 0, true);
			cifs_setsize(inode, 0);
		}
	}
	if (cfile)
		cifsFileInfo_put(cfile);
	return rc;
}
1001 
/*
 * VFS ->open() for regular files.
 *
 * Tries, in order: reusing a deferred-close (cached) handle on the
 * inode, a legacy SMB1 POSIX open (when unix extensions allow it), and
 * finally a regular NT-style open.  A pending-open record is kept
 * around the server round trip so a lease break arriving for the new
 * handle is not lost.  On success file->private_data holds the
 * cifsFileInfo.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifs_open_info_data data = {};
	struct cifsFileInfo *cfile = NULL;
	struct TCP_Server_Info *server;
	struct cifs_pending_open open;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	const char *full_path;
	unsigned int sbflags;
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	void *page;

	xid = get_xid();

	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return smb_EIO(smb_eio_trace_forced_shutdown);
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* strict-IO O_DIRECT opens switch to the uncached file_operations */
	sbflags = cifs_sb_flags(cifs_sb);
	if ((file->f_flags & O_DIRECT) && (sbflags & CIFS_MOUNT_STRICT_IO)) {
		if (sbflags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (file->f_flags & O_TRUNC) {
		rc = cifs_do_truncate(xid, file_dentry(file));
		if (rc)
			goto out;
	}

	/* Get the cached handle as SMB2 close is deferred */
	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
		rc = __cifs_get_writable_file(CIFS_I(inode),
					      FIND_FSUID_ONLY |
					      FIND_NO_PENDING_DELETE |
					      FIND_OPEN_FLAGS,
					      file->f_flags, &cfile);
	} else {
		cfile = __find_readable_file(CIFS_I(inode),
					     FIND_NO_PENDING_DELETE |
					     FIND_OPEN_FLAGS,
					     file->f_flags);
		rc = cfile ? 0 : -ENOENT;
	}
	if (rc == 0) {
		/* reuse the handle and cancel its scheduled close */
		file->private_data = cfile;
		spin_lock(&CIFS_I(inode)->deferred_lock);
		cifs_del_deferred_close(cfile);
		spin_unlock(&CIFS_I(inode)->deferred_lock);
		goto use_cache;
	}
	/* hard link on the deferred close file */
	rc = cifs_get_hardlink_path(tcon, inode, file);
	if (rc)
		cifs_close_deferred_file(CIFS_I(inode));

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* track the open so a lease break for it is not dropped */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		/* undo the server open; nothing references the fid yet */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	/* a writable O_DIRECT open invalidates the fscache contents */
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}
1180 
1181 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1182 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1183 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1184 
1185 /*
1186  * Try to reacquire byte range locks that were released when session
1187  * to server was lost.
1188  */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* nested annotation: callers may already hold lock_sem elsewhere */
	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	/* push POSIX locks when the server supports them and the mount
	 * did not disable them; otherwise push mandatory-style locks */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
1218 
/*
 * Reopen @cfile after its server handle was invalidated (reconnect or
 * expired durable handle).  Serialized against other users of the
 * handle by fh_mutex; returns 0 if another thread already reopened it.
 * When @can_flush is true, dirty data is written back and the inode
 * info refreshed after a successful reopen; when false the caller is
 * itself in the writeback path and flushing here could deadlock.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else won the race to reopen */
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
	create_options |= cifs_open_create_options(cfile->f_flags,
						   create_options);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		/* the widened RDWR access was refused - retry as requested */
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	scoped_guard(spinlock, &cinode->open_file_lock)
		server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
1402 
/*
 * Delayed-work handler performing a close deferred by cifs_close().
 * Removes the deferred-close record under deferred_lock, then drops
 * the reference that the deferral kept on the handle.
 */
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}
1414 
1415 static bool
1416 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1417 {
1418 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1419 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1420 	unsigned int oplock = READ_ONCE(cinode->oplock);
1421 
1422 	return cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1423 		(oplock == CIFS_CACHE_RHW_FLG || oplock == CIFS_CACHE_RH_FLG) &&
1424 		!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags);
1425 
1426 }
1427 
/*
 * VFS ->release() for regular files.  When the handle qualifies (see
 * smb2_can_defer_close()) and the file was not deleted on the server,
 * the SMB close is deferred via delayed work so a quickly-following
 * reopen can reuse the handle; otherwise the handle is put right away.
 */
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		/* a NULL dclose is fine: smb2_can_defer_close() rejects it */
		dclose = kmalloc_obj(struct cifs_deferred_close);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, Increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				/* reference is handed off to the delayed work */
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
1477 
/*
 * After reconnect, reopen every invalidated persistent handle on
 * @tcon.  Handles are first collected (with a reference each) onto a
 * private list under open_file_lock so the reopen round trips can run
 * without holding the spinlock; any failure re-arms need_reopen_files
 * so a later reconnect retries.
 */
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	LIST_HEAD(tmp_list);

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");

	/* list all files open on tree connection, reopen resilient handles  */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}
1508 
/*
 * VFS ->release() for directories opened for readdir.  Closes the
 * server-side search handle if it still needs closing, frees any
 * network response buffer cached in the search state, and releases
 * the private cifsFileInfo.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		/* mark invalid before dropping the lock and closing */
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
1559 
1560 static struct cifsLockInfo *
1561 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1562 {
1563 	struct cifsLockInfo *lock =
1564 		kmalloc_obj(struct cifsLockInfo);
1565 	if (!lock)
1566 		return lock;
1567 	lock->offset = offset;
1568 	lock->length = length;
1569 	lock->type = type;
1570 	lock->pid = current->tgid;
1571 	lock->flags = flags;
1572 	INIT_LIST_HEAD(&lock->blist);
1573 	init_waitqueue_head(&lock->block_q);
1574 	return lock;
1575 }
1576 
1577 void
1578 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1579 {
1580 	struct cifsLockInfo *li, *tmp;
1581 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1582 		list_del_init(&li->blist);
1583 		wake_up(&li->block_q);
1584 	}
1585 }
1586 
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/*
 * Scan one handle's lock list for a lock overlapping
 * [offset, offset + length) that conflicts with the proposed
 * operation.  @rw_check: CIFS_LOCK_OP when setting a lock,
 * CIFS_READ_OP / CIFS_WRITE_OP when checking an I/O.  On conflict,
 * stores the conflicting lock in *conf_lock (if non-NULL) and
 * returns true.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* ranges that do not overlap cannot conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* shared locks coexist with same-owner/same-fid or same-type */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		/* OFD locks through the same fid do not conflict */
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
1627 
1628 bool
1629 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1630 			__u8 type, __u16 flags,
1631 			struct cifsLockInfo **conf_lock, int rw_check)
1632 {
1633 	bool rc = false;
1634 	struct cifs_fid_locks *cur;
1635 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1636 
1637 	list_for_each_entry(cur, &cinode->llist, llist) {
1638 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1639 						 flags, cfile, conf_lock,
1640 						 rw_check);
1641 		if (rc)
1642 			break;
1643 	}
1644 
1645 	return rc;
1646 }
1647 
1648 /*
1649  * Check if there is another lock that prevents us to set the lock (mandatory
1650  * style). If such a lock exists, update the flock structure with its
1651  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1652  * or leave it the same if we can't. Returns 0 if we don't need to request to
1653  * the server or 1 otherwise.
1654  */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock's range/owner/type via @flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;	/* cached view incomplete - must ask the server */
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
1686 
/*
 * Append an initialized lock to the handle's lock list, taking
 * lock_sem for writing since conflict scans walk the list under a
 * read lock.
 */
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
1695 
1696 /*
1697  * Set the byte-range lock (mandatory style). Returns:
1698  * 1) 0, if we set the lock and don't need to request to the server;
1699  * 2) 1, if no locks prevent us but we need to request to the server;
1700  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1701  */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching allowed - record it locally only */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/* block on the conflicting lock's waiter list until we are
		 * detached (woken by cifs_del_lock_waiters), then retry */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: unhook ourselves from the waiter list */
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1743 
1744 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1745 /*
1746  * Check if there is another lock that prevents us to set the lock (posix
1747  * style). If such a lock exists, update the flock structure with its
1748  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1749  * or leave it the same if we can't. Returns 0 if we don't need to request to
1750  * the server or 1 otherwise.
1751  */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->c.flc_type;

	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	/* no local conflict, but without caching the local view is not
	 * authoritative - restore the query type and ask the server */
	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
		flock->c.flc_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
1773 
1774 /*
1775  * Set the byte-range lock (posix style). Returns:
1776  * 1) <0, if the error occurs while setting the lock;
1777  * 2) 0, if we set the lock and don't need to request to the server;
1778  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1779  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1780  */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = FILE_LOCK_DEFERRED + 1;

	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return rc;

	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* cannot cache - caller must send the lock to the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	/* record the lock locally through the generic posix lock code */
	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	return rc;
}
1800 
/*
 * Push all cached mandatory byte-range locks on @cfile to the server
 * using SMB1 LOCKING_ANDX, batching up to max_num ranges per request.
 * Exclusive and shared locks are sent in separate passes since one
 * request carries a single lock type.  Returns the last error seen,
 * but keeps pushing remaining locks after a failure.
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc_objs(LOCKING_ANDX_RANGE, max_num);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	/* one pass per lock type: exclusive first, then shared */
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* batch full - send it and start a new one */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		/* send the final partial batch, if any */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1877 
1878 static __u32
1879 hash_lockowner(fl_owner_t owner)
1880 {
1881 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1882 }
1883 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1884 
/*
 * Snapshot of one POSIX byte-range lock queued for pushing to the
 * server after reconnect (see cifs_push_posix_locks()).
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the locks_to_send list */
	__u64 offset;		/* start of the byte range */
	__u64 length;		/* length of the byte range */
	__u32 pid;		/* hashed lock owner, sent as the pid */
	__u16 netfid;		/* SMB1 file handle to lock through */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1893 
1894 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Push all of the inode's cached POSIX byte-range locks to the server.
 * The flc_posix list can only be walked under flc_lock (which must not
 * be held across network I/O), so the locks are first counted, then a
 * matching number of lock_to_push records is preallocated, the list is
 * copied into them under flc_lock, and finally the copies are sent.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = locks_inode_context(inode);
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc_obj(struct lock_to_push);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* copy each posix lock into the next preallocated record */
	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	for_each_file_lock(flock, &flctx->flc_posix) {
		unsigned char ftype = flock->c.flc_type;

		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = cifs_flock_len(flock);
		if (ftype == F_RDLCK || ftype == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->c.flc_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
	}
	spin_unlock(&flctx->flc_lock);

	/* now, without flc_lock, send each snapshot to the server */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
1984 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1985 
1986 static int
1987 cifs_push_locks(struct cifsFileInfo *cfile)
1988 {
1989 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1990 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1991 	int rc = 0;
1992 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1993 	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
1994 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1995 
1996 	/* we are going to update can_cache_brlcks here - need a write access */
1997 	cifs_down_write(&cinode->lock_sem);
1998 	if (!cinode->can_cache_brlcks) {
1999 		up_write(&cinode->lock_sem);
2000 		return rc;
2001 	}
2002 
2003 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2004 	if (cap_unix(tcon->ses) &&
2005 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2006 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2007 		rc = cifs_push_posix_locks(cfile);
2008 	else
2009 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2010 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
2011 
2012 	cinode->can_cache_brlcks = false;
2013 	up_write(&cinode->lock_sem);
2014 	return rc;
2015 }
2016 
2017 static void
2018 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
2019 		bool *wait_flag, struct TCP_Server_Info *server)
2020 {
2021 	if (flock->c.flc_flags & FL_POSIX)
2022 		cifs_dbg(FYI, "Posix\n");
2023 	if (flock->c.flc_flags & FL_FLOCK)
2024 		cifs_dbg(FYI, "Flock\n");
2025 	if (flock->c.flc_flags & FL_SLEEP) {
2026 		cifs_dbg(FYI, "Blocking lock\n");
2027 		*wait_flag = true;
2028 	}
2029 	if (flock->c.flc_flags & FL_ACCESS)
2030 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
2031 	if (flock->c.flc_flags & FL_LEASE)
2032 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
2033 	if (flock->c.flc_flags &
2034 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
2035 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
2036 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
2037 		         flock->c.flc_flags);
2038 
2039 	*type = server->vals->large_lock_type;
2040 	if (lock_is_write(flock)) {
2041 		cifs_dbg(FYI, "F_WRLCK\n");
2042 		*type |= server->vals->exclusive_lock_type;
2043 		*lock = 1;
2044 	} else if (lock_is_unlock(flock)) {
2045 		cifs_dbg(FYI, "F_UNLCK\n");
2046 		*type |= server->vals->unlock_lock_type;
2047 		*unlock = 1;
2048 		/* Check if unlock includes more than one lock range */
2049 	} else if (lock_is_read(flock)) {
2050 		cifs_dbg(FYI, "F_RDLCK\n");
2051 		*type |= server->vals->shared_lock_type;
2052 		*lock = 1;
2053 	} else if (flock->c.flc_type == F_EXLCK) {
2054 		cifs_dbg(FYI, "F_EXLCK\n");
2055 		*type |= server->vals->exclusive_lock_type;
2056 		*lock = 1;
2057 	} else if (flock->c.flc_type == F_SHLCK) {
2058 		cifs_dbg(FYI, "F_SHLCK\n");
2059 		*type |= server->vals->shared_lock_type;
2060 		*lock = 1;
2061 	} else
2062 		cifs_dbg(FYI, "Unknown type of lock\n");
2063 }
2064 
/*
 * Handle an F_GETLK-style query: report whether @flock's range could be
 * locked.  On conflict the conflicting lock details end up in @flock;
 * otherwise flc_type is set to F_UNLCK.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* first consult the locally cached POSIX locks */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		/* ask the server; it fills @flock in on a conflict */
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* check locks cached on this client first */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/*
	 * Probe the server by actually taking the lock and, on success,
	 * immediately releasing it again.
	 */
	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->c.flc_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	/* a shared probe failed - the range must be write-locked */
	if (type & server->vals->shared_lock_type) {
		flock->c.flc_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive probe failed - retry shared to tell read vs write locks apart */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->c.flc_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->c.flc_type = F_WRLCK;

	return 0;
}
2135 
2136 void
2137 cifs_move_llist(struct list_head *source, struct list_head *dest)
2138 {
2139 	struct list_head *li, *tmp;
2140 	list_for_each_safe(li, tmp, source)
2141 		list_move(li, dest);
2142 }
2143 
2144 int
2145 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2146 				struct file *file)
2147 {
2148 	struct cifsFileInfo *open_file = NULL;
2149 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2150 	int rc = 0;
2151 
2152 	spin_lock(&tcon->open_file_lock);
2153 	spin_lock(&cinode->open_file_lock);
2154 
2155 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2156 		if (file->f_flags == open_file->f_flags) {
2157 			rc = -EINVAL;
2158 			break;
2159 		}
2160 	}
2161 
2162 	spin_unlock(&cinode->open_file_lock);
2163 	spin_unlock(&tcon->open_file_lock);
2164 	return rc;
2165 }
2166 
2167 void
2168 cifs_free_llist(struct list_head *llist)
2169 {
2170 	struct cifsLockInfo *li, *tmp;
2171 	list_for_each_entry_safe(li, tmp, llist, llist) {
2172 		cifs_del_lock_waiters(li);
2173 		list_del(&li->llist);
2174 		kfree(li);
2175 	}
2176 }
2177 
2178 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Remove the byte-range locks covered by @flock from @cfile, sending
 * SMB1 LOCKING_ANDX unlock requests to the server in batches of up to
 * max_num ranges per request.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	/* pass 0: exclusive locks, pass 1: shared locks (one type per request) */
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = cifs_flock_len(flock);
	LIST_HEAD(tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* how many lock ranges fit in a single wire request */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc_objs(LOCKING_ANDX_RANGE, max_num);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* only locks lying fully inside the unlock range qualify */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* batch is full - flush it to the server now */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		/* send any final partial batch for this lock type */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
2289 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2290 
/*
 * Apply (@lock) or remove (@unlock) the byte-range lock described by
 * @flock, on the server and in the client-side lock lists.  For
 * POSIX/flock requests the outcome is also recorded with the VFS.
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (posix_lck) {
		int posix_lock_type;

		/* try to satisfy the request from the local POSIX lock cache */
		rc = cifs_posix_lock_set(file, flock);
		if (rc <= FILE_LOCK_DEFERRED)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->c.flc_flags);
		if (!lock)
			return -ENOMEM;

		/*
		 * NOTE(review): rc == 0 here appears to mean the lock was
		 * handled locally (no server round-trip needed) - confirm
		 * against cifs_lock_add_if.
		 */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			cifs_reset_oplock(CIFS_I(inode));
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* server accepted the lock - record it in the local list */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->c.flc_flags & FL_CLOSE))
				return rc;
		}
		/* record the final state of the lock with the VFS */
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
2385 
2386 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2387 {
2388 	int rc, xid;
2389 	int lock = 0, unlock = 0;
2390 	bool wait_flag = false;
2391 	bool posix_lck = false;
2392 	struct cifs_sb_info *cifs_sb;
2393 	struct cifs_tcon *tcon;
2394 	struct cifsFileInfo *cfile;
2395 	__u32 type;
2396 
2397 	xid = get_xid();
2398 
2399 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2400 		rc = -ENOLCK;
2401 		free_xid(xid);
2402 		return rc;
2403 	}
2404 
2405 	cfile = (struct cifsFileInfo *)file->private_data;
2406 	tcon = tlink_tcon(cfile->tlink);
2407 
2408 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2409 			tcon->ses->server);
2410 	cifs_sb = CIFS_SB(file);
2411 
2412 	if (cap_unix(tcon->ses) &&
2413 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2414 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2415 		posix_lck = true;
2416 
2417 	if (!lock && !unlock) {
2418 		/*
2419 		 * if no lock or unlock then nothing to do since we do not
2420 		 * know what it is
2421 		 */
2422 		rc = -EOPNOTSUPP;
2423 		free_xid(xid);
2424 		return rc;
2425 	}
2426 
2427 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2428 			xid);
2429 	free_xid(xid);
2430 	return rc;
2431 
2432 
2433 }
2434 
2435 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2436 {
2437 	struct cifs_sb_info *cifs_sb = CIFS_SB(file);
2438 	struct cifsFileInfo *cfile;
2439 	int lock = 0, unlock = 0;
2440 	bool wait_flag = false;
2441 	bool posix_lck = false;
2442 	struct cifs_tcon *tcon;
2443 	__u32 type;
2444 	int rc, xid;
2445 
2446 	rc = -EACCES;
2447 	xid = get_xid();
2448 
2449 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2450 		 flock->c.flc_flags, flock->c.flc_type,
2451 		 (long long)flock->fl_start,
2452 		 (long long)flock->fl_end);
2453 
2454 	cfile = (struct cifsFileInfo *)file->private_data;
2455 	tcon = tlink_tcon(cfile->tlink);
2456 
2457 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2458 			tcon->ses->server);
2459 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2460 
2461 	if (cap_unix(tcon->ses) &&
2462 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2463 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2464 		posix_lck = true;
2465 	/*
2466 	 * BB add code here to normalize offset and length to account for
2467 	 * negative length which we can not accept over the wire.
2468 	 */
2469 	if (IS_GETLK(cmd)) {
2470 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2471 		free_xid(xid);
2472 		return rc;
2473 	}
2474 
2475 	if (!lock && !unlock) {
2476 		/*
2477 		 * if no lock or unlock then nothing to do since we do not
2478 		 * know what it is
2479 		 */
2480 		free_xid(xid);
2481 		return -EOPNOTSUPP;
2482 	}
2483 
2484 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2485 			xid);
2486 	free_xid(xid);
2487 	return rc;
2488 }
2489 
2490 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
2491 {
2492 	struct netfs_io_request *wreq = wdata->rreq;
2493 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2494 	loff_t wrend;
2495 
2496 	if (result > 0) {
2497 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2498 
2499 		if (wrend > ictx->zero_point &&
2500 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2501 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2502 			ictx->zero_point = wrend;
2503 		if (wrend > ictx->remote_i_size)
2504 			netfs_resize_file(ictx, wrend, true);
2505 	}
2506 
2507 	netfs_write_subrequest_terminated(&wdata->subreq, result);
2508 }
2509 
2510 static bool open_flags_match(struct cifsInodeInfo *cinode,
2511 			     unsigned int oflags, unsigned int cflags)
2512 {
2513 	struct inode *inode = &cinode->netfs.inode;
2514 	int crw = 0, orw = 0;
2515 
2516 	oflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
2517 	cflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
2518 
2519 	if (cifs_fscache_enabled(inode)) {
2520 		if (OPEN_FMODE(cflags) & FMODE_WRITE)
2521 			crw = 1;
2522 		if (OPEN_FMODE(oflags) & FMODE_WRITE)
2523 			orw = 1;
2524 	}
2525 	if (cifs_convert_flags(oflags, orw) != cifs_convert_flags(cflags, crw))
2526 		return false;
2527 
2528 	return (oflags & (O_SYNC | O_DIRECT)) == (cflags & (O_SYNC | O_DIRECT));
2529 }
2530 
2531 struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode,
2532 					  unsigned int find_flags,
2533 					  unsigned int open_flags)
2534 {
2535 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode);
2536 	bool fsuid_only = find_flags & FIND_FSUID_ONLY;
2537 	struct cifsFileInfo *open_file = NULL;
2538 
2539 	/* only filter by fsuid on multiuser mounts */
2540 	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
2541 		fsuid_only = false;
2542 
2543 	spin_lock(&cifs_inode->open_file_lock);
2544 	/* we could simply get the first_list_entry since write-only entries
2545 	   are always at the end of the list but since the first entry might
2546 	   have a close pending, we go through the whole list */
2547 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2548 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2549 			continue;
2550 		if ((find_flags & FIND_NO_PENDING_DELETE) &&
2551 		    open_file->status_file_deleted)
2552 			continue;
2553 		if ((find_flags & FIND_OPEN_FLAGS) &&
2554 		    !open_flags_match(cifs_inode, open_flags,
2555 				      open_file->f_flags))
2556 			continue;
2557 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2558 			if ((!open_file->invalidHandle)) {
2559 				/* found a good file */
2560 				/* lock it so it will not be closed on us */
2561 				cifsFileInfo_get(open_file);
2562 				spin_unlock(&cifs_inode->open_file_lock);
2563 				return open_file;
2564 			} /* else might as well continue, and look for
2565 			     another, or simply have the caller reopen it
2566 			     again rather than trying to fix this handle */
2567 		} else /* write only file */
2568 			break; /* write only files are last so must be done */
2569 	}
2570 	spin_unlock(&cifs_inode->open_file_lock);
2571 	return NULL;
2572 }
2573 
/*
 * Find a writable open handle on this inode, preferring valid handles
 * opened by the current process and falling back to reopening an
 * invalidated one.  Return -EBADF if no handle is found and general rc
 * otherwise.
 */
int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
			     unsigned int find_flags, unsigned int open_flags,
			     struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc = -EBADF;
	unsigned int refind = 0;
	bool fsuid_only = find_flags & FIND_FSUID_ONLY;
	bool with_delete = find_flags & FIND_WITH_DELETE;
	*ret_file = NULL;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen but we had reports of on oops (due
	 * to it being zero) during stress testcases so we need to check for it
	 */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return rc;
	}

	cifs_sb = CIFS_SB(cifs_inode);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	/* give up after MAX_REOPEN_ATT failed reopen attempts */
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return rc;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		/* first pass only considers handles opened by this process */
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (with_delete && !(open_file->fid.access & DELETE))
			continue;
		if ((find_flags & FIND_NO_PENDING_DELETE) &&
		    open_file->status_file_deleted)
			continue;
		if ((find_flags & FIND_OPEN_FLAGS) &&
		    !open_flags_match(cifs_inode, open_flags,
				      open_file->f_flags))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				*ret_file = open_file;
				return 0;
			} else {
				/* remember an invalid handle as a last resort */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		/* try to revive the stale handle with a reopen */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc) {
			*ret_file = inv_file;
			return 0;
		}

		/* reopen failed: demote this handle and rescan the list */
		spin_lock(&cifs_inode->open_file_lock);
		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
		spin_unlock(&cifs_inode->open_file_lock);
		cifsFileInfo_put(inv_file);
		++refind;
		inv_file = NULL;
		spin_lock(&cifs_inode->open_file_lock);
		goto refind_writable;
	}

	return rc;
}
2671 
2672 struct cifsFileInfo *
2673 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2674 {
2675 	struct cifsFileInfo *cfile;
2676 	int rc;
2677 
2678 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2679 	if (rc)
2680 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2681 
2682 	return cfile;
2683 }
2684 
2685 int
2686 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2687 		       int flags,
2688 		       struct cifsFileInfo **ret_file)
2689 {
2690 	struct cifsFileInfo *cfile;
2691 	void *page = alloc_dentry_path();
2692 
2693 	*ret_file = NULL;
2694 
2695 	spin_lock(&tcon->open_file_lock);
2696 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2697 		struct cifsInodeInfo *cinode;
2698 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2699 		if (IS_ERR(full_path)) {
2700 			spin_unlock(&tcon->open_file_lock);
2701 			free_dentry_path(page);
2702 			return PTR_ERR(full_path);
2703 		}
2704 		if (strcmp(full_path, name))
2705 			continue;
2706 
2707 		cinode = CIFS_I(d_inode(cfile->dentry));
2708 		spin_unlock(&tcon->open_file_lock);
2709 		free_dentry_path(page);
2710 		return cifs_get_writable_file(cinode, flags, ret_file);
2711 	}
2712 
2713 	spin_unlock(&tcon->open_file_lock);
2714 	free_dentry_path(page);
2715 	return -ENOENT;
2716 }
2717 
2718 int
2719 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2720 		       struct cifsFileInfo **ret_file)
2721 {
2722 	struct cifsFileInfo *cfile;
2723 	void *page = alloc_dentry_path();
2724 
2725 	*ret_file = NULL;
2726 
2727 	spin_lock(&tcon->open_file_lock);
2728 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2729 		struct cifsInodeInfo *cinode;
2730 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2731 		if (IS_ERR(full_path)) {
2732 			spin_unlock(&tcon->open_file_lock);
2733 			free_dentry_path(page);
2734 			return PTR_ERR(full_path);
2735 		}
2736 		if (strcmp(full_path, name))
2737 			continue;
2738 
2739 		cinode = CIFS_I(d_inode(cfile->dentry));
2740 		spin_unlock(&tcon->open_file_lock);
2741 		free_dentry_path(page);
2742 		*ret_file = find_readable_file(cinode, FIND_ANY);
2743 		return *ret_file ? 0 : -ENOENT;
2744 	}
2745 
2746 	spin_unlock(&tcon->open_file_lock);
2747 	free_dentry_path(page);
2748 	return -ENOENT;
2749 }
2750 
2751 /*
2752  * Flush data on a strict file.
2753  */
2754 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2755 		      int datasync)
2756 {
2757 	struct cifsFileInfo *smbfile = file->private_data;
2758 	struct inode *inode = file_inode(file);
2759 	unsigned int xid;
2760 	int rc;
2761 
2762 	rc = file_write_and_wait_range(file, start, end);
2763 	if (rc) {
2764 		trace_cifs_fsync_err(inode->i_ino, rc);
2765 		return rc;
2766 	}
2767 
2768 	cifs_dbg(FYI, "%s: name=%pD datasync=0x%x\n", __func__, file, datasync);
2769 
2770 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2771 		rc = cifs_zap_mapping(inode);
2772 		cifs_dbg(FYI, "%s: invalidate mapping: rc = %d\n", __func__, rc);
2773 	}
2774 
2775 	xid = get_xid();
2776 	rc = cifs_file_flush(xid, inode, smbfile);
2777 	free_xid(xid);
2778 	return rc;
2779 }
2780 
2781 /*
2782  * Flush data on a non-strict data.
2783  */
2784 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2785 {
2786 	unsigned int xid;
2787 	int rc = 0;
2788 	struct cifs_tcon *tcon;
2789 	struct TCP_Server_Info *server;
2790 	struct cifsFileInfo *smbfile = file->private_data;
2791 	struct inode *inode = file_inode(file);
2792 	struct cifs_sb_info *cifs_sb = CIFS_SB(file);
2793 
2794 	rc = file_write_and_wait_range(file, start, end);
2795 	if (rc) {
2796 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2797 		return rc;
2798 	}
2799 
2800 	xid = get_xid();
2801 
2802 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2803 		 file, datasync);
2804 
2805 	tcon = tlink_tcon(smbfile->tlink);
2806 	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)) {
2807 		server = tcon->ses->server;
2808 		if (server->ops->flush == NULL) {
2809 			rc = -ENOSYS;
2810 			goto fsync_exit;
2811 		}
2812 
2813 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2814 			smbfile = find_writable_file(CIFS_I(inode), FIND_ANY);
2815 			if (smbfile) {
2816 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2817 				cifsFileInfo_put(smbfile);
2818 			} else
2819 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2820 		} else
2821 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2822 	}
2823 
2824 fsync_exit:
2825 	free_xid(xid);
2826 	return rc;
2827 }
2828 
2829 /*
2830  * As file closes, flush all cached write data for this inode checking
2831  * for write behind errors.
2832  */
2833 int cifs_flush(struct file *file, fl_owner_t id)
2834 {
2835 	struct inode *inode = file_inode(file);
2836 	int rc = 0;
2837 
2838 	if (file->f_mode & FMODE_WRITE)
2839 		rc = filemap_write_and_wait(inode->i_mapping);
2840 
2841 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2842 	if (rc) {
2843 		/* get more nuanced writeback errors */
2844 		rc = filemap_check_wb_err(file->f_mapping, 0);
2845 		trace_cifs_flush_err(inode->i_ino, rc);
2846 	}
2847 	return rc;
2848 }
2849 
/*
 * Buffered write for mounts where mandatory byte-range locks must be
 * honoured: holds lock_sem shared to keep the brlock list stable and
 * refuses the write if it overlaps a conflicting lock.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	ssize_t rc;

	/* take the netfs write lock before nesting lock_sem under it */
	rc = netfs_start_io_write(inode);
	if (rc < 0)
		return rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	/* fail rather than write through an exclusive lock held by others */
	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) &&
	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))) {
		rc = -EACCES;
		goto out;
	}

	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);

out:
	up_read(&cinode->lock_sem);
	netfs_end_io_write(inode);
	/* honour O_SYNC and friends once data is in the pagecache */
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
2892 
2893 ssize_t
2894 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2895 {
2896 	struct inode *inode = file_inode(iocb->ki_filp);
2897 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2898 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
2899 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2900 						iocb->ki_filp->private_data;
2901 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2902 	ssize_t written;
2903 
2904 	written = cifs_get_writer(cinode);
2905 	if (written)
2906 		return written;
2907 
2908 	if (CIFS_CACHE_WRITE(cinode)) {
2909 		if (cap_unix(tcon->ses) &&
2910 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2911 		    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2912 			written = netfs_file_write_iter(iocb, from);
2913 			goto out;
2914 		}
2915 		written = cifs_writev(iocb, from);
2916 		goto out;
2917 	}
2918 	/*
2919 	 * For non-oplocked files in strict cache mode we need to write the data
2920 	 * to the server exactly from the pos to pos+len-1 rather than flush all
2921 	 * affected pages because it may cause a error with mandatory locks on
2922 	 * these pages but not on the region from pos to ppos+len-1.
2923 	 */
2924 	written = netfs_file_write_iter(iocb, from);
2925 	if (CIFS_CACHE_READ(cinode)) {
2926 		/*
2927 		 * We have read level caching and we have just sent a write
2928 		 * request to the server thus making data in the cache stale.
2929 		 * Zap the cache and set oplock/lease level to NONE to avoid
2930 		 * reading stale data from the cache. All subsequent read
2931 		 * operations will read new data from the server.
2932 		 */
2933 		cifs_zap_mapping(inode);
2934 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2935 			 inode);
2936 		cifs_reset_oplock(cinode);
2937 	}
2938 out:
2939 	cifs_put_writer(cinode);
2940 	return written;
2941 }
2942 
2943 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2944 {
2945 	ssize_t rc;
2946 	struct inode *inode = file_inode(iocb->ki_filp);
2947 
2948 	if (iocb->ki_flags & IOCB_DIRECT)
2949 		return netfs_unbuffered_read_iter(iocb, iter);
2950 
2951 	rc = cifs_revalidate_mapping(inode);
2952 	if (rc)
2953 		return rc;
2954 
2955 	return netfs_file_read_iter(iocb, iter);
2956 }
2957 
2958 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2959 {
2960 	struct inode *inode = file_inode(iocb->ki_filp);
2961 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2962 	ssize_t written;
2963 	int rc;
2964 
2965 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2966 		written = netfs_unbuffered_write_iter(iocb, from);
2967 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2968 			cifs_zap_mapping(inode);
2969 			cifs_dbg(FYI,
2970 				 "Set no oplock for inode=%p after a write operation\n",
2971 				 inode);
2972 			cifs_reset_oplock(cinode);
2973 		}
2974 		return written;
2975 	}
2976 
2977 	written = cifs_get_writer(cinode);
2978 	if (written)
2979 		return written;
2980 
2981 	written = netfs_file_write_iter(iocb, from);
2982 
2983 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2984 		rc = filemap_fdatawrite(inode->i_mapping);
2985 		if (rc)
2986 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2987 				 rc, inode);
2988 	}
2989 
2990 	cifs_put_writer(cinode);
2991 	return written;
2992 }
2993 
2994 ssize_t
2995 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2996 {
2997 	struct inode *inode = file_inode(iocb->ki_filp);
2998 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2999 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
3000 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3001 						iocb->ki_filp->private_data;
3002 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3003 	int rc = -EACCES;
3004 
3005 	/*
3006 	 * In strict cache mode we need to read from the server all the time
3007 	 * if we don't have level II oplock because the server can delay mtime
3008 	 * change - so we can't make a decision about inode invalidating.
3009 	 * And we can also fail with pagereading if there are mandatory locks
3010 	 * on pages affected by this read but not on the region from pos to
3011 	 * pos+len-1.
3012 	 */
3013 	if (!CIFS_CACHE_READ(cinode))
3014 		return netfs_unbuffered_read_iter(iocb, to);
3015 
3016 	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0) {
3017 		if (iocb->ki_flags & IOCB_DIRECT)
3018 			return netfs_unbuffered_read_iter(iocb, to);
3019 		return netfs_buffered_read_iter(iocb, to);
3020 	}
3021 
3022 	/*
3023 	 * We need to hold the sem to be sure nobody modifies lock list
3024 	 * with a brlock that prevents reading.
3025 	 */
3026 	if (iocb->ki_flags & IOCB_DIRECT) {
3027 		rc = netfs_start_io_direct(inode);
3028 		if (rc < 0)
3029 			goto out;
3030 		rc = -EACCES;
3031 		down_read(&cinode->lock_sem);
3032 		if (!cifs_find_lock_conflict(
3033 			    cfile, iocb->ki_pos, iov_iter_count(to),
3034 			    tcon->ses->server->vals->shared_lock_type,
3035 			    0, NULL, CIFS_READ_OP))
3036 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
3037 		up_read(&cinode->lock_sem);
3038 		netfs_end_io_direct(inode);
3039 	} else {
3040 		rc = netfs_start_io_read(inode);
3041 		if (rc < 0)
3042 			goto out;
3043 		rc = -EACCES;
3044 		down_read(&cinode->lock_sem);
3045 		if (!cifs_find_lock_conflict(
3046 			    cfile, iocb->ki_pos, iov_iter_count(to),
3047 			    tcon->ses->server->vals->shared_lock_type,
3048 			    0, NULL, CIFS_READ_OP))
3049 			rc = filemap_read(iocb, to, 0);
3050 		up_read(&cinode->lock_sem);
3051 		netfs_end_io_read(inode);
3052 	}
3053 out:
3054 	return rc;
3055 }
3056 
/*
 * First write fault on a shared mapping page: delegate to the netfs
 * library (no per-call netfs group is passed).
 */
static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
{
	return netfs_page_mkwrite(vmf, NULL);
}
3061 
/*
 * vm_operations shared by both cifs mmap flavours below; faults are
 * served from the page cache via the generic filemap helpers.
 */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3067 
3068 int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
3069 {
3070 	int xid, rc = 0;
3071 	struct inode *inode = file_inode(desc->file);
3072 
3073 	xid = get_xid();
3074 
3075 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3076 		rc = cifs_zap_mapping(inode);
3077 	if (!rc)
3078 		rc = generic_file_mmap_prepare(desc);
3079 	if (!rc)
3080 		desc->vm_ops = &cifs_file_vm_ops;
3081 
3082 	free_xid(xid);
3083 	return rc;
3084 }
3085 
3086 int cifs_file_mmap_prepare(struct vm_area_desc *desc)
3087 {
3088 	int rc, xid;
3089 
3090 	xid = get_xid();
3091 
3092 	rc = cifs_revalidate_file(desc->file);
3093 	if (rc)
3094 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3095 			 rc);
3096 	if (!rc)
3097 		rc = generic_file_mmap_prepare(desc);
3098 	if (!rc)
3099 		desc->vm_ops = &cifs_file_vm_ops;
3100 
3101 	free_xid(xid);
3102 	return rc;
3103 }
3104 
3105 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3106 {
3107 	struct cifsFileInfo *open_file;
3108 
3109 	spin_lock(&cifs_inode->open_file_lock);
3110 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3111 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3112 			spin_unlock(&cifs_inode->open_file_lock);
3113 			return 1;
3114 		}
3115 	}
3116 	spin_unlock(&cifs_inode->open_file_lock);
3117 	return 0;
3118 }
3119 
3120 /* We do not want to update the file size from server for inodes
3121    open for write - to avoid races with writepage extending
3122    the file - in the future we could consider allowing
3123    refreshing the inode only on increases in the file size
3124    but this is tricky to do without racing with writebehind
3125    page caching in the current Linux kernel design */
3126 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3127 			    bool from_readdir)
3128 {
3129 	if (!cifsInode)
3130 		return true;
3131 
3132 	if (is_inode_writable(cifsInode) ||
3133 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3134 		/* This inode is open for write at least once */
3135 		struct cifs_sb_info *cifs_sb = CIFS_SB(cifsInode);
3136 
3137 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_DIRECT_IO) {
3138 			/* since no page cache to corrupt on directio
3139 			we can change size safely */
3140 			return true;
3141 		}
3142 
3143 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3144 			return true;
3145 
3146 		return false;
3147 	} else
3148 		return true;
3149 }
3150 
/*
 * Work item run when the server breaks an oplock/lease on @cfile.
 * Downgrades the cached oplock state, flushes and (if required)
 * invalidates the page cache, pushes cached byte-range locks to the
 * server, and finally sends the break acknowledgment unless the handle
 * has already been closed or the break was cancelled.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	bool cache_read, cache_write, cache_handle;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct tcon_link *tlink;
	unsigned int oplock;
	int rc = 0;
	bool purge_cache = false, oplock_break_cancelled;
	__u64 persistent_fid, volatile_fid;
	__u16 net_fid;

	/* let in-flight writers drain before downgrading the oplock */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		goto out;
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	/*
	 * Downgrade the oplock under open_file_lock and snapshot the
	 * resulting caching rights; RO/RW cache mount options can grant
	 * rights the server did not.
	 */
	scoped_guard(spinlock, &cinode->open_file_lock) {
		unsigned int sbflags = cifs_sb_flags(cifs_sb);

		server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
					      cfile->oplock_epoch, &purge_cache);
		oplock = READ_ONCE(cinode->oplock);
		cache_read = (oplock & CIFS_CACHE_READ_FLG) ||
			(sbflags & CIFS_MOUNT_RO_CACHE);
		cache_write = (oplock & CIFS_CACHE_WRITE_FLG) ||
			(sbflags & CIFS_MOUNT_RW_CACHE);
		cache_handle = oplock & CIFS_CACHE_HANDLE_FLG;
	}

	/* mandatory brlocks + read-only caching cannot coexist: drop caching */
	if (!cache_write && cache_read && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cifs_reset_oplock(cinode);
		oplock = 0;
		cache_read = cache_write = cache_handle = false;
	}

	/*
	 * For regular files, propagate the break to local leases, write
	 * back dirty pages, and zap the cache if read caching was lost.
	 */
	if (S_ISREG(inode->i_mode)) {
		if (cache_read)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!cache_read || purge_cache) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		/* still holding write caching: no need to push locks now */
		if (cache_write)
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * When oplock break is received and there are no active
	 * file handles but cached, then schedule deferred close immediately.
	 * So, new open will not use cached handle.
	 */

	if (!cache_handle && !list_empty(&cinode->deferred_closes))
		cifs_close_deferred_file(cinode);

	/* snapshot the fids before dropping our reference to cfile */
	persistent_fid = cfile->fid.persistent_fid;
	volatile_fid = cfile->fid.volatile_fid;
	net_fid = cfile->fid.netfid;
	oplock_break_cancelled = cfile->oplock_break_cancelled;

	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
	/*
	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
	 * an acknowledgment to be sent when the file has already been closed.
	 */
	spin_lock(&cinode->open_file_lock);
	/* check list empty since can race with kill_sb calling tree disconnect */
	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
		spin_unlock(&cinode->open_file_lock);
		rc = server->ops->oplock_response(tcon, persistent_fid,
						  volatile_fid, net_fid,
						  cinode, oplock);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	} else
		spin_unlock(&cinode->open_file_lock);

	cifs_put_tlink(tlink);
out:
	cifs_done_oplock_break(cinode);
}
3254 
/*
 * Enable use of this file as swap space.  Requires a swap_rw aop and a
 * hole-free file (blocks * 512 must cover i_size); marks the open handle
 * as a swapfile and registers a single swap extent covering the file.
 * Returns 0 on success or a negative errno.
 */
static int cifs_swap_activate(struct swap_info_struct *sis,
			      struct file *swap_file, sector_t *span)
{
	struct cifsFileInfo *cfile = swap_file->private_data;
	struct inode *inode = swap_file->f_mapping->host;
	unsigned long blocks;
	long long isize;

	cifs_dbg(FYI, "swap activate\n");

	if (!swap_file->f_mapping->a_ops->swap_rw)
		/* Cannot support swap */
		return -EINVAL;

	/* snapshot size/blocks consistently under i_lock */
	spin_lock(&inode->i_lock);
	blocks = inode->i_blocks;
	isize = inode->i_size;
	spin_unlock(&inode->i_lock);
	if (blocks*512 < isize) {
		pr_warn("swap activate: swapfile has holes\n");
		return -EINVAL;
	}
	*span = sis->pages;

	pr_warn_once("Swap support over SMB3 is experimental\n");

	/*
	 * TODO: consider adding ACL (or documenting how) to prevent other
	 * users (on this or other systems) from reading it
	 */


	/* TODO: add sk_set_memalloc(inet) or similar */

	if (cfile)
		cfile->swapfile = true;
	/*
	 * TODO: Since file already open, we can't open with DENY_ALL here
	 * but we could add call to grab a byte range lock to prevent others
	 * from reading or writing the file
	 */

	sis->flags |= SWP_FS_OPS;
	return add_swap_extent(sis, 0, sis->max, 0);
}
3300 
/*
 * Undo cifs_swap_activate(): clear the swapfile flag on the open handle
 * when the file stops being used as swap space.
 */
static void cifs_swap_deactivate(struct file *file)
{
	struct cifsFileInfo *cfile = file->private_data;

	cifs_dbg(FYI, "swap deactivate\n");

	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */

	if (cfile)
		cfile->swapfile = false;

	/* do we need to unpin (or unlock) the file */
}
3314 
3315 /**
3316  * cifs_swap_rw - SMB3 address space operation for swap I/O
3317  * @iocb: target I/O control block
3318  * @iter: I/O buffer
3319  *
3320  * Perform IO to the swap-file.  This is much like direct IO.
3321  */
3322 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3323 {
3324 	ssize_t ret;
3325 
3326 	if (iov_iter_rw(iter) == READ)
3327 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3328 	else
3329 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3330 	if (ret < 0)
3331 		return ret;
3332 	return 0;
3333 }
3334 
/*
 * Address space operations for the normal case.  Page-cache I/O is
 * delegated to the netfs library; cifs adds only the swap hooks.
 */
const struct address_space_operations cifs_addr_ops = {
	.read_folio	= netfs_read_folio,
	.readahead	= netfs_readahead,
	.writepages	= netfs_writepages,
	.dirty_folio	= netfs_dirty_folio,
	.release_folio	= netfs_release_folio,
	.direct_IO	= noop_direct_IO,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
	/*
	 * TODO: investigate and if useful we could add an is_dirty_writeback
	 * helper if needed
	 */
	.swap_activate	= cifs_swap_activate,
	.swap_deactivate = cifs_swap_deactivate,
	.swap_rw = cifs_swap_rw,
};
3352 
3353 /*
3354  * cifs_readahead requires the server to support a buffer large enough to
3355  * contain the header plus one complete page of data.  Otherwise, we need
3356  * to leave cifs_readahead out of the address space operations.
3357  */
3358 const struct address_space_operations cifs_addr_ops_smallbuf = {
3359 	.read_folio	= netfs_read_folio,
3360 	.writepages	= netfs_writepages,
3361 	.dirty_folio	= netfs_dirty_folio,
3362 	.release_folio	= netfs_release_folio,
3363 	.invalidate_folio = netfs_invalidate_folio,
3364 	.migrate_folio	= filemap_migrate_folio,
3365 };
3366