xref: /linux/fs/smb/client/file.c (revision 7006433ca2de80e4aa7d11dceb3124335cff5a43)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/fs_struct.h>
13 #include <linux/filelock.h>
14 #include <linux/backing-dev.h>
15 #include <linux/stat.h>
16 #include <linux/fcntl.h>
17 #include <linux/pagemap.h>
18 #include <linux/pagevec.h>
19 #include <linux/writeback.h>
20 #include <linux/task_io_accounting_ops.h>
21 #include <linux/delay.h>
22 #include <linux/mount.h>
23 #include <linux/slab.h>
24 #include <linux/swap.h>
25 #include <linux/mm.h>
26 #include <asm/div64.h>
27 #include "cifsfs.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
cifs_prepare_write(struct netfs_io_subrequest * subreq)47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
53 	struct TCP_Server_Info *server;
54 	struct cifsFileInfo *open_file = req->cfile;
55 	struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
56 	size_t wsize = req->rreq.wsize;
57 	int rc;
58 
59 	if (!wdata->have_xid) {
60 		wdata->xid = get_xid();
61 		wdata->have_xid = true;
62 	}
63 
64 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
65 	wdata->server = server;
66 
67 	if (cifs_sb->ctx->wsize == 0)
68 		cifs_negotiate_wsize(server, cifs_sb->ctx,
69 				     tlink_tcon(req->cfile->tlink));
70 
71 retry:
72 	if (open_file->invalidHandle) {
73 		rc = cifs_reopen_file(open_file, false);
74 		if (rc < 0) {
75 			if (rc == -EAGAIN)
76 				goto retry;
77 			subreq->error = rc;
78 			return netfs_prepare_write_failed(subreq);
79 		}
80 	}
81 
82 	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
83 					   &wdata->credits);
84 	if (rc < 0) {
85 		subreq->error = rc;
86 		return netfs_prepare_write_failed(subreq);
87 	}
88 
89 	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
90 	wdata->credits.rreq_debug_index = subreq->debug_index;
91 	wdata->credits.in_flight_check = 1;
92 	trace_smb3_rw_credits(wdata->rreq->debug_id,
93 			      wdata->subreq.debug_index,
94 			      wdata->credits.value,
95 			      server->credits, server->in_flight,
96 			      wdata->credits.value,
97 			      cifs_trace_rw_credits_write_prepare);
98 
99 #ifdef CONFIG_CIFS_SMB_DIRECT
100 	if (server->smbd_conn) {
101 		const struct smbdirect_socket_parameters *sp =
102 			smbd_get_parameters(server->smbd_conn);
103 
104 		stream->sreq_max_segs = sp->max_frmr_depth;
105 	}
106 #endif
107 }
108 
109 /*
110  * Issue a subrequest to upload to the server.
111  */
cifs_issue_write(struct netfs_io_subrequest * subreq)112 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
113 {
114 	struct cifs_io_subrequest *wdata =
115 		container_of(subreq, struct cifs_io_subrequest, subreq);
116 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
117 	int rc;
118 
119 	if (cifs_forced_shutdown(sbi)) {
120 		rc = smb_EIO(smb_eio_trace_forced_shutdown);
121 		goto fail;
122 	}
123 
124 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
125 	if (rc)
126 		goto fail;
127 
128 	rc = -EAGAIN;
129 	if (wdata->req->cfile->invalidHandle)
130 		goto fail;
131 
132 	wdata->server->ops->async_writev(wdata);
133 out:
134 	return;
135 
136 fail:
137 	if (rc == -EAGAIN)
138 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
139 	else
140 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
141 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
142 	cifs_write_subrequest_terminated(wdata, rc);
143 	goto out;
144 }
145 
/*
 * netfs ->invalidate_cache hook: drop any locally cached copy of the
 * inode's data (flags = 0, i.e. a plain invalidation).
 */
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}
150 
151 /*
152  * Negotiate the size of a read operation on behalf of the netfs library.
153  */
cifs_prepare_read(struct netfs_io_subrequest * subreq)154 static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
155 {
156 	struct netfs_io_request *rreq = subreq->rreq;
157 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
158 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
159 	struct TCP_Server_Info *server;
160 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
161 	size_t size;
162 	int rc = 0;
163 
164 	if (!rdata->have_xid) {
165 		rdata->xid = get_xid();
166 		rdata->have_xid = true;
167 	}
168 
169 	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
170 	rdata->server = server;
171 
172 	if (cifs_sb->ctx->rsize == 0)
173 		cifs_negotiate_rsize(server, cifs_sb->ctx,
174 				     tlink_tcon(req->cfile->tlink));
175 
176 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
177 					   &size, &rdata->credits);
178 	if (rc)
179 		return rc;
180 
181 	rreq->io_streams[0].sreq_max_len = size;
182 
183 	rdata->credits.in_flight_check = 1;
184 	rdata->credits.rreq_debug_id = rreq->debug_id;
185 	rdata->credits.rreq_debug_index = subreq->debug_index;
186 
187 	trace_smb3_rw_credits(rdata->rreq->debug_id,
188 			      rdata->subreq.debug_index,
189 			      rdata->credits.value,
190 			      server->credits, server->in_flight, 0,
191 			      cifs_trace_rw_credits_read_submit);
192 
193 #ifdef CONFIG_CIFS_SMB_DIRECT
194 	if (server->smbd_conn) {
195 		const struct smbdirect_socket_parameters *sp =
196 			smbd_get_parameters(server->smbd_conn);
197 
198 		rreq->io_streams[0].sreq_max_segs = sp->max_frmr_depth;
199 	}
200 #endif
201 	return 0;
202 }
203 
204 /*
205  * Issue a read operation on behalf of the netfs helper functions.  We're asked
206  * to make a read of a certain size at a point in the file.  We are permitted
207  * to only read a portion of that, but as long as we read something, the netfs
208  * helper will call us again so that we can issue another read.
209  */
cifs_issue_read(struct netfs_io_subrequest * subreq)210 static void cifs_issue_read(struct netfs_io_subrequest *subreq)
211 {
212 	struct netfs_io_request *rreq = subreq->rreq;
213 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
214 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
215 	struct TCP_Server_Info *server = rdata->server;
216 	int rc = 0;
217 
218 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
219 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
220 		 subreq->transferred, subreq->len);
221 
222 	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
223 	if (rc)
224 		goto failed;
225 
226 	if (req->cfile->invalidHandle) {
227 		do {
228 			rc = cifs_reopen_file(req->cfile, true);
229 		} while (rc == -EAGAIN);
230 		if (rc)
231 			goto failed;
232 	}
233 
234 	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
235 	    subreq->rreq->origin != NETFS_DIO_READ)
236 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
237 
238 	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
239 	rc = rdata->server->ops->async_readv(rdata);
240 	if (rc)
241 		goto failed;
242 	return;
243 
244 failed:
245 	subreq->error = rc;
246 	netfs_read_subreq_terminated(subreq);
247 }
248 
249 /*
250  * Writeback calls this when it finds a folio that needs uploading.  This isn't
251  * called if writeback only has copy-to-cache to deal with.
252  */
cifs_begin_writeback(struct netfs_io_request * wreq)253 static void cifs_begin_writeback(struct netfs_io_request *wreq)
254 {
255 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
256 	int ret;
257 
258 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_ANY, &req->cfile);
259 	if (ret) {
260 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
261 		return;
262 	}
263 
264 	wreq->io_streams[0].avail = true;
265 }
266 
267 /*
268  * Initialise a request.
269  */
cifs_init_request(struct netfs_io_request * rreq,struct file * file)270 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
271 {
272 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
273 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode);
274 	struct cifsFileInfo *open_file = NULL;
275 
276 	rreq->rsize = cifs_sb->ctx->rsize;
277 	rreq->wsize = cifs_sb->ctx->wsize;
278 	req->pid = current->tgid; // Ummm...  This may be a workqueue
279 
280 	if (file) {
281 		open_file = file->private_data;
282 		rreq->netfs_priv = file->private_data;
283 		req->cfile = cifsFileInfo_get(open_file);
284 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_RWPIDFORWARD)
285 			req->pid = req->cfile->pid;
286 	} else if (rreq->origin != NETFS_WRITEBACK) {
287 		WARN_ON_ONCE(1);
288 		return smb_EIO1(smb_eio_trace_not_netfs_writeback, rreq->origin);
289 	}
290 
291 	return 0;
292 }
293 
294 /*
295  * Completion of a request operation.
296  */
cifs_rreq_done(struct netfs_io_request * rreq)297 static void cifs_rreq_done(struct netfs_io_request *rreq)
298 {
299 	struct timespec64 atime, mtime;
300 	struct inode *inode = rreq->inode;
301 
302 	/* we do not want atime to be less than mtime, it broke some apps */
303 	atime = inode_set_atime_to_ts(inode, current_time(inode));
304 	mtime = inode_get_mtime(inode);
305 	if (timespec64_compare(&atime, &mtime))
306 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
307 }
308 
cifs_free_request(struct netfs_io_request * rreq)309 static void cifs_free_request(struct netfs_io_request *rreq)
310 {
311 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
312 
313 	if (req->cfile)
314 		cifsFileInfo_put(req->cfile);
315 }
316 
cifs_free_subrequest(struct netfs_io_subrequest * subreq)317 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
318 {
319 	struct cifs_io_subrequest *rdata =
320 		container_of(subreq, struct cifs_io_subrequest, subreq);
321 	int rc = subreq->error;
322 
323 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
324 #ifdef CONFIG_CIFS_SMB_DIRECT
325 		if (rdata->mr) {
326 			smbd_deregister_mr(rdata->mr);
327 			rdata->mr = NULL;
328 		}
329 #endif
330 	}
331 
332 	if (rdata->credits.value != 0) {
333 		trace_smb3_rw_credits(rdata->rreq->debug_id,
334 				      rdata->subreq.debug_index,
335 				      rdata->credits.value,
336 				      rdata->server ? rdata->server->credits : 0,
337 				      rdata->server ? rdata->server->in_flight : 0,
338 				      -rdata->credits.value,
339 				      cifs_trace_rw_credits_free_subreq);
340 		if (rdata->server)
341 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
342 		else
343 			rdata->credits.value = 0;
344 	}
345 
346 	if (rdata->have_xid)
347 		free_xid(rdata->xid);
348 }
349 
/*
 * Hooks handed to the netfs library: the memory pools for (sub)requests plus
 * the request-lifetime and read/write preparation/issue callbacks defined
 * above.
 */
const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};
364 
365 /*
366  * Mark as invalid, all open files on tree connections since they
367  * were closed when session to server was lost.
368  */
369 void
cifs_mark_open_files_invalid(struct cifs_tcon * tcon)370 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
371 {
372 	struct cifsFileInfo *open_file = NULL;
373 	struct list_head *tmp;
374 	struct list_head *tmp1;
375 
376 	/* only send once per connect */
377 	spin_lock(&tcon->tc_lock);
378 	if (tcon->need_reconnect)
379 		tcon->status = TID_NEED_RECON;
380 
381 	if (tcon->status != TID_NEED_RECON) {
382 		spin_unlock(&tcon->tc_lock);
383 		return;
384 	}
385 	tcon->status = TID_IN_FILES_INVALIDATE;
386 	spin_unlock(&tcon->tc_lock);
387 
388 	/* list all files open on tree connection and mark them invalid */
389 	spin_lock(&tcon->open_file_lock);
390 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
391 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
392 		open_file->invalidHandle = true;
393 		open_file->oplock_break_cancelled = true;
394 	}
395 	spin_unlock(&tcon->open_file_lock);
396 
397 	invalidate_all_cached_dirs(tcon);
398 	spin_lock(&tcon->tc_lock);
399 	if (tcon->status == TID_IN_FILES_INVALIDATE)
400 		tcon->status = TID_NEED_TCON;
401 	spin_unlock(&tcon->tc_lock);
402 
403 	/*
404 	 * BB Add call to evict_inodes(sb) for all superblocks mounted
405 	 * to this tcon.
406 	 */
407 }
408 
cifs_convert_flags(unsigned int flags,int rdwr_for_fscache)409 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
410 {
411 	if ((flags & O_ACCMODE) == O_RDONLY)
412 		return GENERIC_READ;
413 	else if ((flags & O_ACCMODE) == O_WRONLY)
414 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
415 	else if ((flags & O_ACCMODE) == O_RDWR) {
416 		/* GENERIC_ALL is too much permission to request
417 		   can cause unnecessary access denied on create */
418 		/* return GENERIC_ALL; */
419 		return (GENERIC_READ | GENERIC_WRITE);
420 	}
421 
422 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
423 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
424 		FILE_READ_DATA);
425 }
426 
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/* Translate POSIX open flags into the SMB_O_* bits of the POSIX extensions. */
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	static const struct {
		unsigned int vfs_flag;
		u32 smb_flag;
	} flag_map[] = {
		{ O_TRUNC,     SMB_O_TRUNC },
		/* be safe and imply O_SYNC for O_DSYNC */
		{ O_DSYNC,     SMB_O_SYNC },
		{ O_DIRECTORY, SMB_O_DIRECTORY },
		{ O_NOFOLLOW,  SMB_O_NOFOLLOW },
		{ O_DIRECT,    SMB_O_DIRECT },
	};
	u32 posix_flags = 0;
	int i;

	/* Any access mode other than the three standard ones maps to 0. */
	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		posix_flags = SMB_O_RDONLY;
		break;
	case O_WRONLY:
		posix_flags = SMB_O_WRONLY;
		break;
	case O_RDWR:
		posix_flags = SMB_O_RDWR;
		break;
	}

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	for (i = 0; i < ARRAY_SIZE(flag_map); i++)
		if (flags & flag_map[i].vfs_flag)
			posix_flags |= flag_map[i].smb_flag;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
462 
cifs_get_disposition(unsigned int flags)463 static inline int cifs_get_disposition(unsigned int flags)
464 {
465 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
466 		return FILE_CREATE;
467 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
468 		return FILE_OVERWRITE_IF;
469 	else if ((flags & O_CREAT) == O_CREAT)
470 		return FILE_OPEN_IF;
471 	else if ((flags & O_TRUNC) == O_TRUNC)
472 		return FILE_OVERWRITE;
473 	else
474 		return FILE_OPEN;
475 }
476 
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Open a file via the legacy SMB1 POSIX extensions (CIFSPOSIXCreate) and,
 * when the caller asks for it, instantiate or refresh the in-core inode from
 * the returned FILE_UNIX_BASIC_INFO.
 *
 * @pinode may be NULL (caller doesn't want inode info); *@pinode may be NULL
 * on entry, in which case a new inode is looked up via cifs_iget().
 */
int cifs_posix_open(const char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc_obj(FILE_UNIX_BASIC_INFO);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* Apply the caller's umask before sending the mode to the server. */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server sent no usable metadata back. */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* Existing inode: drop stale pagecache, then refresh attrs. */
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
540 
/*
 * Open @full_path on the server using the NT-style open op, then fetch inode
 * metadata for it.  If fscache is active on a write-only open, extra read
 * access is requested first so partial writes can be filled in; on -EACCES
 * the open is retried with exactly the access the caller asked for.
 */
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	/* 0 = no widening; 1 = widened for fscache; 2 = fallback after -EACCES */
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);
	/* BB pass O_SYNC flag through on file attributes .. BB */
	create_options |= cifs_open_create_options(f_flags, create_options);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		/* Server refused the widened access; retry with the access
		 * the caller actually asked for. */
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	/* Opened write-only after all: the cache can't be filled around
	 * partial writes, so invalidate it. */
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	/* Metadata fetch failed: close the handle we just opened. */
	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}
629 
630 static bool
cifs_has_mand_locks(struct cifsInodeInfo * cinode)631 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
632 {
633 	struct cifs_fid_locks *cur;
634 	bool has_locks = false;
635 
636 	down_read(&cinode->lock_sem);
637 	list_for_each_entry(cur, &cinode->llist, llist) {
638 		if (!list_empty(&cur->locks)) {
639 			has_locks = true;
640 			break;
641 		}
642 	}
643 	up_read(&cinode->lock_sem);
644 	return has_locks;
645 }
646 
/*
 * Take @sem for writing by polling with a short sleep between attempts
 * rather than blocking in down_write().
 */
void
cifs_down_write(struct rw_semaphore *sem)
{
	for (;;) {
		if (down_write_trylock(sem))
			return;
		msleep(10);
	}
}
653 
654 static void cifsFileInfo_put_work(struct work_struct *work);
655 void serverclose_work(struct work_struct *work);
656 
/*
 * Allocate and initialise the per-open-file private data (cifsFileInfo) for
 * @file, link it onto the tcon and inode open-file lists, and resolve the
 * effective oplock level against any pending open.  Returns NULL on
 * allocation failure; on success the new cfile (refcount 1) is stored in
 * file->private_data.
 */
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc_obj(struct cifsFileInfo);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc_obj(struct cifs_fid_locks);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	/* Lock ordering: tcon->open_file_lock, then cinode->open_file_lock. */
	spin_lock(&tcon->open_file_lock);
	/* A lease break may have updated the pending open's oplock level. */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	spin_lock(&cinode->open_file_lock);
	/* Cleared here; set_fid() below may flip it (checked after unlock). */
	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
746 
/*
 * Take an extra reference on an open file's private data; the matching
 * release is cifsFileInfo_put().
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
755 
/*
 * Final teardown of a cifsFileInfo once its refcount has hit zero: discard
 * its brlock records, unlink and free its lock list, then drop the tlink
 * and dentry references and free the structure itself.
 */
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}
781 
cifsFileInfo_put_work(struct work_struct * work)782 static void cifsFileInfo_put_work(struct work_struct *work)
783 {
784 	struct cifsFileInfo *cifs_file = container_of(work,
785 			struct cifsFileInfo, put);
786 
787 	cifsFileInfo_put_final(cifs_file);
788 }
789 
serverclose_work(struct work_struct * work)790 void serverclose_work(struct work_struct *work)
791 {
792 	struct cifsFileInfo *cifs_file = container_of(work,
793 			struct cifsFileInfo, serverclose);
794 
795 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
796 
797 	struct TCP_Server_Info *server = tcon->ses->server;
798 	int rc = 0;
799 	int retries = 0;
800 	int MAX_RETRIES = 4;
801 
802 	do {
803 		if (server->ops->close_getattr)
804 			rc = server->ops->close_getattr(0, tcon, cifs_file);
805 		else if (server->ops->close)
806 			rc = server->ops->close(0, tcon, &cifs_file->fid);
807 
808 		if (rc == -EBUSY || rc == -EAGAIN) {
809 			retries++;
810 			msleep(250);
811 		}
812 	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
813 	);
814 
815 	if (retries == MAX_RETRIES)
816 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
817 
818 	if (cifs_file->offload)
819 		queue_work(fileinfo_put_wq, &cifs_file->put);
820 	else
821 		cifsFileInfo_put_final(cifs_file);
822 }
823 
824 /**
825  * cifsFileInfo_put - release a reference of file priv data
826  *
827  * Always potentially wait for oplock handler. See _cifsFileInfo_put().
828  *
829  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
830  */
cifsFileInfo_put(struct cifsFileInfo * cifs_file)831 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
832 {
833 	_cifsFileInfo_put(cifs_file, true, true);
834 }
835 
836 /**
837  * _cifsFileInfo_put - release a reference of file priv data
838  *
839  * This may involve closing the filehandle @cifs_file out on the
840  * server. Must be called without holding tcon->open_file_lock,
841  * cinode->open_file_lock and cifs_file->file_info_lock.
842  *
843  * If @wait_for_oplock_handler is true and we are releasing the last
844  * reference, wait for any running oplock break handler of the file
845  * and cancel any pending one.
846  *
847  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
848  * @wait_oplock_handler: must be false if called from oplock_break_handler
849  * @offload:	not offloaded on close and oplock breaks
850  *
851  */
_cifsFileInfo_put(struct cifsFileInfo * cifs_file,bool wait_oplock_handler,bool offload)852 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
853 		       bool wait_oplock_handler, bool offload)
854 {
855 	struct inode *inode = d_inode(cifs_file->dentry);
856 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
857 	struct TCP_Server_Info *server = tcon->ses->server;
858 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
859 	struct super_block *sb = inode->i_sb;
860 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
861 	struct cifs_fid fid = {};
862 	struct cifs_pending_open open;
863 	bool oplock_break_cancelled;
864 	bool serverclose_offloaded = false;
865 
866 	spin_lock(&tcon->open_file_lock);
867 	spin_lock(&cifsi->open_file_lock);
868 	spin_lock(&cifs_file->file_info_lock);
869 
870 	cifs_file->offload = offload;
871 	if (--cifs_file->count > 0) {
872 		spin_unlock(&cifs_file->file_info_lock);
873 		spin_unlock(&cifsi->open_file_lock);
874 		spin_unlock(&tcon->open_file_lock);
875 		return;
876 	}
877 	spin_unlock(&cifs_file->file_info_lock);
878 
879 	if (server->ops->get_lease_key)
880 		server->ops->get_lease_key(inode, &fid);
881 
882 	/* store open in pending opens to make sure we don't miss lease break */
883 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
884 
885 	/* remove it from the lists */
886 	list_del(&cifs_file->flist);
887 	list_del(&cifs_file->tlist);
888 	atomic_dec(&tcon->num_local_opens);
889 
890 	if (list_empty(&cifsi->openFileList)) {
891 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
892 			 d_inode(cifs_file->dentry));
893 		/*
894 		 * In strict cache mode we need invalidate mapping on the last
895 		 * close  because it may cause a error when we open this file
896 		 * again and get at least level II oplock.
897 		 */
898 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_STRICT_IO)
899 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
900 		cifs_set_oplock_level(cifsi, 0);
901 	}
902 
903 	spin_unlock(&cifsi->open_file_lock);
904 	spin_unlock(&tcon->open_file_lock);
905 
906 	oplock_break_cancelled = wait_oplock_handler ?
907 		cancel_work_sync(&cifs_file->oplock_break) : false;
908 
909 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
910 		struct TCP_Server_Info *server = tcon->ses->server;
911 		unsigned int xid;
912 		int rc = 0;
913 
914 		xid = get_xid();
915 		if (server->ops->close_getattr)
916 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
917 		else if (server->ops->close)
918 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
919 		_free_xid(xid);
920 
921 		if (rc == -EBUSY || rc == -EAGAIN) {
922 			// Server close failed, hence offloading it as an async op
923 			queue_work(serverclose_wq, &cifs_file->serverclose);
924 			serverclose_offloaded = true;
925 		}
926 	}
927 
928 	if (oplock_break_cancelled)
929 		cifs_done_oplock_break(cifsi);
930 
931 	cifs_del_pending_open(&open);
932 
933 	// if serverclose has been offloaded to wq (on failure), it will
934 	// handle offloading put as well. If serverclose not offloaded,
935 	// we need to handle offloading put here.
936 	if (!serverclose_offloaded) {
937 		if (offload)
938 			queue_work(fileinfo_put_wq, &cifs_file->put);
939 		else
940 			cifsFileInfo_put_final(cifs_file);
941 	}
942 }
943 
/*
 * Ask the server to flush its cached data for @inode.
 *
 * If @cfile is a writable handle, flush through it directly; otherwise
 * borrow any writable handle open on the inode.  Returns 0 without
 * contacting the server when the mount disables server flushes
 * (CIFS_MOUNT_NOSSYNC) or when no writable handle exists (-EBADF from
 * the lookup is treated as "nothing to flush").
 */
int cifs_file_flush(const unsigned int xid, struct inode *inode,
		    struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifs_tcon *tcon;
	int rc;

	if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)
		return 0;

	/* fast path: the caller already holds a writable handle */
	if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) {
		tcon = tlink_tcon(cfile->tlink);
		return tcon->ses->server->ops->flush(xid, tcon,
						     &cfile->fid);
	}
	rc = cifs_get_writable_file(CIFS_I(inode), FIND_ANY, &cfile);
	if (!rc) {
		tcon = tlink_tcon(cfile->tlink);
		rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid);
		cifsFileInfo_put(cfile);
	} else if (rc == -EBADF) {
		/* no writable handle open - nothing the server can flush */
		rc = 0;
	}
	return rc;
}
969 
/*
 * Truncate a file to zero length (open with O_TRUNC).
 *
 * Writes back and flushes cached data first, then asks the server to set
 * the file size to 0 through a writable handle for the caller's fsuid
 * (if one exists), and finally shrinks the local/netfs view of the file.
 */
static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry));
	struct inode *inode = d_inode(dentry);
	struct cifsFileInfo *cfile = NULL;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	int rc;

	/* write dirty pages back; an interrupted wait restarts the syscall */
	rc = filemap_write_and_wait(inode->i_mapping);
	if (is_interrupt_error(rc))
		return -ERESTARTSYS;
	mapping_set_error(inode->i_mapping, rc);

	cfile = find_writable_file(cinode, FIND_FSUID_ONLY);
	rc = cifs_file_flush(xid, inode, cfile);
	if (!rc) {
		/* only shrink on the server if we found a writable handle */
		if (cfile) {
			tcon = tlink_tcon(cfile->tlink);
			server = tcon->ses->server;
			rc = server->ops->set_file_size(xid, tcon,
							cfile, 0, false);
		}
		if (!rc) {
			/* update the cached size and i_size to zero */
			netfs_resize_file(&cinode->netfs, 0, true);
			cifs_setsize(inode, 0);
		}
	}
	if (cfile)
		cifsFileInfo_put(cfile);
	return rc;
}
1002 
/*
 * ->open() for regular files.
 *
 * Either reuses a cached handle for this inode whose server close was
 * deferred, or opens the file on the server (POSIX-style open first when
 * the legacy unix extensions allow it, NT-style open otherwise) and
 * attaches a new cifsFileInfo to @file->private_data.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifs_open_info_data data = {};
	struct cifsFileInfo *cfile = NULL;
	struct TCP_Server_Info *server;
	struct cifs_pending_open open;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	const char *full_path;
	unsigned int sbflags;
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	void *page;

	xid = get_xid();

	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return smb_EIO(smb_eio_trace_forced_shutdown);
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* O_DIRECT on a strict-cache mount uses the direct file_operations */
	sbflags = cifs_sb_flags(cifs_sb);
	if ((file->f_flags & O_DIRECT) && (sbflags & CIFS_MOUNT_STRICT_IO)) {
		if (sbflags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* truncate before the open proper so the server size is consistent */
	if (file->f_flags & O_TRUNC) {
		rc = cifs_do_truncate(xid, file_dentry(file));
		if (rc)
			goto out;
	}

	/* Get the cached handle as SMB2 close is deferred */
	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
		rc = __cifs_get_writable_file(CIFS_I(inode),
					      FIND_FSUID_ONLY |
					      FIND_NO_PENDING_DELETE |
					      FIND_OPEN_FLAGS,
					      file->f_flags, &cfile);
	} else {
		cfile = __find_readable_file(CIFS_I(inode),
					     FIND_NO_PENDING_DELETE |
					     FIND_OPEN_FLAGS,
					     file->f_flags);
		rc = cfile ? 0 : -ENOENT;
	}
	if (rc == 0) {
		/* reuse the deferred-close handle; cancel its deferred close */
		file->private_data = cfile;
		spin_lock(&CIFS_I(inode)->deferred_lock);
		cifs_del_deferred_close(cfile);
		spin_unlock(&CIFS_I(inode)->deferred_lock);
		goto use_cache;
	}
	/* hard link on the deferred close file */
	rc = cifs_get_hardlink_path(tcon, inode, file);
	if (rc)
		cifs_close_deferred_file(CIFS_I(inode));

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* register a pending open so a racing lease break is not missed */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		/* could not track the handle locally - close it on the server */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	/* an O_DIRECT open that can write invalidates cached file data */
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}
1181 
1182 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1183 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1184 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1185 
1186 /*
1187  * Try to reacquire byte range locks that were released when session
1188  * to server was lost.
1189  */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* NOTE(review): SINGLE_DEPTH_NESTING suggests lock_sem may already be
	 * held for another file on this path - confirm against callers */
	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	/* push POSIX-style locks when unix extensions support fcntl locks
	 * and posix brlocks were not disabled at mount time */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
1219 
/*
 * Reopen @cfile after its server handle was invalidated (e.g. after a
 * reconnect or an expired durable handle).  When @can_flush is true,
 * dirty pages are written back and the inode info is refreshed from the
 * server after a successful reopen; cached byte-range locks are pushed
 * back to the server when this is a reconnect.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else already reopened the handle */
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
	create_options |= cifs_open_create_options(cfile->f_flags,
						   create_options);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		/* upgraded RDWR access was refused - retry with the original */
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		/* refresh inode metadata from the server after the reopen */
		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	scoped_guard(spinlock, &cinode->open_file_lock)
		server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
1403 
smb2_deferred_work_close(struct work_struct * work)1404 void smb2_deferred_work_close(struct work_struct *work)
1405 {
1406 	struct cifsFileInfo *cfile = container_of(work,
1407 			struct cifsFileInfo, deferred.work);
1408 
1409 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1410 	cifs_del_deferred_close(cfile);
1411 	cfile->deferred_close_scheduled = false;
1412 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1413 	_cifsFileInfo_put(cfile, true, false);
1414 }
1415 
/*
 * Return true when a server close may be deferred: deferred closes must
 * be enabled (ctx->closetimeo != 0), the inode must hold a granted lease
 * caching at least read+handle (RH or RHW), the deferred-close record
 * @dclose must have been allocated, and the inode must not be flagged
 * CIFS_INO_CLOSE_ON_LOCK.
 */
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	unsigned int oplock = READ_ONCE(cinode->oplock);

	return cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
		(oplock == CIFS_CACHE_RHW_FLG || oplock == CIFS_CACHE_RH_FLG) &&
		!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags);

}
1428 
/*
 * ->release() for regular files.  When the handle holds a suitable
 * caching lease the server close is deferred for ctx->closetimeo via a
 * delayed work item (see smb2_deferred_work_close()); otherwise the
 * final reference is dropped immediately.
 */
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		/* smb2_can_defer_close() rejects a NULL dclose, so an
		 * allocation failure simply forces an immediate close */
		dclose = kmalloc_obj(struct cifs_deferred_close);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, Increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				/* the queued work now owns our reference */
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			/* close now; dclose (possibly NULL) was never queued */
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
1478 
/*
 * Reopen all invalidated persistent handles on @tcon after a reconnect.
 * Candidates are collected under open_file_lock (taking a reference on
 * each) and reopened outside the lock; any reopen failure re-arms
 * tcon->need_reopen_files so a later pass can retry.
 */
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	LIST_HEAD(tmp_list);

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");

	/* list all files open on tree connection, reopen resilient handles  */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}
1509 
/*
 * ->release() for directories: close the directory search handle on the
 * server (when still needed), free any buffered search results, and tear
 * down the private data attached by opendir.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		/* mark invalid before dropping the lock, then close on server */
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	/* release any network buffer still holding search results */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
1560 
1561 static struct cifsLockInfo *
cifs_lock_init(__u64 offset,__u64 length,__u8 type,__u16 flags)1562 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1563 {
1564 	struct cifsLockInfo *lock =
1565 		kmalloc_obj(struct cifsLockInfo);
1566 	if (!lock)
1567 		return lock;
1568 	lock->offset = offset;
1569 	lock->length = length;
1570 	lock->type = type;
1571 	lock->pid = current->tgid;
1572 	lock->flags = flags;
1573 	INIT_LIST_HEAD(&lock->blist);
1574 	init_waitqueue_head(&lock->block_q);
1575 	return lock;
1576 }
1577 
1578 void
cifs_del_lock_waiters(struct cifsLockInfo * lock)1579 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1580 {
1581 	struct cifsLockInfo *li, *tmp;
1582 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1583 		list_del_init(&li->blist);
1584 		wake_up(&li->block_q);
1585 	}
1586 }
1587 
1588 #define CIFS_LOCK_OP	0
1589 #define CIFS_READ_OP	1
1590 #define CIFS_WRITE_OP	2
1591 
1592 /* @rw_check : 0 - no op, 1 - read, 2 - write */
/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* non-overlapping ranges never conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		/* for read/write checks, our own locks (same owner, same
		 * fid) don't conflict ... */
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* a requested shared lock coexists with our own lock on the
		 * same fid and with any existing lock of identical type */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		/* OFD locks taken through the same fid don't conflict */
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
1628 
1629 bool
cifs_find_lock_conflict(struct cifsFileInfo * cfile,__u64 offset,__u64 length,__u8 type,__u16 flags,struct cifsLockInfo ** conf_lock,int rw_check)1630 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1631 			__u8 type, __u16 flags,
1632 			struct cifsLockInfo **conf_lock, int rw_check)
1633 {
1634 	bool rc = false;
1635 	struct cifs_fid_locks *cur;
1636 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1637 
1638 	list_for_each_entry(cur, &cinode->llist, llist) {
1639 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1640 						 flags, cfile, conf_lock,
1641 						 rw_check);
1642 		if (rc)
1643 			break;
1644 	}
1645 
1646 	return rc;
1647 }
1648 
1649 /*
1650  * Check if there is another lock that prevents us to set the lock (mandatory
1651  * style). If such a lock exists, update the flock structure with its
1652  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1653  * or leave it the same if we can't. Returns 0 if we don't need to request to
1654  * the server or 1 otherwise.
1655  */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock's range/owner/type in @flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;		/* not caching locks - must ask the server */
	else
		flock->c.flc_type = F_UNLCK;	/* no local conflict */

	up_read(&cinode->lock_sem);
	return rc;
}
1687 
/* Append @lock to @cfile's per-fid lock list under the inode's lock_sem. */
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
1696 
1697 /*
1698  * Set the byte-range lock (mandatory style). Returns:
1699  * 1) 0, if we set the lock and don't need to request to the server;
1700  * 2) 1, if no locks prevent us but we need to request to the server;
1701  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1702  */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching brlocks - done without the server */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue on the conflicting lock and sleep until we are
		 * detached from its blist (see cifs_del_lock_waiters()),
		 * then retry the whole check.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted - remove ourselves from the blocked list */
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1744 
1745 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1746 /*
1747  * Check if there is another lock that prevents us to set the lock (posix
1748  * style). If such a lock exists, update the flock structure with its
1749  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1750  * or leave it the same if we can't. Returns 0 if we don't need to request to
1751  * the server or 1 otherwise.
1752  */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->c.flc_type;

	/* only FL_POSIX locks are handled here; anything else goes remote */
	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	/* no local conflict but not caching - restore the requested type
	 * and let the caller ask the server */
	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
		flock->c.flc_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
1774 
1775 /*
1776  * Set the byte-range lock (posix style). Returns:
1777  * 1) <0, if the error occurs while setting the lock;
1778  * 2) 0, if we set the lock and don't need to request to the server;
1779  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1780  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1781  */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = FILE_LOCK_DEFERRED + 1;

	/* non-POSIX locks must always be sent to the server */
	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return rc;

	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* not caching - the server must see this lock */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	return rc;
}
1801 
/*
 * Send all cached byte-range locks for @cfile to the server in batched
 * LOCKING_ANDX requests.  Exclusive and shared locks are sent in two
 * separate passes since each request carries a single lock type.
 * Returns the last non-zero status if any batch fails.
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* how many lock ranges fit in one request buffer */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc_objs(LOCKING_ANDX_RANGE, max_num);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	/* one pass per lock type: exclusive first, then shared */
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* buffer full - flush this batch */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		/* send any remaining partial batch */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1878 
1879 static __u32
hash_lockowner(fl_owner_t owner)1880 hash_lockowner(fl_owner_t owner)
1881 {
1882 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1883 }
1884 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1885 
/*
 * A POSIX byte-range lock queued for transmission to the server by
 * cifs_push_posix_locks().
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the local locks_to_send list */
	__u64 offset;		/* start offset of the locked range */
	__u64 length;		/* length of the locked range */
	__u32 pid;		/* hashed lock owner (see hash_lockowner()) */
	__u16 netfid;		/* network file handle the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1894 
1895 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1896 static int
cifs_push_posix_locks(struct cifsFileInfo * cfile)1897 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1898 {
1899 	struct inode *inode = d_inode(cfile->dentry);
1900 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1901 	struct file_lock *flock;
1902 	struct file_lock_context *flctx = locks_inode_context(inode);
1903 	unsigned int count = 0, i;
1904 	int rc = 0, xid, type;
1905 	struct list_head locks_to_send, *el;
1906 	struct lock_to_push *lck, *tmp;
1907 	__u64 length;
1908 
1909 	xid = get_xid();
1910 
1911 	if (!flctx)
1912 		goto out;
1913 
1914 	spin_lock(&flctx->flc_lock);
1915 	list_for_each(el, &flctx->flc_posix) {
1916 		count++;
1917 	}
1918 	spin_unlock(&flctx->flc_lock);
1919 
1920 	INIT_LIST_HEAD(&locks_to_send);
1921 
1922 	/*
1923 	 * Allocating count locks is enough because no FL_POSIX locks can be
1924 	 * added to the list while we are holding cinode->lock_sem that
1925 	 * protects locking operations of this inode.
1926 	 */
1927 	for (i = 0; i < count; i++) {
1928 		lck = kmalloc_obj(struct lock_to_push);
1929 		if (!lck) {
1930 			rc = -ENOMEM;
1931 			goto err_out;
1932 		}
1933 		list_add_tail(&lck->llist, &locks_to_send);
1934 	}
1935 
1936 	el = locks_to_send.next;
1937 	spin_lock(&flctx->flc_lock);
1938 	for_each_file_lock(flock, &flctx->flc_posix) {
1939 		unsigned char ftype = flock->c.flc_type;
1940 
1941 		if (el == &locks_to_send) {
1942 			/*
1943 			 * The list ended. We don't have enough allocated
1944 			 * structures - something is really wrong.
1945 			 */
1946 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1947 			break;
1948 		}
1949 		length = cifs_flock_len(flock);
1950 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1951 			type = CIFS_RDLCK;
1952 		else
1953 			type = CIFS_WRLCK;
1954 		lck = list_entry(el, struct lock_to_push, llist);
1955 		lck->pid = hash_lockowner(flock->c.flc_owner);
1956 		lck->netfid = cfile->fid.netfid;
1957 		lck->length = length;
1958 		lck->type = type;
1959 		lck->offset = flock->fl_start;
1960 	}
1961 	spin_unlock(&flctx->flc_lock);
1962 
1963 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1964 		int stored_rc;
1965 
1966 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1967 					     lck->offset, lck->length, NULL,
1968 					     lck->type, 0);
1969 		if (stored_rc)
1970 			rc = stored_rc;
1971 		list_del(&lck->llist);
1972 		kfree(lck);
1973 	}
1974 
1975 out:
1976 	free_xid(xid);
1977 	return rc;
1978 err_out:
1979 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1980 		list_del(&lck->llist);
1981 		kfree(lck);
1982 	}
1983 	goto out;
1984 }
1985 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1986 
1987 static int
cifs_push_locks(struct cifsFileInfo * cfile)1988 cifs_push_locks(struct cifsFileInfo *cfile)
1989 {
1990 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1991 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1992 	int rc = 0;
1993 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1994 	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
1995 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1996 
1997 	/* we are going to update can_cache_brlcks here - need a write access */
1998 	cifs_down_write(&cinode->lock_sem);
1999 	if (!cinode->can_cache_brlcks) {
2000 		up_write(&cinode->lock_sem);
2001 		return rc;
2002 	}
2003 
2004 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2005 	if (cap_unix(tcon->ses) &&
2006 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2007 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2008 		rc = cifs_push_posix_locks(cfile);
2009 	else
2010 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2011 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
2012 
2013 	cinode->can_cache_brlcks = false;
2014 	up_write(&cinode->lock_sem);
2015 	return rc;
2016 }
2017 
2018 static void
cifs_read_flock(struct file_lock * flock,__u32 * type,int * lock,int * unlock,bool * wait_flag,struct TCP_Server_Info * server)2019 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
2020 		bool *wait_flag, struct TCP_Server_Info *server)
2021 {
2022 	if (flock->c.flc_flags & FL_POSIX)
2023 		cifs_dbg(FYI, "Posix\n");
2024 	if (flock->c.flc_flags & FL_FLOCK)
2025 		cifs_dbg(FYI, "Flock\n");
2026 	if (flock->c.flc_flags & FL_SLEEP) {
2027 		cifs_dbg(FYI, "Blocking lock\n");
2028 		*wait_flag = true;
2029 	}
2030 	if (flock->c.flc_flags & FL_ACCESS)
2031 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
2032 	if (flock->c.flc_flags & FL_LEASE)
2033 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
2034 	if (flock->c.flc_flags &
2035 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
2036 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
2037 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
2038 		         flock->c.flc_flags);
2039 
2040 	*type = server->vals->large_lock_type;
2041 	if (lock_is_write(flock)) {
2042 		cifs_dbg(FYI, "F_WRLCK\n");
2043 		*type |= server->vals->exclusive_lock_type;
2044 		*lock = 1;
2045 	} else if (lock_is_unlock(flock)) {
2046 		cifs_dbg(FYI, "F_UNLCK\n");
2047 		*type |= server->vals->unlock_lock_type;
2048 		*unlock = 1;
2049 		/* Check if unlock includes more than one lock range */
2050 	} else if (lock_is_read(flock)) {
2051 		cifs_dbg(FYI, "F_RDLCK\n");
2052 		*type |= server->vals->shared_lock_type;
2053 		*lock = 1;
2054 	} else if (flock->c.flc_type == F_EXLCK) {
2055 		cifs_dbg(FYI, "F_EXLCK\n");
2056 		*type |= server->vals->exclusive_lock_type;
2057 		*lock = 1;
2058 	} else if (flock->c.flc_type == F_SHLCK) {
2059 		cifs_dbg(FYI, "F_SHLCK\n");
2060 		*type |= server->vals->shared_lock_type;
2061 		*lock = 1;
2062 	} else
2063 		cifs_dbg(FYI, "Unknown type of lock\n");
2064 }
2065 
/*
 * Handle an F_GETLK-style query: determine whether the lock described
 * by @flock could be granted.  For POSIX-capable servers this is a
 * direct server query; for mandatory locking we probe by briefly taking
 * and releasing the lock on the server.  The answer is reported by
 * rewriting flock->c.flc_type (F_UNLCK means the range is free).
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* A local conflict answers the query without a round trip. */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* Check the locally cached mandatory locks first. */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* Probe lock succeeded: range is free; release it again. */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->c.flc_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* Even a shared probe failed - report an exclusive holder. */
		flock->c.flc_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* Exclusive probe failed; retry shared to tell read from write. */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->c.flc_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->c.flc_type = F_WRLCK;

	return 0;
}
2136 
2137 void
cifs_move_llist(struct list_head * source,struct list_head * dest)2138 cifs_move_llist(struct list_head *source, struct list_head *dest)
2139 {
2140 	struct list_head *li, *tmp;
2141 	list_for_each_safe(li, tmp, source)
2142 		list_move(li, dest);
2143 }
2144 
2145 int
cifs_get_hardlink_path(struct cifs_tcon * tcon,struct inode * inode,struct file * file)2146 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2147 				struct file *file)
2148 {
2149 	struct cifsFileInfo *open_file = NULL;
2150 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2151 	int rc = 0;
2152 
2153 	spin_lock(&tcon->open_file_lock);
2154 	spin_lock(&cinode->open_file_lock);
2155 
2156 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2157 		if (file->f_flags == open_file->f_flags) {
2158 			rc = -EINVAL;
2159 			break;
2160 		}
2161 	}
2162 
2163 	spin_unlock(&cinode->open_file_lock);
2164 	spin_unlock(&tcon->open_file_lock);
2165 	return rc;
2166 }
2167 
2168 void
cifs_free_llist(struct list_head * llist)2169 cifs_free_llist(struct list_head *llist)
2170 {
2171 	struct cifsLockInfo *li, *tmp;
2172 	list_for_each_entry_safe(li, tmp, llist, llist) {
2173 		cifs_del_lock_waiters(li);
2174 		list_del(&li->llist);
2175 		kfree(li);
2176 	}
2177 }
2178 
2179 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Remove all cached locks of @cfile that fall entirely inside the range
 * described by @flock and, unless the inode is still caching brlocks
 * locally, send the corresponding unlock batches to the server.  Locks
 * are parked on a temporary list while in flight so they can be
 * re-queued if the server rejects the unlock.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	/* One pass per lock flavor: exclusive, then shared. */
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = cifs_flock_len(flock);
	LIST_HEAD(tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* Number of lock ranges that fit into one request buffer. */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc_objs(LOCKING_ANDX_RANGE, max_num);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* Skip locks not fully contained in the range. */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* Buffer full - send this unlock batch now. */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		/* Send any remaining partial batch for this lock type. */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
2290 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2291 
/*
 * Apply (or remove) the byte-range lock described by @flock.  POSIX
 * locks go straight to the server when supported; otherwise a mandatory
 * lock is cached locally and, unless still caching, sent to the server.
 * On success the lock is also recorded with the VFS for FL_POSIX /
 * FL_FLOCK requests.
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (rc <= FILE_LOCK_DEFERRED)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->c.flc_flags);
		if (!lock)
			return -ENOMEM;

		/* rc == 0: no conflict but must ask the server; rc == 1 is
		 * handled inside; rc < 0: error or would-block conflict. */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			cifs_reset_oplock(CIFS_I(inode));
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* Server accepted the lock - record it locally. */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->c.flc_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
2386 
cifs_flock(struct file * file,int cmd,struct file_lock * fl)2387 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2388 {
2389 	int rc, xid;
2390 	int lock = 0, unlock = 0;
2391 	bool wait_flag = false;
2392 	bool posix_lck = false;
2393 	struct cifs_sb_info *cifs_sb;
2394 	struct cifs_tcon *tcon;
2395 	struct cifsFileInfo *cfile;
2396 	__u32 type;
2397 
2398 	xid = get_xid();
2399 
2400 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2401 		rc = -ENOLCK;
2402 		free_xid(xid);
2403 		return rc;
2404 	}
2405 
2406 	cfile = (struct cifsFileInfo *)file->private_data;
2407 	tcon = tlink_tcon(cfile->tlink);
2408 
2409 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2410 			tcon->ses->server);
2411 	cifs_sb = CIFS_SB(file);
2412 
2413 	if (cap_unix(tcon->ses) &&
2414 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2415 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2416 		posix_lck = true;
2417 
2418 	if (!lock && !unlock) {
2419 		/*
2420 		 * if no lock or unlock then nothing to do since we do not
2421 		 * know what it is
2422 		 */
2423 		rc = -EOPNOTSUPP;
2424 		free_xid(xid);
2425 		return rc;
2426 	}
2427 
2428 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2429 			xid);
2430 	free_xid(xid);
2431 	return rc;
2432 
2433 
2434 }
2435 
cifs_lock(struct file * file,int cmd,struct file_lock * flock)2436 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2437 {
2438 	struct cifs_sb_info *cifs_sb = CIFS_SB(file);
2439 	struct cifsFileInfo *cfile;
2440 	int lock = 0, unlock = 0;
2441 	bool wait_flag = false;
2442 	bool posix_lck = false;
2443 	struct cifs_tcon *tcon;
2444 	__u32 type;
2445 	int rc, xid;
2446 
2447 	rc = -EACCES;
2448 	xid = get_xid();
2449 
2450 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2451 		 flock->c.flc_flags, flock->c.flc_type,
2452 		 (long long)flock->fl_start,
2453 		 (long long)flock->fl_end);
2454 
2455 	cfile = (struct cifsFileInfo *)file->private_data;
2456 	tcon = tlink_tcon(cfile->tlink);
2457 
2458 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2459 			tcon->ses->server);
2460 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2461 
2462 	if (cap_unix(tcon->ses) &&
2463 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2464 	    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
2465 		posix_lck = true;
2466 	/*
2467 	 * BB add code here to normalize offset and length to account for
2468 	 * negative length which we can not accept over the wire.
2469 	 */
2470 	if (IS_GETLK(cmd)) {
2471 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2472 		free_xid(xid);
2473 		return rc;
2474 	}
2475 
2476 	if (!lock && !unlock) {
2477 		/*
2478 		 * if no lock or unlock then nothing to do since we do not
2479 		 * know what it is
2480 		 */
2481 		free_xid(xid);
2482 		return -EOPNOTSUPP;
2483 	}
2484 
2485 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2486 			xid);
2487 	free_xid(xid);
2488 	return rc;
2489 }
2490 
/*
 * Completion handler for a write subrequest.  On success, advance the
 * netfs inode's cached zero_point and remote size to cover the region
 * just written, then report the result to netfslib.
 */
void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
{
	struct netfs_io_request *wreq = wdata->rreq;
	struct netfs_inode *ictx = netfs_inode(wreq->inode);
	loff_t wrend;	/* file offset just past the bytes written */

	if (result > 0) {
		wrend = wdata->subreq.start + wdata->subreq.transferred + result;

		/* Unbuffered/DIO writes bypass the pagecache, so push the
		 * zero_point out past the newly written data. */
		if (wrend > ictx->zero_point &&
		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
		     wdata->rreq->origin == NETFS_DIO_WRITE))
			ictx->zero_point = wrend;
		if (wrend > ictx->remote_i_size)
			netfs_resize_file(ictx, wrend, true);
	}

	netfs_write_subrequest_terminated(&wdata->subreq, result);
}
2510 
open_flags_match(struct cifsInodeInfo * cinode,unsigned int oflags,unsigned int cflags)2511 static bool open_flags_match(struct cifsInodeInfo *cinode,
2512 			     unsigned int oflags, unsigned int cflags)
2513 {
2514 	struct inode *inode = &cinode->netfs.inode;
2515 	int crw = 0, orw = 0;
2516 
2517 	oflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
2518 	cflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
2519 
2520 	if (cifs_fscache_enabled(inode)) {
2521 		if (OPEN_FMODE(cflags) & FMODE_WRITE)
2522 			crw = 1;
2523 		if (OPEN_FMODE(oflags) & FMODE_WRITE)
2524 			orw = 1;
2525 	}
2526 	if (cifs_convert_flags(oflags, orw) != cifs_convert_flags(cflags, crw))
2527 		return false;
2528 
2529 	return (oflags & (O_SYNC | O_DIRECT)) == (cflags & (O_SYNC | O_DIRECT));
2530 }
2531 
__find_readable_file(struct cifsInodeInfo * cifs_inode,unsigned int find_flags,unsigned int open_flags)2532 struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode,
2533 					  unsigned int find_flags,
2534 					  unsigned int open_flags)
2535 {
2536 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode);
2537 	bool fsuid_only = find_flags & FIND_FSUID_ONLY;
2538 	struct cifsFileInfo *open_file = NULL;
2539 
2540 	/* only filter by fsuid on multiuser mounts */
2541 	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
2542 		fsuid_only = false;
2543 
2544 	spin_lock(&cifs_inode->open_file_lock);
2545 	/* we could simply get the first_list_entry since write-only entries
2546 	   are always at the end of the list but since the first entry might
2547 	   have a close pending, we go through the whole list */
2548 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2549 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2550 			continue;
2551 		if ((find_flags & FIND_NO_PENDING_DELETE) &&
2552 		    open_file->status_file_deleted)
2553 			continue;
2554 		if ((find_flags & FIND_OPEN_FLAGS) &&
2555 		    !open_flags_match(cifs_inode, open_flags,
2556 				      open_file->f_flags))
2557 			continue;
2558 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2559 			if ((!open_file->invalidHandle)) {
2560 				/* found a good file */
2561 				/* lock it so it will not be closed on us */
2562 				cifsFileInfo_get(open_file);
2563 				spin_unlock(&cifs_inode->open_file_lock);
2564 				return open_file;
2565 			} /* else might as well continue, and look for
2566 			     another, or simply have the caller reopen it
2567 			     again rather than trying to fix this handle */
2568 		} else /* write only file */
2569 			break; /* write only files are last so must be done */
2570 	}
2571 	spin_unlock(&cifs_inode->open_file_lock);
2572 	return NULL;
2573 }
2574 
/* Return -EBADF if no handle is found and general rc otherwise */
int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
			     unsigned int find_flags, unsigned int open_flags,
			     struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc = -EBADF;
	unsigned int refind = 0;	/* reopen attempts, capped at MAX_REOPEN_ATT */
	bool fsuid_only = find_flags & FIND_FSUID_ONLY;
	bool with_delete = find_flags & FIND_WITH_DELETE;
	*ret_file = NULL;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen but we had reports of on oops (due
	 * to it being zero) during stress testcases so we need to check for it
	 */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return rc;
	}

	cifs_sb = CIFS_SB(cifs_inode);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return rc;
	}
	/*
	 * First pass only considers handles owned by the current thread
	 * group; a second pass (any_available) accepts any owner.
	 */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (with_delete && !(open_file->fid.access & DELETE))
			continue;
		if ((find_flags & FIND_NO_PENDING_DELETE) &&
		    open_file->status_file_deleted)
			continue;
		if ((find_flags & FIND_OPEN_FLAGS) &&
		    !open_flags_match(cifs_inode, open_flags,
				      open_file->f_flags))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				*ret_file = open_file;
				return 0;
			} else {
				/* Remember the first stale candidate for a
				 * reopen attempt if nothing valid turns up. */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		/* Try to revive the stale handle by reopening it. */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc) {
			*ret_file = inv_file;
			return 0;
		}

		/* Reopen failed: demote this handle to the list tail and
		 * retry the scan from scratch (bounded by MAX_REOPEN_ATT). */
		spin_lock(&cifs_inode->open_file_lock);
		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
		spin_unlock(&cifs_inode->open_file_lock);
		cifsFileInfo_put(inv_file);
		++refind;
		inv_file = NULL;
		spin_lock(&cifs_inode->open_file_lock);
		goto refind_writable;
	}

	return rc;
}
2672 
2673 struct cifsFileInfo *
find_writable_file(struct cifsInodeInfo * cifs_inode,int flags)2674 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2675 {
2676 	struct cifsFileInfo *cfile;
2677 	int rc;
2678 
2679 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2680 	if (rc)
2681 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2682 
2683 	return cfile;
2684 }
2685 
2686 int
cifs_get_writable_path(struct cifs_tcon * tcon,const char * name,int flags,struct cifsFileInfo ** ret_file)2687 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2688 		       int flags,
2689 		       struct cifsFileInfo **ret_file)
2690 {
2691 	struct cifsFileInfo *cfile;
2692 	void *page = alloc_dentry_path();
2693 
2694 	*ret_file = NULL;
2695 
2696 	spin_lock(&tcon->open_file_lock);
2697 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2698 		struct cifsInodeInfo *cinode;
2699 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2700 		if (IS_ERR(full_path)) {
2701 			spin_unlock(&tcon->open_file_lock);
2702 			free_dentry_path(page);
2703 			return PTR_ERR(full_path);
2704 		}
2705 		if (strcmp(full_path, name))
2706 			continue;
2707 
2708 		cinode = CIFS_I(d_inode(cfile->dentry));
2709 		spin_unlock(&tcon->open_file_lock);
2710 		free_dentry_path(page);
2711 		return cifs_get_writable_file(cinode, flags, ret_file);
2712 	}
2713 
2714 	spin_unlock(&tcon->open_file_lock);
2715 	free_dentry_path(page);
2716 	return -ENOENT;
2717 }
2718 
2719 int
cifs_get_readable_path(struct cifs_tcon * tcon,const char * name,struct cifsFileInfo ** ret_file)2720 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2721 		       struct cifsFileInfo **ret_file)
2722 {
2723 	struct cifsFileInfo *cfile;
2724 	void *page = alloc_dentry_path();
2725 
2726 	*ret_file = NULL;
2727 
2728 	spin_lock(&tcon->open_file_lock);
2729 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2730 		struct cifsInodeInfo *cinode;
2731 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2732 		if (IS_ERR(full_path)) {
2733 			spin_unlock(&tcon->open_file_lock);
2734 			free_dentry_path(page);
2735 			return PTR_ERR(full_path);
2736 		}
2737 		if (strcmp(full_path, name))
2738 			continue;
2739 
2740 		cinode = CIFS_I(d_inode(cfile->dentry));
2741 		spin_unlock(&tcon->open_file_lock);
2742 		free_dentry_path(page);
2743 		*ret_file = find_readable_file(cinode, FIND_ANY);
2744 		return *ret_file ? 0 : -ENOENT;
2745 	}
2746 
2747 	spin_unlock(&tcon->open_file_lock);
2748 	free_dentry_path(page);
2749 	return -ENOENT;
2750 }
2751 
2752 /*
2753  * Flush data on a strict file.
2754  */
cifs_strict_fsync(struct file * file,loff_t start,loff_t end,int datasync)2755 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2756 		      int datasync)
2757 {
2758 	struct cifsFileInfo *smbfile = file->private_data;
2759 	struct inode *inode = file_inode(file);
2760 	unsigned int xid;
2761 	int rc;
2762 
2763 	rc = file_write_and_wait_range(file, start, end);
2764 	if (rc) {
2765 		trace_cifs_fsync_err(inode->i_ino, rc);
2766 		return rc;
2767 	}
2768 
2769 	cifs_dbg(FYI, "%s: name=%pD datasync=0x%x\n", __func__, file, datasync);
2770 
2771 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2772 		rc = cifs_zap_mapping(inode);
2773 		cifs_dbg(FYI, "%s: invalidate mapping: rc = %d\n", __func__, rc);
2774 	}
2775 
2776 	xid = get_xid();
2777 	rc = cifs_file_flush(xid, inode, smbfile);
2778 	free_xid(xid);
2779 	return rc;
2780 }
2781 
2782 /*
2783  * Flush data on a non-strict data.
2784  */
cifs_fsync(struct file * file,loff_t start,loff_t end,int datasync)2785 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2786 {
2787 	unsigned int xid;
2788 	int rc = 0;
2789 	struct cifs_tcon *tcon;
2790 	struct TCP_Server_Info *server;
2791 	struct cifsFileInfo *smbfile = file->private_data;
2792 	struct inode *inode = file_inode(file);
2793 	struct cifs_sb_info *cifs_sb = CIFS_SB(file);
2794 
2795 	rc = file_write_and_wait_range(file, start, end);
2796 	if (rc) {
2797 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2798 		return rc;
2799 	}
2800 
2801 	xid = get_xid();
2802 
2803 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2804 		 file, datasync);
2805 
2806 	tcon = tlink_tcon(smbfile->tlink);
2807 	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)) {
2808 		server = tcon->ses->server;
2809 		if (server->ops->flush == NULL) {
2810 			rc = -ENOSYS;
2811 			goto fsync_exit;
2812 		}
2813 
2814 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2815 			smbfile = find_writable_file(CIFS_I(inode), FIND_ANY);
2816 			if (smbfile) {
2817 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2818 				cifsFileInfo_put(smbfile);
2819 			} else
2820 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2821 		} else
2822 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2823 	}
2824 
2825 fsync_exit:
2826 	free_xid(xid);
2827 	return rc;
2828 }
2829 
2830 /*
2831  * As file closes, flush all cached write data for this inode checking
2832  * for write behind errors.
2833  */
cifs_flush(struct file * file,fl_owner_t id)2834 int cifs_flush(struct file *file, fl_owner_t id)
2835 {
2836 	struct inode *inode = file_inode(file);
2837 	int rc = 0;
2838 
2839 	if (file->f_mode & FMODE_WRITE)
2840 		rc = filemap_write_and_wait(inode->i_mapping);
2841 
2842 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2843 	if (rc) {
2844 		/* get more nuanced writeback errors */
2845 		rc = filemap_check_wb_err(file->f_mapping, 0);
2846 		trace_cifs_flush_err(inode->i_ino, rc);
2847 	}
2848 	return rc;
2849 }
2850 
/*
 * Buffered write for files subject to mandatory byte-range locks.  Holds
 * lock_sem shared so the lock list cannot change underneath us, and fails
 * the write if it overlaps an exclusive brlock held by another handle.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	ssize_t rc;

	rc = netfs_start_io_write(inode);
	if (rc < 0)
		return rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	/*
	 * With mandatory (non-POSIX) brlock semantics, an exclusive-lock
	 * conflict anywhere in the target range fails the whole write.
	 */
	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) &&
	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))) {
		rc = -EACCES;
		goto out;
	}

	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);

out:
	up_read(&cinode->lock_sem);
	netfs_end_io_write(inode);
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
2893 
/*
 * Strict cache mode write: choose a write path based on the caching rights
 * currently held, and invalidate the local read cache when the write makes
 * it stale.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* Nonzero means the write must not proceed (see cifs_get_writer). */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		/*
		 * POSIX byte-range lock semantics (Unix extensions present
		 * and not overridden): plain buffered write is sufficient.
		 */
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
		    ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = netfs_file_write_iter(iocb, from);
			goto out;
		}
		/* Mandatory brlocks: cifs_writev() checks for conflicts. */
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = netfs_file_write_iter(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cifs_reset_oplock(cinode);
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2943 
cifs_loose_read_iter(struct kiocb * iocb,struct iov_iter * iter)2944 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2945 {
2946 	ssize_t rc;
2947 	struct inode *inode = file_inode(iocb->ki_filp);
2948 
2949 	if (iocb->ki_flags & IOCB_DIRECT)
2950 		return netfs_unbuffered_read_iter(iocb, iter);
2951 
2952 	rc = cifs_revalidate_mapping(inode);
2953 	if (rc)
2954 		return rc;
2955 
2956 	return netfs_file_read_iter(iocb, iter);
2957 }
2958 
/*
 * Default (non-strict) write path.  O_DIRECT writes bypass the pagecache;
 * buffered writes are pushed toward the server immediately when we hold no
 * write-caching rights.
 */
ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = netfs_unbuffered_write_iter(iocb, from);
		/* The direct write made any cached read data stale. */
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cifs_reset_oplock(cinode);
		}
		return written;
	}

	/* Nonzero means the write must not proceed (see cifs_get_writer). */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = netfs_file_write_iter(iocb, from);

	/*
	 * Without write-caching rights, kick off writeback now so the server
	 * sees the data promptly; failure to start it is only logged.
	 */
	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
		rc = filemap_fdatawrite(inode->i_mapping);
		if (rc)
			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
				 rc, inode);
	}

	cifs_put_writer(cinode);
	return written;
}
2994 
/*
 * Strict cache mode read.  Without a read lease/oplock every read goes to
 * the server; with one, the path depends on whether POSIX byte-range lock
 * semantics apply or mandatory brlocks must be checked first.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return netfs_unbuffered_read_iter(iocb, to);

	/* POSIX lock semantics: no mandatory-lock conflict check needed. */
	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0) {
		if (iocb->ki_flags & IOCB_DIRECT)
			return netfs_unbuffered_read_iter(iocb, to);
		return netfs_buffered_read_iter(iocb, to);
	}

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	if (iocb->ki_flags & IOCB_DIRECT) {
		rc = netfs_start_io_direct(inode);
		if (rc < 0)
			goto out;
		rc = -EACCES;	/* returned if a conflicting lock is found */
		down_read(&cinode->lock_sem);
		if (!cifs_find_lock_conflict(
			    cfile, iocb->ki_pos, iov_iter_count(to),
			    tcon->ses->server->vals->shared_lock_type,
			    0, NULL, CIFS_READ_OP))
			rc = netfs_unbuffered_read_iter_locked(iocb, to);
		up_read(&cinode->lock_sem);
		netfs_end_io_direct(inode);
	} else {
		rc = netfs_start_io_read(inode);
		if (rc < 0)
			goto out;
		rc = -EACCES;	/* returned if a conflicting lock is found */
		down_read(&cinode->lock_sem);
		if (!cifs_find_lock_conflict(
			    cfile, iocb->ki_pos, iov_iter_count(to),
			    tcon->ses->server->vals->shared_lock_type,
			    0, NULL, CIFS_READ_OP))
			rc = filemap_read(iocb, to, 0);
		up_read(&cinode->lock_sem);
		netfs_end_io_read(inode);
	}
out:
	return rc;
}
3057 
/* Make a folio writable on the first write fault; defers to netfslib. */
static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
{
	return netfs_page_mkwrite(vmf, NULL);
}
3062 
/* VM operations shared by the strict and non-strict mmap paths below. */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3068 
cifs_file_strict_mmap_prepare(struct vm_area_desc * desc)3069 int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
3070 {
3071 	int xid, rc = 0;
3072 	struct inode *inode = file_inode(desc->file);
3073 
3074 	xid = get_xid();
3075 
3076 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3077 		rc = cifs_zap_mapping(inode);
3078 	if (!rc)
3079 		rc = generic_file_mmap_prepare(desc);
3080 	if (!rc)
3081 		desc->vm_ops = &cifs_file_vm_ops;
3082 
3083 	free_xid(xid);
3084 	return rc;
3085 }
3086 
cifs_file_mmap_prepare(struct vm_area_desc * desc)3087 int cifs_file_mmap_prepare(struct vm_area_desc *desc)
3088 {
3089 	int rc, xid;
3090 
3091 	xid = get_xid();
3092 
3093 	rc = cifs_revalidate_file(desc->file);
3094 	if (rc)
3095 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3096 			 rc);
3097 	if (!rc)
3098 		rc = generic_file_mmap_prepare(desc);
3099 	if (!rc)
3100 		desc->vm_ops = &cifs_file_vm_ops;
3101 
3102 	free_xid(xid);
3103 	return rc;
3104 }
3105 
is_inode_writable(struct cifsInodeInfo * cifs_inode)3106 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3107 {
3108 	struct cifsFileInfo *open_file;
3109 
3110 	spin_lock(&cifs_inode->open_file_lock);
3111 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3112 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3113 			spin_unlock(&cifs_inode->open_file_lock);
3114 			return 1;
3115 		}
3116 	}
3117 	spin_unlock(&cifs_inode->open_file_lock);
3118 	return 0;
3119 }
3120 
3121 /* We do not want to update the file size from server for inodes
3122    open for write - to avoid races with writepage extending
3123    the file - in the future we could consider allowing
3124    refreshing the inode only on increases in the file size
3125    but this is tricky to do without racing with writebehind
3126    page caching in the current Linux kernel design */
is_size_safe_to_change(struct cifsInodeInfo * cifsInode,__u64 end_of_file,bool from_readdir)3127 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3128 			    bool from_readdir)
3129 {
3130 	if (!cifsInode)
3131 		return true;
3132 
3133 	if (is_inode_writable(cifsInode) ||
3134 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3135 		/* This inode is open for write at least once */
3136 		struct cifs_sb_info *cifs_sb = CIFS_SB(cifsInode);
3137 
3138 		if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_DIRECT_IO) {
3139 			/* since no page cache to corrupt on directio
3140 			we can change size safely */
3141 			return true;
3142 		}
3143 
3144 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3145 			return true;
3146 
3147 		return false;
3148 	} else
3149 		return true;
3150 }
3151 
/*
 * Work item run when the server breaks an oplock/lease on this handle.
 * Waits for pending writers, downgrades the cached oplock state, writes
 * back and/or invalidates the pagecache as the remaining caching rights
 * require, pushes cached byte-range locks to the server, and finally sends
 * the break acknowledgment unless the handle has already been closed.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	bool cache_read, cache_write, cache_handle;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct tcon_link *tlink;
	unsigned int oplock;
	int rc = 0;
	bool purge_cache = false, oplock_break_cancelled;
	__u64 persistent_fid, volatile_fid;
	__u16 net_fid;

	/* Let in-flight writers drain before downgrading caching state. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		goto out;
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	/* Snapshot the downgraded caching rights under open_file_lock. */
	scoped_guard(spinlock, &cinode->open_file_lock) {
		unsigned int sbflags = cifs_sb_flags(cifs_sb);

		server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
					      cfile->oplock_epoch, &purge_cache);
		oplock = READ_ONCE(cinode->oplock);
		cache_read = (oplock & CIFS_CACHE_READ_FLG) ||
			(sbflags & CIFS_MOUNT_RO_CACHE);
		cache_write = (oplock & CIFS_CACHE_WRITE_FLG) ||
			(sbflags & CIFS_MOUNT_RW_CACHE);
		cache_handle = oplock & CIFS_CACHE_HANDLE_FLG;
	}

	/* Mandatory locks are incompatible with read caching: drop it all. */
	if (!cache_write && cache_read && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cifs_reset_oplock(cinode);
		oplock = 0;
		cache_read = cache_write = cache_handle = false;
	}

	if (S_ISREG(inode->i_mode)) {
		/* Propagate the break to any local VFS leases. */
		if (cache_read)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!cache_read || purge_cache) {
			/* No read rights remain: wait and drop the pagecache. */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		if (cache_write)
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * When oplock break is received and there are no active
	 * file handles but cached, then schedule deferred close immediately.
	 * So, new open will not use cached handle.
	 */

	if (!cache_handle && !list_empty(&cinode->deferred_closes))
		cifs_close_deferred_file(cinode);

	/* Save the ids now: the put below may free cfile. */
	persistent_fid = cfile->fid.persistent_fid;
	volatile_fid = cfile->fid.volatile_fid;
	net_fid = cfile->fid.netfid;
	oplock_break_cancelled = cfile->oplock_break_cancelled;

	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
	/*
	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
	 * an acknowledgment to be sent when the file has already been closed.
	 */
	spin_lock(&cinode->open_file_lock);
	/* check list empty since can race with kill_sb calling tree disconnect */
	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
		spin_unlock(&cinode->open_file_lock);
		rc = server->ops->oplock_response(tcon, persistent_fid,
						  volatile_fid, net_fid,
						  cinode, oplock);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	} else
		spin_unlock(&cinode->open_file_lock);

	cifs_put_tlink(tlink);
out:
	cifs_done_oplock_break(cinode);
}
3255 
/*
 * Enable use of this file as a swapfile.  The file must be fully allocated
 * (no holes) and the address space must provide ->swap_rw.
 */
static int cifs_swap_activate(struct swap_info_struct *sis,
			      struct file *swap_file, sector_t *span)
{
	struct cifsFileInfo *cfile = swap_file->private_data;
	struct inode *inode = swap_file->f_mapping->host;
	unsigned long blocks;
	long long isize;

	cifs_dbg(FYI, "swap activate\n");

	if (!swap_file->f_mapping->a_ops->swap_rw)
		/* Cannot support swap */
		return -EINVAL;

	spin_lock(&inode->i_lock);
	blocks = inode->i_blocks;
	isize = inode->i_size;
	spin_unlock(&inode->i_lock);
	/*
	 * i_blocks is in 512-byte units; fewer blocks than the size implies
	 * holes.  NOTE(review): blocks*512 could wrap in unsigned long on
	 * 32-bit for very large files — confirm whether that is reachable.
	 */
	if (blocks*512 < isize) {
		pr_warn("swap activate: swapfile has holes\n");
		return -EINVAL;
	}
	*span = sis->pages;

	pr_warn_once("Swap support over SMB3 is experimental\n");

	/*
	 * TODO: consider adding ACL (or documenting how) to prevent other
	 * users (on this or other systems) from reading it
	 */


	/* TODO: add sk_set_memalloc(inet) or similar */

	if (cfile)
		cfile->swapfile = true;
	/*
	 * TODO: Since file already open, we can't open with DENY_ALL here
	 * but we could add call to grab a byte range lock to prevent others
	 * from reading or writing the file
	 */

	sis->flags |= SWP_FS_OPS;
	return add_swap_extent(sis, 0, sis->max, 0);
}
3301 
/* Disable swapfile use: just clear the per-handle swapfile flag. */
static void cifs_swap_deactivate(struct file *file)
{
	struct cifsFileInfo *cfile = file->private_data;

	cifs_dbg(FYI, "swap deactivate\n");

	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */

	if (cfile)
		cfile->swapfile = false;

	/* do we need to unpin (or unlock) the file */
}
3315 
3316 /**
3317  * cifs_swap_rw - SMB3 address space operation for swap I/O
3318  * @iocb: target I/O control block
3319  * @iter: I/O buffer
3320  *
3321  * Perform IO to the swap-file.  This is much like direct IO.
3322  */
cifs_swap_rw(struct kiocb * iocb,struct iov_iter * iter)3323 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3324 {
3325 	ssize_t ret;
3326 
3327 	if (iov_iter_rw(iter) == READ)
3328 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3329 	else
3330 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3331 	if (ret < 0)
3332 		return ret;
3333 	return 0;
3334 }
3335 
/* Address space operations for the normal (netfs-backed) buffered path. */
const struct address_space_operations cifs_addr_ops = {
	.read_folio	= netfs_read_folio,
	.readahead	= netfs_readahead,
	.writepages	= netfs_writepages,
	.dirty_folio	= netfs_dirty_folio,
	.release_folio	= netfs_release_folio,
	.direct_IO	= noop_direct_IO,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
	/*
	 * TODO: investigate and if useful we could add an is_dirty_writeback
	 * helper if needed
	 */
	.swap_activate	= cifs_swap_activate,
	.swap_deactivate = cifs_swap_deactivate,
	.swap_rw = cifs_swap_rw,
};
3353 
/*
 * cifs_readahead requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readahead out of the address space operations.
 *
 * NOTE(review): unlike cifs_addr_ops, this set also omits .direct_IO and
 * the .swap_* hooks — presumably unsupported with small buffers; confirm.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.read_folio	= netfs_read_folio,
	.writepages	= netfs_writepages,
	.dirty_folio	= netfs_dirty_folio,
	.release_folio	= netfs_release_folio,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
};
3367