xref: /linux/fs/smb/client/file.c (revision 91b436fc925ca58625e4230f53238e955223c385)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  */
11 #include <linux/fs.h>
12 #include <linux/filelock.h>
13 #include <linux/backing-dev.h>
14 #include <linux/stat.h>
15 #include <linux/fcntl.h>
16 #include <linux/pagemap.h>
17 #include <linux/pagevec.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifspdu.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40 
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42 
43 /*
44  * Prepare a subrequest to upload to the server.  We need to allocate credits
45  * so that we know the maximum amount of data that we can include in it.
46  */
47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 	struct cifs_io_subrequest *wdata =
50 		container_of(subreq, struct cifs_io_subrequest, subreq);
51 	struct cifs_io_request *req = wdata->req;
52 	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
53 	struct TCP_Server_Info *server;
54 	struct cifsFileInfo *open_file = req->cfile;
55 	struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
56 	size_t wsize = req->rreq.wsize;
57 	int rc;
58 
59 	if (!wdata->have_xid) {
60 		wdata->xid = get_xid();
61 		wdata->have_xid = true;
62 	}
63 
64 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
65 	wdata->server = server;
66 
67 	if (cifs_sb->ctx->wsize == 0)
68 		cifs_negotiate_wsize(server, cifs_sb->ctx,
69 				     tlink_tcon(req->cfile->tlink));
70 
71 retry:
72 	if (open_file->invalidHandle) {
73 		rc = cifs_reopen_file(open_file, false);
74 		if (rc < 0) {
75 			if (rc == -EAGAIN)
76 				goto retry;
77 			subreq->error = rc;
78 			return netfs_prepare_write_failed(subreq);
79 		}
80 	}
81 
82 	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
83 					   &wdata->credits);
84 	if (rc < 0) {
85 		subreq->error = rc;
86 		return netfs_prepare_write_failed(subreq);
87 	}
88 
89 	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
90 	wdata->credits.rreq_debug_index = subreq->debug_index;
91 	wdata->credits.in_flight_check = 1;
92 	trace_smb3_rw_credits(wdata->rreq->debug_id,
93 			      wdata->subreq.debug_index,
94 			      wdata->credits.value,
95 			      server->credits, server->in_flight,
96 			      wdata->credits.value,
97 			      cifs_trace_rw_credits_write_prepare);
98 
99 #ifdef CONFIG_CIFS_SMB_DIRECT
100 	if (server->smbd_conn) {
101 		const struct smbdirect_socket_parameters *sp =
102 			smbd_get_parameters(server->smbd_conn);
103 
104 		stream->sreq_max_segs = sp->max_frmr_depth;
105 	}
106 #endif
107 }
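
/*
 * Editorial note: wait_mtu_credits() above both reserves send credits and
 * stores the usable payload ceiling in stream->sreq_max_len, so by the time
 * cifs_issue_write() runs, the subrequest is already clamped to what the
 * server granted.
 */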
108 
109 /*
110  * Issue a subrequest to upload to the server.
111  */
112 static void cifs_issue_write(struct netfs_io_subrequest *subreq)
113 {
114 	struct cifs_io_subrequest *wdata =
115 		container_of(subreq, struct cifs_io_subrequest, subreq);
116 	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
117 	int rc;
118 
119 	if (cifs_forced_shutdown(sbi)) {
120 		rc = -EIO;
121 		goto fail;
122 	}
123 
124 	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
125 	if (rc)
126 		goto fail;
127 
128 	rc = -EAGAIN;
129 	if (wdata->req->cfile->invalidHandle)
130 		goto fail;
131 
132 	wdata->server->ops->async_writev(wdata);
133 out:
134 	return;
135 
136 fail:
137 	if (rc == -EAGAIN)
138 		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
139 	else
140 		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
141 	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
142 	cifs_write_subrequest_terminated(wdata, rc);
143 	goto out;
144 }
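
/*
 * Editorial note: every failure path above funnels through "fail", which
 * returns the unused credits (add_credits_and_wake_if) and completes the
 * subrequest via cifs_write_subrequest_terminated(); -EAGAIN is traced as
 * a retry rather than a hard failure.
 */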
145 
146 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
147 {
148 	cifs_invalidate_cache(wreq->inode, 0);
149 }
150 
151 /*
152  * Negotiate the size of a read operation on behalf of the netfs library.
153  */
154 static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
155 {
156 	struct netfs_io_request *rreq = subreq->rreq;
157 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
158 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
159 	struct TCP_Server_Info *server;
160 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
161 	size_t size;
162 	int rc = 0;
163 
164 	if (!rdata->have_xid) {
165 		rdata->xid = get_xid();
166 		rdata->have_xid = true;
167 	}
168 
169 	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
170 	rdata->server = server;
171 
172 	if (cifs_sb->ctx->rsize == 0)
173 		cifs_negotiate_rsize(server, cifs_sb->ctx,
174 				     tlink_tcon(req->cfile->tlink));
175 
176 	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
177 					   &size, &rdata->credits);
178 	if (rc)
179 		return rc;
180 
181 	rreq->io_streams[0].sreq_max_len = size;
182 
183 	rdata->credits.in_flight_check = 1;
184 	rdata->credits.rreq_debug_id = rreq->debug_id;
185 	rdata->credits.rreq_debug_index = subreq->debug_index;
186 
187 	trace_smb3_rw_credits(rdata->rreq->debug_id,
188 			      rdata->subreq.debug_index,
189 			      rdata->credits.value,
190 			      server->credits, server->in_flight, 0,
191 			      cifs_trace_rw_credits_read_submit);
192 
193 #ifdef CONFIG_CIFS_SMB_DIRECT
194 	if (server->smbd_conn) {
195 		const struct smbdirect_socket_parameters *sp =
196 			smbd_get_parameters(server->smbd_conn);
197 
198 		rreq->io_streams[0].sreq_max_segs = sp->max_frmr_depth;
199 	}
200 #endif
201 	return 0;
202 }
203 
204 /*
205  * Issue a read operation on behalf of the netfs helper functions.  We're asked
206  * to make a read of a certain size at a point in the file.  We are permitted
207  * to only read a portion of that, but as long as we read something, the netfs
208  * helper will call us again so that we can issue another read.
209  */
210 static void cifs_issue_read(struct netfs_io_subrequest *subreq)
211 {
212 	struct netfs_io_request *rreq = subreq->rreq;
213 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
214 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
215 	struct TCP_Server_Info *server = rdata->server;
216 	int rc = 0;
217 
218 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
219 		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
220 		 subreq->transferred, subreq->len);
221 
222 	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
223 	if (rc)
224 		goto failed;
225 
226 	if (req->cfile->invalidHandle) {
227 		do {
228 			rc = cifs_reopen_file(req->cfile, true);
229 		} while (rc == -EAGAIN);
230 		if (rc)
231 			goto failed;
232 	}
233 
234 	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
235 	    subreq->rreq->origin != NETFS_DIO_READ)
236 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
237 
238 	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
239 	rc = rdata->server->ops->async_readv(rdata);
240 	if (rc)
241 		goto failed;
242 	return;
243 
244 failed:
245 	subreq->error = rc;
246 	netfs_read_subreq_terminated(subreq);
247 }
248 
249 /*
250  * Writeback calls this when it finds a folio that needs uploading.  This isn't
251  * called if writeback only has copy-to-cache to deal with.
252  */
253 static void cifs_begin_writeback(struct netfs_io_request *wreq)
254 {
255 	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
256 	int ret;
257 
258 	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
259 	if (ret) {
260 		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
261 		return;
262 	}
263 
264 	wreq->io_streams[0].avail = true;
265 }
266 
267 /*
268  * Initialise a request.
269  */
270 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
271 {
272 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
273 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
274 	struct cifsFileInfo *open_file = NULL;
275 
276 	rreq->rsize = cifs_sb->ctx->rsize;
277 	rreq->wsize = cifs_sb->ctx->wsize;
278 	req->pid = current->tgid; // Ummm...  This may be a workqueue
279 
280 	if (file) {
281 		open_file = file->private_data;
282 		rreq->netfs_priv = file->private_data;
283 		req->cfile = cifsFileInfo_get(open_file);
284 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
285 			req->pid = req->cfile->pid;
286 	} else if (rreq->origin != NETFS_WRITEBACK) {
287 		WARN_ON_ONCE(1);
288 		return -EIO;
289 	}
290 
291 	return 0;
292 }
293 
294 /*
295  * Completion of a request operation.
296  */
297 static void cifs_rreq_done(struct netfs_io_request *rreq)
298 {
299 	struct timespec64 atime, mtime;
300 	struct inode *inode = rreq->inode;
301 
302 	/* we do not want atime to be less than mtime, it broke some apps */
303 	atime = inode_set_atime_to_ts(inode, current_time(inode));
304 	mtime = inode_get_mtime(inode);
305 	if (timespec64_compare(&atime, &mtime) < 0)
306 		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
307 }
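
/*
 * Editorial note: timespec64_compare() returns the sign of (lhs - rhs),
 * strcmp()-style, so the "< 0" test above only rolls atime forward when the
 * fresh stamp ended up older than mtime.  Hypothetical sketch:
 *
 *	struct timespec64 a = { .tv_sec = 5 }, m = { .tv_sec = 9 };
 *
 *	timespec64_compare(&a, &m);	// negative, so atime is set to mtime
 */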
308 
309 static void cifs_free_request(struct netfs_io_request *rreq)
310 {
311 	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
312 
313 	if (req->cfile)
314 		cifsFileInfo_put(req->cfile);
315 }
316 
317 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
318 {
319 	struct cifs_io_subrequest *rdata =
320 		container_of(subreq, struct cifs_io_subrequest, subreq);
321 	int rc = subreq->error;
322 
323 	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
324 #ifdef CONFIG_CIFS_SMB_DIRECT
325 		if (rdata->mr) {
326 			smbd_deregister_mr(rdata->mr);
327 			rdata->mr = NULL;
328 		}
329 #endif
330 	}
331 
332 	if (rdata->credits.value != 0) {
333 		trace_smb3_rw_credits(rdata->rreq->debug_id,
334 				      rdata->subreq.debug_index,
335 				      rdata->credits.value,
336 				      rdata->server ? rdata->server->credits : 0,
337 				      rdata->server ? rdata->server->in_flight : 0,
338 				      -rdata->credits.value,
339 				      cifs_trace_rw_credits_free_subreq);
340 		if (rdata->server)
341 			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
342 		else
343 			rdata->credits.value = 0;
344 	}
345 
346 	if (rdata->have_xid)
347 		free_xid(rdata->xid);
348 }
349 
350 const struct netfs_request_ops cifs_req_ops = {
351 	.request_pool		= &cifs_io_request_pool,
352 	.subrequest_pool	= &cifs_io_subrequest_pool,
353 	.init_request		= cifs_init_request,
354 	.free_request		= cifs_free_request,
355 	.free_subrequest	= cifs_free_subrequest,
356 	.prepare_read		= cifs_prepare_read,
357 	.issue_read		= cifs_issue_read,
358 	.done			= cifs_rreq_done,
359 	.begin_writeback	= cifs_begin_writeback,
360 	.prepare_write		= cifs_prepare_write,
361 	.issue_write		= cifs_issue_write,
362 	.invalidate_cache	= cifs_netfs_invalidate_cache,
363 };
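
/*
 * Editorial sketch (simplified, assumed flow): for a buffered read, the
 * netfs library drives the table above roughly as
 *
 *	cifs_init_request()
 *	    -> cifs_prepare_read()	(pick channel, negotiate credits/rsize)
 *	    -> cifs_issue_read()	(fire the async SMB read)
 *	    -> cifs_rreq_done()		(atime fixup on completion)
 *	    -> cifs_free_subrequest() / cifs_free_request()
 */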
364 
365 /*
366  * Mark all open files on tree connections as invalid since they
367  * were closed when the session to the server was lost.
368  */
369 void
370 cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
371 {
372 	struct cifsFileInfo *open_file = NULL;
373 	struct list_head *tmp;
374 	struct list_head *tmp1;
375 
376 	/* only send once per connect */
377 	spin_lock(&tcon->tc_lock);
378 	if (tcon->need_reconnect)
379 		tcon->status = TID_NEED_RECON;
380 
381 	if (tcon->status != TID_NEED_RECON) {
382 		spin_unlock(&tcon->tc_lock);
383 		return;
384 	}
385 	tcon->status = TID_IN_FILES_INVALIDATE;
386 	spin_unlock(&tcon->tc_lock);
387 
388 	/* list all files open on tree connection and mark them invalid */
389 	spin_lock(&tcon->open_file_lock);
390 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
391 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
392 		open_file->invalidHandle = true;
393 		open_file->oplock_break_cancelled = true;
394 	}
395 	spin_unlock(&tcon->open_file_lock);
396 
397 	invalidate_all_cached_dirs(tcon);
398 	spin_lock(&tcon->tc_lock);
399 	if (tcon->status == TID_IN_FILES_INVALIDATE)
400 		tcon->status = TID_NEED_TCON;
401 	spin_unlock(&tcon->tc_lock);
402 
403 	/*
404 	 * BB Add call to evict_inodes(sb) for all superblocks mounted
405 	 * to this tcon.
406 	 */
407 }
408 
409 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
410 {
411 	if ((flags & O_ACCMODE) == O_RDONLY)
412 		return GENERIC_READ;
413 	else if ((flags & O_ACCMODE) == O_WRONLY)
414 		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
415 	else if ((flags & O_ACCMODE) == O_RDWR) {
416 		/* GENERIC_ALL is too much permission to request; it can
417 		   cause unnecessary access-denied errors on create */
418 		/* return GENERIC_ALL; */
419 		return (GENERIC_READ | GENERIC_WRITE);
420 	}
421 
422 	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
423 		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
424 		FILE_READ_DATA);
425 }
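
/*
 * Illustrative sketch (not part of the driver), assuming the standard
 * fcntl.h flag values:
 *
 *	cifs_convert_flags(O_RDONLY, 0);  // GENERIC_READ
 *	cifs_convert_flags(O_WRONLY, 0);  // GENERIC_WRITE
 *	cifs_convert_flags(O_WRONLY, 1);  // GENERIC_READ | GENERIC_WRITE, so
 *					  // fscache can fill in around
 *					  // partial writes
 */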
426 
427 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
428 static u32 cifs_posix_convert_flags(unsigned int flags)
429 {
430 	u32 posix_flags = 0;
431 
432 	if ((flags & O_ACCMODE) == O_RDONLY)
433 		posix_flags = SMB_O_RDONLY;
434 	else if ((flags & O_ACCMODE) == O_WRONLY)
435 		posix_flags = SMB_O_WRONLY;
436 	else if ((flags & O_ACCMODE) == O_RDWR)
437 		posix_flags = SMB_O_RDWR;
438 
439 	if (flags & O_CREAT) {
440 		posix_flags |= SMB_O_CREAT;
441 		if (flags & O_EXCL)
442 			posix_flags |= SMB_O_EXCL;
443 	} else if (flags & O_EXCL)
444 		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
445 			 current->comm, current->tgid);
446 
447 	if (flags & O_TRUNC)
448 		posix_flags |= SMB_O_TRUNC;
449 	/* be safe and imply O_SYNC for O_DSYNC */
450 	if (flags & O_DSYNC)
451 		posix_flags |= SMB_O_SYNC;
452 	if (flags & O_DIRECTORY)
453 		posix_flags |= SMB_O_DIRECTORY;
454 	if (flags & O_NOFOLLOW)
455 		posix_flags |= SMB_O_NOFOLLOW;
456 	if (flags & O_DIRECT)
457 		posix_flags |= SMB_O_DIRECT;
458 
459 	return posix_flags;
460 }
461 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
462 
463 static inline int cifs_get_disposition(unsigned int flags)
464 {
465 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
466 		return FILE_CREATE;
467 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
468 		return FILE_OVERWRITE_IF;
469 	else if ((flags & O_CREAT) == O_CREAT)
470 		return FILE_OPEN_IF;
471 	else if ((flags & O_TRUNC) == O_TRUNC)
472 		return FILE_OVERWRITE;
473 	else
474 		return FILE_OPEN;
475 }
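
/*
 * Illustrative sketch (not part of the driver): dispositions the helper
 * above picks for common open(2) flag combinations:
 *
 *	cifs_get_disposition(O_CREAT | O_EXCL);   // FILE_CREATE
 *	cifs_get_disposition(O_CREAT | O_TRUNC);  // FILE_OVERWRITE_IF
 *	cifs_get_disposition(O_CREAT);            // FILE_OPEN_IF
 *	cifs_get_disposition(O_TRUNC);            // FILE_OVERWRITE
 *	cifs_get_disposition(O_RDWR);             // FILE_OPEN
 */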
476 
477 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
478 int cifs_posix_open(const char *full_path, struct inode **pinode,
479 			struct super_block *sb, int mode, unsigned int f_flags,
480 			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
481 {
482 	int rc;
483 	FILE_UNIX_BASIC_INFO *presp_data;
484 	__u32 posix_flags = 0;
485 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
486 	struct cifs_fattr fattr;
487 	struct tcon_link *tlink;
488 	struct cifs_tcon *tcon;
489 
490 	cifs_dbg(FYI, "posix open %s\n", full_path);
491 
492 	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
493 	if (presp_data == NULL)
494 		return -ENOMEM;
495 
496 	tlink = cifs_sb_tlink(cifs_sb);
497 	if (IS_ERR(tlink)) {
498 		rc = PTR_ERR(tlink);
499 		goto posix_open_ret;
500 	}
501 
502 	tcon = tlink_tcon(tlink);
503 	mode &= ~current_umask();
504 
505 	posix_flags = cifs_posix_convert_flags(f_flags);
506 	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
507 			     poplock, full_path, cifs_sb->local_nls,
508 			     cifs_remap(cifs_sb));
509 	cifs_put_tlink(tlink);
510 
511 	if (rc)
512 		goto posix_open_ret;
513 
514 	if (presp_data->Type == cpu_to_le32(-1))
515 		goto posix_open_ret; /* open ok, caller does qpathinfo */
516 
517 	if (!pinode)
518 		goto posix_open_ret; /* caller does not need info */
519 
520 	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
521 
522 	/* get new inode and set it up */
523 	if (*pinode == NULL) {
524 		cifs_fill_uniqueid(sb, &fattr);
525 		*pinode = cifs_iget(sb, &fattr);
526 		if (!*pinode) {
527 			rc = -ENOMEM;
528 			goto posix_open_ret;
529 		}
530 	} else {
531 		cifs_revalidate_mapping(*pinode);
532 		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
533 	}
534 
535 posix_open_ret:
536 	kfree(presp_data);
537 	return rc;
538 }
539 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
540 
541 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
542 			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
543 			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
544 {
545 	int rc;
546 	int desired_access;
547 	int disposition;
548 	int create_options = CREATE_NOT_DIR;
549 	struct TCP_Server_Info *server = tcon->ses->server;
550 	struct cifs_open_parms oparms;
551 	int rdwr_for_fscache = 0;
552 
553 	if (!server->ops->open)
554 		return -ENOSYS;
555 
556 	/* If we're caching, we need to be able to fill in around partial writes. */
557 	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
558 		rdwr_for_fscache = 1;
559 
560 	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
561 
562 /*********************************************************************
563  *  open flag mapping table:
564  *
565  *	POSIX Flag            CIFS Disposition
566  *	----------            ----------------
567  *	O_CREAT               FILE_OPEN_IF
568  *	O_CREAT | O_EXCL      FILE_CREATE
569  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
570  *	O_TRUNC               FILE_OVERWRITE
571  *	none of the above     FILE_OPEN
572  *
573  *	Note that there is not a direct match for the disposition
574  *	FILE_SUPERSEDE (ie create whether or not the file exists);
575  *	O_CREAT | O_TRUNC is similar, but truncates the existing
576  *	file rather than creating a new file as FILE_SUPERSEDE does
577  *	(which uses the attributes / metadata passed in on the open call).
578  *
579  *	O_SYNC is a reasonable match to the CIFS writethrough flag
580  *	and the read write flags match reasonably.  O_LARGEFILE
581  *	is irrelevant because largefile support is always used
582  *	by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
583  *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
584  *********************************************************************/
585 
586 	disposition = cifs_get_disposition(f_flags);
587 
588 	/* BB pass O_SYNC flag through on file attributes .. BB */
589 
590 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
591 	if (f_flags & O_SYNC)
592 		create_options |= CREATE_WRITE_THROUGH;
593 
594 	if (f_flags & O_DIRECT)
595 		create_options |= CREATE_NO_BUFFER;
596 
597 retry_open:
598 	oparms = (struct cifs_open_parms) {
599 		.tcon = tcon,
600 		.cifs_sb = cifs_sb,
601 		.desired_access = desired_access,
602 		.create_options = cifs_create_options(cifs_sb, create_options),
603 		.disposition = disposition,
604 		.path = full_path,
605 		.fid = fid,
606 	};
607 
608 	rc = server->ops->open(xid, &oparms, oplock, buf);
609 	if (rc) {
610 		if (rc == -EACCES && rdwr_for_fscache == 1) {
611 			desired_access = cifs_convert_flags(f_flags, 0);
612 			rdwr_for_fscache = 2;
613 			goto retry_open;
614 		}
615 		return rc;
616 	}
617 	if (rdwr_for_fscache == 2)
618 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
619 
620 	/* TODO: Add support for calling posix query info but with passing in fid */
621 	if (tcon->unix_ext)
622 		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
623 					      xid);
624 	else
625 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
626 					 xid, fid);
627 
628 	if (rc) {
629 		server->ops->close(xid, tcon, fid);
630 		if (rc == -ESTALE)
631 			rc = -EOPENSTALE;
632 	}
633 
634 	return rc;
635 }
636 
637 static bool
638 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
639 {
640 	struct cifs_fid_locks *cur;
641 	bool has_locks = false;
642 
643 	down_read(&cinode->lock_sem);
644 	list_for_each_entry(cur, &cinode->llist, llist) {
645 		if (!list_empty(&cur->locks)) {
646 			has_locks = true;
647 			break;
648 		}
649 	}
650 	up_read(&cinode->lock_sem);
651 	return has_locks;
652 }
653 
654 void
655 cifs_down_write(struct rw_semaphore *sem)
656 {
657 	while (!down_write_trylock(sem))
658 		msleep(10);
659 }
660 
661 static void cifsFileInfo_put_work(struct work_struct *work);
662 void serverclose_work(struct work_struct *work);
663 
664 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
665 				       struct tcon_link *tlink, __u32 oplock,
666 				       const char *symlink_target)
667 {
668 	struct dentry *dentry = file_dentry(file);
669 	struct inode *inode = d_inode(dentry);
670 	struct cifsInodeInfo *cinode = CIFS_I(inode);
671 	struct cifsFileInfo *cfile;
672 	struct cifs_fid_locks *fdlocks;
673 	struct cifs_tcon *tcon = tlink_tcon(tlink);
674 	struct TCP_Server_Info *server = tcon->ses->server;
675 
676 	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
677 	if (cfile == NULL)
678 		return cfile;
679 
680 	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
681 	if (!fdlocks) {
682 		kfree(cfile);
683 		return NULL;
684 	}
685 
686 	if (symlink_target) {
687 		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
688 		if (!cfile->symlink_target) {
689 			kfree(fdlocks);
690 			kfree(cfile);
691 			return NULL;
692 		}
693 	}
694 
695 	INIT_LIST_HEAD(&fdlocks->locks);
696 	fdlocks->cfile = cfile;
697 	cfile->llist = fdlocks;
698 
699 	cfile->count = 1;
700 	cfile->pid = current->tgid;
701 	cfile->uid = current_fsuid();
702 	cfile->dentry = dget(dentry);
703 	cfile->f_flags = file->f_flags;
704 	cfile->invalidHandle = false;
705 	cfile->deferred_close_scheduled = false;
706 	cfile->tlink = cifs_get_tlink(tlink);
707 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
708 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
709 	INIT_WORK(&cfile->serverclose, serverclose_work);
710 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
711 	mutex_init(&cfile->fh_mutex);
712 	spin_lock_init(&cfile->file_info_lock);
713 
714 	cifs_sb_active(inode->i_sb);
715 
716 	/*
717 	 * If the server returned a read oplock and we have mandatory brlocks,
718 	 * set oplock level to None.
719 	 */
720 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
721 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
722 		oplock = 0;
723 	}
724 
725 	cifs_down_write(&cinode->lock_sem);
726 	list_add(&fdlocks->llist, &cinode->llist);
727 	up_write(&cinode->lock_sem);
728 
729 	spin_lock(&tcon->open_file_lock);
730 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
731 		oplock = fid->pending_open->oplock;
732 	list_del(&fid->pending_open->olist);
733 
734 	fid->purge_cache = false;
735 	server->ops->set_fid(cfile, fid, oplock);
736 
737 	list_add(&cfile->tlist, &tcon->openFileList);
738 	atomic_inc(&tcon->num_local_opens);
739 
740 	/* if this is a readable file instance, put it first in the list */
741 	spin_lock(&cinode->open_file_lock);
742 	if (file->f_mode & FMODE_READ)
743 		list_add(&cfile->flist, &cinode->openFileList);
744 	else
745 		list_add_tail(&cfile->flist, &cinode->openFileList);
746 	spin_unlock(&cinode->open_file_lock);
747 	spin_unlock(&tcon->open_file_lock);
748 
749 	if (fid->purge_cache)
750 		cifs_zap_mapping(inode);
751 
752 	file->private_data = cfile;
753 	return cfile;
754 }
755 
756 struct cifsFileInfo *
757 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
758 {
759 	spin_lock(&cifs_file->file_info_lock);
760 	cifsFileInfo_get_locked(cifs_file);
761 	spin_unlock(&cifs_file->file_info_lock);
762 	return cifs_file;
763 }
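
/*
 * Editorial note: cifsFileInfo_get()/cifsFileInfo_put() form the refcount
 * pair for struct cifsFileInfo.  A hypothetical sketch of borrowing an open
 * file beyond the caller's lifetime:
 *
 *	struct cifsFileInfo *cfile = cifsFileInfo_get(file->private_data);
 *
 *	// ... hand cfile to async work; the worker's last act is:
 *	cifsFileInfo_put(cfile);
 */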
764 
765 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
766 {
767 	struct inode *inode = d_inode(cifs_file->dentry);
768 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
769 	struct cifsLockInfo *li, *tmp;
770 	struct super_block *sb = inode->i_sb;
771 
772 	/*
773 	 * Delete any outstanding lock records. We'll lose them when the file
774 	 * is closed anyway.
775 	 */
776 	cifs_down_write(&cifsi->lock_sem);
777 	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
778 		list_del(&li->llist);
779 		cifs_del_lock_waiters(li);
780 		kfree(li);
781 	}
782 	list_del(&cifs_file->llist->llist);
783 	kfree(cifs_file->llist);
784 	up_write(&cifsi->lock_sem);
785 
786 	cifs_put_tlink(cifs_file->tlink);
787 	dput(cifs_file->dentry);
788 	cifs_sb_deactive(sb);
789 	kfree(cifs_file->symlink_target);
790 	kfree(cifs_file);
791 }
792 
793 static void cifsFileInfo_put_work(struct work_struct *work)
794 {
795 	struct cifsFileInfo *cifs_file = container_of(work,
796 			struct cifsFileInfo, put);
797 
798 	cifsFileInfo_put_final(cifs_file);
799 }
800 
801 void serverclose_work(struct work_struct *work)
802 {
803 	struct cifsFileInfo *cifs_file = container_of(work,
804 			struct cifsFileInfo, serverclose);
805 
806 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
807 
808 	struct TCP_Server_Info *server = tcon->ses->server;
809 	int rc = 0;
810 	int retries = 0;
811 	int MAX_RETRIES = 4;
812 
813 	do {
814 		if (server->ops->close_getattr)
815 			rc = server->ops->close_getattr(0, tcon, cifs_file);
816 		else if (server->ops->close)
817 			rc = server->ops->close(0, tcon, &cifs_file->fid);
818 
819 		if (rc == -EBUSY || rc == -EAGAIN) {
820 			retries++;
821 			msleep(250);
822 		}
823 	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
824 	);
825 
826 	if (retries == MAX_RETRIES)
827 		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
828 
829 	if (cifs_file->offload)
830 		queue_work(fileinfo_put_wq, &cifs_file->put);
831 	else
832 		cifsFileInfo_put_final(cifs_file);
833 }
834 
835 /**
836  * cifsFileInfo_put - release a reference of file priv data
837  *
838  * Always potentially wait for oplock handler. See _cifsFileInfo_put().
839  *
840  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
841  */
842 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
843 {
844 	_cifsFileInfo_put(cifs_file, true, true);
845 }
846 
847 /**
848  * _cifsFileInfo_put - release a reference of file priv data
849  *
850  * This may involve closing the filehandle @cifs_file out on the
851  * server. Must be called without holding tcon->open_file_lock,
852  * cinode->open_file_lock and cifs_file->file_info_lock.
853  *
854  * If @wait_for_oplock_handler is true and we are releasing the last
855  * reference, wait for any running oplock break handler of the file
856  * and cancel any pending one.
857  *
858  * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
859  * @wait_oplock_handler: must be false if called from oplock_break_handler
860  * @offload:	if true, defer the final release to a workqueue (false on the close and oplock break paths)
861  *
862  */
863 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
864 		       bool wait_oplock_handler, bool offload)
865 {
866 	struct inode *inode = d_inode(cifs_file->dentry);
867 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
868 	struct TCP_Server_Info *server = tcon->ses->server;
869 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
870 	struct super_block *sb = inode->i_sb;
871 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
872 	struct cifs_fid fid = {};
873 	struct cifs_pending_open open;
874 	bool oplock_break_cancelled;
875 	bool serverclose_offloaded = false;
876 
877 	spin_lock(&tcon->open_file_lock);
878 	spin_lock(&cifsi->open_file_lock);
879 	spin_lock(&cifs_file->file_info_lock);
880 
881 	cifs_file->offload = offload;
882 	if (--cifs_file->count > 0) {
883 		spin_unlock(&cifs_file->file_info_lock);
884 		spin_unlock(&cifsi->open_file_lock);
885 		spin_unlock(&tcon->open_file_lock);
886 		return;
887 	}
888 	spin_unlock(&cifs_file->file_info_lock);
889 
890 	if (server->ops->get_lease_key)
891 		server->ops->get_lease_key(inode, &fid);
892 
893 	/* store open in pending opens to make sure we don't miss lease break */
894 	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
895 
896 	/* remove it from the lists */
897 	list_del(&cifs_file->flist);
898 	list_del(&cifs_file->tlist);
899 	atomic_dec(&tcon->num_local_opens);
900 
901 	if (list_empty(&cifsi->openFileList)) {
902 		cifs_dbg(FYI, "closing last open instance for inode %p\n",
903 			 d_inode(cifs_file->dentry));
904 		/*
905 		 * In strict cache mode we need to invalidate the mapping on the
906 		 * last close because it may cause an error when we open this
907 		 * file again and get at least a level II oplock.
908 		 */
909 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
910 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
911 		cifs_set_oplock_level(cifsi, 0);
912 	}
913 
914 	spin_unlock(&cifsi->open_file_lock);
915 	spin_unlock(&tcon->open_file_lock);
916 
917 	oplock_break_cancelled = wait_oplock_handler ?
918 		cancel_work_sync(&cifs_file->oplock_break) : false;
919 
920 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
921 		struct TCP_Server_Info *server = tcon->ses->server;
922 		unsigned int xid;
923 		int rc = 0;
924 
925 		xid = get_xid();
926 		if (server->ops->close_getattr)
927 			rc = server->ops->close_getattr(xid, tcon, cifs_file);
928 		else if (server->ops->close)
929 			rc = server->ops->close(xid, tcon, &cifs_file->fid);
930 		_free_xid(xid);
931 
932 		if (rc == -EBUSY || rc == -EAGAIN) {
933 			// Server close failed, hence offloading it as an async op
934 			queue_work(serverclose_wq, &cifs_file->serverclose);
935 			serverclose_offloaded = true;
936 		}
937 	}
938 
939 	if (oplock_break_cancelled)
940 		cifs_done_oplock_break(cifsi);
941 
942 	cifs_del_pending_open(&open);
943 
944 	// if serverclose has been offloaded to wq (on failure), it will
945 	// handle offloading put as well. If serverclose not offloaded,
946 	// we need to handle offloading put here.
947 	if (!serverclose_offloaded) {
948 		if (offload)
949 			queue_work(fileinfo_put_wq, &cifs_file->put);
950 		else
951 			cifsFileInfo_put_final(cifs_file);
952 	}
953 }
954 
955 int cifs_file_flush(const unsigned int xid, struct inode *inode,
956 		    struct cifsFileInfo *cfile)
957 {
958 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
959 	struct cifs_tcon *tcon;
960 	int rc;
961 
962 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
963 		return 0;
964 
965 	if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) {
966 		tcon = tlink_tcon(cfile->tlink);
967 		return tcon->ses->server->ops->flush(xid, tcon,
968 						     &cfile->fid);
969 	}
970 	rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
971 	if (!rc) {
972 		tcon = tlink_tcon(cfile->tlink);
973 		rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid);
974 		cifsFileInfo_put(cfile);
975 	} else if (rc == -EBADF) {
976 		rc = 0;
977 	}
978 	return rc;
979 }
980 
981 static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
982 {
983 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry));
984 	struct inode *inode = d_inode(dentry);
985 	struct cifsFileInfo *cfile = NULL;
986 	struct TCP_Server_Info *server;
987 	struct cifs_tcon *tcon;
988 	int rc;
989 
990 	rc = filemap_write_and_wait(inode->i_mapping);
991 	if (is_interrupt_error(rc))
992 		return -ERESTARTSYS;
993 	mapping_set_error(inode->i_mapping, rc);
994 
995 	cfile = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
996 	rc = cifs_file_flush(xid, inode, cfile);
997 	if (!rc) {
998 		if (cfile) {
999 			tcon = tlink_tcon(cfile->tlink);
1000 			server = tcon->ses->server;
1001 			rc = server->ops->set_file_size(xid, tcon,
1002 							cfile, 0, false);
1003 		}
1004 		if (!rc) {
1005 			netfs_resize_file(&cinode->netfs, 0, true);
1006 			cifs_setsize(inode, 0);
1007 			inode->i_blocks = 0;
1008 		}
1009 	}
1010 	if (cfile)
1011 		cifsFileInfo_put(cfile);
1012 	return rc;
1013 }
1014 
1015 int cifs_open(struct inode *inode, struct file *file)
1016 
1017 {
1018 	int rc = -EACCES;
1019 	unsigned int xid;
1020 	__u32 oplock;
1021 	struct cifs_sb_info *cifs_sb;
1022 	struct TCP_Server_Info *server;
1023 	struct cifs_tcon *tcon;
1024 	struct tcon_link *tlink;
1025 	struct cifsFileInfo *cfile = NULL;
1026 	void *page;
1027 	const char *full_path;
1028 	bool posix_open_ok = false;
1029 	struct cifs_fid fid = {};
1030 	struct cifs_pending_open open;
1031 	struct cifs_open_info_data data = {};
1032 
1033 	xid = get_xid();
1034 
1035 	cifs_sb = CIFS_SB(inode->i_sb);
1036 	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
1037 		free_xid(xid);
1038 		return -EIO;
1039 	}
1040 
1041 	tlink = cifs_sb_tlink(cifs_sb);
1042 	if (IS_ERR(tlink)) {
1043 		free_xid(xid);
1044 		return PTR_ERR(tlink);
1045 	}
1046 	tcon = tlink_tcon(tlink);
1047 	server = tcon->ses->server;
1048 
1049 	page = alloc_dentry_path();
1050 	full_path = build_path_from_dentry(file_dentry(file), page);
1051 	if (IS_ERR(full_path)) {
1052 		rc = PTR_ERR(full_path);
1053 		goto out;
1054 	}
1055 
1056 	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
1057 		 inode, file->f_flags, full_path);
1058 
1059 	if (file->f_flags & O_DIRECT &&
1060 	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
1061 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
1062 			file->f_op = &cifs_file_direct_nobrl_ops;
1063 		else
1064 			file->f_op = &cifs_file_direct_ops;
1065 	}
1066 
1067 	if (file->f_flags & O_TRUNC) {
1068 		rc = cifs_do_truncate(xid, file_dentry(file));
1069 		if (rc)
1070 			goto out;
1071 	}
1072 
1073 	/* Get the cached handle as SMB2 close is deferred */
1074 	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
1075 		rc = cifs_get_writable_path(tcon, full_path,
1076 					    FIND_WR_FSUID_ONLY |
1077 					    FIND_WR_NO_PENDING_DELETE,
1078 					    &cfile);
1079 	} else {
1080 		rc = cifs_get_readable_path(tcon, full_path, &cfile);
1081 	}
1082 	if (rc == 0) {
1083 		unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
1084 		unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
1085 
1086 		if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) &&
1087 		    (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) {
1088 			file->private_data = cfile;
1089 			spin_lock(&CIFS_I(inode)->deferred_lock);
1090 			cifs_del_deferred_close(cfile);
1091 			spin_unlock(&CIFS_I(inode)->deferred_lock);
1092 			goto use_cache;
1093 		}
1094 		_cifsFileInfo_put(cfile, true, false);
1095 	} else {
1096 		/* hard link on the deferred close file */
1097 		rc = cifs_get_hardlink_path(tcon, inode, file);
1098 		if (rc)
1099 			cifs_close_deferred_file(CIFS_I(inode));
1100 	}
1101 
1102 	if (server->oplocks)
1103 		oplock = REQ_OPLOCK;
1104 	else
1105 		oplock = 0;
1106 
1107 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1108 	if (!tcon->broken_posix_open && tcon->unix_ext &&
1109 	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1110 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1111 		/* can not refresh inode info since size could be stale */
1112 		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
1113 				cifs_sb->ctx->file_mode /* ignored */,
1114 				file->f_flags, &oplock, &fid.netfid, xid);
1115 		if (rc == 0) {
1116 			cifs_dbg(FYI, "posix open succeeded\n");
1117 			posix_open_ok = true;
1118 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1119 			if (tcon->ses->serverNOS)
1120 				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
1121 					 tcon->ses->ip_addr,
1122 					 tcon->ses->serverNOS);
1123 			tcon->broken_posix_open = true;
1124 		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
1125 			 (rc != -EOPNOTSUPP)) /* path not found or net err */
1126 			goto out;
1127 		/*
1128 		 * Else fall through to retry the open the old way on network i/o
1129 		 * or DFS errors.
1130 		 */
1131 	}
1132 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1133 
1134 	if (server->ops->get_lease_key)
1135 		server->ops->get_lease_key(inode, &fid);
1136 
1137 	cifs_add_pending_open(&fid, tlink, &open);
1138 
1139 	if (!posix_open_ok) {
1140 		if (server->ops->get_lease_key)
1141 			server->ops->get_lease_key(inode, &fid);
1142 
1143 		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1144 				  xid, &data);
1145 		if (rc) {
1146 			cifs_del_pending_open(&open);
1147 			goto out;
1148 		}
1149 	}
1150 
1151 	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1152 	if (cfile == NULL) {
1153 		if (server->ops->close)
1154 			server->ops->close(xid, tcon, &fid);
1155 		cifs_del_pending_open(&open);
1156 		rc = -ENOMEM;
1157 		goto out;
1158 	}
1159 
1160 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1161 	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1162 		/*
1163 		 * Time to set the mode, which we could not set earlier due to
1164 		 * problems creating new read-only files.
1165 		 */
1166 		struct cifs_unix_set_info_args args = {
1167 			.mode	= inode->i_mode,
1168 			.uid	= INVALID_UID, /* no change */
1169 			.gid	= INVALID_GID, /* no change */
1170 			.ctime	= NO_CHANGE_64,
1171 			.atime	= NO_CHANGE_64,
1172 			.mtime	= NO_CHANGE_64,
1173 			.device	= 0,
1174 		};
1175 		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1176 				       cfile->pid);
1177 	}
1178 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1179 
1180 use_cache:
1181 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1182 			   file->f_mode & FMODE_WRITE);
1183 	if (!(file->f_flags & O_DIRECT))
1184 		goto out;
1185 	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1186 		goto out;
1187 	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1188 
1189 out:
1190 	free_dentry_path(page);
1191 	free_xid(xid);
1192 	cifs_put_tlink(tlink);
1193 	cifs_free_open_info(&data);
1194 	return rc;
1195 }
1196 
1197 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1198 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1199 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1200 
1201 /*
1202  * Try to reacquire byte range locks that were released when session
1203  * to server was lost.
1204  */
1205 static int
1206 cifs_relock_file(struct cifsFileInfo *cfile)
1207 {
1208 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1209 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1210 	int rc = 0;
1211 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1212 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1213 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1214 
1215 	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1216 	if (cinode->can_cache_brlcks) {
1217 		/* can cache locks - no need to relock */
1218 		up_read(&cinode->lock_sem);
1219 		return rc;
1220 	}
1221 
1222 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1223 	if (cap_unix(tcon->ses) &&
1224 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1225 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1226 		rc = cifs_push_posix_locks(cfile);
1227 	else
1228 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1229 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1230 
1231 	up_read(&cinode->lock_sem);
1232 	return rc;
1233 }
1234 
1235 static int
1236 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1237 {
1238 	int rc = -EACCES;
1239 	unsigned int xid;
1240 	__u32 oplock;
1241 	struct cifs_sb_info *cifs_sb;
1242 	struct cifs_tcon *tcon;
1243 	struct TCP_Server_Info *server;
1244 	struct cifsInodeInfo *cinode;
1245 	struct inode *inode;
1246 	void *page;
1247 	const char *full_path;
1248 	int desired_access;
1249 	int disposition = FILE_OPEN;
1250 	int create_options = CREATE_NOT_DIR;
1251 	struct cifs_open_parms oparms;
1252 	int rdwr_for_fscache = 0;
1253 
1254 	xid = get_xid();
1255 	mutex_lock(&cfile->fh_mutex);
1256 	if (!cfile->invalidHandle) {
1257 		mutex_unlock(&cfile->fh_mutex);
1258 		free_xid(xid);
1259 		return 0;
1260 	}
1261 
1262 	inode = d_inode(cfile->dentry);
1263 	cifs_sb = CIFS_SB(inode->i_sb);
1264 	tcon = tlink_tcon(cfile->tlink);
1265 	server = tcon->ses->server;
1266 
1267 	/*
1268 	 * Cannot grab the rename sem here because various ops, including ones
1269 	 * that already hold the rename sem, can end up causing writepage to be
1270 	 * called; if the server was down, that means we end up here, and we
1271 	 * can never tell if the caller already holds the rename_sem.
1272 	 */
1273 	page = alloc_dentry_path();
1274 	full_path = build_path_from_dentry(cfile->dentry, page);
1275 	if (IS_ERR(full_path)) {
1276 		mutex_unlock(&cfile->fh_mutex);
1277 		free_dentry_path(page);
1278 		free_xid(xid);
1279 		return PTR_ERR(full_path);
1280 	}
1281 
1282 	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1283 		 inode, cfile->f_flags, full_path);
1284 
1285 	if (tcon->ses->server->oplocks)
1286 		oplock = REQ_OPLOCK;
1287 	else
1288 		oplock = 0;
1289 
1290 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1291 	if (tcon->unix_ext && cap_unix(tcon->ses) &&
1292 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1293 				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1294 		/*
1295 		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1296 		 * original open. Must mask them off for a reopen.
1297 		 */
1298 		unsigned int oflags = cfile->f_flags &
1299 						~(O_CREAT | O_EXCL | O_TRUNC);
1300 
1301 		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
1302 				     cifs_sb->ctx->file_mode /* ignored */,
1303 				     oflags, &oplock, &cfile->fid.netfid, xid);
1304 		if (rc == 0) {
1305 			cifs_dbg(FYI, "posix reopen succeeded\n");
1306 			oparms.reconnect = true;
1307 			goto reopen_success;
1308 		}
1309 		/*
1310 		 * Fall through to retry the open the old way on errors; in the
1311 		 * reconnect path especially, it is important to retry hard.
1312 		 */
1313 	}
1314 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1315 
1316 	/* If we're caching, we need to be able to fill in around partial writes. */
1317 	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1318 		rdwr_for_fscache = 1;
1319 
1320 	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
1321 
1322 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
1323 	if (cfile->f_flags & O_SYNC)
1324 		create_options |= CREATE_WRITE_THROUGH;
1325 
1326 	if (cfile->f_flags & O_DIRECT)
1327 		create_options |= CREATE_NO_BUFFER;
1328 
1329 	if (server->ops->get_lease_key)
1330 		server->ops->get_lease_key(inode, &cfile->fid);
1331 
1332 retry_open:
1333 	oparms = (struct cifs_open_parms) {
1334 		.tcon = tcon,
1335 		.cifs_sb = cifs_sb,
1336 		.desired_access = desired_access,
1337 		.create_options = cifs_create_options(cifs_sb, create_options),
1338 		.disposition = disposition,
1339 		.path = full_path,
1340 		.fid = &cfile->fid,
1341 		.reconnect = true,
1342 	};
1343 
1344 	/*
1345 	 * Can not refresh inode by passing in file_info buf to be returned by
1346 	 * ops->open and then calling get_inode_info with returned buf since
1347 	 * file might have write behind data that needs to be flushed and server
1348 	 * version of file size can be stale. If we knew for sure that inode was
1349 	 * not dirty locally we could do this.
1350 	 */
1351 	rc = server->ops->open(xid, &oparms, &oplock, NULL);
1352 	if (rc == -ENOENT && oparms.reconnect == false) {
1353 		/* durable handle timeout is expired - open the file again */
1354 		rc = server->ops->open(xid, &oparms, &oplock, NULL);
1355 		/* indicate that we need to relock the file */
1356 		oparms.reconnect = true;
1357 	}
1358 	if (rc == -EACCES && rdwr_for_fscache == 1) {
1359 		desired_access = cifs_convert_flags(cfile->f_flags, 0);
1360 		rdwr_for_fscache = 2;
1361 		goto retry_open;
1362 	}
1363 
1364 	if (rc) {
1365 		mutex_unlock(&cfile->fh_mutex);
1366 		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1367 		cifs_dbg(FYI, "oplock: %d\n", oplock);
1368 		goto reopen_error_exit;
1369 	}
1370 
1371 	if (rdwr_for_fscache == 2)
1372 		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1373 
1374 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1375 reopen_success:
1376 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1377 	cfile->invalidHandle = false;
1378 	mutex_unlock(&cfile->fh_mutex);
1379 	cinode = CIFS_I(inode);
1380 
1381 	if (can_flush) {
1382 		rc = filemap_write_and_wait(inode->i_mapping);
1383 		if (!is_interrupt_error(rc))
1384 			mapping_set_error(inode->i_mapping, rc);
1385 
1386 		if (tcon->posix_extensions) {
1387 			rc = smb311_posix_get_inode_info(&inode, full_path,
1388 							 NULL, inode->i_sb, xid);
1389 		} else if (tcon->unix_ext) {
1390 			rc = cifs_get_inode_info_unix(&inode, full_path,
1391 						      inode->i_sb, xid);
1392 		} else {
1393 			rc = cifs_get_inode_info(&inode, full_path, NULL,
1394 						 inode->i_sb, xid, NULL);
1395 		}
1396 	}
1397 	/*
1398 	 * Else we are already writing data out to the server and could
1399 	 * deadlock if we tried to flush it; and since we do not know whether
1400 	 * we have data that would invalidate the current end of file on the
1401 	 * server, we cannot go to the server for new inode info.
1402 	 */
1403 
1404 	/*
1405 	 * If the server returned a read oplock and we have mandatory brlocks,
1406 	 * set oplock level to None.
1407 	 */
1408 	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1409 		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1410 		oplock = 0;
1411 	}
1412 
1413 	server->ops->set_fid(cfile, &cfile->fid, oplock);
1414 	if (oparms.reconnect)
1415 		cifs_relock_file(cfile);
1416 
1417 reopen_error_exit:
1418 	free_dentry_path(page);
1419 	free_xid(xid);
1420 	return rc;
1421 }
1422 
1423 void smb2_deferred_work_close(struct work_struct *work)
1424 {
1425 	struct cifsFileInfo *cfile = container_of(work,
1426 			struct cifsFileInfo, deferred.work);
1427 
1428 	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1429 	cifs_del_deferred_close(cfile);
1430 	cfile->deferred_close_scheduled = false;
1431 	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1432 	_cifsFileInfo_put(cfile, true, false);
1433 }
1434 
1435 static bool
1436 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1437 {
1438 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1439 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1440 
1441 	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1442 			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
1443 			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
1444 			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
1445 
1446 }
1447 
1448 int cifs_close(struct inode *inode, struct file *file)
1449 {
1450 	struct cifsFileInfo *cfile;
1451 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1452 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1453 	struct cifs_deferred_close *dclose;
1454 
1455 	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1456 
1457 	if (file->private_data != NULL) {
1458 		cfile = file->private_data;
1459 		file->private_data = NULL;
1460 		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
1461 		if ((cfile->status_file_deleted == false) &&
1462 		    (smb2_can_defer_close(inode, dclose))) {
1463 			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
1464 				inode_set_mtime_to_ts(inode,
1465 						      inode_set_ctime_current(inode));
1466 			}
1467 			spin_lock(&cinode->deferred_lock);
1468 			cifs_add_deferred_close(cfile, dclose);
1469 			if (cfile->deferred_close_scheduled &&
1470 			    delayed_work_pending(&cfile->deferred)) {
1471 				/*
1472 				 * If there is no pending work, mod_delayed_work queues new work.
1473 				 * So, increase the ref count to avoid use-after-free.
1474 				 */
1475 				if (!mod_delayed_work(deferredclose_wq,
1476 						&cfile->deferred, cifs_sb->ctx->closetimeo))
1477 					cifsFileInfo_get(cfile);
1478 			} else {
1479 				/* Deferred close for files */
1480 				queue_delayed_work(deferredclose_wq,
1481 						&cfile->deferred, cifs_sb->ctx->closetimeo);
1482 				cfile->deferred_close_scheduled = true;
1483 				spin_unlock(&cinode->deferred_lock);
1484 				return 0;
1485 			}
1486 			spin_unlock(&cinode->deferred_lock);
1487 			_cifsFileInfo_put(cfile, true, false);
1488 		} else {
1489 			_cifsFileInfo_put(cfile, true, false);
1490 			kfree(dclose);
1491 		}
1492 	}
1493 
1494 	/* return code from the ->release op is always ignored */
1495 	return 0;
1496 }
1497 
1498 void
1499 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1500 {
1501 	struct cifsFileInfo *open_file, *tmp;
1502 	LIST_HEAD(tmp_list);
1503 
1504 	if (!tcon->use_persistent || !tcon->need_reopen_files)
1505 		return;
1506 
1507 	tcon->need_reopen_files = false;
1508 
1509 	cifs_dbg(FYI, "Reopen persistent handles\n");
1510 
1511 	/* list all files open on tree connection, reopen resilient handles  */
1512 	spin_lock(&tcon->open_file_lock);
1513 	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1514 		if (!open_file->invalidHandle)
1515 			continue;
1516 		cifsFileInfo_get(open_file);
1517 		list_add_tail(&open_file->rlist, &tmp_list);
1518 	}
1519 	spin_unlock(&tcon->open_file_lock);
1520 
1521 	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1522 		if (cifs_reopen_file(open_file, false /* do not flush */))
1523 			tcon->need_reopen_files = true;
1524 		list_del_init(&open_file->rlist);
1525 		cifsFileInfo_put(open_file);
1526 	}
1527 }
1528 
1529 int cifs_closedir(struct inode *inode, struct file *file)
1530 {
1531 	int rc = 0;
1532 	unsigned int xid;
1533 	struct cifsFileInfo *cfile = file->private_data;
1534 	struct cifs_tcon *tcon;
1535 	struct TCP_Server_Info *server;
1536 	char *buf;
1537 
1538 	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1539 
1540 	if (cfile == NULL)
1541 		return rc;
1542 
1543 	xid = get_xid();
1544 	tcon = tlink_tcon(cfile->tlink);
1545 	server = tcon->ses->server;
1546 
1547 	cifs_dbg(FYI, "Freeing private data in close dir\n");
1548 	spin_lock(&cfile->file_info_lock);
1549 	if (server->ops->dir_needs_close(cfile)) {
1550 		cfile->invalidHandle = true;
1551 		spin_unlock(&cfile->file_info_lock);
1552 		if (server->ops->close_dir)
1553 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1554 		else
1555 			rc = -ENOSYS;
1556 		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
1557 		/* not much we can do if it fails anyway, ignore rc */
1558 		rc = 0;
1559 	} else
1560 		spin_unlock(&cfile->file_info_lock);
1561 
1562 	buf = cfile->srch_inf.ntwrk_buf_start;
1563 	if (buf) {
1564 		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
1565 		cfile->srch_inf.ntwrk_buf_start = NULL;
1566 		if (cfile->srch_inf.smallBuf)
1567 			cifs_small_buf_release(buf);
1568 		else
1569 			cifs_buf_release(buf);
1570 	}
1571 
1572 	cifs_put_tlink(cfile->tlink);
1573 	kfree(file->private_data);
1574 	file->private_data = NULL;
1575 	/* BB can we lock the filestruct while this is going on? */
1576 	free_xid(xid);
1577 	return rc;
1578 }
1579 
1580 static struct cifsLockInfo *
1581 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1582 {
1583 	struct cifsLockInfo *lock =
1584 		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
1585 	if (!lock)
1586 		return lock;
1587 	lock->offset = offset;
1588 	lock->length = length;
1589 	lock->type = type;
1590 	lock->pid = current->tgid;
1591 	lock->flags = flags;
1592 	INIT_LIST_HEAD(&lock->blist);
1593 	init_waitqueue_head(&lock->block_q);
1594 	return lock;
1595 }
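
/*
 * Illustrative sketch (hypothetical, not part of the driver): building and
 * registering an exclusive byte-range lock record for [0, 4096), along the
 * lines of the setlk paths below, assuming server->vals->exclusive_lock_type:
 *
 *	struct cifsLockInfo *lock;
 *
 *	lock = cifs_lock_init(0, 4096, server->vals->exclusive_lock_type, 0);
 *	if (!lock)
 *		return -ENOMEM;
 *	cifs_lock_add(cfile, lock);
 */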
1596 
1597 void
1598 cifs_del_lock_waiters(struct cifsLockInfo *lock)
1599 {
1600 	struct cifsLockInfo *li, *tmp;
1601 	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1602 		list_del_init(&li->blist);
1603 		wake_up(&li->block_q);
1604 	}
1605 }
1606 
1607 #define CIFS_LOCK_OP	0
1608 #define CIFS_READ_OP	1
1609 #define CIFS_WRITE_OP	2
1610 
1611 /* @rw_check : 0 - no op, 1 - read, 2 - write */
1612 static bool
1613 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
1614 			    __u64 length, __u8 type, __u16 flags,
1615 			    struct cifsFileInfo *cfile,
1616 			    struct cifsLockInfo **conf_lock, int rw_check)
1617 {
1618 	struct cifsLockInfo *li;
1619 	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
1620 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1621 
1622 	list_for_each_entry(li, &fdlocks->locks, llist) {
1623 		if (offset + length <= li->offset ||
1624 		    offset >= li->offset + li->length)
1625 			continue;
1626 		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1627 		    server->ops->compare_fids(cfile, cur_cfile)) {
1628 			/* shared lock prevents write op through the same fid */
1629 			if (!(li->type & server->vals->shared_lock_type) ||
1630 			    rw_check != CIFS_WRITE_OP)
1631 				continue;
1632 		}
1633 		if ((type & server->vals->shared_lock_type) &&
1634 		    ((server->ops->compare_fids(cfile, cur_cfile) &&
1635 		     current->tgid == li->pid) || type == li->type))
1636 			continue;
1637 		if (rw_check == CIFS_LOCK_OP &&
1638 		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1639 		    server->ops->compare_fids(cfile, cur_cfile))
1640 			continue;
1641 		if (conf_lock)
1642 			*conf_lock = li;
1643 		return true;
1644 	}
1645 	return false;
1646 }
1647 
1648 bool
1649 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1650 			__u8 type, __u16 flags,
1651 			struct cifsLockInfo **conf_lock, int rw_check)
1652 {
1653 	bool rc = false;
1654 	struct cifs_fid_locks *cur;
1655 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1656 
1657 	list_for_each_entry(cur, &cinode->llist, llist) {
1658 		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1659 						 flags, cfile, conf_lock,
1660 						 rw_check);
1661 		if (rc)
1662 			break;
1663 	}
1664 
1665 	return rc;
1666 }
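
/*
 * Illustrative sketch (simplified): cifs_writev() below uses this helper to
 * refuse a buffered write that would cross another open's exclusive brlock;
 * the real caller also checks the CIFS_MOUNT_NOPOSIXBRL mount flag first.
 *
 *	if (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
 *				    server->vals->exclusive_lock_type, 0,
 *				    NULL, CIFS_WRITE_OP))
 *		rc = -EACCES;
 */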
1667 
1668 /*
1669  * Check if there is another lock that prevents us from setting the lock
1670  * (mandatory style). If such a lock exists, update the flock structure with
1671  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1672  * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
1673  * send a request to the server, or 1 otherwise.
1674  */
1675 static int
1676 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1677 	       __u8 type, struct file_lock *flock)
1678 {
1679 	int rc = 0;
1680 	struct cifsLockInfo *conf_lock;
1681 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1682 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1683 	bool exist;
1684 
1685 	down_read(&cinode->lock_sem);
1686 
1687 	exist = cifs_find_lock_conflict(cfile, offset, length, type,
1688 					flock->c.flc_flags, &conf_lock,
1689 					CIFS_LOCK_OP);
1690 	if (exist) {
1691 		flock->fl_start = conf_lock->offset;
1692 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1693 		flock->c.flc_pid = conf_lock->pid;
1694 		if (conf_lock->type & server->vals->shared_lock_type)
1695 			flock->c.flc_type = F_RDLCK;
1696 		else
1697 			flock->c.flc_type = F_WRLCK;
1698 	} else if (!cinode->can_cache_brlcks)
1699 		rc = 1;
1700 	else
1701 		flock->c.flc_type = F_UNLCK;
1702 
1703 	up_read(&cinode->lock_sem);
1704 	return rc;
1705 }
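
/*
 * Caller sketch for the mandatory-style F_GETLK path (mirrors cifs_getlk()
 * below): a return of 0 means flock already describes the answer locally,
 * while 1 means the range must still be probed on the wire.
 *
 *	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
 *	if (!rc)
 *		return rc;
 *	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
 *				    type, 1, 0, false);
 */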
1706 
1707 static void
1708 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1709 {
1710 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1711 	cifs_down_write(&cinode->lock_sem);
1712 	list_add_tail(&lock->llist, &cfile->llist->locks);
1713 	up_write(&cinode->lock_sem);
1714 }
1715 
1716 /*
1717  * Set the byte-range lock (mandatory style). Returns:
1718  * 1) 0, if we set the lock and don't need to send a request to the server;
1719  * 2) 1, if no locks prevent us but we need to send a request to the server;
1720  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1721  */
1722 static int
1723 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1724 		 bool wait)
1725 {
1726 	struct cifsLockInfo *conf_lock;
1727 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1728 	bool exist;
1729 	int rc = 0;
1730 
1731 try_again:
1732 	exist = false;
1733 	cifs_down_write(&cinode->lock_sem);
1734 
1735 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1736 					lock->type, lock->flags, &conf_lock,
1737 					CIFS_LOCK_OP);
1738 	if (!exist && cinode->can_cache_brlcks) {
1739 		list_add_tail(&lock->llist, &cfile->llist->locks);
1740 		up_write(&cinode->lock_sem);
1741 		return rc;
1742 	}
1743 
1744 	if (!exist)
1745 		rc = 1;
1746 	else if (!wait)
1747 		rc = -EACCES;
1748 	else {
1749 		list_add_tail(&lock->blist, &conf_lock->blist);
1750 		up_write(&cinode->lock_sem);
1751 		rc = wait_event_interruptible(lock->block_q,
1752 					(lock->blist.prev == &lock->blist) &&
1753 					(lock->blist.next == &lock->blist));
1754 		if (!rc)
1755 			goto try_again;
1756 		cifs_down_write(&cinode->lock_sem);
1757 		list_del_init(&lock->blist);
1758 	}
1759 
1760 	up_write(&cinode->lock_sem);
1761 	return rc;
1762 }
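
/*
 * Return-value handling sketch, as exercised by cifs_setlk() below.
 * rc < 0 (-EACCES) means a conflicting lock exists and we may not wait;
 * rc == 0 means the lock was cached locally; rc == 1 means no conflict
 * was found but the server must still be asked.
 *
 *	rc = cifs_lock_add_if(cfile, lock, wait_flag);
 *	if (rc < 0) {
 *		kfree(lock);
 *		return rc;
 *	}
 *	if (!rc)
 *		goto out;
 */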
1763 
1764 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1765 /*
1766  * Check if there is another lock that prevents us from setting the lock
1767  * (posix style). If such a lock exists, update the flock structure with its
1768  * properties. Otherwise, set the flock type to F_UNLCK if we can cache
1769  * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
1770  * send a request to the server, or 1 otherwise.
1771  */
1772 static int
1773 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1774 {
1775 	int rc = 0;
1776 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1777 	unsigned char saved_type = flock->c.flc_type;
1778 
1779 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1780 		return 1;
1781 
1782 	down_read(&cinode->lock_sem);
1783 	posix_test_lock(file, flock);
1784 
1785 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1786 		flock->c.flc_type = saved_type;
1787 		rc = 1;
1788 	}
1789 
1790 	up_read(&cinode->lock_sem);
1791 	return rc;
1792 }
1793 
1794 /*
1795  * Set the byte-range lock (posix style). Returns:
1796  * 1) <0, if an error occurs while setting the lock;
1797  * 2) 0, if we set the lock and don't need to send a request to the server;
1798  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1799  * 4) FILE_LOCK_DEFERRED + 1, if we need to send a request to the server.
1800  */
1801 static int
1802 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1803 {
1804 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1805 	int rc = FILE_LOCK_DEFERRED + 1;
1806 
1807 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1808 		return rc;
1809 
1810 	cifs_down_write(&cinode->lock_sem);
1811 	if (!cinode->can_cache_brlcks) {
1812 		up_write(&cinode->lock_sem);
1813 		return rc;
1814 	}
1815 
1816 	rc = posix_lock_file(file, flock, NULL);
1817 	up_write(&cinode->lock_sem);
1818 	return rc;
1819 }
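
/*
 * Interpretation sketch (mirrors cifs_setlk() below): any value up to and
 * including FILE_LOCK_DEFERRED is final locally, while FILE_LOCK_DEFERRED + 1
 * means the lock must still be sent to the server via CIFSSMBPosixLock().
 *
 *	rc = cifs_posix_lock_set(file, flock);
 *	if (rc <= FILE_LOCK_DEFERRED)
 *		return rc;
 */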
1820 
1821 int
1822 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1823 {
1824 	unsigned int xid;
1825 	int rc = 0, stored_rc;
1826 	struct cifsLockInfo *li, *tmp;
1827 	struct cifs_tcon *tcon;
1828 	unsigned int num, max_num, max_buf;
1829 	LOCKING_ANDX_RANGE *buf, *cur;
1830 	static const int types[] = {
1831 		LOCKING_ANDX_LARGE_FILES,
1832 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1833 	};
1834 	int i;
1835 
1836 	xid = get_xid();
1837 	tcon = tlink_tcon(cfile->tlink);
1838 
1839 	/*
1840 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1841 	 * and check it before using.
1842 	 */
1843 	max_buf = tcon->ses->server->maxBuf;
1844 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1845 		free_xid(xid);
1846 		return -EINVAL;
1847 	}
1848 
1849 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1850 		     PAGE_SIZE);
1851 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1852 			PAGE_SIZE);
1853 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1854 						sizeof(LOCKING_ANDX_RANGE);
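	/*
	 * Worked example with illustrative sizes: if max_buf ends up clamped
	 * to a 4 KiB PAGE_SIZE and the header and range entries are roughly
	 * 32 and 20 bytes, max_num is about (4096 - 32) / 20 = 203 lock
	 * ranges per LOCKING_ANDX request.
	 */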
1855 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1856 	if (!buf) {
1857 		free_xid(xid);
1858 		return -ENOMEM;
1859 	}
1860 
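	/*
	 * A single LOCKING_ANDX request carries one lock type for the whole
	 * batch, so make two passes over the list: one for exclusive
	 * large-file locks and one for shared ones (see types[] above).
	 */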
1861 	for (i = 0; i < 2; i++) {
1862 		cur = buf;
1863 		num = 0;
1864 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1865 			if (li->type != types[i])
1866 				continue;
1867 			cur->Pid = cpu_to_le16(li->pid);
1868 			cur->LengthLow = cpu_to_le32((u32)li->length);
1869 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1870 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1871 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1872 			if (++num == max_num) {
1873 				stored_rc = cifs_lockv(xid, tcon,
1874 						       cfile->fid.netfid,
1875 						       (__u8)li->type, 0, num,
1876 						       buf);
1877 				if (stored_rc)
1878 					rc = stored_rc;
1879 				cur = buf;
1880 				num = 0;
1881 			} else
1882 				cur++;
1883 		}
1884 
1885 		if (num) {
1886 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1887 					       (__u8)types[i], 0, num, buf);
1888 			if (stored_rc)
1889 				rc = stored_rc;
1890 		}
1891 	}
1892 
1893 	kfree(buf);
1894 	free_xid(xid);
1895 	return rc;
1896 }
1897 
1898 static __u32
1899 hash_lockowner(fl_owner_t owner)
1900 {
1901 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1902 }
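
/*
 * The legacy POSIX lock call identifies lock owners by a numeric PID on the
 * wire, while the VFS identifies them by an opaque fl_owner_t pointer.
 * Hashing the pointer with a random secret yields a stable pseudo-PID
 * without leaking kernel addresses; cifs_push_posix_locks() below uses it
 * as lck->pid = hash_lockowner(flock->c.flc_owner).
 */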
1903 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1904 
1905 struct lock_to_push {
1906 	struct list_head llist;
1907 	__u64 offset;
1908 	__u64 length;
1909 	__u32 pid;
1910 	__u16 netfid;
1911 	__u8 type;
1912 };
1913 
1914 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1915 static int
1916 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1917 {
1918 	struct inode *inode = d_inode(cfile->dentry);
1919 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1920 	struct file_lock *flock;
1921 	struct file_lock_context *flctx = locks_inode_context(inode);
1922 	unsigned int count = 0, i;
1923 	int rc = 0, xid, type;
1924 	struct list_head locks_to_send, *el;
1925 	struct lock_to_push *lck, *tmp;
1926 	__u64 length;
1927 
1928 	xid = get_xid();
1929 
1930 	if (!flctx)
1931 		goto out;
1932 
1933 	spin_lock(&flctx->flc_lock);
1934 	list_for_each(el, &flctx->flc_posix) {
1935 		count++;
1936 	}
1937 	spin_unlock(&flctx->flc_lock);
1938 
1939 	INIT_LIST_HEAD(&locks_to_send);
1940 
1941 	/*
1942 	 * Allocating count locks is enough because no FL_POSIX locks can be
1943 	 * added to the list while we are holding cinode->lock_sem that
1944 	 * protects locking operations of this inode.
1945 	 */
1946 	for (i = 0; i < count; i++) {
1947 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1948 		if (!lck) {
1949 			rc = -ENOMEM;
1950 			goto err_out;
1951 		}
1952 		list_add_tail(&lck->llist, &locks_to_send);
1953 	}
1954 
1955 	el = locks_to_send.next;
1956 	spin_lock(&flctx->flc_lock);
1957 	for_each_file_lock(flock, &flctx->flc_posix) {
1958 		unsigned char ftype = flock->c.flc_type;
1959 
1960 		if (el == &locks_to_send) {
1961 			/*
1962 			 * The list ended. We don't have enough allocated
1963 			 * structures - something is really wrong.
1964 			 */
1965 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1966 			break;
1967 		}
1968 		length = cifs_flock_len(flock);
1969 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1970 			type = CIFS_RDLCK;
1971 		else
1972 			type = CIFS_WRLCK;
1973 		lck = list_entry(el, struct lock_to_push, llist);
1974 		lck->pid = hash_lockowner(flock->c.flc_owner);
1975 		lck->netfid = cfile->fid.netfid;
1976 		lck->length = length;
1977 		lck->type = type;
1978 		lck->offset = flock->fl_start;
1979 	}
1980 	spin_unlock(&flctx->flc_lock);
1981 
1982 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1983 		int stored_rc;
1984 
1985 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1986 					     lck->offset, lck->length, NULL,
1987 					     lck->type, 0);
1988 		if (stored_rc)
1989 			rc = stored_rc;
1990 		list_del(&lck->llist);
1991 		kfree(lck);
1992 	}
1993 
1994 out:
1995 	free_xid(xid);
1996 	return rc;
1997 err_out:
1998 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1999 		list_del(&lck->llist);
2000 		kfree(lck);
2001 	}
2002 	goto out;
2003 }
2004 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2005 
2006 static int
2007 cifs_push_locks(struct cifsFileInfo *cfile)
2008 {
2009 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2010 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2011 	int rc = 0;
2012 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2013 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2014 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2015 
2016 	/* we are going to update can_cache_brlcks here - need write access */
2017 	cifs_down_write(&cinode->lock_sem);
2018 	if (!cinode->can_cache_brlcks) {
2019 		up_write(&cinode->lock_sem);
2020 		return rc;
2021 	}
2022 
2023 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2024 	if (cap_unix(tcon->ses) &&
2025 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2026 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2027 		rc = cifs_push_posix_locks(cfile);
2028 	else
2029 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2030 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
2031 
2032 	cinode->can_cache_brlcks = false;
2033 	up_write(&cinode->lock_sem);
2034 	return rc;
2035 }
2036 
2037 static void
2038 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
2039 		bool *wait_flag, struct TCP_Server_Info *server)
2040 {
2041 	if (flock->c.flc_flags & FL_POSIX)
2042 		cifs_dbg(FYI, "Posix\n");
2043 	if (flock->c.flc_flags & FL_FLOCK)
2044 		cifs_dbg(FYI, "Flock\n");
2045 	if (flock->c.flc_flags & FL_SLEEP) {
2046 		cifs_dbg(FYI, "Blocking lock\n");
2047 		*wait_flag = true;
2048 	}
2049 	if (flock->c.flc_flags & FL_ACCESS)
2050 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
2051 	if (flock->c.flc_flags & FL_LEASE)
2052 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
2053 	if (flock->c.flc_flags &
2054 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
2055 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
2056 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
2057 		         flock->c.flc_flags);
2058 
2059 	*type = server->vals->large_lock_type;
2060 	if (lock_is_write(flock)) {
2061 		cifs_dbg(FYI, "F_WRLCK\n");
2062 		*type |= server->vals->exclusive_lock_type;
2063 		*lock = 1;
2064 	} else if (lock_is_unlock(flock)) {
2065 		cifs_dbg(FYI, "F_UNLCK\n");
2066 		*type |= server->vals->unlock_lock_type;
2067 		*unlock = 1;
2068 		/* Check if unlock includes more than one lock range */
2069 	} else if (lock_is_read(flock)) {
2070 		cifs_dbg(FYI, "F_RDLCK\n");
2071 		*type |= server->vals->shared_lock_type;
2072 		*lock = 1;
2073 	} else if (flock->c.flc_type == F_EXLCK) {
2074 		cifs_dbg(FYI, "F_EXLCK\n");
2075 		*type |= server->vals->exclusive_lock_type;
2076 		*lock = 1;
2077 	} else if (flock->c.flc_type == F_SHLCK) {
2078 		cifs_dbg(FYI, "F_SHLCK\n");
2079 		*type |= server->vals->shared_lock_type;
2080 		*lock = 1;
2081 	} else
2082 		cifs_dbg(FYI, "Unknown type of lock\n");
2083 }
2084 
2085 static int
2086 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
2087 	   bool wait_flag, bool posix_lck, unsigned int xid)
2088 {
2089 	int rc = 0;
2090 	__u64 length = cifs_flock_len(flock);
2091 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2092 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2093 	struct TCP_Server_Info *server = tcon->ses->server;
2094 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2095 	__u16 netfid = cfile->fid.netfid;
2096 
2097 	if (posix_lck) {
2098 		int posix_lock_type;
2099 
2100 		rc = cifs_posix_lock_test(file, flock);
2101 		if (!rc)
2102 			return rc;
2103 
2104 		if (type & server->vals->shared_lock_type)
2105 			posix_lock_type = CIFS_RDLCK;
2106 		else
2107 			posix_lock_type = CIFS_WRLCK;
2108 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2109 				      hash_lockowner(flock->c.flc_owner),
2110 				      flock->fl_start, length, flock,
2111 				      posix_lock_type, wait_flag);
2112 		return rc;
2113 	}
2114 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2115 
2116 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2117 	if (!rc)
2118 		return rc;
2119 
2120 	/* BB we could chain these into one lock request BB */
2121 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2122 				    1, 0, false);
2123 	if (rc == 0) {
2124 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2125 					    type, 0, 1, false);
2126 		flock->c.flc_type = F_UNLCK;
2127 		if (rc != 0)
2128 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2129 				 rc);
2130 		return 0;
2131 	}
2132 
2133 	if (type & server->vals->shared_lock_type) {
2134 		flock->c.flc_type = F_WRLCK;
2135 		return 0;
2136 	}
2137 
2138 	type &= ~server->vals->exclusive_lock_type;
2139 
2140 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2141 				    type | server->vals->shared_lock_type,
2142 				    1, 0, false);
2143 	if (rc == 0) {
2144 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2145 			type | server->vals->shared_lock_type, 0, 1, false);
2146 		flock->c.flc_type = F_RDLCK;
2147 		if (rc != 0)
2148 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2149 				 rc);
2150 	} else
2151 		flock->c.flc_type = F_WRLCK;
2152 
2153 	return 0;
2154 }
2155 
2156 void
2157 cifs_move_llist(struct list_head *source, struct list_head *dest)
2158 {
2159 	struct list_head *li, *tmp;
2160 	list_for_each_safe(li, tmp, source)
2161 		list_move(li, dest);
2162 }
2163 
2164 int
2165 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2166 				struct file *file)
2167 {
2168 	struct cifsFileInfo *open_file = NULL;
2169 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2170 	int rc = 0;
2171 
2172 	spin_lock(&tcon->open_file_lock);
2173 	spin_lock(&cinode->open_file_lock);
2174 
2175 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2176 		if (file->f_flags == open_file->f_flags) {
2177 			rc = -EINVAL;
2178 			break;
2179 		}
2180 	}
2181 
2182 	spin_unlock(&cinode->open_file_lock);
2183 	spin_unlock(&tcon->open_file_lock);
2184 	return rc;
2185 }
2186 
2187 void
2188 cifs_free_llist(struct list_head *llist)
2189 {
2190 	struct cifsLockInfo *li, *tmp;
2191 	list_for_each_entry_safe(li, tmp, llist, llist) {
2192 		cifs_del_lock_waiters(li);
2193 		list_del(&li->llist);
2194 		kfree(li);
2195 	}
2196 }
2197 
2198 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2199 int
2200 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2201 		  unsigned int xid)
2202 {
2203 	int rc = 0, stored_rc;
2204 	static const int types[] = {
2205 		LOCKING_ANDX_LARGE_FILES,
2206 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2207 	};
2208 	unsigned int i;
2209 	unsigned int max_num, num, max_buf;
2210 	LOCKING_ANDX_RANGE *buf, *cur;
2211 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2212 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2213 	struct cifsLockInfo *li, *tmp;
2214 	__u64 length = cifs_flock_len(flock);
2215 	LIST_HEAD(tmp_llist);
2216 
2217 	/*
2218 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2219 	 * and check it before using.
2220 	 */
2221 	max_buf = tcon->ses->server->maxBuf;
2222 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2223 		return -EINVAL;
2224 
2225 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2226 		     PAGE_SIZE);
2227 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2228 			PAGE_SIZE);
2229 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2230 						sizeof(LOCKING_ANDX_RANGE);
2231 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2232 	if (!buf)
2233 		return -ENOMEM;
2234 
2235 	cifs_down_write(&cinode->lock_sem);
2236 	for (i = 0; i < 2; i++) {
2237 		cur = buf;
2238 		num = 0;
2239 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2240 			if (flock->fl_start > li->offset ||
2241 			    (flock->fl_start + length) <
2242 			    (li->offset + li->length))
2243 				continue;
2244 			if (current->tgid != li->pid)
2245 				continue;
2246 			if (types[i] != li->type)
2247 				continue;
2248 			if (cinode->can_cache_brlcks) {
2249 				/*
2250 				 * We can cache brlock requests - simply remove
2251 				 * a lock from the file's list.
2252 				 */
2253 				list_del(&li->llist);
2254 				cifs_del_lock_waiters(li);
2255 				kfree(li);
2256 				continue;
2257 			}
2258 			cur->Pid = cpu_to_le16(li->pid);
2259 			cur->LengthLow = cpu_to_le32((u32)li->length);
2260 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2261 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2262 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2263 			/*
2264 			 * We need to save a lock here to let us add it again to
2265 			 * the file's list if the unlock range request fails on
2266 			 * the server.
2267 			 */
2268 			list_move(&li->llist, &tmp_llist);
2269 			if (++num == max_num) {
2270 				stored_rc = cifs_lockv(xid, tcon,
2271 						       cfile->fid.netfid,
2272 						       li->type, num, 0, buf);
2273 				if (stored_rc) {
2274 					/*
2275 					 * We failed on the unlock range
2276 					 * request - add all locks from the tmp
2277 					 * list to the head of the file's list.
2278 					 */
2279 					cifs_move_llist(&tmp_llist,
2280 							&cfile->llist->locks);
2281 					rc = stored_rc;
2282 				} else
2283 					/*
2284 					 * The unlock range request succeeded -
2285 					 * free the tmp list.
2286 					 */
2287 					cifs_free_llist(&tmp_llist);
2288 				cur = buf;
2289 				num = 0;
2290 			} else
2291 				cur++;
2292 		}
2293 		if (num) {
2294 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2295 					       types[i], num, 0, buf);
2296 			if (stored_rc) {
2297 				cifs_move_llist(&tmp_llist,
2298 						&cfile->llist->locks);
2299 				rc = stored_rc;
2300 			} else
2301 				cifs_free_llist(&tmp_llist);
2302 		}
2303 	}
2304 
2305 	up_write(&cinode->lock_sem);
2306 	kfree(buf);
2307 	return rc;
2308 }
2309 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2310 
2311 static int
2312 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2313 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2314 	   unsigned int xid)
2315 {
2316 	int rc = 0;
2317 	__u64 length = cifs_flock_len(flock);
2318 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2319 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2320 	struct TCP_Server_Info *server = tcon->ses->server;
2321 	struct inode *inode = d_inode(cfile->dentry);
2322 
2323 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2324 	if (posix_lck) {
2325 		int posix_lock_type;
2326 
2327 		rc = cifs_posix_lock_set(file, flock);
2328 		if (rc <= FILE_LOCK_DEFERRED)
2329 			return rc;
2330 
2331 		if (type & server->vals->shared_lock_type)
2332 			posix_lock_type = CIFS_RDLCK;
2333 		else
2334 			posix_lock_type = CIFS_WRLCK;
2335 
2336 		if (unlock == 1)
2337 			posix_lock_type = CIFS_UNLCK;
2338 
2339 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2340 				      hash_lockowner(flock->c.flc_owner),
2341 				      flock->fl_start, length,
2342 				      NULL, posix_lock_type, wait_flag);
2343 		goto out;
2344 	}
2345 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2346 	if (lock) {
2347 		struct cifsLockInfo *lock;
2348 
2349 		lock = cifs_lock_init(flock->fl_start, length, type,
2350 				      flock->c.flc_flags);
2351 		if (!lock)
2352 			return -ENOMEM;
2353 
2354 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2355 		if (rc < 0) {
2356 			kfree(lock);
2357 			return rc;
2358 		}
2359 		if (!rc)
2360 			goto out;
2361 
2362 		/*
2363 		 * A Windows 7 server can delay breaking a lease from read to
2364 		 * None if we set a byte-range lock on a file - break it
2365 		 * explicitly before sending the lock to the server, to be sure
2366 		 * the next read won't conflict with non-overlapping locks due
2367 		 * to page reads.
2368 		 */
2369 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2370 					CIFS_CACHE_READ(CIFS_I(inode))) {
2371 			cifs_zap_mapping(inode);
2372 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2373 				 inode);
2374 			CIFS_I(inode)->oplock = 0;
2375 		}
2376 
2377 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2378 					    type, 1, 0, wait_flag);
2379 		if (rc) {
2380 			kfree(lock);
2381 			return rc;
2382 		}
2383 
2384 		cifs_lock_add(cfile, lock);
2385 	} else if (unlock)
2386 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2387 
2388 out:
2389 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2390 		/*
2391 		 * If this is a request to remove all locks because we
2392 		 * are closing the file, it doesn't matter if the
2393 		 * unlocking failed as both cifs.ko and the SMB server
2394 		 * remove the lock on file close
2395 		 */
2396 		if (rc) {
2397 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2398 			if (!(flock->c.flc_flags & FL_CLOSE))
2399 				return rc;
2400 		}
2401 		rc = locks_lock_file_wait(file, flock);
2402 	}
2403 	return rc;
2404 }
2405 
2406 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2407 {
2408 	int rc, xid;
2409 	int lock = 0, unlock = 0;
2410 	bool wait_flag = false;
2411 	bool posix_lck = false;
2412 	struct cifs_sb_info *cifs_sb;
2413 	struct cifs_tcon *tcon;
2414 	struct cifsFileInfo *cfile;
2415 	__u32 type;
2416 
2417 	xid = get_xid();
2418 
2419 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2420 		rc = -ENOLCK;
2421 		free_xid(xid);
2422 		return rc;
2423 	}
2424 
2425 	cfile = (struct cifsFileInfo *)file->private_data;
2426 	tcon = tlink_tcon(cfile->tlink);
2427 
2428 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2429 			tcon->ses->server);
2430 	cifs_sb = CIFS_FILE_SB(file);
2431 
2432 	if (cap_unix(tcon->ses) &&
2433 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2434 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2435 		posix_lck = true;
2436 
2437 	if (!lock && !unlock) {
2438 		/*
2439 		 * if no lock or unlock then nothing to do since we do not
2440 		 * if this is neither a lock nor an unlock request then there
2441 		 * is nothing to do since we do not know what it is
2442 		rc = -EOPNOTSUPP;
2443 		free_xid(xid);
2444 		return rc;
2445 	}
2446 
2447 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2448 			xid);
2449 	free_xid(xid);
2450 	return rc;
2453 }
2454 
2455 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2456 {
2457 	int rc, xid;
2458 	int lock = 0, unlock = 0;
2459 	bool wait_flag = false;
2460 	bool posix_lck = false;
2461 	struct cifs_sb_info *cifs_sb;
2462 	struct cifs_tcon *tcon;
2463 	struct cifsFileInfo *cfile;
2464 	__u32 type;
2465 
2466 	rc = -EACCES;
2467 	xid = get_xid();
2468 
2469 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2470 		 flock->c.flc_flags, flock->c.flc_type,
2471 		 (long long)flock->fl_start,
2472 		 (long long)flock->fl_end);
2473 
2474 	cfile = (struct cifsFileInfo *)file->private_data;
2475 	tcon = tlink_tcon(cfile->tlink);
2476 
2477 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2478 			tcon->ses->server);
2479 	cifs_sb = CIFS_FILE_SB(file);
2480 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2481 
2482 	if (cap_unix(tcon->ses) &&
2483 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2484 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2485 		posix_lck = true;
2486 	/*
2487 	 * BB add code here to normalize offset and length to account for
2488 	 * negative length, which we cannot accept over the wire.
2489 	 */
2490 	if (IS_GETLK(cmd)) {
2491 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2492 		free_xid(xid);
2493 		return rc;
2494 	}
2495 
2496 	if (!lock && !unlock) {
2497 		/*
2498 		 * if this is neither a lock nor an unlock request then there
2499 		 * is nothing to do since we do not know what it is
2500 		 */
2501 		free_xid(xid);
2502 		return -EOPNOTSUPP;
2503 	}
2504 
2505 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2506 			xid);
2507 	free_xid(xid);
2508 	return rc;
2509 }
2510 
2511 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
2512 {
2513 	struct netfs_io_request *wreq = wdata->rreq;
2514 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2515 	loff_t wrend;
2516 
2517 	if (result > 0) {
2518 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2519 
2520 		if (wrend > ictx->zero_point &&
2521 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2522 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2523 			ictx->zero_point = wrend;
2524 		if (wrend > ictx->remote_i_size)
2525 			netfs_resize_file(ictx, wrend, true);
2526 	}
2527 
2528 	netfs_write_subrequest_terminated(&wdata->subreq, result);
2529 }
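
/*
 * Worked example (illustrative numbers): an unbuffered write subrequest with
 * subreq.start == 4096, subreq.transferred == 0 and result == 8192 gives
 * wrend == 12288; if remote_i_size was 8192 the inode is resized to 12288,
 * and zero_point advances so reads beyond the old EOF are fetched rather
 * than assumed to be zeroes.
 */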
2530 
2531 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2532 					bool fsuid_only)
2533 {
2534 	struct cifsFileInfo *open_file = NULL;
2535 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2536 
2537 	/* only filter by fsuid on multiuser mounts */
2538 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2539 		fsuid_only = false;
2540 
2541 	spin_lock(&cifs_inode->open_file_lock);
2542 	/* we could simply get the first_list_entry since write-only entries
2543 	   are always at the end of the list but since the first entry might
2544 	   have a close pending, we go through the whole list */
2545 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2546 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2547 			continue;
2548 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2549 			if ((!open_file->invalidHandle)) {
2550 				/* found a good file */
2551 				/* lock it so it will not be closed on us */
2552 				cifsFileInfo_get(open_file);
2553 				spin_unlock(&cifs_inode->open_file_lock);
2554 				return open_file;
2555 			} /* else might as well continue, and look for
2556 			     another, or simply have the caller reopen it
2557 			     again rather than trying to fix this handle */
2558 		} else /* write only file */
2559 			break; /* write only files are last so must be done */
2560 	}
2561 	spin_unlock(&cifs_inode->open_file_lock);
2562 	return NULL;
2563 }
2564 
2565 /* Return -EBADF if no handle is found and general rc otherwise */
2566 int
2567 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2568 		       struct cifsFileInfo **ret_file)
2569 {
2570 	struct cifsFileInfo *open_file, *inv_file = NULL;
2571 	struct cifs_sb_info *cifs_sb;
2572 	bool any_available = false;
2573 	int rc = -EBADF;
2574 	unsigned int refind = 0;
2575 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2576 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2577 	*ret_file = NULL;
2578 
2579 	/*
2580 	 * Having a null inode here (because mapping->host was set to zero by
2581 	 * the VFS or MM) should not happen, but we had reports of an oops (due
2582 	 * to it being zero) during stress test cases, so we need to check for it.
2583 	 */
2584 
2585 	if (cifs_inode == NULL) {
2586 		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
2587 		dump_stack();
2588 		return rc;
2589 	}
2590 
2591 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2592 
2593 	/* only filter by fsuid on multiuser mounts */
2594 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2595 		fsuid_only = false;
2596 
2597 	spin_lock(&cifs_inode->open_file_lock);
2598 refind_writable:
2599 	if (refind > MAX_REOPEN_ATT) {
2600 		spin_unlock(&cifs_inode->open_file_lock);
2601 		return rc;
2602 	}
2603 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2604 		if (!any_available && open_file->pid != current->tgid)
2605 			continue;
2606 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2607 			continue;
2608 		if (with_delete && !(open_file->fid.access & DELETE))
2609 			continue;
2610 		if ((flags & FIND_WR_NO_PENDING_DELETE) &&
2611 		    open_file->status_file_deleted)
2612 			continue;
2613 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2614 			if (!open_file->invalidHandle) {
2615 				/* found a good writable file */
2616 				cifsFileInfo_get(open_file);
2617 				spin_unlock(&cifs_inode->open_file_lock);
2618 				*ret_file = open_file;
2619 				return 0;
2620 			} else {
2621 				if (!inv_file)
2622 					inv_file = open_file;
2623 			}
2624 		}
2625 	}
2626 	/* couldn't find usable FH with same pid, try any available */
2627 	if (!any_available) {
2628 		any_available = true;
2629 		goto refind_writable;
2630 	}
2631 
2632 	if (inv_file) {
2633 		any_available = false;
2634 		cifsFileInfo_get(inv_file);
2635 	}
2636 
2637 	spin_unlock(&cifs_inode->open_file_lock);
2638 
2639 	if (inv_file) {
2640 		rc = cifs_reopen_file(inv_file, false);
2641 		if (!rc) {
2642 			*ret_file = inv_file;
2643 			return 0;
2644 		}
2645 
2646 		spin_lock(&cifs_inode->open_file_lock);
2647 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2648 		spin_unlock(&cifs_inode->open_file_lock);
2649 		cifsFileInfo_put(inv_file);
2650 		++refind;
2651 		inv_file = NULL;
2652 		spin_lock(&cifs_inode->open_file_lock);
2653 		goto refind_writable;
2654 	}
2655 
2656 	return rc;
2657 }
2658 
2659 struct cifsFileInfo *
2660 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2661 {
2662 	struct cifsFileInfo *cfile;
2663 	int rc;
2664 
2665 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2666 	if (rc)
2667 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2668 
2669 	return cfile;
2670 }
2671 
2672 int
2673 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2674 		       int flags,
2675 		       struct cifsFileInfo **ret_file)
2676 {
2677 	struct cifsFileInfo *cfile;
2678 	void *page = alloc_dentry_path();
2679 
2680 	*ret_file = NULL;
2681 
2682 	spin_lock(&tcon->open_file_lock);
2683 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2684 		struct cifsInodeInfo *cinode;
2685 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2686 		if (IS_ERR(full_path)) {
2687 			spin_unlock(&tcon->open_file_lock);
2688 			free_dentry_path(page);
2689 			return PTR_ERR(full_path);
2690 		}
2691 		if (strcmp(full_path, name))
2692 			continue;
2693 
2694 		cinode = CIFS_I(d_inode(cfile->dentry));
2695 		spin_unlock(&tcon->open_file_lock);
2696 		free_dentry_path(page);
2697 		return cifs_get_writable_file(cinode, flags, ret_file);
2698 	}
2699 
2700 	spin_unlock(&tcon->open_file_lock);
2701 	free_dentry_path(page);
2702 	return -ENOENT;
2703 }
2704 
2705 int
2706 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2707 		       struct cifsFileInfo **ret_file)
2708 {
2709 	struct cifsFileInfo *cfile;
2710 	void *page = alloc_dentry_path();
2711 
2712 	*ret_file = NULL;
2713 
2714 	spin_lock(&tcon->open_file_lock);
2715 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2716 		struct cifsInodeInfo *cinode;
2717 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2718 		if (IS_ERR(full_path)) {
2719 			spin_unlock(&tcon->open_file_lock);
2720 			free_dentry_path(page);
2721 			return PTR_ERR(full_path);
2722 		}
2723 		if (strcmp(full_path, name))
2724 			continue;
2725 
2726 		cinode = CIFS_I(d_inode(cfile->dentry));
2727 		spin_unlock(&tcon->open_file_lock);
2728 		free_dentry_path(page);
2729 		*ret_file = find_readable_file(cinode, 0);
2730 		if (*ret_file) {
2731 			spin_lock(&cinode->open_file_lock);
2732 			if ((*ret_file)->status_file_deleted) {
2733 				spin_unlock(&cinode->open_file_lock);
2734 				cifsFileInfo_put(*ret_file);
2735 				*ret_file = NULL;
2736 			} else {
2737 				spin_unlock(&cinode->open_file_lock);
2738 			}
2739 		}
2740 		return *ret_file ? 0 : -ENOENT;
2741 	}
2742 
2743 	spin_unlock(&tcon->open_file_lock);
2744 	free_dentry_path(page);
2745 	return -ENOENT;
2746 }
2747 
2748 /*
2749  * Flush data on a strict file.
2750  */
2751 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2752 		      int datasync)
2753 {
2754 	struct cifsFileInfo *smbfile = file->private_data;
2755 	struct inode *inode = file_inode(file);
2756 	unsigned int xid;
2757 	int rc;
2758 
2759 	rc = file_write_and_wait_range(file, start, end);
2760 	if (rc) {
2761 		trace_cifs_fsync_err(inode->i_ino, rc);
2762 		return rc;
2763 	}
2764 
2765 	cifs_dbg(FYI, "%s: name=%pD datasync=0x%x\n", __func__, file, datasync);
2766 
2767 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2768 		rc = cifs_zap_mapping(inode);
2769 		cifs_dbg(FYI, "%s: invalidate mapping: rc = %d\n", __func__, rc);
2770 	}
2771 
2772 	xid = get_xid();
2773 	rc = cifs_file_flush(xid, inode, smbfile);
2774 	free_xid(xid);
2775 	return rc;
2776 }
2777 
2778 /*
2779  * Flush data on a non-strict file.
2780  */
2781 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2782 {
2783 	unsigned int xid;
2784 	int rc = 0;
2785 	struct cifs_tcon *tcon;
2786 	struct TCP_Server_Info *server;
2787 	struct cifsFileInfo *smbfile = file->private_data;
2788 	struct inode *inode = file_inode(file);
2789 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2790 
2791 	rc = file_write_and_wait_range(file, start, end);
2792 	if (rc) {
2793 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2794 		return rc;
2795 	}
2796 
2797 	xid = get_xid();
2798 
2799 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2800 		 file, datasync);
2801 
2802 	tcon = tlink_tcon(smbfile->tlink);
2803 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2804 		server = tcon->ses->server;
2805 		if (server->ops->flush == NULL) {
2806 			rc = -ENOSYS;
2807 			goto fsync_exit;
2808 		}
2809 
2810 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2811 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2812 			if (smbfile) {
2813 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2814 				cifsFileInfo_put(smbfile);
2815 			} else
2816 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2817 		} else
2818 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2819 	}
2820 
2821 fsync_exit:
2822 	free_xid(xid);
2823 	return rc;
2824 }
2825 
2826 /*
2827  * As file closes, flush all cached write data for this inode checking
2828  * for write behind errors.
2829  */
2830 int cifs_flush(struct file *file, fl_owner_t id)
2831 {
2832 	struct inode *inode = file_inode(file);
2833 	int rc = 0;
2834 
2835 	if (file->f_mode & FMODE_WRITE)
2836 		rc = filemap_write_and_wait(inode->i_mapping);
2837 
2838 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2839 	if (rc) {
2840 		/* get more nuanced writeback errors */
2841 		rc = filemap_check_wb_err(file->f_mapping, 0);
2842 		trace_cifs_flush_err(inode->i_ino, rc);
2843 	}
2844 	return rc;
2845 }
2846 
2847 static ssize_t
2848 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2849 {
2850 	struct file *file = iocb->ki_filp;
2851 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2852 	struct inode *inode = file->f_mapping->host;
2853 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2854 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2855 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2856 	ssize_t rc;
2857 
2858 	rc = netfs_start_io_write(inode);
2859 	if (rc < 0)
2860 		return rc;
2861 
2862 	/*
2863 	 * We need to hold the sem to be sure nobody modifies the lock list
2864 	 * with a brlock that prevents writing.
2865 	 */
2866 	down_read(&cinode->lock_sem);
2867 
2868 	rc = generic_write_checks(iocb, from);
2869 	if (rc <= 0)
2870 		goto out;
2871 
2872 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2873 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2874 				     server->vals->exclusive_lock_type, 0,
2875 				     NULL, CIFS_WRITE_OP))) {
2876 		rc = -EACCES;
2877 		goto out;
2878 	}
2879 
2880 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2881 
2882 out:
2883 	up_read(&cinode->lock_sem);
2884 	netfs_end_io_write(inode);
2885 	if (rc > 0)
2886 		rc = generic_write_sync(iocb, rc);
2887 	return rc;
2888 }
2889 
2890 ssize_t
2891 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2892 {
2893 	struct inode *inode = file_inode(iocb->ki_filp);
2894 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2895 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2896 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2897 						iocb->ki_filp->private_data;
2898 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2899 	ssize_t written;
2900 
2901 	written = cifs_get_writer(cinode);
2902 	if (written)
2903 		return written;
2904 
2905 	if (CIFS_CACHE_WRITE(cinode)) {
2906 		if (cap_unix(tcon->ses) &&
2907 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2908 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2909 			written = netfs_file_write_iter(iocb, from);
2910 			goto out;
2911 		}
2912 		written = cifs_writev(iocb, from);
2913 		goto out;
2914 	}
2915 	/*
2916 	 * For non-oplocked files in strict cache mode we need to write the data
2917 	 * to the server exactly from pos to pos+len-1 rather than flush all
2918 	 * affected pages, because flushing may cause an error with mandatory
2919 	 * locks on these pages but not on the region from pos to pos+len-1.
2920 	 */
2921 	written = netfs_file_write_iter(iocb, from);
2922 	if (CIFS_CACHE_READ(cinode)) {
2923 		/*
2924 		 * We have read level caching and we have just sent a write
2925 		 * request to the server thus making data in the cache stale.
2926 		 * Zap the cache and set oplock/lease level to NONE to avoid
2927 		 * reading stale data from the cache. All subsequent read
2928 		 * operations will read new data from the server.
2929 		 */
2930 		cifs_zap_mapping(inode);
2931 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2932 			 inode);
2933 		cinode->oplock = 0;
2934 	}
2935 out:
2936 	cifs_put_writer(cinode);
2937 	return written;
2938 }
2939 
2940 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2941 {
2942 	ssize_t rc;
2943 	struct inode *inode = file_inode(iocb->ki_filp);
2944 
2945 	if (iocb->ki_flags & IOCB_DIRECT)
2946 		return netfs_unbuffered_read_iter(iocb, iter);
2947 
2948 	rc = cifs_revalidate_mapping(inode);
2949 	if (rc)
2950 		return rc;
2951 
2952 	return netfs_file_read_iter(iocb, iter);
2953 }
2954 
2955 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2956 {
2957 	struct inode *inode = file_inode(iocb->ki_filp);
2958 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2959 	ssize_t written;
2960 	int rc;
2961 
2962 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2963 		written = netfs_unbuffered_write_iter(iocb, from);
2964 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2965 			cifs_zap_mapping(inode);
2966 			cifs_dbg(FYI,
2967 				 "Set no oplock for inode=%p after a write operation\n",
2968 				 inode);
2969 			cinode->oplock = 0;
2970 		}
2971 		return written;
2972 	}
2973 
2974 	written = cifs_get_writer(cinode);
2975 	if (written)
2976 		return written;
2977 
2978 	written = netfs_file_write_iter(iocb, from);
2979 
2980 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2981 		rc = filemap_fdatawrite(inode->i_mapping);
2982 		if (rc)
2983 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2984 				 rc, inode);
2985 	}
2986 
2987 	cifs_put_writer(cinode);
2988 	return written;
2989 }
2990 
2991 ssize_t
2992 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2993 {
2994 	struct inode *inode = file_inode(iocb->ki_filp);
2995 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2996 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2997 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2998 						iocb->ki_filp->private_data;
2999 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3000 	int rc = -EACCES;
3001 
3002 	/*
3003 	 * In strict cache mode we need to read from the server all the time
3004 	 * if we don't have a level II oplock, because the server can delay an
3005 	 * mtime change - so we can't make a decision about invalidating the
3006 	 * inode. We can also fail when reading pages if there are mandatory
3007 	 * locks on pages affected by this read but not on the region from pos
3008 	 * to pos+len-1.
3009 	 */
3010 	if (!CIFS_CACHE_READ(cinode))
3011 		return netfs_unbuffered_read_iter(iocb, to);
3012 
3013 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
3014 		if (iocb->ki_flags & IOCB_DIRECT)
3015 			return netfs_unbuffered_read_iter(iocb, to);
3016 		return netfs_buffered_read_iter(iocb, to);
3017 	}
3018 
3019 	/*
3020 	 * We need to hold the sem to be sure nobody modifies the lock list
3021 	 * with a brlock that prevents reading.
3022 	 */
3023 	if (iocb->ki_flags & IOCB_DIRECT) {
3024 		rc = netfs_start_io_direct(inode);
3025 		if (rc < 0)
3026 			goto out;
3027 		rc = -EACCES;
3028 		down_read(&cinode->lock_sem);
3029 		if (!cifs_find_lock_conflict(
3030 			    cfile, iocb->ki_pos, iov_iter_count(to),
3031 			    tcon->ses->server->vals->shared_lock_type,
3032 			    0, NULL, CIFS_READ_OP))
3033 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
3034 		up_read(&cinode->lock_sem);
3035 		netfs_end_io_direct(inode);
3036 	} else {
3037 		rc = netfs_start_io_read(inode);
3038 		if (rc < 0)
3039 			goto out;
3040 		rc = -EACCES;
3041 		down_read(&cinode->lock_sem);
3042 		if (!cifs_find_lock_conflict(
3043 			    cfile, iocb->ki_pos, iov_iter_count(to),
3044 			    tcon->ses->server->vals->shared_lock_type,
3045 			    0, NULL, CIFS_READ_OP))
3046 			rc = filemap_read(iocb, to, 0);
3047 		up_read(&cinode->lock_sem);
3048 		netfs_end_io_read(inode);
3049 	}
3050 out:
3051 	return rc;
3052 }
3053 
3054 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
3055 {
3056 	return netfs_page_mkwrite(vmf, NULL);
3057 }
3058 
3059 static const struct vm_operations_struct cifs_file_vm_ops = {
3060 	.fault = filemap_fault,
3061 	.map_pages = filemap_map_pages,
3062 	.page_mkwrite = cifs_page_mkwrite,
3063 };
3064 
3065 int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
3066 {
3067 	int xid, rc = 0;
3068 	struct inode *inode = file_inode(desc->file);
3069 
3070 	xid = get_xid();
3071 
3072 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3073 		rc = cifs_zap_mapping(inode);
3074 	if (!rc)
3075 		rc = generic_file_mmap_prepare(desc);
3076 	if (!rc)
3077 		desc->vm_ops = &cifs_file_vm_ops;
3078 
3079 	free_xid(xid);
3080 	return rc;
3081 }
3082 
3083 int cifs_file_mmap_prepare(struct vm_area_desc *desc)
3084 {
3085 	int rc, xid;
3086 
3087 	xid = get_xid();
3088 
3089 	rc = cifs_revalidate_file(desc->file);
3090 	if (rc)
3091 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3092 			 rc);
3093 	if (!rc)
3094 		rc = generic_file_mmap_prepare(desc);
3095 	if (!rc)
3096 		desc->vm_ops = &cifs_file_vm_ops;
3097 
3098 	free_xid(xid);
3099 	return rc;
3100 }
3101 
3102 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3103 {
3104 	struct cifsFileInfo *open_file;
3105 
3106 	spin_lock(&cifs_inode->open_file_lock);
3107 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3108 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3109 			spin_unlock(&cifs_inode->open_file_lock);
3110 			return 1;
3111 		}
3112 	}
3113 	spin_unlock(&cifs_inode->open_file_lock);
3114 	return 0;
3115 }
3116 
3117 /* We do not want to update the file size from the server for inodes
3118    open for write, to avoid races with writepage extending the file.
3119    In the future we could consider allowing refreshing the inode only
3120    on increases in the file size, but this is tricky to do without
3121    racing with writebehind page caching in the current Linux kernel
3122    design. */
3123 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3124 			    bool from_readdir)
3125 {
3126 	if (!cifsInode)
3127 		return true;
3128 
3129 	if (is_inode_writable(cifsInode) ||
3130 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3131 		/* This inode is open for write at least once */
3132 		struct cifs_sb_info *cifs_sb;
3133 
3134 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3135 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3136 			/* since there is no page cache to corrupt on
3137 			   direct I/O we can change the size safely */
3138 			return true;
3139 		}
3140 
3141 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3142 			return true;
3143 
3144 		return false;
3145 	} else
3146 		return true;
3147 }
3148 
3149 void cifs_oplock_break(struct work_struct *work)
3150 {
3151 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3152 						  oplock_break);
3153 	struct inode *inode = d_inode(cfile->dentry);
3154 	struct super_block *sb = inode->i_sb;
3155 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
3156 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3157 	struct cifs_tcon *tcon;
3158 	struct TCP_Server_Info *server;
3159 	struct tcon_link *tlink;
3160 	int rc = 0;
3161 	bool purge_cache = false, oplock_break_cancelled;
3162 	__u64 persistent_fid, volatile_fid;
3163 	__u16 net_fid;
3164 
3165 	/*
3166 	 * Hold a reference to the superblock to prevent it and its inodes from
3167 	 * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
3168 	 * may release the last reference to the sb and trigger inode eviction.
3169 	 */
3170 	cifs_sb_active(sb);
3171 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3172 			TASK_UNINTERRUPTIBLE);
3173 
3174 	tlink = cifs_sb_tlink(cifs_sb);
3175 	if (IS_ERR(tlink))
3176 		goto out;
3177 	tcon = tlink_tcon(tlink);
3178 	server = tcon->ses->server;
3179 
3180 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3181 				      cfile->oplock_epoch, &purge_cache);
3182 
3183 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3184 						cifs_has_mand_locks(cinode)) {
3185 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3186 			 inode);
3187 		cinode->oplock = 0;
3188 	}
3189 
3190 	if (S_ISREG(inode->i_mode)) {
3191 		if (CIFS_CACHE_READ(cinode))
3192 			break_lease(inode, O_RDONLY);
3193 		else
3194 			break_lease(inode, O_WRONLY);
3195 		rc = filemap_fdatawrite(inode->i_mapping);
3196 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3197 			rc = filemap_fdatawait(inode->i_mapping);
3198 			mapping_set_error(inode->i_mapping, rc);
3199 			cifs_zap_mapping(inode);
3200 		}
3201 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3202 		if (CIFS_CACHE_WRITE(cinode))
3203 			goto oplock_break_ack;
3204 	}
3205 
3206 	rc = cifs_push_locks(cfile);
3207 	if (rc)
3208 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3209 
3210 oplock_break_ack:
3211 	/*
3212 	 * When an oplock break is received and there are no active file
3213 	 * handles, only cached ones, schedule the deferred close immediately
3214 	 * so that a new open will not use a cached handle.
3215 	 */
3216 
3217 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3218 		cifs_close_deferred_file(cinode);
3219 
3220 	persistent_fid = cfile->fid.persistent_fid;
3221 	volatile_fid = cfile->fid.volatile_fid;
3222 	net_fid = cfile->fid.netfid;
3223 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3224 
3225 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
3226 	/*
3227 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3228 	 * an acknowledgment to be sent when the file has already been closed.
3229 	 */
3230 	spin_lock(&cinode->open_file_lock);
3231 	/* check list empty since can race with kill_sb calling tree disconnect */
3232 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3233 		spin_unlock(&cinode->open_file_lock);
3234 		rc = server->ops->oplock_response(tcon, persistent_fid,
3235 						  volatile_fid, net_fid, cinode);
3236 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3237 	} else
3238 		spin_unlock(&cinode->open_file_lock);
3239 
3240 	cifs_put_tlink(tlink);
3241 out:
3242 	cifs_done_oplock_break(cinode);
3243 	cifs_sb_deactive(sb);
3244 }
3245 
3246 static int cifs_swap_activate(struct swap_info_struct *sis,
3247 			      struct file *swap_file, sector_t *span)
3248 {
3249 	struct cifsFileInfo *cfile = swap_file->private_data;
3250 	struct inode *inode = swap_file->f_mapping->host;
3251 	unsigned long blocks;
3252 	long long isize;
3253 
3254 	cifs_dbg(FYI, "swap activate\n");
3255 
3256 	if (!swap_file->f_mapping->a_ops->swap_rw)
3257 		/* Cannot support swap */
3258 		return -EINVAL;
3259 
3260 	spin_lock(&inode->i_lock);
3261 	blocks = inode->i_blocks;
3262 	isize = inode->i_size;
3263 	spin_unlock(&inode->i_lock);
3264 	if (blocks*512 < isize) {
3265 		pr_warn("swap activate: swapfile has holes\n");
3266 		return -EINVAL;
3267 	}
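	/*
	 * Illustrative numbers for the check above: a fully-allocated 1 MiB
	 * swapfile must report at least 2048 512-byte blocks, since
	 * 2048 * 512 == 1048576; fewer blocks than that implies a hole.
	 */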
3268 	*span = sis->pages;
3269 
3270 	pr_warn_once("Swap support over SMB3 is experimental\n");
3271 
3272 	/*
3273 	 * TODO: consider adding ACL (or documenting how) to prevent other
3274 	 * users (on this or other systems) from reading it
3275 	 */
3276 
3278 	/* TODO: add sk_set_memalloc(inet) or similar */
3279 
3280 	if (cfile)
3281 		cfile->swapfile = true;
3282 	/*
3283 	 * TODO: Since file already open, we can't open with DENY_ALL here
3284 	 * but we could add call to grab a byte range lock to prevent others
3285 	 * from reading or writing the file
3286 	 */
3287 
3288 	sis->flags |= SWP_FS_OPS;
3289 	return add_swap_extent(sis, 0, sis->max, 0);
3290 }
3291 
3292 static void cifs_swap_deactivate(struct file *file)
3293 {
3294 	struct cifsFileInfo *cfile = file->private_data;
3295 
3296 	cifs_dbg(FYI, "swap deactivate\n");
3297 
3298 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3299 
3300 	if (cfile)
3301 		cfile->swapfile = false;
3302 
3303 	/* do we need to unpin (or unlock) the file */
3304 }
3305 
3306 /**
3307  * cifs_swap_rw - SMB3 address space operation for swap I/O
3308  * @iocb: target I/O control block
3309  * @iter: I/O buffer
3310  *
3311  * Perform IO to the swap-file.  This is much like direct IO.
3312  */
3313 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3314 {
3315 	ssize_t ret;
3316 
3317 	if (iov_iter_rw(iter) == READ)
3318 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3319 	else
3320 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3321 	if (ret < 0)
3322 		return ret;
3323 	return 0;
3324 }
3325 
3326 const struct address_space_operations cifs_addr_ops = {
3327 	.read_folio	= netfs_read_folio,
3328 	.readahead	= netfs_readahead,
3329 	.writepages	= netfs_writepages,
3330 	.dirty_folio	= netfs_dirty_folio,
3331 	.release_folio	= netfs_release_folio,
3332 	.direct_IO	= noop_direct_IO,
3333 	.invalidate_folio = netfs_invalidate_folio,
3334 	.migrate_folio	= filemap_migrate_folio,
3335 	/*
3336 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3337 	 * helper if needed
3338 	 */
3339 	.swap_activate	= cifs_swap_activate,
3340 	.swap_deactivate = cifs_swap_deactivate,
3341 	.swap_rw = cifs_swap_rw,
3342 };
3343 
3344 /*
3345  * cifs_readahead requires the server to support a buffer large enough to
3346  * contain the header plus one complete page of data.  Otherwise, we need
3347  * to leave cifs_readahead out of the address space operations.
3348  */
3349 const struct address_space_operations cifs_addr_ops_smallbuf = {
3350 	.read_folio	= netfs_read_folio,
3351 	.writepages	= netfs_writepages,
3352 	.dirty_folio	= netfs_dirty_folio,
3353 	.release_folio	= netfs_release_folio,
3354 	.invalidate_folio = netfs_invalidate_folio,
3355 	.migrate_folio	= filemap_migrate_folio,
3356 };
3357