1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2010
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
9 *
10 */
11 #include <linux/fs.h>
12 #include <linux/filelock.h>
13 #include <linux/backing-dev.h>
14 #include <linux/stat.h>
15 #include <linux/fcntl.h>
16 #include <linux/pagemap.h>
17 #include <linux/pagevec.h>
18 #include <linux/writeback.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/delay.h>
21 #include <linux/mount.h>
22 #include <linux/slab.h>
23 #include <linux/swap.h>
24 #include <linux/mm.h>
25 #include <asm/div64.h>
26 #include "cifsfs.h"
27 #include "cifspdu.h"
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "smb2proto.h"
31 #include "cifs_unicode.h"
32 #include "cifs_debug.h"
33 #include "cifs_fs_sb.h"
34 #include "fscache.h"
35 #include "smbdirect.h"
36 #include "fs_context.h"
37 #include "cifs_ioctl.h"
38 #include "cached_dir.h"
39 #include <trace/events/netfs.h>
40
41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42
43 /*
44 * Prepare a subrequest to upload to the server. We need to allocate credits
45 * so that we know the maximum amount of data that we can include in it.
46 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48 {
49 struct cifs_io_subrequest *wdata =
50 container_of(subreq, struct cifs_io_subrequest, subreq);
51 struct cifs_io_request *req = wdata->req;
52 struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
53 struct TCP_Server_Info *server;
54 struct cifsFileInfo *open_file = req->cfile;
55 size_t wsize = req->rreq.wsize;
56 int rc;
57
58 if (!wdata->have_xid) {
59 wdata->xid = get_xid();
60 wdata->have_xid = true;
61 }
62
63 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
64 wdata->server = server;
65
66 retry:
67 if (open_file->invalidHandle) {
68 rc = cifs_reopen_file(open_file, false);
69 if (rc < 0) {
70 if (rc == -EAGAIN)
71 goto retry;
72 subreq->error = rc;
73 return netfs_prepare_write_failed(subreq);
74 }
75 }
76
77 rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
78 &wdata->credits);
79 if (rc < 0) {
80 subreq->error = rc;
81 return netfs_prepare_write_failed(subreq);
82 }
83
84 wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
85 wdata->credits.rreq_debug_index = subreq->debug_index;
86 wdata->credits.in_flight_check = 1;
87 trace_smb3_rw_credits(wdata->rreq->debug_id,
88 wdata->subreq.debug_index,
89 wdata->credits.value,
90 server->credits, server->in_flight,
91 wdata->credits.value,
92 cifs_trace_rw_credits_write_prepare);
93
94 #ifdef CONFIG_CIFS_SMB_DIRECT
95 if (server->smbd_conn)
96 stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
97 #endif
98 }
99
100 /*
101 * Issue a subrequest to upload to the server.
102 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
104 {
105 struct cifs_io_subrequest *wdata =
106 container_of(subreq, struct cifs_io_subrequest, subreq);
107 struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
108 int rc;
109
110 if (cifs_forced_shutdown(sbi)) {
111 rc = -EIO;
112 goto fail;
113 }
114
115 rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
116 if (rc)
117 goto fail;
118
119 rc = -EAGAIN;
120 if (wdata->req->cfile->invalidHandle)
121 goto fail;
122
123 wdata->server->ops->async_writev(wdata);
124 out:
125 return;
126
127 fail:
128 if (rc == -EAGAIN)
129 trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
130 else
131 trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
132 add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
133 cifs_write_subrequest_terminated(wdata, rc, false);
134 goto out;
135 }
136
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
138 {
139 cifs_invalidate_cache(wreq->inode, 0);
140 }
141
142 /*
143 * Negotiate the size of a read operation on behalf of the netfs library.
144 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
146 {
147 struct netfs_io_request *rreq = subreq->rreq;
148 struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
149 struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
150 struct TCP_Server_Info *server;
151 struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
152 size_t size;
153 int rc = 0;
154
155 if (!rdata->have_xid) {
156 rdata->xid = get_xid();
157 rdata->have_xid = true;
158 }
159
160 server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
161 rdata->server = server;
162
163 if (cifs_sb->ctx->rsize == 0)
164 cifs_sb->ctx->rsize =
165 server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
166 cifs_sb->ctx);
167
168 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
169 &size, &rdata->credits);
170 if (rc)
171 return rc;
172
173 rreq->io_streams[0].sreq_max_len = size;
174
175 rdata->credits.in_flight_check = 1;
176 rdata->credits.rreq_debug_id = rreq->debug_id;
177 rdata->credits.rreq_debug_index = subreq->debug_index;
178
179 trace_smb3_rw_credits(rdata->rreq->debug_id,
180 rdata->subreq.debug_index,
181 rdata->credits.value,
182 server->credits, server->in_flight, 0,
183 cifs_trace_rw_credits_read_submit);
184
185 #ifdef CONFIG_CIFS_SMB_DIRECT
186 if (server->smbd_conn)
187 rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
188 #endif
189 return 0;
190 }
191
192 /*
193 * Issue a read operation on behalf of the netfs helper functions. We're asked
194 * to make a read of a certain size at a point in the file. We are permitted
195 * to only read a portion of that, but as long as we read something, the netfs
196 * helper will call us again so that we can issue another read.
197 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
199 {
200 struct netfs_io_request *rreq = subreq->rreq;
201 struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
202 struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
203 struct TCP_Server_Info *server = rdata->server;
204 int rc = 0;
205
206 cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
207 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
208 subreq->transferred, subreq->len);
209
210 rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
211 if (rc)
212 goto failed;
213
214 if (req->cfile->invalidHandle) {
215 do {
216 rc = cifs_reopen_file(req->cfile, true);
217 } while (rc == -EAGAIN);
218 if (rc)
219 goto failed;
220 }
221
222 if (subreq->rreq->origin != NETFS_DIO_READ)
223 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
224
225 trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
226 rc = rdata->server->ops->async_readv(rdata);
227 if (rc)
228 goto failed;
229 return;
230
231 failed:
232 subreq->error = rc;
233 netfs_read_subreq_terminated(subreq);
234 }
235
236 /*
237 * Writeback calls this when it finds a folio that needs uploading. This isn't
238 * called if writeback only has copy-to-cache to deal with.
239 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
241 {
242 struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
243 int ret;
244
245 ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
246 if (ret) {
247 cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
248 return;
249 }
250
251 wreq->io_streams[0].avail = true;
252 }
253
254 /*
255 * Initialise a request.
256 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
258 {
259 struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
260 struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
261 struct cifsFileInfo *open_file = NULL;
262
263 rreq->rsize = cifs_sb->ctx->rsize;
264 rreq->wsize = cifs_sb->ctx->wsize;
265 req->pid = current->tgid; // Ummm... This may be a workqueue
266
267 if (file) {
268 open_file = file->private_data;
269 rreq->netfs_priv = file->private_data;
270 req->cfile = cifsFileInfo_get(open_file);
271 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
272 req->pid = req->cfile->pid;
273 } else if (rreq->origin != NETFS_WRITEBACK) {
274 WARN_ON_ONCE(1);
275 return -EIO;
276 }
277
278 return 0;
279 }
280
281 /*
282 * Completion of a request operation.
283 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
285 {
286 struct timespec64 atime, mtime;
287 struct inode *inode = rreq->inode;
288
/* we do not want atime to be less than mtime; that broke some apps */
290 atime = inode_set_atime_to_ts(inode, current_time(inode));
291 mtime = inode_get_mtime(inode);
if (timespec64_compare(&atime, &mtime) < 0)
293 inode_set_atime_to_ts(inode, inode_get_mtime(inode));
294 }
295
static void cifs_free_request(struct netfs_io_request *rreq)
297 {
298 struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
299
300 if (req->cfile)
301 cifsFileInfo_put(req->cfile);
302 }
303
static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
305 {
306 struct cifs_io_subrequest *rdata =
307 container_of(subreq, struct cifs_io_subrequest, subreq);
308 int rc = subreq->error;
309
310 if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
311 #ifdef CONFIG_CIFS_SMB_DIRECT
312 if (rdata->mr) {
313 smbd_deregister_mr(rdata->mr);
314 rdata->mr = NULL;
315 }
316 #endif
317 }
318
319 if (rdata->credits.value != 0) {
320 trace_smb3_rw_credits(rdata->rreq->debug_id,
321 rdata->subreq.debug_index,
322 rdata->credits.value,
323 rdata->server ? rdata->server->credits : 0,
324 rdata->server ? rdata->server->in_flight : 0,
325 -rdata->credits.value,
326 cifs_trace_rw_credits_free_subreq);
327 if (rdata->server)
328 add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
329 else
330 rdata->credits.value = 0;
331 }
332
333 if (rdata->have_xid)
334 free_xid(rdata->xid);
335 }
336
337 const struct netfs_request_ops cifs_req_ops = {
338 .request_pool = &cifs_io_request_pool,
339 .subrequest_pool = &cifs_io_subrequest_pool,
340 .init_request = cifs_init_request,
341 .free_request = cifs_free_request,
342 .free_subrequest = cifs_free_subrequest,
343 .prepare_read = cifs_prepare_read,
344 .issue_read = cifs_issue_read,
345 .done = cifs_rreq_done,
346 .begin_writeback = cifs_begin_writeback,
347 .prepare_write = cifs_prepare_write,
348 .issue_write = cifs_issue_write,
349 .invalidate_cache = cifs_netfs_invalidate_cache,
350 };
351
352 /*
* Mark all open files on the tree connection as invalid, since they
* were closed when the session to the server was lost.
355 */
356 void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
358 {
359 struct cifsFileInfo *open_file = NULL;
360 struct list_head *tmp;
361 struct list_head *tmp1;
362
363 /* only send once per connect */
364 spin_lock(&tcon->tc_lock);
365 if (tcon->need_reconnect)
366 tcon->status = TID_NEED_RECON;
367
368 if (tcon->status != TID_NEED_RECON) {
369 spin_unlock(&tcon->tc_lock);
370 return;
371 }
372 tcon->status = TID_IN_FILES_INVALIDATE;
373 spin_unlock(&tcon->tc_lock);
374
375 /* list all files open on tree connection and mark them invalid */
376 spin_lock(&tcon->open_file_lock);
377 list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
378 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
379 open_file->invalidHandle = true;
380 open_file->oplock_break_cancelled = true;
381 }
382 spin_unlock(&tcon->open_file_lock);
383
384 invalidate_all_cached_dirs(tcon);
385 spin_lock(&tcon->tc_lock);
386 if (tcon->status == TID_IN_FILES_INVALIDATE)
387 tcon->status = TID_NEED_TCON;
388 spin_unlock(&tcon->tc_lock);
389
390 /*
391 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
392 * to this tcon.
393 */
394 }
395
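/*
 * Map POSIX open flags to an SMB desired access mask. When
 * rdwr_for_fscache is set, a write-only open is widened to read/write
 * so the local fscache can be filled in around partial writes.
 */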
static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
397 {
398 if ((flags & O_ACCMODE) == O_RDONLY)
399 return GENERIC_READ;
400 else if ((flags & O_ACCMODE) == O_WRONLY)
401 return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
402 else if ((flags & O_ACCMODE) == O_RDWR) {
/* GENERIC_ALL is too much permission to request; it can
cause an unnecessary access-denied error on create */
405 /* return GENERIC_ALL; */
406 return (GENERIC_READ | GENERIC_WRITE);
407 }
408
409 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
410 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
411 FILE_READ_DATA);
412 }
413
414 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
416 {
417 u32 posix_flags = 0;
418
419 if ((flags & O_ACCMODE) == O_RDONLY)
420 posix_flags = SMB_O_RDONLY;
421 else if ((flags & O_ACCMODE) == O_WRONLY)
422 posix_flags = SMB_O_WRONLY;
423 else if ((flags & O_ACCMODE) == O_RDWR)
424 posix_flags = SMB_O_RDWR;
425
426 if (flags & O_CREAT) {
427 posix_flags |= SMB_O_CREAT;
428 if (flags & O_EXCL)
429 posix_flags |= SMB_O_EXCL;
430 } else if (flags & O_EXCL)
431 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
432 current->comm, current->tgid);
433
434 if (flags & O_TRUNC)
435 posix_flags |= SMB_O_TRUNC;
436 /* be safe and imply O_SYNC for O_DSYNC */
437 if (flags & O_DSYNC)
438 posix_flags |= SMB_O_SYNC;
439 if (flags & O_DIRECTORY)
440 posix_flags |= SMB_O_DIRECTORY;
441 if (flags & O_NOFOLLOW)
442 posix_flags |= SMB_O_NOFOLLOW;
443 if (flags & O_DIRECT)
444 posix_flags |= SMB_O_DIRECT;
445
446 return posix_flags;
447 }
448 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
449
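/* Map POSIX open flags to an SMB create disposition (see the table in cifs_nt_open()). */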
static inline int cifs_get_disposition(unsigned int flags)
451 {
452 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
453 return FILE_CREATE;
454 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
455 return FILE_OVERWRITE_IF;
456 else if ((flags & O_CREAT) == O_CREAT)
457 return FILE_OPEN_IF;
458 else if ((flags & O_TRUNC) == O_TRUNC)
459 return FILE_OVERWRITE;
460 else
461 return FILE_OPEN;
462 }
463
464 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
466 struct super_block *sb, int mode, unsigned int f_flags,
467 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
468 {
469 int rc;
470 FILE_UNIX_BASIC_INFO *presp_data;
471 __u32 posix_flags = 0;
472 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
473 struct cifs_fattr fattr;
474 struct tcon_link *tlink;
475 struct cifs_tcon *tcon;
476
477 cifs_dbg(FYI, "posix open %s\n", full_path);
478
479 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
480 if (presp_data == NULL)
481 return -ENOMEM;
482
483 tlink = cifs_sb_tlink(cifs_sb);
484 if (IS_ERR(tlink)) {
485 rc = PTR_ERR(tlink);
486 goto posix_open_ret;
487 }
488
489 tcon = tlink_tcon(tlink);
490 mode &= ~current_umask();
491
492 posix_flags = cifs_posix_convert_flags(f_flags);
493 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
494 poplock, full_path, cifs_sb->local_nls,
495 cifs_remap(cifs_sb));
496 cifs_put_tlink(tlink);
497
498 if (rc)
499 goto posix_open_ret;
500
501 if (presp_data->Type == cpu_to_le32(-1))
502 goto posix_open_ret; /* open ok, caller does qpathinfo */
503
504 if (!pinode)
505 goto posix_open_ret; /* caller does not need info */
506
507 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
508
509 /* get new inode and set it up */
510 if (*pinode == NULL) {
511 cifs_fill_uniqueid(sb, &fattr);
512 *pinode = cifs_iget(sb, &fattr);
513 if (!*pinode) {
514 rc = -ENOMEM;
515 goto posix_open_ret;
516 }
517 } else {
518 cifs_revalidate_mapping(*pinode);
519 rc = cifs_fattr_to_inode(*pinode, &fattr, false);
520 }
521
522 posix_open_ret:
523 kfree(presp_data);
524 return rc;
525 }
526 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
527
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
529 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
530 struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
531 {
532 int rc;
533 int desired_access;
534 int disposition;
535 int create_options = CREATE_NOT_DIR;
536 struct TCP_Server_Info *server = tcon->ses->server;
537 struct cifs_open_parms oparms;
538 int rdwr_for_fscache = 0;
539
540 if (!server->ops->open)
541 return -ENOSYS;
542
543 /* If we're caching, we need to be able to fill in around partial writes. */
544 if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
545 rdwr_for_fscache = 1;
546
547 desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
548
549 /*********************************************************************
550 * open flag mapping table:
551 *
552 * POSIX Flag CIFS Disposition
553 * ---------- ----------------
554 * O_CREAT FILE_OPEN_IF
555 * O_CREAT | O_EXCL FILE_CREATE
556 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
557 * O_TRUNC FILE_OVERWRITE
558 * none of the above FILE_OPEN
559 *
* Note that there is no direct match for the disposition
* FILE_SUPERSEDE (ie create whether or not the file exists);
* O_CREAT | O_TRUNC is similar but truncates the existing
* file rather than creating a new file as FILE_SUPERSEDE does
* (which uses the attributes / metadata passed in on the open call).
*
* O_SYNC is a reasonable match to the CIFS writethrough flag
* and the read/write flags match reasonably. O_LARGEFILE
* is irrelevant because largefile support is always used
* by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
* O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
*********************************************************************/
572
573 disposition = cifs_get_disposition(f_flags);
574
575 /* BB pass O_SYNC flag through on file attributes .. BB */
576
577 /* O_SYNC also has bit for O_DSYNC so following check picks up either */
578 if (f_flags & O_SYNC)
579 create_options |= CREATE_WRITE_THROUGH;
580
581 if (f_flags & O_DIRECT)
582 create_options |= CREATE_NO_BUFFER;
583
584 retry_open:
585 oparms = (struct cifs_open_parms) {
586 .tcon = tcon,
587 .cifs_sb = cifs_sb,
588 .desired_access = desired_access,
589 .create_options = cifs_create_options(cifs_sb, create_options),
590 .disposition = disposition,
591 .path = full_path,
592 .fid = fid,
593 };
594
595 rc = server->ops->open(xid, &oparms, oplock, buf);
596 if (rc) {
597 if (rc == -EACCES && rdwr_for_fscache == 1) {
598 desired_access = cifs_convert_flags(f_flags, 0);
599 rdwr_for_fscache = 2;
600 goto retry_open;
601 }
602 return rc;
603 }
604 if (rdwr_for_fscache == 2)
605 cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
606
/* TODO: Add support for calling posix query info, passing in the fid */
608 if (tcon->unix_ext)
609 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
610 xid);
611 else
612 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
613 xid, fid);
614
615 if (rc) {
616 server->ops->close(xid, tcon, fid);
617 if (rc == -ESTALE)
618 rc = -EOPENSTALE;
619 }
620
621 return rc;
622 }
623
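/*
 * Return true if any byte-range locks are currently held on the inode,
 * checked across every open fid that shares it.
 */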
624 static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
626 {
627 struct cifs_fid_locks *cur;
628 bool has_locks = false;
629
630 down_read(&cinode->lock_sem);
631 list_for_each_entry(cur, &cinode->llist, llist) {
632 if (!list_empty(&cur->locks)) {
633 has_locks = true;
634 break;
635 }
636 }
637 up_read(&cinode->lock_sem);
638 return has_locks;
639 }
640
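/*
 * Acquire the semaphore for writing by polling: try down_write_trylock()
 * and sleep 10ms between attempts rather than blocking outright.
 */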
641 void
cifs_down_write(struct rw_semaphore *sem)
643 {
644 while (!down_write_trylock(sem))
645 msleep(10);
646 }
647
648 static void cifsFileInfo_put_work(struct work_struct *work);
649 void serverclose_work(struct work_struct *work);
650
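/*
 * Allocate and initialise the cifsFileInfo for a newly opened file, link
 * it into the tcon and inode open-file lists, and apply the oplock the
 * server granted (downgraded to None if mandatory brlocks are held).
 */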
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
652 struct tcon_link *tlink, __u32 oplock,
653 const char *symlink_target)
654 {
655 struct dentry *dentry = file_dentry(file);
656 struct inode *inode = d_inode(dentry);
657 struct cifsInodeInfo *cinode = CIFS_I(inode);
658 struct cifsFileInfo *cfile;
659 struct cifs_fid_locks *fdlocks;
660 struct cifs_tcon *tcon = tlink_tcon(tlink);
661 struct TCP_Server_Info *server = tcon->ses->server;
662
663 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
664 if (cfile == NULL)
665 return cfile;
666
667 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
668 if (!fdlocks) {
669 kfree(cfile);
670 return NULL;
671 }
672
673 if (symlink_target) {
674 cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
675 if (!cfile->symlink_target) {
676 kfree(fdlocks);
677 kfree(cfile);
678 return NULL;
679 }
680 }
681
682 INIT_LIST_HEAD(&fdlocks->locks);
683 fdlocks->cfile = cfile;
684 cfile->llist = fdlocks;
685
686 cfile->count = 1;
687 cfile->pid = current->tgid;
688 cfile->uid = current_fsuid();
689 cfile->dentry = dget(dentry);
690 cfile->f_flags = file->f_flags;
691 cfile->invalidHandle = false;
692 cfile->deferred_close_scheduled = false;
693 cfile->tlink = cifs_get_tlink(tlink);
694 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
695 INIT_WORK(&cfile->put, cifsFileInfo_put_work);
696 INIT_WORK(&cfile->serverclose, serverclose_work);
697 INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
698 mutex_init(&cfile->fh_mutex);
699 spin_lock_init(&cfile->file_info_lock);
700
701 cifs_sb_active(inode->i_sb);
702
703 /*
704 * If the server returned a read oplock and we have mandatory brlocks,
705 * set oplock level to None.
706 */
707 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
708 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
709 oplock = 0;
710 }
711
712 cifs_down_write(&cinode->lock_sem);
713 list_add(&fdlocks->llist, &cinode->llist);
714 up_write(&cinode->lock_sem);
715
716 spin_lock(&tcon->open_file_lock);
717 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
718 oplock = fid->pending_open->oplock;
719 list_del(&fid->pending_open->olist);
720
721 fid->purge_cache = false;
722 server->ops->set_fid(cfile, fid, oplock);
723
724 list_add(&cfile->tlist, &tcon->openFileList);
725 atomic_inc(&tcon->num_local_opens);
726
/* if a readable file instance, put it first in the list */
728 spin_lock(&cinode->open_file_lock);
729 if (file->f_mode & FMODE_READ)
730 list_add(&cfile->flist, &cinode->openFileList);
731 else
732 list_add_tail(&cfile->flist, &cinode->openFileList);
733 spin_unlock(&cinode->open_file_lock);
734 spin_unlock(&tcon->open_file_lock);
735
736 if (fid->purge_cache)
737 cifs_zap_mapping(inode);
738
739 file->private_data = cfile;
740 return cfile;
741 }
742
743 struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
745 {
746 spin_lock(&cifs_file->file_info_lock);
747 cifsFileInfo_get_locked(cifs_file);
748 spin_unlock(&cifs_file->file_info_lock);
749 return cifs_file;
750 }
751
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
753 {
754 struct inode *inode = d_inode(cifs_file->dentry);
755 struct cifsInodeInfo *cifsi = CIFS_I(inode);
756 struct cifsLockInfo *li, *tmp;
757 struct super_block *sb = inode->i_sb;
758
759 /*
760 * Delete any outstanding lock records. We'll lose them when the file
761 * is closed anyway.
762 */
763 cifs_down_write(&cifsi->lock_sem);
764 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
765 list_del(&li->llist);
766 cifs_del_lock_waiters(li);
767 kfree(li);
768 }
769 list_del(&cifs_file->llist->llist);
770 kfree(cifs_file->llist);
771 up_write(&cifsi->lock_sem);
772
773 cifs_put_tlink(cifs_file->tlink);
774 dput(cifs_file->dentry);
775 cifs_sb_deactive(sb);
776 kfree(cifs_file->symlink_target);
777 kfree(cifs_file);
778 }
779
static void cifsFileInfo_put_work(struct work_struct *work)
781 {
782 struct cifsFileInfo *cifs_file = container_of(work,
783 struct cifsFileInfo, put);
784
785 cifsFileInfo_put_final(cifs_file);
786 }
787
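/*
 * Worker that retries a server-side close which failed with -EBUSY or
 * -EAGAIN, then hands the cifsFileInfo on to the final put.
 */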
void serverclose_work(struct work_struct *work)
789 {
790 struct cifsFileInfo *cifs_file = container_of(work,
791 struct cifsFileInfo, serverclose);
792
793 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
794
795 struct TCP_Server_Info *server = tcon->ses->server;
796 int rc = 0;
797 int retries = 0;
798 int MAX_RETRIES = 4;
799
800 do {
801 if (server->ops->close_getattr)
802 rc = server->ops->close_getattr(0, tcon, cifs_file);
803 else if (server->ops->close)
804 rc = server->ops->close(0, tcon, &cifs_file->fid);
805
806 if (rc == -EBUSY || rc == -EAGAIN) {
807 retries++;
808 msleep(250);
809 }
} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));
812
813 if (retries == MAX_RETRIES)
814 pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
815
816 if (cifs_file->offload)
817 queue_work(fileinfo_put_wq, &cifs_file->put);
818 else
819 cifsFileInfo_put_final(cifs_file);
820 }
821
822 /**
* cifsFileInfo_put - release a reference to file private data
*
* Always potentially waits for the oplock handler. See _cifsFileInfo_put().
826 *
827 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
828 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
830 {
831 _cifsFileInfo_put(cifs_file, true, true);
832 }
833
834 /**
* _cifsFileInfo_put - release a reference to file private data
836 *
837 * This may involve closing the filehandle @cifs_file out on the
838 * server. Must be called without holding tcon->open_file_lock,
839 * cinode->open_file_lock and cifs_file->file_info_lock.
840 *
841 * If @wait_for_oplock_handler is true and we are releasing the last
842 * reference, wait for any running oplock break handler of the file
843 * and cancel any pending one.
844 *
845 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
* @wait_oplock_handler: must be false if called from the oplock break handler
* @offload: if true, defer the final release to a workqueue; the close and
* oplock break paths pass false
848 *
849 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
851 bool wait_oplock_handler, bool offload)
852 {
853 struct inode *inode = d_inode(cifs_file->dentry);
854 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
855 struct TCP_Server_Info *server = tcon->ses->server;
856 struct cifsInodeInfo *cifsi = CIFS_I(inode);
857 struct super_block *sb = inode->i_sb;
858 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
859 struct cifs_fid fid = {};
860 struct cifs_pending_open open;
861 bool oplock_break_cancelled;
862 bool serverclose_offloaded = false;
863
864 spin_lock(&tcon->open_file_lock);
865 spin_lock(&cifsi->open_file_lock);
866 spin_lock(&cifs_file->file_info_lock);
867
868 cifs_file->offload = offload;
869 if (--cifs_file->count > 0) {
870 spin_unlock(&cifs_file->file_info_lock);
871 spin_unlock(&cifsi->open_file_lock);
872 spin_unlock(&tcon->open_file_lock);
873 return;
874 }
875 spin_unlock(&cifs_file->file_info_lock);
876
877 if (server->ops->get_lease_key)
878 server->ops->get_lease_key(inode, &fid);
879
880 /* store open in pending opens to make sure we don't miss lease break */
881 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
882
883 /* remove it from the lists */
884 list_del(&cifs_file->flist);
885 list_del(&cifs_file->tlist);
886 atomic_dec(&tcon->num_local_opens);
887
888 if (list_empty(&cifsi->openFileList)) {
889 cifs_dbg(FYI, "closing last open instance for inode %p\n",
890 d_inode(cifs_file->dentry));
891 /*
* In strict cache mode we need to invalidate the mapping on the last
* close because it may cause an error when we open this file
* again and get at least a level II oplock.
895 */
896 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
897 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
898 cifs_set_oplock_level(cifsi, 0);
899 }
900
901 spin_unlock(&cifsi->open_file_lock);
902 spin_unlock(&tcon->open_file_lock);
903
904 oplock_break_cancelled = wait_oplock_handler ?
905 cancel_work_sync(&cifs_file->oplock_break) : false;
906
907 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
908 struct TCP_Server_Info *server = tcon->ses->server;
909 unsigned int xid;
910 int rc = 0;
911
912 xid = get_xid();
913 if (server->ops->close_getattr)
914 rc = server->ops->close_getattr(xid, tcon, cifs_file);
915 else if (server->ops->close)
916 rc = server->ops->close(xid, tcon, &cifs_file->fid);
917 _free_xid(xid);
918
919 if (rc == -EBUSY || rc == -EAGAIN) {
920 // Server close failed, hence offloading it as an async op
921 queue_work(serverclose_wq, &cifs_file->serverclose);
922 serverclose_offloaded = true;
923 }
924 }
925
926 if (oplock_break_cancelled)
927 cifs_done_oplock_break(cifsi);
928
929 cifs_del_pending_open(&open);
930
// If serverclose has been offloaded to the workqueue (on failure), it
// will handle offloading the put as well. If serverclose was not
// offloaded, we need to handle offloading the put here.
934 if (!serverclose_offloaded) {
935 if (offload)
936 queue_work(fileinfo_put_wq, &cifs_file->put);
937 else
938 cifsFileInfo_put_final(cifs_file);
939 }
940 }
941
int cifs_open(struct inode *inode, struct file *file)
{
945 int rc = -EACCES;
946 unsigned int xid;
947 __u32 oplock;
948 struct cifs_sb_info *cifs_sb;
949 struct TCP_Server_Info *server;
950 struct cifs_tcon *tcon;
951 struct tcon_link *tlink;
952 struct cifsFileInfo *cfile = NULL;
953 void *page;
954 const char *full_path;
955 bool posix_open_ok = false;
956 struct cifs_fid fid = {};
957 struct cifs_pending_open open;
958 struct cifs_open_info_data data = {};
959
960 xid = get_xid();
961
962 cifs_sb = CIFS_SB(inode->i_sb);
963 if (unlikely(cifs_forced_shutdown(cifs_sb))) {
964 free_xid(xid);
965 return -EIO;
966 }
967
968 tlink = cifs_sb_tlink(cifs_sb);
969 if (IS_ERR(tlink)) {
970 free_xid(xid);
971 return PTR_ERR(tlink);
972 }
973 tcon = tlink_tcon(tlink);
974 server = tcon->ses->server;
975
976 page = alloc_dentry_path();
977 full_path = build_path_from_dentry(file_dentry(file), page);
978 if (IS_ERR(full_path)) {
979 rc = PTR_ERR(full_path);
980 goto out;
981 }
982
983 cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
984 inode, file->f_flags, full_path);
985
986 if (file->f_flags & O_DIRECT &&
987 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
988 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
989 file->f_op = &cifs_file_direct_nobrl_ops;
990 else
991 file->f_op = &cifs_file_direct_ops;
992 }
993
994 /* Get the cached handle as SMB2 close is deferred */
995 if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
996 rc = cifs_get_writable_path(tcon, full_path, FIND_WR_FSUID_ONLY, &cfile);
997 } else {
998 rc = cifs_get_readable_path(tcon, full_path, &cfile);
999 }
1000 if (rc == 0) {
1001 if (file->f_flags == cfile->f_flags) {
1002 file->private_data = cfile;
1003 spin_lock(&CIFS_I(inode)->deferred_lock);
1004 cifs_del_deferred_close(cfile);
1005 spin_unlock(&CIFS_I(inode)->deferred_lock);
1006 goto use_cache;
1007 } else {
1008 _cifsFileInfo_put(cfile, true, false);
1009 }
1010 }
1011
1012 if (server->oplocks)
1013 oplock = REQ_OPLOCK;
1014 else
1015 oplock = 0;
1016
1017 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1018 if (!tcon->broken_posix_open && tcon->unix_ext &&
1019 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1020 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1021 /* can not refresh inode info since size could be stale */
1022 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
1023 cifs_sb->ctx->file_mode /* ignored */,
1024 file->f_flags, &oplock, &fid.netfid, xid);
1025 if (rc == 0) {
1026 cifs_dbg(FYI, "posix open succeeded\n");
1027 posix_open_ok = true;
1028 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1029 if (tcon->ses->serverNOS)
1030 cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
1031 tcon->ses->ip_addr,
1032 tcon->ses->serverNOS);
1033 tcon->broken_posix_open = true;
1034 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
1035 (rc != -EOPNOTSUPP)) /* path not found or net err */
1036 goto out;
1037 /*
1038 * Else fallthrough to retry open the old way on network i/o
1039 * or DFS errors.
1040 */
1041 }
1042 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1043
1044 if (server->ops->get_lease_key)
1045 server->ops->get_lease_key(inode, &fid);
1046
1047 cifs_add_pending_open(&fid, tlink, &open);
1048
1049 if (!posix_open_ok) {
1050 if (server->ops->get_lease_key)
1051 server->ops->get_lease_key(inode, &fid);
1052
1053 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1054 xid, &data);
1055 if (rc) {
1056 cifs_del_pending_open(&open);
1057 goto out;
1058 }
1059 }
1060
1061 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
1062 if (cfile == NULL) {
1063 if (server->ops->close)
1064 server->ops->close(xid, tcon, &fid);
1065 cifs_del_pending_open(&open);
1066 rc = -ENOMEM;
1067 goto out;
1068 }
1069
1070 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1071 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1072 /*
1073 * Time to set mode which we can not set earlier due to
1074 * problems creating new read-only files.
1075 */
1076 struct cifs_unix_set_info_args args = {
1077 .mode = inode->i_mode,
1078 .uid = INVALID_UID, /* no change */
1079 .gid = INVALID_GID, /* no change */
1080 .ctime = NO_CHANGE_64,
1081 .atime = NO_CHANGE_64,
1082 .mtime = NO_CHANGE_64,
1083 .device = 0,
1084 };
1085 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1086 cfile->pid);
1087 }
1088 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1089
1090 use_cache:
1091 fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1092 file->f_mode & FMODE_WRITE);
1093 if (!(file->f_flags & O_DIRECT))
1094 goto out;
1095 if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1096 goto out;
1097 cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
1098
1099 out:
1100 free_dentry_path(page);
1101 free_xid(xid);
1102 cifs_put_tlink(tlink);
1103 cifs_free_open_info(&data);
1104 return rc;
1105 }
1106
1107 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1108 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
1109 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1110
1111 /*
1112 * Try to reacquire byte range locks that were released when session
1113 * to server was lost.
1114 */
1115 static int
cifs_relock_file(struct cifsFileInfo *cfile)
1117 {
1118 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1119 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1120 int rc = 0;
1121 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1122 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1123 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1124
1125 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
1126 if (cinode->can_cache_brlcks) {
1127 /* can cache locks - no need to relock */
1128 up_read(&cinode->lock_sem);
1129 return rc;
1130 }
1131
1132 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1133 if (cap_unix(tcon->ses) &&
1134 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1135 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1136 rc = cifs_push_posix_locks(cfile);
1137 else
1138 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1139 rc = tcon->ses->server->ops->push_mand_locks(cfile);
1140
1141 up_read(&cinode->lock_sem);
1142 return rc;
1143 }
1144
1145 static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1147 {
1148 int rc = -EACCES;
1149 unsigned int xid;
1150 __u32 oplock;
1151 struct cifs_sb_info *cifs_sb;
1152 struct cifs_tcon *tcon;
1153 struct TCP_Server_Info *server;
1154 struct cifsInodeInfo *cinode;
1155 struct inode *inode;
1156 void *page;
1157 const char *full_path;
1158 int desired_access;
1159 int disposition = FILE_OPEN;
1160 int create_options = CREATE_NOT_DIR;
1161 struct cifs_open_parms oparms;
1162 int rdwr_for_fscache = 0;
1163
1164 xid = get_xid();
1165 mutex_lock(&cfile->fh_mutex);
1166 if (!cfile->invalidHandle) {
1167 mutex_unlock(&cfile->fh_mutex);
1168 free_xid(xid);
1169 return 0;
1170 }
1171
1172 inode = d_inode(cfile->dentry);
1173 cifs_sb = CIFS_SB(inode->i_sb);
1174 tcon = tlink_tcon(cfile->tlink);
1175 server = tcon->ses->server;
1176
1177 /*
* Can not grab the rename sem here because various ops, including those
* that already have the rename sem, can end up causing writepage to get
* called; if the server was down, that means we end up here, and we
* can never tell whether the caller already holds the rename_sem.
1182 */
1183 page = alloc_dentry_path();
1184 full_path = build_path_from_dentry(cfile->dentry, page);
1185 if (IS_ERR(full_path)) {
1186 mutex_unlock(&cfile->fh_mutex);
1187 free_dentry_path(page);
1188 free_xid(xid);
1189 return PTR_ERR(full_path);
1190 }
1191
1192 cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1193 inode, cfile->f_flags, full_path);
1194
1195 if (tcon->ses->server->oplocks)
1196 oplock = REQ_OPLOCK;
1197 else
1198 oplock = 0;
1199
1200 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1201 if (tcon->unix_ext && cap_unix(tcon->ses) &&
1202 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1203 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
1204 /*
1205 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1206 * original open. Must mask them off for a reopen.
1207 */
1208 unsigned int oflags = cfile->f_flags &
1209 ~(O_CREAT | O_EXCL | O_TRUNC);
1210
1211 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
1212 cifs_sb->ctx->file_mode /* ignored */,
1213 oflags, &oplock, &cfile->fid.netfid, xid);
1214 if (rc == 0) {
1215 cifs_dbg(FYI, "posix reopen succeeded\n");
1216 oparms.reconnect = true;
1217 goto reopen_success;
1218 }
1219 /*
* Fall through to retry the open the old way on errors; especially
* in the reconnect path it is important to retry hard.
1222 */
1223 }
1224 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1225
1226 /* If we're caching, we need to be able to fill in around partial writes. */
1227 if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1228 rdwr_for_fscache = 1;
1229
1230 desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
1231
1232 /* O_SYNC also has bit for O_DSYNC so following check picks up either */
1233 if (cfile->f_flags & O_SYNC)
1234 create_options |= CREATE_WRITE_THROUGH;
1235
1236 if (cfile->f_flags & O_DIRECT)
1237 create_options |= CREATE_NO_BUFFER;
1238
1239 if (server->ops->get_lease_key)
1240 server->ops->get_lease_key(inode, &cfile->fid);
1241
1242 retry_open:
1243 oparms = (struct cifs_open_parms) {
1244 .tcon = tcon,
1245 .cifs_sb = cifs_sb,
1246 .desired_access = desired_access,
1247 .create_options = cifs_create_options(cifs_sb, create_options),
1248 .disposition = disposition,
1249 .path = full_path,
1250 .fid = &cfile->fid,
1251 .reconnect = true,
1252 };
1253
1254 /*
* Can not refresh the inode by passing in a file_info buf to be returned
* by ops->open and then calling get_inode_info with the returned buf,
* since the file might have write-behind data that needs to be flushed
* and the server's version of the file size can be stale. If we knew for
* sure that the inode was not dirty locally, we could do this.
1260 */
1261 rc = server->ops->open(xid, &oparms, &oplock, NULL);
1262 if (rc == -ENOENT && oparms.reconnect == false) {
1263 /* durable handle timeout is expired - open the file again */
1264 rc = server->ops->open(xid, &oparms, &oplock, NULL);
1265 /* indicate that we need to relock the file */
1266 oparms.reconnect = true;
1267 }
1268 if (rc == -EACCES && rdwr_for_fscache == 1) {
1269 desired_access = cifs_convert_flags(cfile->f_flags, 0);
1270 rdwr_for_fscache = 2;
1271 goto retry_open;
1272 }
1273
1274 if (rc) {
1275 mutex_unlock(&cfile->fh_mutex);
1276 cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1277 cifs_dbg(FYI, "oplock: %d\n", oplock);
1278 goto reopen_error_exit;
1279 }
1280
1281 if (rdwr_for_fscache == 2)
1282 cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1283
1284 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1285 reopen_success:
1286 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1287 cfile->invalidHandle = false;
1288 mutex_unlock(&cfile->fh_mutex);
1289 cinode = CIFS_I(inode);
1290
1291 if (can_flush) {
1292 rc = filemap_write_and_wait(inode->i_mapping);
1293 if (!is_interrupt_error(rc))
1294 mapping_set_error(inode->i_mapping, rc);
1295
1296 if (tcon->posix_extensions) {
1297 rc = smb311_posix_get_inode_info(&inode, full_path,
1298 NULL, inode->i_sb, xid);
1299 } else if (tcon->unix_ext) {
1300 rc = cifs_get_inode_info_unix(&inode, full_path,
1301 inode->i_sb, xid);
1302 } else {
1303 rc = cifs_get_inode_info(&inode, full_path, NULL,
1304 inode->i_sb, xid, NULL);
1305 }
1306 }
1307 /*
* Else we are already writing out data to the server and could deadlock
* if we tried to flush it; and since we do not know whether we have data
* that would invalidate the current end of file on the server, we can
* not go to the server to get the new inode info.
1312 */
1313
1314 /*
1315 * If the server returned a read oplock and we have mandatory brlocks,
1316 * set oplock level to None.
1317 */
1318 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1319 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1320 oplock = 0;
1321 }
1322
1323 server->ops->set_fid(cfile, &cfile->fid, oplock);
1324 if (oparms.reconnect)
1325 cifs_relock_file(cfile);
1326
1327 reopen_error_exit:
1328 free_dentry_path(page);
1329 free_xid(xid);
1330 return rc;
1331 }
1332
void smb2_deferred_work_close(struct work_struct *work)
1334 {
1335 struct cifsFileInfo *cfile = container_of(work,
1336 struct cifsFileInfo, deferred.work);
1337
1338 spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1339 cifs_del_deferred_close(cfile);
1340 cfile->deferred_close_scheduled = false;
1341 spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1342 _cifsFileInfo_put(cfile, true, false);
1343 }
1344
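/*
 * A close may be deferred only if a close timeout is configured, the
 * inode holds a lease with handle caching (RH or RHW oplock) and no
 * byte-range lock has flagged the file for close-on-lock.
 */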
1345 static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1347 {
1348 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1349 struct cifsInodeInfo *cinode = CIFS_I(inode);
1350
1351 return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1352 (cinode->oplock == CIFS_CACHE_RHW_FLG ||
1353 cinode->oplock == CIFS_CACHE_RH_FLG) &&
1354 !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
}
1357
int cifs_close(struct inode *inode, struct file *file)
1359 {
1360 struct cifsFileInfo *cfile;
1361 struct cifsInodeInfo *cinode = CIFS_I(inode);
1362 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1363 struct cifs_deferred_close *dclose;
1364
1365 cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1366
1367 if (file->private_data != NULL) {
1368 cfile = file->private_data;
1369 file->private_data = NULL;
1370 dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
1371 if ((cfile->status_file_deleted == false) &&
1372 (smb2_can_defer_close(inode, dclose))) {
1373 if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
1374 inode_set_mtime_to_ts(inode,
1375 inode_set_ctime_current(inode));
1376 }
1377 spin_lock(&cinode->deferred_lock);
1378 cifs_add_deferred_close(cfile, dclose);
1379 if (cfile->deferred_close_scheduled &&
1380 delayed_work_pending(&cfile->deferred)) {
1381 /*
1382 * If there is no pending work, mod_delayed_work queues new work.
* So, increase the ref count to avoid use-after-free.
1384 */
1385 if (!mod_delayed_work(deferredclose_wq,
1386 &cfile->deferred, cifs_sb->ctx->closetimeo))
1387 cifsFileInfo_get(cfile);
1388 } else {
1389 /* Deferred close for files */
1390 queue_delayed_work(deferredclose_wq,
1391 &cfile->deferred, cifs_sb->ctx->closetimeo);
1392 cfile->deferred_close_scheduled = true;
1393 spin_unlock(&cinode->deferred_lock);
1394 return 0;
1395 }
1396 spin_unlock(&cinode->deferred_lock);
1397 _cifsFileInfo_put(cfile, true, false);
1398 } else {
1399 _cifsFileInfo_put(cfile, true, false);
1400 kfree(dclose);
1401 }
1402 }
1403
1404 /* return code from the ->release op is always ignored */
1405 return 0;
1406 }
1407
1408 void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1410 {
1411 struct cifsFileInfo *open_file, *tmp;
1412 LIST_HEAD(tmp_list);
1413
1414 if (!tcon->use_persistent || !tcon->need_reopen_files)
1415 return;
1416
1417 tcon->need_reopen_files = false;
1418
1419 cifs_dbg(FYI, "Reopen persistent handles\n");
1420
1421 /* list all files open on tree connection, reopen resilient handles */
1422 spin_lock(&tcon->open_file_lock);
1423 list_for_each_entry(open_file, &tcon->openFileList, tlist) {
1424 if (!open_file->invalidHandle)
1425 continue;
1426 cifsFileInfo_get(open_file);
1427 list_add_tail(&open_file->rlist, &tmp_list);
1428 }
1429 spin_unlock(&tcon->open_file_lock);
1430
1431 list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
1432 if (cifs_reopen_file(open_file, false /* do not flush */))
1433 tcon->need_reopen_files = true;
1434 list_del_init(&open_file->rlist);
1435 cifsFileInfo_put(open_file);
1436 }
1437 }
1438
int cifs_closedir(struct inode *inode, struct file *file)
1440 {
1441 int rc = 0;
1442 unsigned int xid;
1443 struct cifsFileInfo *cfile = file->private_data;
1444 struct cifs_tcon *tcon;
1445 struct TCP_Server_Info *server;
1446 char *buf;
1447
1448 cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1449
1450 if (cfile == NULL)
1451 return rc;
1452
1453 xid = get_xid();
1454 tcon = tlink_tcon(cfile->tlink);
1455 server = tcon->ses->server;
1456
1457 cifs_dbg(FYI, "Freeing private data in close dir\n");
1458 spin_lock(&cfile->file_info_lock);
1459 if (server->ops->dir_needs_close(cfile)) {
1460 cfile->invalidHandle = true;
1461 spin_unlock(&cfile->file_info_lock);
1462 if (server->ops->close_dir)
1463 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1464 else
1465 rc = -ENOSYS;
1466 cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
1467 /* not much we can do if it fails anyway, ignore rc */
1468 rc = 0;
1469 } else
1470 spin_unlock(&cfile->file_info_lock);
1471
1472 buf = cfile->srch_inf.ntwrk_buf_start;
1473 if (buf) {
1474 cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
1475 cfile->srch_inf.ntwrk_buf_start = NULL;
1476 if (cfile->srch_inf.smallBuf)
1477 cifs_small_buf_release(buf);
1478 else
1479 cifs_buf_release(buf);
1480 }
1481
1482 cifs_put_tlink(cfile->tlink);
1483 kfree(file->private_data);
1484 file->private_data = NULL;
1485 /* BB can we lock the filestruct while this is going on? */
1486 free_xid(xid);
1487 return rc;
1488 }
1489
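/* Allocate and initialise a byte-range lock record owned by the current task. */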
1490 static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
1492 {
1493 struct cifsLockInfo *lock =
1494 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
1495 if (!lock)
1496 return lock;
1497 lock->offset = offset;
1498 lock->length = length;
1499 lock->type = type;
1500 lock->pid = current->tgid;
1501 lock->flags = flags;
1502 INIT_LIST_HEAD(&lock->blist);
1503 init_waitqueue_head(&lock->block_q);
1504 return lock;
1505 }
1506
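/* Wake every task blocked on the given lock's block list. */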
1507 void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
1509 {
1510 struct cifsLockInfo *li, *tmp;
1511 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1512 list_del_init(&li->blist);
1513 wake_up(&li->block_q);
1514 }
1515 }
1516
1517 #define CIFS_LOCK_OP 0
1518 #define CIFS_READ_OP 1
1519 #define CIFS_WRITE_OP 2
1520
1521 /* @rw_check : 0 - no op, 1 - read, 2 - write */
1522 static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
1524 __u64 length, __u8 type, __u16 flags,
1525 struct cifsFileInfo *cfile,
1526 struct cifsLockInfo **conf_lock, int rw_check)
1527 {
1528 struct cifsLockInfo *li;
1529 struct cifsFileInfo *cur_cfile = fdlocks->cfile;
1530 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1531
1532 list_for_each_entry(li, &fdlocks->locks, llist) {
1533 if (offset + length <= li->offset ||
1534 offset >= li->offset + li->length)
1535 continue;
1536 if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1537 server->ops->compare_fids(cfile, cur_cfile)) {
1538 /* shared lock prevents write op through the same fid */
1539 if (!(li->type & server->vals->shared_lock_type) ||
1540 rw_check != CIFS_WRITE_OP)
1541 continue;
1542 }
1543 if ((type & server->vals->shared_lock_type) &&
1544 ((server->ops->compare_fids(cfile, cur_cfile) &&
1545 current->tgid == li->pid) || type == li->type))
1546 continue;
1547 if (rw_check == CIFS_LOCK_OP &&
1548 (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1549 server->ops->compare_fids(cfile, cur_cfile))
1550 continue;
1551 if (conf_lock)
1552 *conf_lock = li;
1553 return true;
1554 }
1555 return false;
1556 }
1557
1558 bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1560 __u8 type, __u16 flags,
1561 struct cifsLockInfo **conf_lock, int rw_check)
1562 {
1563 bool rc = false;
1564 struct cifs_fid_locks *cur;
1565 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1566
1567 list_for_each_entry(cur, &cinode->llist, llist) {
1568 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1569 flags, cfile, conf_lock,
1570 rw_check);
1571 if (rc)
1572 break;
1573 }
1574
1575 return rc;
1576 }
1577
1578 /*
* Check if there is another lock that prevents us from setting the lock
* (mandatory style). If such a lock exists, update the flock structure with
* its properties. Otherwise, set the flock type to F_UNLCK if we can cache
* brlocks, or leave it the same if we can't. Returns 0 if we don't need to
* ask the server, or 1 otherwise.
1584 */
1585 static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1587 __u8 type, struct file_lock *flock)
1588 {
1589 int rc = 0;
1590 struct cifsLockInfo *conf_lock;
1591 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1592 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1593 bool exist;
1594
1595 down_read(&cinode->lock_sem);
1596
1597 exist = cifs_find_lock_conflict(cfile, offset, length, type,
1598 flock->c.flc_flags, &conf_lock,
1599 CIFS_LOCK_OP);
1600 if (exist) {
1601 flock->fl_start = conf_lock->offset;
1602 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1603 flock->c.flc_pid = conf_lock->pid;
1604 if (conf_lock->type & server->vals->shared_lock_type)
1605 flock->c.flc_type = F_RDLCK;
1606 else
1607 flock->c.flc_type = F_WRLCK;
1608 } else if (!cinode->can_cache_brlcks)
1609 rc = 1;
1610 else
1611 flock->c.flc_type = F_UNLCK;
1612
1613 up_read(&cinode->lock_sem);
1614 return rc;
1615 }
1616
1617 static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1619 {
1620 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1621 cifs_down_write(&cinode->lock_sem);
1622 list_add_tail(&lock->llist, &cfile->llist->locks);
1623 up_write(&cinode->lock_sem);
1624 }
1625
1626 /*
1627 * Set the byte-range lock (mandatory style). Returns:
* 1) 0, if we set the lock and don't need to ask the server;
* 2) 1, if no locks prevent us but we need to ask the server;
1630 * 3) -EACCES, if there is a lock that prevents us and wait is false.
1631 */
1632 static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1634 bool wait)
1635 {
1636 struct cifsLockInfo *conf_lock;
1637 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1638 bool exist;
1639 int rc = 0;
1640
1641 try_again:
1642 exist = false;
1643 cifs_down_write(&cinode->lock_sem);
1644
1645 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1646 lock->type, lock->flags, &conf_lock,
1647 CIFS_LOCK_OP);
1648 if (!exist && cinode->can_cache_brlcks) {
1649 list_add_tail(&lock->llist, &cfile->llist->locks);
1650 up_write(&cinode->lock_sem);
1651 return rc;
1652 }
1653
1654 if (!exist)
1655 rc = 1;
1656 else if (!wait)
1657 rc = -EACCES;
1658 else {
1659 list_add_tail(&lock->blist, &conf_lock->blist);
1660 up_write(&cinode->lock_sem);
1661 rc = wait_event_interruptible(lock->block_q,
1662 (lock->blist.prev == &lock->blist) &&
1663 (lock->blist.next == &lock->blist));
1664 if (!rc)
1665 goto try_again;
1666 cifs_down_write(&cinode->lock_sem);
1667 list_del_init(&lock->blist);
1668 }
1669
1670 up_write(&cinode->lock_sem);
1671 return rc;
1672 }
1673
1674 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1675 /*
* Check if there is another lock that prevents us from setting the lock
* (posix style). If such a lock exists, update the flock structure with
* its properties. Otherwise, set the flock type to F_UNLCK if we can cache
* brlocks, or leave it the same if we can't. Returns 0 if we don't need to
* ask the server, or 1 otherwise.
1681 */
1682 static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1684 {
1685 int rc = 0;
1686 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1687 unsigned char saved_type = flock->c.flc_type;
1688
1689 if ((flock->c.flc_flags & FL_POSIX) == 0)
1690 return 1;
1691
1692 down_read(&cinode->lock_sem);
1693 posix_test_lock(file, flock);
1694
1695 if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1696 flock->c.flc_type = saved_type;
1697 rc = 1;
1698 }
1699
1700 up_read(&cinode->lock_sem);
1701 return rc;
1702 }
1703
1704 /*
1705 * Set the byte-range lock (posix style). Returns:
* 1) <0, if an error occurs while setting the lock;
* 2) 0, if we set the lock and don't need to ask the server;
* 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
* 4) FILE_LOCK_DEFERRED + 1, if we need to ask the server.
1710 */
1711 static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1713 {
1714 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1715 int rc = FILE_LOCK_DEFERRED + 1;
1716
1717 if ((flock->c.flc_flags & FL_POSIX) == 0)
1718 return rc;
1719
1720 cifs_down_write(&cinode->lock_sem);
1721 if (!cinode->can_cache_brlcks) {
1722 up_write(&cinode->lock_sem);
1723 return rc;
1724 }
1725
1726 rc = posix_lock_file(file, flock, NULL);
1727 up_write(&cinode->lock_sem);
1728 return rc;
1729 }
1730
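/*
 * Push all cached byte-range locks for this file out to the server as
 * LOCKING_ANDX requests, batching as many ranges per request as the
 * server's maxBuf allows. Exclusive and shared ranges are sent in
 * separate passes.
 */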
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length >> 32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset >> 32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}

static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
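/*
 * Walk the inode's list of VFS posix locks and replay each one to the
 * server with CIFSSMBPosixLock(). Lock owners are hashed into on-the-wire
 * pids since fl_owner_t pointers cannot be sent over the wire.
 */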
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = locks_inode_context(inode);
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	for_each_file_lock(flock, &flctx->flc_posix) {
		unsigned char ftype = flock->c.flc_type;

		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = cifs_flock_len(flock);
		if (ftype == F_RDLCK || ftype == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->c.flc_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&flctx->flc_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

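/*
 * Push locks that were cached on the client out to the server once we can
 * no longer cache them, using posix semantics when the server supports
 * them and mandatory semantics otherwise.
 */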
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* we are going to update can_cache_brlcks here - need write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}

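/*
 * Decode the flags and type of a VFS file_lock into this server's CIFS
 * lock type bits, reporting back whether the request is a lock or an
 * unlock and whether the caller is willing to wait.
 */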
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->c.flc_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->c.flc_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->c.flc_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->c.flc_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->c.flc_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->c.flc_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
			 flock->c.flc_flags);

	*type = server->vals->large_lock_type;
	if (lock_is_write(flock)) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (lock_is_unlock(flock)) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (lock_is_read(flock)) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->c.flc_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->c.flc_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}

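/*
 * Handle an F_GETLK request: test whether the given range could be locked.
 * If nothing conflicts locally, probe the server by taking and then
 * immediately releasing the lock; on failure, report the conflicting type.
 */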
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->c.flc_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->c.flc_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type | server->vals->shared_lock_type, 0, 1, false);
		flock->c.flc_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->c.flc_type = F_WRLCK;

	return 0;
}

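/* Move every entry on the source list to the tail of the destination. */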
void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;

	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

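/* Free every lock on the list, waking any waiters blocked on each one. */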
void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;

	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
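/*
 * Drop all cached locks contained in the range being unlocked and, unless
 * brlocks are still cached, send batched LOCKING_ANDX unlock requests to
 * the server. Locks are parked on a temporary list so they can be restored
 * if the server rejects the unlock.
 */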
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = cifs_flock_len(flock);
	LIST_HEAD(tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length >> 32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset >> 32));
			/*
			 * Save the lock here so we can add it back to the
			 * file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

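/*
 * Handle an F_SETLK/F_SETLKW (or flock) request: cache the lock locally
 * when possible, otherwise send it to the server, and finally update the
 * VFS lock state so that locks are accounted for on file close.
 */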
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (rc <= FILE_LOCK_DEFERRED)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->c.flc_flags);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * page reads.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
		    CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->c.flc_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}

int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	__u32 type;

	xid = get_xid();

	if (!(fl->c.flc_flags & FL_FLOCK)) {
		rc = -ENOLCK;
		free_xid(xid);
		return rc;
	}

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		rc = -EOPNOTSUPP;
		free_xid(xid);
		return rc;
	}

	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
		 flock->c.flc_flags, flock->c.flc_type,
		 (long long)flock->fl_start,
		 (long long)flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);
	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we cannot accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

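/*
 * Completion handler for a write subrequest: on success, advance the
 * cached zero point past unbuffered/direct writes and extend the tracked
 * remote file size if the write went past it, then pass the result on to
 * netfslib.
 */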
void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
				      bool was_async)
{
	struct netfs_io_request *wreq = wdata->rreq;
	struct netfs_inode *ictx = netfs_inode(wreq->inode);
	loff_t wrend;

	if (result > 0) {
		wrend = wdata->subreq.start + wdata->subreq.transferred + result;

		if (wrend > ictx->zero_point &&
		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
		     wdata->rreq->origin == NETFS_DIO_WRITE))
			ictx->zero_point = wrend;
		if (wrend > ictx->remote_i_size)
			netfs_resize_file(ictx, wrend, true);
	}

	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
}

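/*
 * Find an open handle on the inode that is usable for reading, taking a
 * reference on it so it cannot be closed underneath the caller. Returns
 * NULL if no valid read handle exists.
 */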
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
	/*
	 * We could simply take the first list entry since write-only entries
	 * are always at the end of the list, but the first entry might have
	 * a close pending, so we go through the whole list.
	 */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return NULL;
}

/* Return -EBADF if no handle is found and general rc otherwise */
int
cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc = -EBADF;
	unsigned int refind = 0;
	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
	bool with_delete = flags & FIND_WR_WITH_DELETE;

	*ret_file = NULL;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen, but we had reports of an oops
	 * (due to it being zero) during stress testcases, so we need to
	 * check for it.
	 */
	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return rc;
	}

	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return rc;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (with_delete && !(open_file->fid.access & DELETE))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				*ret_file = open_file;
				return 0;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc) {
			*ret_file = inv_file;
			return 0;
		}

		spin_lock(&cifs_inode->open_file_lock);
		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
		spin_unlock(&cifs_inode->open_file_lock);
		cifsFileInfo_put(inv_file);
		++refind;
		inv_file = NULL;
		spin_lock(&cifs_inode->open_file_lock);
		goto refind_writable;
	}

	return rc;
}

struct cifsFileInfo *
find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
{
	struct cifsFileInfo *cfile;
	int rc;

	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
	if (rc)
		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);

	return cfile;
}

int
cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
		       int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page = alloc_dentry_path();

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		const char *full_path = build_path_from_dentry(cfile->dentry, page);

		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		return cifs_get_writable_file(cinode, flags, ret_file);
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}

int
cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page = alloc_dentry_path();

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		const char *full_path = build_path_from_dentry(cfile->dentry, page);

		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		*ret_file = find_readable_file(cinode, 0);
		return *ret_file ? 0 : -ENOENT;
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}

/*
 * Flush data on a strict file.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(inode->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush == NULL) {
			rc = -ENOSYS;
			goto strict_fsync_exit;
		}

		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
			if (smbfile) {
				rc = server->ops->flush(xid, tcon, &smbfile->fid);
				cifsFileInfo_put(smbfile);
			} else
				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
		} else
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
	}

strict_fsync_exit:
	free_xid(xid);
	return rc;
}

/*
 * Flush data on a non-strict file.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush == NULL) {
			rc = -ENOSYS;
			goto fsync_exit;
		}

		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
			if (smbfile) {
				rc = server->ops->flush(xid, tcon, &smbfile->fid);
				cifsFileInfo_put(smbfile);
			} else
				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
		} else
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
	}

fsync_exit:
	free_xid(xid);
	return rc;
}

/*
 * As the file closes, flush all cached write data for this inode and check
 * for write-behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
	if (rc) {
		/* get more nuanced writeback errors */
		rc = filemap_check_wb_err(file->f_mapping, 0);
		trace_cifs_flush_err(inode->i_ino, rc);
	}
	return rc;
}

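/*
 * Buffered write path used when byte-range locks may be in force: hold
 * lock_sem shared so no new brlock can appear mid-write, fail the write
 * if it overlaps a conflicting lock, and otherwise hand off to netfslib.
 */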
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	ssize_t rc;

	rc = netfs_start_io_write(inode);
	if (rc < 0)
		return rc;

	/*
	 * We need to hold the sem to be sure nobody modifies the lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))) {
		rc = -EACCES;
		goto out;
	}

	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);

out:
	up_read(&cinode->lock_sem);
	netfs_end_io_write(inode);
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}

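/*
 * Write path for strict cache mode: write through the page cache only
 * while we hold a write oplock/lease; otherwise send the data straight to
 * the server and, if we were caching reads, zap the now-stale page cache.
 */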
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
				     iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = netfs_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the
	 * data to the server exactly from pos to pos+len-1 rather than flush
	 * all affected pages, because flushing may cause an error with
	 * mandatory locks on those pages but not on the region from pos to
	 * pos+len-1.
	 */
	written = netfs_file_write_iter(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}

ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t rc;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_DIRECT)
		return netfs_unbuffered_read_iter(iocb, iter);

	rc = cifs_revalidate_mapping(inode);
	if (rc)
		return rc;

	return netfs_file_read_iter(iocb, iter);
}

ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = netfs_unbuffered_write_iter(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = netfs_file_write_iter(iocb, from);

	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
		rc = filemap_fdatawrite(inode->i_mapping);
		if (rc)
			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
				 rc, inode);
	}

	cifs_put_writer(cinode);
	return written;
}

ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
				     iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with page reads if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return netfs_unbuffered_read_iter(iocb, to);

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
		if (iocb->ki_flags & IOCB_DIRECT)
			return netfs_unbuffered_read_iter(iocb, to);
		return netfs_buffered_read_iter(iocb, to);
	}

	/*
	 * We need to hold the sem to be sure nobody modifies the lock list
	 * with a brlock that prevents reading.
	 */
	if (iocb->ki_flags & IOCB_DIRECT) {
		rc = netfs_start_io_direct(inode);
		if (rc < 0)
			goto out;
		rc = -EACCES;
		down_read(&cinode->lock_sem);
		if (!cifs_find_lock_conflict(
			    cfile, iocb->ki_pos, iov_iter_count(to),
			    tcon->ses->server->vals->shared_lock_type,
			    0, NULL, CIFS_READ_OP))
			rc = netfs_unbuffered_read_iter_locked(iocb, to);
		up_read(&cinode->lock_sem);
		netfs_end_io_direct(inode);
	} else {
		rc = netfs_start_io_read(inode);
		if (rc < 0)
			goto out;
		rc = -EACCES;
		down_read(&cinode->lock_sem);
		if (!cifs_find_lock_conflict(
			    cfile, iocb->ki_pos, iov_iter_count(to),
			    tcon->ses->server->vals->shared_lock_type,
			    0, NULL, CIFS_READ_OP))
			rc = filemap_read(iocb, to, 0);
		up_read(&cinode->lock_sem);
		netfs_end_io_read(inode);
	}
out:
	return rc;
}

static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
{
	return netfs_page_mkwrite(vmf, NULL);
}

static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int xid, rc = 0;
	struct inode *inode = file_inode(file);

	xid = get_xid();

	if (!CIFS_CACHE_READ(CIFS_I(inode)))
		rc = cifs_zap_mapping(inode);
	if (!rc)
		rc = generic_file_mmap(file, vma);
	if (!rc)
		vma->vm_ops = &cifs_file_vm_ops;

	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();

	rc = cifs_revalidate_file(file);
	if (rc)
		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
			 rc);
	if (!rc)
		rc = generic_file_mmap(file, vma);
	if (!rc)
		vma->vm_ops = &cifs_file_vm_ops;

	free_xid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_inode->open_file_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes open
 * for write - to avoid races with writepage extending the file. In the
 * future we could consider allowing a refresh of the inode only on
 * increases in the file size, but this is tricky to do without racing with
 * writebehind page caching in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
			    bool from_readdir)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode) ||
	    ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on direct
			   I/O we can change the size safely */
			return true;
		}

		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

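/*
 * Work item run when the server breaks our oplock or lease: downgrade the
 * cached state, flush (and possibly zap) dirty page-cache data, push any
 * cached byte-range locks to the server, and acknowledge the break unless
 * the handle has already been closed.
 */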
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct tcon_link *tlink;
	int rc = 0;
	bool purge_cache = false, oplock_break_cancelled;
	__u64 persistent_fid, volatile_fid;
	__u16 net_fid;

	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		goto out;
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
				      cfile->oplock_epoch, &purge_cache);

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		if (CIFS_CACHE_WRITE(cinode))
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * When an oplock break is received and there are no active file
	 * handles, only cached ones, schedule the deferred close immediately
	 * so that a new open will not use the cached handle.
	 */
	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
		cifs_close_deferred_file(cinode);

	persistent_fid = cfile->fid.persistent_fid;
	volatile_fid = cfile->fid.volatile_fid;
	net_fid = cfile->fid.netfid;
	oplock_break_cancelled = cfile->oplock_break_cancelled;

	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
	/*
	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
	 * an acknowledgment to be sent when the file has already been closed.
	 */
	spin_lock(&cinode->open_file_lock);
	/* check list empty since can race with kill_sb calling tree disconnect */
	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
		spin_unlock(&cinode->open_file_lock);
		rc = server->ops->oplock_response(tcon, persistent_fid,
						  volatile_fid, net_fid, cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	} else
		spin_unlock(&cinode->open_file_lock);

	cifs_put_tlink(tlink);
out:
	cifs_done_oplock_break(cinode);
}

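/*
 * Validate that a file on SMB can be used as a swapfile: the mapping must
 * provide swap_rw and the file must not have holes (a sparse swapfile is
 * rejected).
 */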
static int cifs_swap_activate(struct swap_info_struct *sis,
			      struct file *swap_file, sector_t *span)
{
	struct cifsFileInfo *cfile = swap_file->private_data;
	struct inode *inode = swap_file->f_mapping->host;
	unsigned long blocks;
	long long isize;

	cifs_dbg(FYI, "swap activate\n");

	if (!swap_file->f_mapping->a_ops->swap_rw)
		/* Cannot support swap */
		return -EINVAL;

	spin_lock(&inode->i_lock);
	blocks = inode->i_blocks;
	isize = inode->i_size;
	spin_unlock(&inode->i_lock);
	if (blocks * 512 < isize) {
		pr_warn("swap activate: swapfile has holes\n");
		return -EINVAL;
	}
	*span = sis->pages;

	pr_warn_once("Swap support over SMB3 is experimental\n");

	/*
	 * TODO: consider adding ACL (or documenting how) to prevent other
	 * users (on this or other systems) from reading it
	 */

	/* TODO: add sk_set_memalloc(inet) or similar */

	if (cfile)
		cfile->swapfile = true;
	/*
	 * TODO: Since file already open, we can't open with DENY_ALL here
	 * but we could add call to grab a byte range lock to prevent others
	 * from reading or writing the file
	 */

	sis->flags |= SWP_FS_OPS;
	return add_swap_extent(sis, 0, sis->max, 0);
}

static void cifs_swap_deactivate(struct file *file)
{
	struct cifsFileInfo *cfile = file->private_data;

	cifs_dbg(FYI, "swap deactivate\n");

	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */

	if (cfile)
		cfile->swapfile = false;

	/* do we need to unpin (or unlock) the file */
}

/**
 * cifs_swap_rw - SMB3 address space operation for swap I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * Perform IO to the swap-file. This is much like direct IO.
 */
static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t ret;

	if (iov_iter_rw(iter) == READ)
		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
	else
		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
	if (ret < 0)
		return ret;
	return 0;
}

const struct address_space_operations cifs_addr_ops = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.writepages = netfs_writepages,
	.dirty_folio = netfs_dirty_folio,
	.release_folio = netfs_release_folio,
	.direct_IO = noop_direct_IO,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio = filemap_migrate_folio,
	/*
	 * TODO: investigate and if useful we could add an is_dirty_writeback
	 * helper if needed
	 */
	.swap_activate = cifs_swap_activate,
	.swap_deactivate = cifs_swap_deactivate,
	.swap_rw = cifs_swap_rw,
};

/*
 * cifs_readahead requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readahead out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.read_folio = netfs_read_folio,
	.writepages = netfs_writepages,
	.dirty_folio = netfs_dirty_folio,
	.release_folio = netfs_release_folio,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio = filemap_migrate_folio,
};