// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
#include <trace/events/netfs.h>

static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);

/*
 * Prepare a subrequest to upload to the server. We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
	size_t wsize = req->rreq.wsize;
	int rc;

	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

	if (cifs_sb->ctx->wsize == 0)
		cifs_negotiate_wsize(server, cifs_sb->ctx,
				     tlink_tcon(req->cfile->tlink));

retry:
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		stream->sreq_max_segs = sp->max_frmr_depth;
	}
#endif
}

/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	if (cifs_forced_shutdown(sbi)) {
		rc = -EIO;
		goto fail;
	}

	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc);
	goto out;
}

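/*
 * Invalidate the local fscache data for the inode backing this write request
 * on behalf of the netfs library.
 */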
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}

/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
	rdata->server = server;

	if (cifs_sb->ctx->rsize == 0)
		cifs_negotiate_rsize(server, cifs_sb->ctx,
				     tlink_tcon(req->cfile->tlink));

	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	rreq->io_streams[0].sreq_max_len = size;

	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn) {
		const struct smbdirect_socket_parameters *sp =
			smbd_get_parameters(server->smbd_conn);

		rreq->io_streams[0].sreq_max_segs = sp->max_frmr_depth;
	}
#endif
	return 0;
}

/*
 * Issue a read operation on behalf of the netfs helper functions. We're asked
 * to make a read of a certain size at a point in the file. We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = rdata->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
	    subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	subreq->error = rc;
	netfs_read_subreq_terminated(subreq);
}

/*
 * Writeback calls this when it finds a folio that needs uploading. This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}

/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;
	req->pid = current->tgid; // Ummm... This may be a workqueue

	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		req->cfile = cifsFileInfo_get(open_file);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
			req->pid = req->cfile->pid;
	} else if (rreq->origin != NETFS_WRITEBACK) {
		WARN_ON_ONCE(1);
		return -EIO;
	}

	return 0;
}

/*
 * Completion of a request operation.
 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
{
	struct timespec64 atime, mtime;
	struct inode *inode = rreq->inode;

	/* we do not want atime to be less than mtime, it broke some apps */
	atime = inode_set_atime_to_ts(inode, current_time(inode));
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&atime, &mtime))
		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}

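/*
 * Free a netfs request: drop the reference the request holds on the open
 * file, if any.
 */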
static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}

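/*
 * Free a netfs subrequest: release any SMB-direct memory registration and
 * any credits still held for it, then free its xid if one was allocated.
 */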
static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *rdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	int rc = subreq->error;

	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
		if (rdata->mr) {
			smbd_deregister_mr(rdata->mr);
			rdata->mr = NULL;
		}
#endif
	}

	if (rdata->credits.value != 0) {
		trace_smb3_rw_credits(rdata->rreq->debug_id,
				      rdata->subreq.debug_index,
				      rdata->credits.value,
				      rdata->server ? rdata->server->credits : 0,
				      rdata->server ? rdata->server->in_flight : 0,
				      -rdata->credits.value,
				      cifs_trace_rw_credits_free_subreq);
		if (rdata->server)
			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
		else
			rdata->credits.value = 0;
	}

	if (rdata->have_xid)
		free_xid(rdata->xid);
}

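/*
 * The operations table that plugs cifs into the netfs library: netfs
 * allocates requests and subrequests from the pools named here and calls
 * back into the routines above to prepare, issue and tear down I/O.
 */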
const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};

/*
 * Mark all open files on the tree connection as invalid, since they were
 * closed when the session to the server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to evict_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}

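/*
 * Convert POSIX open flags into SMB desired-access bits. When
 * rdwr_for_fscache is 1, a write-only open is promoted to read/write so
 * that the local cache can be filled in around partial writes.
 */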
static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/*
		 * GENERIC_ALL is too much permission to request; it can
		 * cause unnecessary access-denied errors on create.
		 */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

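/*
 * Map the O_CREAT/O_EXCL/O_TRUNC combinations onto an SMB create
 * disposition (see the open flag mapping table in cifs_nt_open() below).
 */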
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

	/*********************************************************************
	 *  open flag mapping table:
	 *
	 *	POSIX Flag            CIFS Disposition
	 *	----------            ----------------
	 *	O_CREAT               FILE_OPEN_IF
	 *	O_CREAT | O_EXCL      FILE_CREATE
	 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
	 *	O_TRUNC               FILE_OVERWRITE
	 *	none of the above     FILE_OPEN
	 *
	 *	Note that there is no direct match for the disposition
	 *	FILE_SUPERSEDE (ie create whether or not the file exists);
	 *	O_CREAT | O_TRUNC is similar, but truncates the existing
	 *	file rather than creating a new one as FILE_SUPERSEDE does
	 *	(which also replaces the metadata with the attributes passed
	 *	in on the open call).
	 *
	 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
	 *	and the read/write flags match reasonably. O_LARGEFILE is
	 *	irrelevant because largefile support is always used by this
	 *	client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
	 *	O_NOFOLLOW, O_NONBLOCK need further investigation.
	 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}

static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

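/*
 * Take the write side of the semaphore by polling with
 * down_write_trylock() rather than blocking in down_write(); this keeps
 * the caller from queuing as a pending writer and thereby blocking new
 * readers of cinode->lock_sem while the lock is held for read elsewhere.
 */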
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);

struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if this is a readable file instance, put it first in the list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

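/*
 * Final teardown of a cifsFileInfo: delete any remaining byte-range lock
 * records, then drop the tlink, dentry and superblock references and free
 * the structure. Runs once the last reference has been put.
 */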
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

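/*
 * Worker used when an inline close of the file handle on the server failed
 * with -EBUSY or -EAGAIN: retry the close a few times, then hand the
 * cifsFileInfo on to the final put.
 */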
void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}


/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload: not offloaded on close and oplock breaks
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// if serverclose has been offloaded to wq (on failure), it will
	// handle offloading put as well. If serverclose not offloaded,
	// we need to handle offloading put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}

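/*
 * Open a file on behalf of the VFS: first try to reuse a cached handle
 * whose close was deferred, then attempt a POSIX open where the protocol
 * supports it, and finally fall back to a regular NT-style open.
 */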
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
		rc = cifs_get_writable_path(tcon, full_path,
					    FIND_WR_FSUID_ONLY |
					    FIND_WR_NO_PENDING_DELETE,
					    &cfile);
	} else {
		rc = cifs_get_readable_path(tcon, full_path, &cfile);
	}
	if (rc == 0) {
		unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
		unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);

		if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) &&
		    (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		}
		_cifsFileInfo_put(cfile, true, false);
	} else {
		/* hard link on the deferred close file */
		rc = cifs_get_hardlink_path(tcon, inode, file);
		if (rc)
			cifs_close_deferred_file(CIFS_I(inode));
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				    le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			   (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab the rename sem here, because various ops, including
	 * those that already have the rename sem, can end up causing
	 * writepage to get called, and if the server was down that means
	 * we end up here, and we can never tell if the caller already has
	 * the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
			~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

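/*
 * A close may be deferred only if a close timeout is configured, a lease
 * with at least read and handle caching is held, and no byte-range lock
 * was taken on the file.
 */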
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
		(cinode->oplock == CIFS_CACHE_RHW_FLG ||
		 cinode->oplock == CIFS_CACHE_RH_FLG) &&
		!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
}

int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work
				 * queues new work, so increase the ref count to
				 * avoid a use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						      &cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						   &cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	LIST_HEAD(tmp_list);

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

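/*
 * Release a directory handle: close the search handle on the server if it
 * is still live and free any buffered search results.
 */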
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

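/*
 * Allocate and initialise a byte-range lock record owned by the current
 * task.
 */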
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

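/*
 * Wake up any tasks blocked waiting on the given byte-range lock.
 */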
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		      current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

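/*
 * Add a byte-range lock to the list of locks held on this file handle
 * without checking for conflicts.
 */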
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					      (lock->blist.prev == &lock->blist) &&
					      (lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
1706 static int
cifs_posix_lock_test(struct file * file,struct file_lock * flock)1707 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1708 {
1709 int rc = 0;
1710 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1711 unsigned char saved_type = flock->c.flc_type;
1712
1713 if ((flock->c.flc_flags & FL_POSIX) == 0)
1714 return 1;
1715
1716 down_read(&cinode->lock_sem);
1717 posix_test_lock(file, flock);
1718
1719 if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1720 flock->c.flc_type = saved_type;
1721 rc = 1;
1722 }
1723
1724 up_read(&cinode->lock_sem);
1725 return rc;
1726 }
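
/*
 * Example of the convention above (hypothetical flow): a caller such as
 * cifs_getlk() first calls cifs_posix_lock_test(); a return of 0 means
 * the answer was produced locally from cached state, while 1 means a
 * CIFSSMBPosixLock() request must still be sent so the server can
 * perform the test.
 */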
1727
1728 /*
1729 * Set the byte-range lock (POSIX style). Returns:
1730 * 1) <0, if an error occurs while setting the lock;
1731 * 2) 0, if we set the lock and don't need to send a request to the server;
1732 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1733 * 4) FILE_LOCK_DEFERRED + 1, if we need to send a request to the server.
1734 */
1735 static int
1736 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1737 {
1738 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1739 int rc = FILE_LOCK_DEFERRED + 1;
1740
1741 if ((flock->c.flc_flags & FL_POSIX) == 0)
1742 return rc;
1743
1744 cifs_down_write(&cinode->lock_sem);
1745 if (!cinode->can_cache_brlcks) {
1746 up_write(&cinode->lock_sem);
1747 return rc;
1748 }
1749
1750 rc = posix_lock_file(file, flock, NULL);
1751 up_write(&cinode->lock_sem);
1752 return rc;
1753 }
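
/*
 * Illustrative sketch only, not part of the driver: consuming the
 * four-way return convention documented above cifs_posix_lock_set().
 * The helper name is hypothetical; cifs_setlk() handles this for real.
 */
static int __maybe_unused example_posix_set(struct file *file,
					    struct file_lock *flock)
{
	int rc = cifs_posix_lock_set(file, flock);

	/* <0 (error), 0 (done) and FILE_LOCK_DEFERRED are final locally */
	if (rc <= FILE_LOCK_DEFERRED)
		return rc;
	/* FILE_LOCK_DEFERRED + 1: fall through and send the server request */
	return rc;
}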
1754
1755 int
1756 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1757 {
1758 unsigned int xid;
1759 int rc = 0, stored_rc;
1760 struct cifsLockInfo *li, *tmp;
1761 struct cifs_tcon *tcon;
1762 unsigned int num, max_num, max_buf;
1763 LOCKING_ANDX_RANGE *buf, *cur;
1764 static const int types[] = {
1765 LOCKING_ANDX_LARGE_FILES,
1766 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1767 };
1768 int i;
1769
1770 xid = get_xid();
1771 tcon = tlink_tcon(cfile->tlink);
1772
1773 /*
1774 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1775 * and check it before using.
1776 */
1777 max_buf = tcon->ses->server->maxBuf;
1778 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1779 free_xid(xid);
1780 return -EINVAL;
1781 }
1782
1783 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1784 PAGE_SIZE);
1785 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1786 PAGE_SIZE);
1787 max_num = (max_buf - sizeof(struct smb_hdr)) /
1788 sizeof(LOCKING_ANDX_RANGE);
1789 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1790 if (!buf) {
1791 free_xid(xid);
1792 return -ENOMEM;
1793 }
1794
1795 for (i = 0; i < 2; i++) {
1796 cur = buf;
1797 num = 0;
1798 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1799 if (li->type != types[i])
1800 continue;
1801 cur->Pid = cpu_to_le16(li->pid);
1802 cur->LengthLow = cpu_to_le32((u32)li->length);
1803 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1804 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1805 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1806 if (++num == max_num) {
1807 stored_rc = cifs_lockv(xid, tcon,
1808 cfile->fid.netfid,
1809 (__u8)li->type, 0, num,
1810 buf);
1811 if (stored_rc)
1812 rc = stored_rc;
1813 cur = buf;
1814 num = 0;
1815 } else
1816 cur++;
1817 }
1818
1819 if (num) {
1820 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1821 (__u8)types[i], 0, num, buf);
1822 if (stored_rc)
1823 rc = stored_rc;
1824 }
1825 }
1826
1827 kfree(buf);
1828 free_xid(xid);
1829 return rc;
1830 }
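
/*
 * Worked example of the batching arithmetic above (numbers hypothetical):
 * on a 4 KiB-page system, with maxBuf large enough that max_buf is capped
 * at PAGE_SIZE = 4096, we get
 *   max_num = (4096 - sizeof(struct smb_hdr)) / sizeof(LOCKING_ANDX_RANGE)
 * so each cifs_lockv() call carries at most max_num ranges, and the loop
 * flushes a full buffer to the server before refilling it.
 */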
1831
1832 static __u32
1833 hash_lockowner(fl_owner_t owner)
1834 {
1835 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1836 }
1837 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1838
1839 struct lock_to_push {
1840 struct list_head llist;
1841 __u64 offset;
1842 __u64 length;
1843 __u32 pid;
1844 __u16 netfid;
1845 __u8 type;
1846 };
1847
1848 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1849 static int
1850 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1851 {
1852 struct inode *inode = d_inode(cfile->dentry);
1853 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1854 struct file_lock *flock;
1855 struct file_lock_context *flctx = locks_inode_context(inode);
1856 unsigned int count = 0, i;
1857 int rc = 0, xid, type;
1858 struct list_head locks_to_send, *el;
1859 struct lock_to_push *lck, *tmp;
1860 __u64 length;
1861
1862 xid = get_xid();
1863
1864 if (!flctx)
1865 goto out;
1866
1867 spin_lock(&flctx->flc_lock);
1868 list_for_each(el, &flctx->flc_posix) {
1869 count++;
1870 }
1871 spin_unlock(&flctx->flc_lock);
1872
1873 INIT_LIST_HEAD(&locks_to_send);
1874
1875 /*
1876 * Allocating count locks is enough because no FL_POSIX locks can be
1877 * added to the list while we are holding cinode->lock_sem that
1878 * protects locking operations of this inode.
1879 */
1880 for (i = 0; i < count; i++) {
1881 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1882 if (!lck) {
1883 rc = -ENOMEM;
1884 goto err_out;
1885 }
1886 list_add_tail(&lck->llist, &locks_to_send);
1887 }
1888
1889 el = locks_to_send.next;
1890 spin_lock(&flctx->flc_lock);
1891 for_each_file_lock(flock, &flctx->flc_posix) {
1892 unsigned char ftype = flock->c.flc_type;
1893
1894 if (el == &locks_to_send) {
1895 /*
1896 * The list ended. We don't have enough allocated
1897 * structures - something is really wrong.
1898 */
1899 cifs_dbg(VFS, "Can't push all brlocks!\n");
1900 break;
1901 }
1902 length = cifs_flock_len(flock);
1903 if (ftype == F_RDLCK || ftype == F_SHLCK)
1904 type = CIFS_RDLCK;
1905 else
1906 type = CIFS_WRLCK;
1907 lck = list_entry(el, struct lock_to_push, llist);
1908 lck->pid = hash_lockowner(flock->c.flc_owner);
1909 lck->netfid = cfile->fid.netfid;
1910 lck->length = length;
1911 lck->type = type;
1912 lck->offset = flock->fl_start;
1913 }
1914 spin_unlock(&flctx->flc_lock);
1915
1916 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1917 int stored_rc;
1918
1919 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1920 lck->offset, lck->length, NULL,
1921 lck->type, 0);
1922 if (stored_rc)
1923 rc = stored_rc;
1924 list_del(&lck->llist);
1925 kfree(lck);
1926 }
1927
1928 out:
1929 free_xid(xid);
1930 return rc;
1931 err_out:
1932 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1933 list_del(&lck->llist);
1934 kfree(lck);
1935 }
1936 goto out;
1937 }
1938 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1939
1940 static int
1941 cifs_push_locks(struct cifsFileInfo *cfile)
1942 {
1943 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1944 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1945 int rc = 0;
1946 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1947 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1948 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1949
1950 /* we are going to update can_cache_brlcks here - need a write access */
1951 cifs_down_write(&cinode->lock_sem);
1952 if (!cinode->can_cache_brlcks) {
1953 up_write(&cinode->lock_sem);
1954 return rc;
1955 }
1956
1957 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1958 if (cap_unix(tcon->ses) &&
1959 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1960 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1961 rc = cifs_push_posix_locks(cfile);
1962 else
1963 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1964 rc = tcon->ses->server->ops->push_mand_locks(cfile);
1965
1966 cinode->can_cache_brlcks = false;
1967 up_write(&cinode->lock_sem);
1968 return rc;
1969 }
1970
1971 static void
1972 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1973 bool *wait_flag, struct TCP_Server_Info *server)
1974 {
1975 if (flock->c.flc_flags & FL_POSIX)
1976 cifs_dbg(FYI, "Posix\n");
1977 if (flock->c.flc_flags & FL_FLOCK)
1978 cifs_dbg(FYI, "Flock\n");
1979 if (flock->c.flc_flags & FL_SLEEP) {
1980 cifs_dbg(FYI, "Blocking lock\n");
1981 *wait_flag = true;
1982 }
1983 if (flock->c.flc_flags & FL_ACCESS)
1984 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1985 if (flock->c.flc_flags & FL_LEASE)
1986 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1987 if (flock->c.flc_flags &
1988 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1989 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1990 cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1991 flock->c.flc_flags);
1992
1993 *type = server->vals->large_lock_type;
1994 if (lock_is_write(flock)) {
1995 cifs_dbg(FYI, "F_WRLCK\n");
1996 *type |= server->vals->exclusive_lock_type;
1997 *lock = 1;
1998 } else if (lock_is_unlock(flock)) {
1999 cifs_dbg(FYI, "F_UNLCK\n");
2000 *type |= server->vals->unlock_lock_type;
2001 *unlock = 1;
2002 /* Check if unlock includes more than one lock range */
2003 } else if (lock_is_read(flock)) {
2004 cifs_dbg(FYI, "F_RDLCK\n");
2005 *type |= server->vals->shared_lock_type;
2006 *lock = 1;
2007 } else if (flock->c.flc_type == F_EXLCK) {
2008 cifs_dbg(FYI, "F_EXLCK\n");
2009 *type |= server->vals->exclusive_lock_type;
2010 *lock = 1;
2011 } else if (flock->c.flc_type == F_SHLCK) {
2012 cifs_dbg(FYI, "F_SHLCK\n");
2013 *type |= server->vals->shared_lock_type;
2014 *lock = 1;
2015 } else
2016 cifs_dbg(FYI, "Unknown type of lock\n");
2017 }
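
/*
 * Illustrative sketch only, not part of the driver: decoding a blocking
 * exclusive lock request with the helper above. The function name is
 * hypothetical.
 */
static void __maybe_unused example_decode_wrlck(struct file_lock *flock,
						struct TCP_Server_Info *server)
{
	__u32 type;
	int lock = 0, unlock = 0;
	bool wait_flag = false;

	/*
	 * For a blocking F_WRLCK this yields wait_flag == true, lock == 1,
	 * unlock == 0 and type == large_lock_type | exclusive_lock_type.
	 */
	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag, server);
}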
2018
2019 static int
2020 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
2021 bool wait_flag, bool posix_lck, unsigned int xid)
2022 {
2023 int rc = 0;
2024 __u64 length = cifs_flock_len(flock);
2025 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2026 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2027 struct TCP_Server_Info *server = tcon->ses->server;
2028 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2029 __u16 netfid = cfile->fid.netfid;
2030
2031 if (posix_lck) {
2032 int posix_lock_type;
2033
2034 rc = cifs_posix_lock_test(file, flock);
2035 if (!rc)
2036 return rc;
2037
2038 if (type & server->vals->shared_lock_type)
2039 posix_lock_type = CIFS_RDLCK;
2040 else
2041 posix_lock_type = CIFS_WRLCK;
2042 rc = CIFSSMBPosixLock(xid, tcon, netfid,
2043 hash_lockowner(flock->c.flc_owner),
2044 flock->fl_start, length, flock,
2045 posix_lock_type, wait_flag);
2046 return rc;
2047 }
2048 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2049
2050 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2051 if (!rc)
2052 return rc;
2053
2054 /* BB we could chain these into one lock request BB */
2055 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2056 1, 0, false);
2057 if (rc == 0) {
2058 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2059 type, 0, 1, false);
2060 flock->c.flc_type = F_UNLCK;
2061 if (rc != 0)
2062 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2063 rc);
2064 return 0;
2065 }
2066
2067 if (type & server->vals->shared_lock_type) {
2068 flock->c.flc_type = F_WRLCK;
2069 return 0;
2070 }
2071
2072 type &= ~server->vals->exclusive_lock_type;
2073
2074 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2075 type | server->vals->shared_lock_type,
2076 1, 0, false);
2077 if (rc == 0) {
2078 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2079 type | server->vals->shared_lock_type, 0, 1, false);
2080 flock->c.flc_type = F_RDLCK;
2081 if (rc != 0)
2082 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2083 rc);
2084 } else
2085 flock->c.flc_type = F_WRLCK;
2086
2087 return 0;
2088 }
2089
2090 void
2091 cifs_move_llist(struct list_head *source, struct list_head *dest)
2092 {
2093 struct list_head *li, *tmp;
2094 list_for_each_safe(li, tmp, source)
2095 list_move(li, dest);
2096 }
2097
2098 int
2099 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2100 struct file *file)
2101 {
2102 struct cifsFileInfo *open_file = NULL;
2103 struct cifsInodeInfo *cinode = CIFS_I(inode);
2104 int rc = 0;
2105
2106 spin_lock(&tcon->open_file_lock);
2107 spin_lock(&cinode->open_file_lock);
2108
2109 list_for_each_entry(open_file, &cinode->openFileList, flist) {
2110 if (file->f_flags == open_file->f_flags) {
2111 rc = -EINVAL;
2112 break;
2113 }
2114 }
2115
2116 spin_unlock(&cinode->open_file_lock);
2117 spin_unlock(&tcon->open_file_lock);
2118 return rc;
2119 }
2120
2121 void
2122 cifs_free_llist(struct list_head *llist)
2123 {
2124 struct cifsLockInfo *li, *tmp;
2125 list_for_each_entry_safe(li, tmp, llist, llist) {
2126 cifs_del_lock_waiters(li);
2127 list_del(&li->llist);
2128 kfree(li);
2129 }
2130 }
2131
2132 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2133 int
2134 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2135 unsigned int xid)
2136 {
2137 int rc = 0, stored_rc;
2138 static const int types[] = {
2139 LOCKING_ANDX_LARGE_FILES,
2140 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2141 };
2142 unsigned int i;
2143 unsigned int max_num, num, max_buf;
2144 LOCKING_ANDX_RANGE *buf, *cur;
2145 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2146 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2147 struct cifsLockInfo *li, *tmp;
2148 __u64 length = cifs_flock_len(flock);
2149 LIST_HEAD(tmp_llist);
2150
2151 /*
2152 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2153 * and check it before using.
2154 */
2155 max_buf = tcon->ses->server->maxBuf;
2156 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2157 return -EINVAL;
2158
2159 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2160 PAGE_SIZE);
2161 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2162 PAGE_SIZE);
2163 max_num = (max_buf - sizeof(struct smb_hdr)) /
2164 sizeof(LOCKING_ANDX_RANGE);
2165 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2166 if (!buf)
2167 return -ENOMEM;
2168
2169 cifs_down_write(&cinode->lock_sem);
2170 for (i = 0; i < 2; i++) {
2171 cur = buf;
2172 num = 0;
2173 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2174 if (flock->fl_start > li->offset ||
2175 (flock->fl_start + length) <
2176 (li->offset + li->length))
2177 continue;
2178 if (current->tgid != li->pid)
2179 continue;
2180 if (types[i] != li->type)
2181 continue;
2182 if (cinode->can_cache_brlcks) {
2183 /*
2184 * We can cache brlock requests - simply remove
2185 * a lock from the file's list.
2186 */
2187 list_del(&li->llist);
2188 cifs_del_lock_waiters(li);
2189 kfree(li);
2190 continue;
2191 }
2192 cur->Pid = cpu_to_le16(li->pid);
2193 cur->LengthLow = cpu_to_le32((u32)li->length);
2194 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2195 cur->OffsetLow = cpu_to_le32((u32)li->offset);
2196 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2197 /*
2198 * We need to save a lock here to let us add it again to
2199 * the file's list if the unlock range request fails on
2200 * the server.
2201 */
2202 list_move(&li->llist, &tmp_llist);
2203 if (++num == max_num) {
2204 stored_rc = cifs_lockv(xid, tcon,
2205 cfile->fid.netfid,
2206 li->type, num, 0, buf);
2207 if (stored_rc) {
2208 /*
2209 * We failed on the unlock range
2210 * request - add all locks from the tmp
2211 * list to the head of the file's list.
2212 */
2213 cifs_move_llist(&tmp_llist,
2214 &cfile->llist->locks);
2215 rc = stored_rc;
2216 } else
2217 /*
2218 * The unlock range request succeeded -
2219 * free the tmp list.
2220 */
2221 cifs_free_llist(&tmp_llist);
2222 cur = buf;
2223 num = 0;
2224 } else
2225 cur++;
2226 }
2227 if (num) {
2228 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2229 types[i], num, 0, buf);
2230 if (stored_rc) {
2231 cifs_move_llist(&tmp_llist,
2232 &cfile->llist->locks);
2233 rc = stored_rc;
2234 } else
2235 cifs_free_llist(&tmp_llist);
2236 }
2237 }
2238
2239 up_write(&cinode->lock_sem);
2240 kfree(buf);
2241 return rc;
2242 }
2243 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2244
2245 static int
2246 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2247 bool wait_flag, bool posix_lck, int lock, int unlock,
2248 unsigned int xid)
2249 {
2250 int rc = 0;
2251 __u64 length = cifs_flock_len(flock);
2252 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2253 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2254 struct TCP_Server_Info *server = tcon->ses->server;
2255 struct inode *inode = d_inode(cfile->dentry);
2256
2257 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2258 if (posix_lck) {
2259 int posix_lock_type;
2260
2261 rc = cifs_posix_lock_set(file, flock);
2262 if (rc <= FILE_LOCK_DEFERRED)
2263 return rc;
2264
2265 if (type & server->vals->shared_lock_type)
2266 posix_lock_type = CIFS_RDLCK;
2267 else
2268 posix_lock_type = CIFS_WRLCK;
2269
2270 if (unlock == 1)
2271 posix_lock_type = CIFS_UNLCK;
2272
2273 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2274 hash_lockowner(flock->c.flc_owner),
2275 flock->fl_start, length,
2276 NULL, posix_lock_type, wait_flag);
2277 goto out;
2278 }
2279 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2280 if (lock) {
2281 struct cifsLockInfo *lock;
2282
2283 lock = cifs_lock_init(flock->fl_start, length, type,
2284 flock->c.flc_flags);
2285 if (!lock)
2286 return -ENOMEM;
2287
2288 rc = cifs_lock_add_if(cfile, lock, wait_flag);
2289 if (rc < 0) {
2290 kfree(lock);
2291 return rc;
2292 }
2293 if (!rc)
2294 goto out;
2295
2296 /*
2297 * Windows 7 server can delay breaking lease from read to None
2298 * if we set a byte-range lock on a file - break it explicitly
2299 * before sending the lock to the server to be sure the next
2300 * read won't conflict with non-overlapping locks due to
2301 * page reading.
2302 */
2303 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2304 CIFS_CACHE_READ(CIFS_I(inode))) {
2305 cifs_zap_mapping(inode);
2306 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2307 inode);
2308 CIFS_I(inode)->oplock = 0;
2309 }
2310
2311 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2312 type, 1, 0, wait_flag);
2313 if (rc) {
2314 kfree(lock);
2315 return rc;
2316 }
2317
2318 cifs_lock_add(cfile, lock);
2319 } else if (unlock)
2320 rc = server->ops->mand_unlock_range(cfile, flock, xid);
2321
2322 out:
2323 if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2324 /*
2325 * If this is a request to remove all locks because we
2326 * are closing the file, it doesn't matter if the
2327 * unlocking failed as both cifs.ko and the SMB server
2328 * remove the lock on file close
2329 */
2330 if (rc) {
2331 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2332 if (!(flock->c.flc_flags & FL_CLOSE))
2333 return rc;
2334 }
2335 rc = locks_lock_file_wait(file, flock);
2336 }
2337 return rc;
2338 }
2339
2340 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2341 {
2342 int rc, xid;
2343 int lock = 0, unlock = 0;
2344 bool wait_flag = false;
2345 bool posix_lck = false;
2346 struct cifs_sb_info *cifs_sb;
2347 struct cifs_tcon *tcon;
2348 struct cifsFileInfo *cfile;
2349 __u32 type;
2350
2351 xid = get_xid();
2352
2353 if (!(fl->c.flc_flags & FL_FLOCK)) {
2354 rc = -ENOLCK;
2355 free_xid(xid);
2356 return rc;
2357 }
2358
2359 cfile = (struct cifsFileInfo *)file->private_data;
2360 tcon = tlink_tcon(cfile->tlink);
2361
2362 cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2363 tcon->ses->server);
2364 cifs_sb = CIFS_FILE_SB(file);
2365
2366 if (cap_unix(tcon->ses) &&
2367 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2368 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2369 posix_lck = true;
2370
2371 if (!lock && !unlock) {
2372 /*
2373 * if this is neither a lock nor an unlock request, there is
2374 * nothing to do since we do not know what it is
2375 */
2376 rc = -EOPNOTSUPP;
2377 free_xid(xid);
2378 return rc;
2379 }
2380
2381 rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2382 xid);
2383 free_xid(xid);
2384 return rc;
2387 }
2388
2389 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2390 {
2391 int rc, xid;
2392 int lock = 0, unlock = 0;
2393 bool wait_flag = false;
2394 bool posix_lck = false;
2395 struct cifs_sb_info *cifs_sb;
2396 struct cifs_tcon *tcon;
2397 struct cifsFileInfo *cfile;
2398 __u32 type;
2399
2400 rc = -EACCES;
2401 xid = get_xid();
2402
2403 cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2404 flock->c.flc_type, flock->c.flc_flags,
2405 (long long)flock->fl_start,
2406 (long long)flock->fl_end);
2407
2408 cfile = (struct cifsFileInfo *)file->private_data;
2409 tcon = tlink_tcon(cfile->tlink);
2410
2411 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2412 tcon->ses->server);
2413 cifs_sb = CIFS_FILE_SB(file);
2414 set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2415
2416 if (cap_unix(tcon->ses) &&
2417 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2418 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2419 posix_lck = true;
2420 /*
2421 * BB add code here to normalize offset and length to account for
2422 * negative length, which we cannot accept over the wire.
2423 */
2424 if (IS_GETLK(cmd)) {
2425 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2426 free_xid(xid);
2427 return rc;
2428 }
2429
2430 if (!lock && !unlock) {
2431 /*
2432 * if this is neither a lock nor an unlock request, there is
2433 * nothing to do since we do not know what it is
2434 */
2435 free_xid(xid);
2436 return -EOPNOTSUPP;
2437 }
2438
2439 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2440 xid);
2441 free_xid(xid);
2442 return rc;
2443 }
2444
2445 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
2446 {
2447 struct netfs_io_request *wreq = wdata->rreq;
2448 struct netfs_inode *ictx = netfs_inode(wreq->inode);
2449 loff_t wrend;
2450
2451 if (result > 0) {
2452 wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2453
2454 if (wrend > ictx->zero_point &&
2455 (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2456 wdata->rreq->origin == NETFS_DIO_WRITE))
2457 ictx->zero_point = wrend;
2458 if (wrend > ictx->remote_i_size)
2459 netfs_resize_file(ictx, wrend, true);
2460 }
2461
2462 netfs_write_subrequest_terminated(&wdata->subreq, result);
2463 }
2464
2465 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2466 bool fsuid_only)
2467 {
2468 struct cifsFileInfo *open_file = NULL;
2469 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2470
2471 /* only filter by fsuid on multiuser mounts */
2472 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2473 fsuid_only = false;
2474
2475 spin_lock(&cifs_inode->open_file_lock);
2476 /* We could simply get the first list entry since write-only entries
2477  * are always at the end of the list, but since the first entry might
2478  * have a close pending, we go through the whole list. */
2479 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2480 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2481 continue;
2482 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2483 if ((!open_file->invalidHandle)) {
2484 /* found a good file */
2485 /* lock it so it will not be closed on us */
2486 cifsFileInfo_get(open_file);
2487 spin_unlock(&cifs_inode->open_file_lock);
2488 return open_file;
2489 } /* else might as well continue, and look for
2490 another, or simply have the caller reopen it
2491 again rather than trying to fix this handle */
2492 } else /* write only file */
2493 break; /* write only files are last so must be done */
2494 }
2495 spin_unlock(&cifs_inode->open_file_lock);
2496 return NULL;
2497 }
2498
2499 /* Return -EBADF if no handle is found and general rc otherwise */
2500 int
2501 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2502 struct cifsFileInfo **ret_file)
2503 {
2504 struct cifsFileInfo *open_file, *inv_file = NULL;
2505 struct cifs_sb_info *cifs_sb;
2506 bool any_available = false;
2507 int rc = -EBADF;
2508 unsigned int refind = 0;
2509 bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2510 bool with_delete = flags & FIND_WR_WITH_DELETE;
2511 *ret_file = NULL;
2512
2513 /*
2514 * Having a null inode here (because mapping->host was set to zero by
2515 * the VFS or MM) should not happen, but we had reports of an oops (due
2516 * to it being zero) during stress test cases, so we need to check for it
2517 */
2518
2519 if (cifs_inode == NULL) {
2520 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
2521 dump_stack();
2522 return rc;
2523 }
2524
2525 cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2526
2527 /* only filter by fsuid on multiuser mounts */
2528 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2529 fsuid_only = false;
2530
2531 spin_lock(&cifs_inode->open_file_lock);
2532 refind_writable:
2533 if (refind > MAX_REOPEN_ATT) {
2534 spin_unlock(&cifs_inode->open_file_lock);
2535 return rc;
2536 }
2537 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2538 if (!any_available && open_file->pid != current->tgid)
2539 continue;
2540 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2541 continue;
2542 if (with_delete && !(open_file->fid.access & DELETE))
2543 continue;
2544 if ((flags & FIND_WR_NO_PENDING_DELETE) &&
2545 open_file->status_file_deleted)
2546 continue;
2547 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2548 if (!open_file->invalidHandle) {
2549 /* found a good writable file */
2550 cifsFileInfo_get(open_file);
2551 spin_unlock(&cifs_inode->open_file_lock);
2552 *ret_file = open_file;
2553 return 0;
2554 } else {
2555 if (!inv_file)
2556 inv_file = open_file;
2557 }
2558 }
2559 }
2560 /* couldn't find usable FH with same pid, try any available */
2561 if (!any_available) {
2562 any_available = true;
2563 goto refind_writable;
2564 }
2565
2566 if (inv_file) {
2567 any_available = false;
2568 cifsFileInfo_get(inv_file);
2569 }
2570
2571 spin_unlock(&cifs_inode->open_file_lock);
2572
2573 if (inv_file) {
2574 rc = cifs_reopen_file(inv_file, false);
2575 if (!rc) {
2576 *ret_file = inv_file;
2577 return 0;
2578 }
2579
2580 spin_lock(&cifs_inode->open_file_lock);
2581 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2582 spin_unlock(&cifs_inode->open_file_lock);
2583 cifsFileInfo_put(inv_file);
2584 ++refind;
2585 inv_file = NULL;
2586 spin_lock(&cifs_inode->open_file_lock);
2587 goto refind_writable;
2588 }
2589
2590 return rc;
2591 }
2592
2593 struct cifsFileInfo *
2594 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2595 {
2596 struct cifsFileInfo *cfile;
2597 int rc;
2598
2599 rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2600 if (rc)
2601 cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2602
2603 return cfile;
2604 }
2605
2606 int
2607 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2608 int flags,
2609 struct cifsFileInfo **ret_file)
2610 {
2611 struct cifsFileInfo *cfile;
2612 void *page = alloc_dentry_path();
2613
2614 *ret_file = NULL;
2615
2616 spin_lock(&tcon->open_file_lock);
2617 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2618 struct cifsInodeInfo *cinode;
2619 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2620 if (IS_ERR(full_path)) {
2621 spin_unlock(&tcon->open_file_lock);
2622 free_dentry_path(page);
2623 return PTR_ERR(full_path);
2624 }
2625 if (strcmp(full_path, name))
2626 continue;
2627
2628 cinode = CIFS_I(d_inode(cfile->dentry));
2629 spin_unlock(&tcon->open_file_lock);
2630 free_dentry_path(page);
2631 return cifs_get_writable_file(cinode, flags, ret_file);
2632 }
2633
2634 spin_unlock(&tcon->open_file_lock);
2635 free_dentry_path(page);
2636 return -ENOENT;
2637 }
2638
2639 int
2640 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2641 struct cifsFileInfo **ret_file)
2642 {
2643 struct cifsFileInfo *cfile;
2644 void *page = alloc_dentry_path();
2645
2646 *ret_file = NULL;
2647
2648 spin_lock(&tcon->open_file_lock);
2649 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2650 struct cifsInodeInfo *cinode;
2651 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2652 if (IS_ERR(full_path)) {
2653 spin_unlock(&tcon->open_file_lock);
2654 free_dentry_path(page);
2655 return PTR_ERR(full_path);
2656 }
2657 if (strcmp(full_path, name))
2658 continue;
2659
2660 cinode = CIFS_I(d_inode(cfile->dentry));
2661 spin_unlock(&tcon->open_file_lock);
2662 free_dentry_path(page);
2663 *ret_file = find_readable_file(cinode, 0);
2664 if (*ret_file) {
2665 spin_lock(&cinode->open_file_lock);
2666 if ((*ret_file)->status_file_deleted) {
2667 spin_unlock(&cinode->open_file_lock);
2668 cifsFileInfo_put(*ret_file);
2669 *ret_file = NULL;
2670 } else {
2671 spin_unlock(&cinode->open_file_lock);
2672 }
2673 }
2674 return *ret_file ? 0 : -ENOENT;
2675 }
2676
2677 spin_unlock(&tcon->open_file_lock);
2678 free_dentry_path(page);
2679 return -ENOENT;
2680 }
2681
2682 /*
2683 * Flush data on a strict file.
2684 */
2685 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2686 int datasync)
2687 {
2688 unsigned int xid;
2689 int rc = 0;
2690 struct cifs_tcon *tcon;
2691 struct TCP_Server_Info *server;
2692 struct cifsFileInfo *smbfile = file->private_data;
2693 struct inode *inode = file_inode(file);
2694 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2695
2696 rc = file_write_and_wait_range(file, start, end);
2697 if (rc) {
2698 trace_cifs_fsync_err(inode->i_ino, rc);
2699 return rc;
2700 }
2701
2702 xid = get_xid();
2703
2704 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2705 file, datasync);
2706
2707 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2708 rc = cifs_zap_mapping(inode);
2709 if (rc) {
2710 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2711 rc = 0; /* don't care about it in fsync */
2712 }
2713 }
2714
2715 tcon = tlink_tcon(smbfile->tlink);
2716 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2717 server = tcon->ses->server;
2718 if (server->ops->flush == NULL) {
2719 rc = -ENOSYS;
2720 goto strict_fsync_exit;
2721 }
2722
2723 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2724 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2725 if (smbfile) {
2726 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2727 cifsFileInfo_put(smbfile);
2728 } else
2729 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2730 } else
2731 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2732 }
2733
2734 strict_fsync_exit:
2735 free_xid(xid);
2736 return rc;
2737 }
2738
2739 /*
2740 * Flush data on a non-strict file.
2741 */
2742 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2743 {
2744 unsigned int xid;
2745 int rc = 0;
2746 struct cifs_tcon *tcon;
2747 struct TCP_Server_Info *server;
2748 struct cifsFileInfo *smbfile = file->private_data;
2749 struct inode *inode = file_inode(file);
2750 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2751
2752 rc = file_write_and_wait_range(file, start, end);
2753 if (rc) {
2754 trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2755 return rc;
2756 }
2757
2758 xid = get_xid();
2759
2760 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2761 file, datasync);
2762
2763 tcon = tlink_tcon(smbfile->tlink);
2764 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2765 server = tcon->ses->server;
2766 if (server->ops->flush == NULL) {
2767 rc = -ENOSYS;
2768 goto fsync_exit;
2769 }
2770
2771 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2772 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2773 if (smbfile) {
2774 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2775 cifsFileInfo_put(smbfile);
2776 } else
2777 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2778 } else
2779 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2780 }
2781
2782 fsync_exit:
2783 free_xid(xid);
2784 return rc;
2785 }
2786
2787 /*
2788 * As the file closes, flush all cached write data for this inode,
2789 * checking for writebehind errors.
2790 */
2791 int cifs_flush(struct file *file, fl_owner_t id)
2792 {
2793 struct inode *inode = file_inode(file);
2794 int rc = 0;
2795
2796 if (file->f_mode & FMODE_WRITE)
2797 rc = filemap_write_and_wait(inode->i_mapping);
2798
2799 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2800 if (rc) {
2801 /* get more nuanced writeback errors */
2802 rc = filemap_check_wb_err(file->f_mapping, 0);
2803 trace_cifs_flush_err(inode->i_ino, rc);
2804 }
2805 return rc;
2806 }
2807
2808 static ssize_t
2809 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2810 {
2811 struct file *file = iocb->ki_filp;
2812 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2813 struct inode *inode = file->f_mapping->host;
2814 struct cifsInodeInfo *cinode = CIFS_I(inode);
2815 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2816 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2817 ssize_t rc;
2818
2819 rc = netfs_start_io_write(inode);
2820 if (rc < 0)
2821 return rc;
2822
2823 /*
2824 * We need to hold the sem to be sure nobody modifies the lock list
2825 * with a brlock that prevents writing.
2826 */
2827 down_read(&cinode->lock_sem);
2828
2829 rc = generic_write_checks(iocb, from);
2830 if (rc <= 0)
2831 goto out;
2832
2833 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2834 (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2835 server->vals->exclusive_lock_type, 0,
2836 NULL, CIFS_WRITE_OP))) {
2837 rc = -EACCES;
2838 goto out;
2839 }
2840
2841 rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2842
2843 out:
2844 up_read(&cinode->lock_sem);
2845 netfs_end_io_write(inode);
2846 if (rc > 0)
2847 rc = generic_write_sync(iocb, rc);
2848 return rc;
2849 }
2850
2851 ssize_t
2852 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2853 {
2854 struct inode *inode = file_inode(iocb->ki_filp);
2855 struct cifsInodeInfo *cinode = CIFS_I(inode);
2856 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2857 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2858 iocb->ki_filp->private_data;
2859 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2860 ssize_t written;
2861
2862 written = cifs_get_writer(cinode);
2863 if (written)
2864 return written;
2865
2866 if (CIFS_CACHE_WRITE(cinode)) {
2867 if (cap_unix(tcon->ses) &&
2868 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2869 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2870 written = netfs_file_write_iter(iocb, from);
2871 goto out;
2872 }
2873 written = cifs_writev(iocb, from);
2874 goto out;
2875 }
2876 /*
2877 * For non-oplocked files in strict cache mode we need to write the data
2878 * to the server exactly from the pos to pos+len-1 rather than flush all
2879 * affected pages because it may cause an error with mandatory locks on
2880 * these pages but not on the region from pos to pos+len-1.
2881 */
2882 written = netfs_file_write_iter(iocb, from);
2883 if (CIFS_CACHE_READ(cinode)) {
2884 /*
2885 * We have read level caching and we have just sent a write
2886 * request to the server thus making data in the cache stale.
2887 * Zap the cache and set oplock/lease level to NONE to avoid
2888 * reading stale data from the cache. All subsequent read
2889 * operations will read new data from the server.
2890 */
2891 cifs_zap_mapping(inode);
2892 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2893 inode);
2894 cinode->oplock = 0;
2895 }
2896 out:
2897 cifs_put_writer(cinode);
2898 return written;
2899 }
2900
2901 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2902 {
2903 ssize_t rc;
2904 struct inode *inode = file_inode(iocb->ki_filp);
2905
2906 if (iocb->ki_flags & IOCB_DIRECT)
2907 return netfs_unbuffered_read_iter(iocb, iter);
2908
2909 rc = cifs_revalidate_mapping(inode);
2910 if (rc)
2911 return rc;
2912
2913 return netfs_file_read_iter(iocb, iter);
2914 }
2915
2916 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2917 {
2918 struct inode *inode = file_inode(iocb->ki_filp);
2919 struct cifsInodeInfo *cinode = CIFS_I(inode);
2920 ssize_t written;
2921 int rc;
2922
2923 if (iocb->ki_filp->f_flags & O_DIRECT) {
2924 written = netfs_unbuffered_write_iter(iocb, from);
2925 if (written > 0 && CIFS_CACHE_READ(cinode)) {
2926 cifs_zap_mapping(inode);
2927 cifs_dbg(FYI,
2928 "Set no oplock for inode=%p after a write operation\n",
2929 inode);
2930 cinode->oplock = 0;
2931 }
2932 return written;
2933 }
2934
2935 written = cifs_get_writer(cinode);
2936 if (written)
2937 return written;
2938
2939 written = netfs_file_write_iter(iocb, from);
2940
2941 if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2942 rc = filemap_fdatawrite(inode->i_mapping);
2943 if (rc)
2944 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2945 rc, inode);
2946 }
2947
2948 cifs_put_writer(cinode);
2949 return written;
2950 }
2951
2952 ssize_t
2953 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2954 {
2955 struct inode *inode = file_inode(iocb->ki_filp);
2956 struct cifsInodeInfo *cinode = CIFS_I(inode);
2957 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2958 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2959 iocb->ki_filp->private_data;
2960 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2961 int rc = -EACCES;
2962
2963 /*
2964 * In strict cache mode we need to read from the server all the time
2965 * if we don't have a level II oplock because the server can delay mtime
2966 * change - so we can't make a decision about invalidating the inode.
2967 * And we can also fail when reading pages if there are mandatory locks
2968 * on pages affected by this read but not on the region from pos to
2969 * pos+len-1.
2970 */
2971 if (!CIFS_CACHE_READ(cinode))
2972 return netfs_unbuffered_read_iter(iocb, to);
2973
2974 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2975 if (iocb->ki_flags & IOCB_DIRECT)
2976 return netfs_unbuffered_read_iter(iocb, to);
2977 return netfs_buffered_read_iter(iocb, to);
2978 }
2979
2980 /*
2981 * We need to hold the sem to be sure nobody modifies the lock list
2982 * with a brlock that prevents reading.
2983 */
2984 if (iocb->ki_flags & IOCB_DIRECT) {
2985 rc = netfs_start_io_direct(inode);
2986 if (rc < 0)
2987 goto out;
2988 rc = -EACCES;
2989 down_read(&cinode->lock_sem);
2990 if (!cifs_find_lock_conflict(
2991 cfile, iocb->ki_pos, iov_iter_count(to),
2992 tcon->ses->server->vals->shared_lock_type,
2993 0, NULL, CIFS_READ_OP))
2994 rc = netfs_unbuffered_read_iter_locked(iocb, to);
2995 up_read(&cinode->lock_sem);
2996 netfs_end_io_direct(inode);
2997 } else {
2998 rc = netfs_start_io_read(inode);
2999 if (rc < 0)
3000 goto out;
3001 rc = -EACCES;
3002 down_read(&cinode->lock_sem);
3003 if (!cifs_find_lock_conflict(
3004 cfile, iocb->ki_pos, iov_iter_count(to),
3005 tcon->ses->server->vals->shared_lock_type,
3006 0, NULL, CIFS_READ_OP))
3007 rc = filemap_read(iocb, to, 0);
3008 up_read(&cinode->lock_sem);
3009 netfs_end_io_read(inode);
3010 }
3011 out:
3012 return rc;
3013 }
3014
3015 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
3016 {
3017 return netfs_page_mkwrite(vmf, NULL);
3018 }
3019
3020 static const struct vm_operations_struct cifs_file_vm_ops = {
3021 .fault = filemap_fault,
3022 .map_pages = filemap_map_pages,
3023 .page_mkwrite = cifs_page_mkwrite,
3024 };
3025
3026 int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
3027 {
3028 int xid, rc = 0;
3029 struct inode *inode = file_inode(desc->file);
3030
3031 xid = get_xid();
3032
3033 if (!CIFS_CACHE_READ(CIFS_I(inode)))
3034 rc = cifs_zap_mapping(inode);
3035 if (!rc)
3036 rc = generic_file_mmap_prepare(desc);
3037 if (!rc)
3038 desc->vm_ops = &cifs_file_vm_ops;
3039
3040 free_xid(xid);
3041 return rc;
3042 }
3043
3044 int cifs_file_mmap_prepare(struct vm_area_desc *desc)
3045 {
3046 int rc, xid;
3047
3048 xid = get_xid();
3049
3050 rc = cifs_revalidate_file(desc->file);
3051 if (rc)
3052 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3053 rc);
3054 if (!rc)
3055 rc = generic_file_mmap_prepare(desc);
3056 if (!rc)
3057 desc->vm_ops = &cifs_file_vm_ops;
3058
3059 free_xid(xid);
3060 return rc;
3061 }
3062
3063 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3064 {
3065 struct cifsFileInfo *open_file;
3066
3067 spin_lock(&cifs_inode->open_file_lock);
3068 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3069 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3070 spin_unlock(&cifs_inode->open_file_lock);
3071 return 1;
3072 }
3073 }
3074 spin_unlock(&cifs_inode->open_file_lock);
3075 return 0;
3076 }
3077
3078 /* We do not want to update the file size from the server for inodes
3079  open for write - to avoid races with writepage extending the file -
3080  in the future we could consider allowing refreshing the inode only
3081  on increases in the file size, but this is tricky to do without
3082  racing with writebehind page caching in the current Linux kernel
3083  design */
3084 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3085 bool from_readdir)
3086 {
3087 if (!cifsInode)
3088 return true;
3089
3090 if (is_inode_writable(cifsInode) ||
3091 ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3092 /* This inode is open for write at least once */
3093 struct cifs_sb_info *cifs_sb;
3094
3095 cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3096 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3097 /* since no page cache to corrupt on directio
3098 we can change size safely */
3099 return true;
3100 }
3101
3102 if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3103 return true;
3104
3105 return false;
3106 } else
3107 return true;
3108 }
3109
3110 void cifs_oplock_break(struct work_struct *work)
3111 {
3112 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3113 oplock_break);
3114 struct inode *inode = d_inode(cfile->dentry);
3115 struct super_block *sb = inode->i_sb;
3116 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
3117 struct cifsInodeInfo *cinode = CIFS_I(inode);
3118 struct cifs_tcon *tcon;
3119 struct TCP_Server_Info *server;
3120 struct tcon_link *tlink;
3121 int rc = 0;
3122 bool purge_cache = false, oplock_break_cancelled;
3123 __u64 persistent_fid, volatile_fid;
3124 __u16 net_fid;
3125
3126 /*
3127 * Hold a reference to the superblock to prevent it and its inodes from
3128 * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
3129 * may release the last reference to the sb and trigger inode eviction.
3130 */
3131 cifs_sb_active(sb);
3132 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3133 TASK_UNINTERRUPTIBLE);
3134
3135 tlink = cifs_sb_tlink(cifs_sb);
3136 if (IS_ERR(tlink))
3137 goto out;
3138 tcon = tlink_tcon(tlink);
3139 server = tcon->ses->server;
3140
3141 server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3142 cfile->oplock_epoch, &purge_cache);
3143
3144 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3145 cifs_has_mand_locks(cinode)) {
3146 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3147 inode);
3148 cinode->oplock = 0;
3149 }
3150
3151 if (S_ISREG(inode->i_mode)) {
3152 if (CIFS_CACHE_READ(cinode))
3153 break_lease(inode, O_RDONLY);
3154 else
3155 break_lease(inode, O_WRONLY);
3156 rc = filemap_fdatawrite(inode->i_mapping);
3157 if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3158 rc = filemap_fdatawait(inode->i_mapping);
3159 mapping_set_error(inode->i_mapping, rc);
3160 cifs_zap_mapping(inode);
3161 }
3162 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3163 if (CIFS_CACHE_WRITE(cinode))
3164 goto oplock_break_ack;
3165 }
3166
3167 rc = cifs_push_locks(cfile);
3168 if (rc)
3169 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3170
3171 oplock_break_ack:
3172 /*
3173 * When an oplock break is received and there are no active file
3174 * handles, only cached ones, schedule the deferred close immediately
3175 * so that a new open will not use a cached handle.
3176 */
3177
3178 if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3179 cifs_close_deferred_file(cinode);
3180
3181 persistent_fid = cfile->fid.persistent_fid;
3182 volatile_fid = cfile->fid.volatile_fid;
3183 net_fid = cfile->fid.netfid;
3184 oplock_break_cancelled = cfile->oplock_break_cancelled;
3185
3186 _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
3187 /*
3188 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3189 * an acknowledgment to be sent when the file has already been closed.
3190 */
3191 spin_lock(&cinode->open_file_lock);
3192 /* check list empty since this can race with kill_sb calling tree disconnect */
3193 if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3194 spin_unlock(&cinode->open_file_lock);
3195 rc = server->ops->oplock_response(tcon, persistent_fid,
3196 volatile_fid, net_fid, cinode);
3197 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3198 } else
3199 spin_unlock(&cinode->open_file_lock);
3200
3201 cifs_put_tlink(tlink);
3202 out:
3203 cifs_done_oplock_break(cinode);
3204 cifs_sb_deactive(sb);
3205 }
3206
3207 static int cifs_swap_activate(struct swap_info_struct *sis,
3208 struct file *swap_file, sector_t *span)
3209 {
3210 struct cifsFileInfo *cfile = swap_file->private_data;
3211 struct inode *inode = swap_file->f_mapping->host;
3212 unsigned long blocks;
3213 long long isize;
3214
3215 cifs_dbg(FYI, "swap activate\n");
3216
3217 if (!swap_file->f_mapping->a_ops->swap_rw)
3218 /* Cannot support swap */
3219 return -EINVAL;
3220
3221 spin_lock(&inode->i_lock);
3222 blocks = inode->i_blocks;
3223 isize = inode->i_size;
3224 spin_unlock(&inode->i_lock);
3225 if (blocks*512 < isize) {
3226 pr_warn("swap activate: swapfile has holes\n");
3227 return -EINVAL;
3228 }
3229 *span = sis->pages;
3230
3231 pr_warn_once("Swap support over SMB3 is experimental\n");
3232
3233 /*
3234 * TODO: consider adding ACL (or documenting how) to prevent other
3235 * users (on this or other systems) from reading it
3236 */
3237
3238
3239 /* TODO: add sk_set_memalloc(inet) or similar */
3240
3241 if (cfile)
3242 cfile->swapfile = true;
3243 /*
3244 * TODO: Since file already open, we can't open with DENY_ALL here
3245 * but we could add call to grab a byte range lock to prevent others
3246 * from reading or writing the file
3247 */
3248
3249 sis->flags |= SWP_FS_OPS;
3250 return add_swap_extent(sis, 0, sis->max, 0);
3251 }
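
/*
 * Worked example for the holes check above (hypothetical numbers): a
 * 1 MiB swapfile with only 512 KiB of blocks allocated reports
 * blocks * 512 == 524288 < i_size == 1048576, so it is rejected as
 * having holes.
 */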
3252
3253 static void cifs_swap_deactivate(struct file *file)
3254 {
3255 struct cifsFileInfo *cfile = file->private_data;
3256
3257 cifs_dbg(FYI, "swap deactivate\n");
3258
3259 /* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3260
3261 if (cfile)
3262 cfile->swapfile = false;
3263
3264 /* do we need to unpin (or unlock) the file */
3265 }
3266
3267 /**
3268 * cifs_swap_rw - SMB3 address space operation for swap I/O
3269 * @iocb: target I/O control block
3270 * @iter: I/O buffer
3271 *
3272 * Perform IO to the swap-file. This is much like direct IO.
3273 */
3274 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3275 {
3276 ssize_t ret;
3277
3278 if (iov_iter_rw(iter) == READ)
3279 ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3280 else
3281 ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3282 if (ret < 0)
3283 return ret;
3284 return 0;
3285 }
3286
3287 const struct address_space_operations cifs_addr_ops = {
3288 .read_folio = netfs_read_folio,
3289 .readahead = netfs_readahead,
3290 .writepages = netfs_writepages,
3291 .dirty_folio = netfs_dirty_folio,
3292 .release_folio = netfs_release_folio,
3293 .direct_IO = noop_direct_IO,
3294 .invalidate_folio = netfs_invalidate_folio,
3295 .migrate_folio = filemap_migrate_folio,
3296 /*
3297 * TODO: investigate and if useful we could add an is_dirty_writeback
3298 * helper if needed
3299 */
3300 .swap_activate = cifs_swap_activate,
3301 .swap_deactivate = cifs_swap_deactivate,
3302 .swap_rw = cifs_swap_rw,
3303 };
3304
3305 /*
3306 * cifs_readahead requires the server to support a buffer large enough to
3307 * contain the header plus one complete page of data. Otherwise, we need
3308 * to leave cifs_readahead out of the address space operations.
3309 */
3310 const struct address_space_operations cifs_addr_ops_smallbuf = {
3311 .read_folio = netfs_read_folio,
3312 .writepages = netfs_writepages,
3313 .dirty_folio = netfs_dirty_folio,
3314 .release_folio = netfs_release_folio,
3315 .invalidate_folio = netfs_invalidate_folio,
3316 .migrate_folio = filemap_migrate_folio,
3317 };
3318