1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
6 */
7
8 #include <linux/pagemap.h>
9 #include <linux/vfs.h>
10 #include <linux/falloc.h>
11 #include <linux/scatterlist.h>
12 #include <linux/uuid.h>
13 #include <linux/sort.h>
14 #include <crypto/aead.h>
15 #include <linux/fiemap.h>
16 #include <linux/folio_queue.h>
17 #include <uapi/linux/magic.h>
18 #include "cifsfs.h"
19 #include "cifsglob.h"
20 #include "smb2pdu.h"
21 #include "smb2proto.h"
22 #include "cifsproto.h"
23 #include "cifs_debug.h"
24 #include "cifs_unicode.h"
25 #include "../common/smb2status.h"
26 #include "smb2glob.h"
27 #include "cifs_ioctl.h"
28 #include "smbdirect.h"
29 #include "fscache.h"
30 #include "fs_context.h"
31 #include "cached_dir.h"
32 #include "reparse.h"
33
34 /* Change credits for different ops and return the total number of credits */
35 static int
36 change_conf(struct TCP_Server_Info *server)
37 {
38 server->credits += server->echo_credits + server->oplock_credits;
39 if (server->credits > server->max_credits)
40 server->credits = server->max_credits;
41 server->oplock_credits = server->echo_credits = 0;
42 switch (server->credits) {
43 case 0:
44 return 0;
45 case 1:
46 server->echoes = false;
47 server->oplocks = false;
48 break;
49 case 2:
50 server->echoes = true;
51 server->oplocks = false;
52 server->echo_credits = 1;
53 break;
54 default:
55 server->echoes = true;
56 if (enable_oplocks) {
57 server->oplocks = true;
58 server->oplock_credits = 1;
59 } else
60 server->oplocks = false;
61
62 server->echo_credits = 1;
63 }
64 server->credits -= server->echo_credits + server->oplock_credits;
65 return server->credits + server->echo_credits + server->oplock_credits;
66 }
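/*
 * Worked example (illustrative only): if the server has granted 10 credits
 * in total and oplocks are enabled, change_conf() leaves the pools as
 *
 *	server->echo_credits   = 1;
 *	server->oplock_credits = 1;
 *	server->credits        = 8;	(returned total: 10)
 *
 * With exactly 2 credits, oplocks are disabled and only the echo slot is
 * reserved; with a single credit, both echoes and oplocks are disabled.
 */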
67
68 static void
69 smb2_add_credits(struct TCP_Server_Info *server,
70 struct cifs_credits *credits, const int optype)
71 {
72 int *val, rc = -1;
73 int scredits, in_flight;
74 unsigned int add = credits->value;
75 unsigned int instance = credits->instance;
76 bool reconnect_detected = false;
77 bool reconnect_with_invalid_credits = false;
78
79 spin_lock(&server->req_lock);
80 val = server->ops->get_credits_field(server, optype);
81
82 /* e.g. we found a case where a write overlapping a reconnect messed up credits */
83 if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
84 reconnect_with_invalid_credits = true;
85
86 if ((instance == 0) || (instance == server->reconnect_instance))
87 *val += add;
88 else
89 reconnect_detected = true;
90
91 if (*val > 65000) {
92 *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
93 pr_warn_once("server overflowed SMB3 credits\n");
94 trace_smb3_overflow_credits(server->current_mid,
95 server->conn_id, server->hostname, *val,
96 add, server->in_flight);
97 }
98 if (credits->in_flight_check > 1) {
99 pr_warn_once("rreq R=%08x[%x] Credits not in flight\n",
100 credits->rreq_debug_id, credits->rreq_debug_index);
101 } else {
102 credits->in_flight_check = 2;
103 }
104 if (WARN_ON_ONCE(server->in_flight == 0)) {
105 pr_warn_once("rreq R=%08x[%x] Zero in_flight\n",
106 credits->rreq_debug_id, credits->rreq_debug_index);
107 trace_smb3_rw_credits(credits->rreq_debug_id,
108 credits->rreq_debug_index,
109 credits->value,
110 server->credits, server->in_flight, 0,
111 cifs_trace_rw_credits_zero_in_flight);
112 }
113 server->in_flight--;
114 if (server->in_flight == 0 &&
115 ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) &&
116 ((optype & CIFS_OP_MASK) != CIFS_SESS_OP))
117 rc = change_conf(server);
118 /*
119 * Sometimes server returns 0 credits on oplock break ack - we need to
120 * rebalance credits in this case.
121 */
122 else if (server->in_flight > 0 && server->oplock_credits == 0 &&
123 server->oplocks) {
124 if (server->credits > 1) {
125 server->credits--;
126 server->oplock_credits++;
127 }
128 } else if ((server->in_flight > 0) && (server->oplock_credits > 3) &&
129 ((optype & CIFS_OP_MASK) == CIFS_OBREAK_OP))
130 /* if we now have too many oplock credits, rebalance so we don't starve normal ops */
131 change_conf(server);
132
133 scredits = *val;
134 in_flight = server->in_flight;
135 spin_unlock(&server->req_lock);
136 wake_up(&server->request_q);
137
138 if (reconnect_detected) {
139 trace_smb3_reconnect_detected(server->current_mid,
140 server->conn_id, server->hostname, scredits, add, in_flight);
141
142 cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
143 add, instance);
144 }
145
146 if (reconnect_with_invalid_credits) {
147 trace_smb3_reconnect_with_invalid_credits(server->current_mid,
148 server->conn_id, server->hostname, scredits, add, in_flight);
149 cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
150 optype, scredits, add);
151 }
152
153 spin_lock(&server->srv_lock);
154 if (server->tcpStatus == CifsNeedReconnect
155 || server->tcpStatus == CifsExiting) {
156 spin_unlock(&server->srv_lock);
157 return;
158 }
159 spin_unlock(&server->srv_lock);
160
161 switch (rc) {
162 case -1:
163 /* change_conf hasn't been executed */
164 break;
165 case 0:
166 cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
167 break;
168 case 1:
169 cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
170 break;
171 case 2:
172 cifs_dbg(FYI, "disabling oplocks\n");
173 break;
174 default:
175 /* change_conf rebalanced credits for different types */
176 break;
177 }
178
179 trace_smb3_add_credits(server->current_mid,
180 server->conn_id, server->hostname, scredits, add, in_flight);
181 cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
182 }
183
184 static void
185 smb2_set_credits(struct TCP_Server_Info *server, const int val)
186 {
187 int scredits, in_flight;
188
189 spin_lock(&server->req_lock);
190 server->credits = val;
191 if (val == 1) {
192 server->reconnect_instance++;
193 /*
194 * ChannelSequence is updated in the primary channel for all channels, so that
195 * it stays consistent across SMB3 requests sent on any channel. See MS-SMB2 3.2.4.1 and 3.2.7.1
196 */
197 if (SERVER_IS_CHAN(server))
198 server->primary_server->channel_sequence_num++;
199 else
200 server->channel_sequence_num++;
201 }
202 scredits = server->credits;
203 in_flight = server->in_flight;
204 spin_unlock(&server->req_lock);
205
206 trace_smb3_set_credits(server->current_mid,
207 server->conn_id, server->hostname, scredits, val, in_flight);
208 cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);
209
210 /* don't log while holding the lock */
211 if (val == 1)
212 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
213 }
214
215 static int *
216 smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
217 {
218 switch (optype) {
219 case CIFS_ECHO_OP:
220 return &server->echo_credits;
221 case CIFS_OBREAK_OP:
222 return &server->oplock_credits;
223 default:
224 return &server->credits;
225 }
226 }
227
228 static unsigned int
229 smb2_get_credits(struct mid_q_entry *mid)
230 {
231 return mid->credits_received;
232 }
233
234 static int
235 smb2_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
236 size_t *num, struct cifs_credits *credits)
237 {
238 int rc = 0;
239 unsigned int scredits, in_flight;
240
241 spin_lock(&server->req_lock);
242 while (1) {
243 spin_unlock(&server->req_lock);
244
245 spin_lock(&server->srv_lock);
246 if (server->tcpStatus == CifsExiting) {
247 spin_unlock(&server->srv_lock);
248 return -ENOENT;
249 }
250 spin_unlock(&server->srv_lock);
251
252 spin_lock(&server->req_lock);
253 if (server->credits <= 0) {
254 spin_unlock(&server->req_lock);
255 cifs_num_waiters_inc(server);
256 rc = wait_event_killable(server->request_q,
257 has_credits(server, &server->credits, 1));
258 cifs_num_waiters_dec(server);
259 if (rc)
260 return rc;
261 spin_lock(&server->req_lock);
262 } else {
263 scredits = server->credits;
264 /* can deadlock with reopen */
265 if (scredits <= 8) {
266 *num = SMB2_MAX_BUFFER_SIZE;
267 credits->value = 0;
268 credits->instance = 0;
269 break;
270 }
271
272 /* leave some credits for reopen and other ops */
273 scredits -= 8;
274 *num = min_t(unsigned int, size,
275 scredits * SMB2_MAX_BUFFER_SIZE);
276
277 credits->value =
278 DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
279 credits->instance = server->reconnect_instance;
280 server->credits -= credits->value;
281 server->in_flight++;
282 if (server->in_flight > server->max_in_flight)
283 server->max_in_flight = server->in_flight;
284 break;
285 }
286 }
287 scredits = server->credits;
288 in_flight = server->in_flight;
289 spin_unlock(&server->req_lock);
290
291 trace_smb3_wait_credits(server->current_mid,
292 server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
293 cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
294 __func__, credits->value, scredits);
295
296 return rc;
297 }
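/*
 * Sizing sketch for the loop above (illustrative figures, assuming
 * SMB2_MAX_BUFFER_SIZE is 64KiB): with 20 credits available and a 4MiB
 * request, 8 credits are held back for reopen and other ops, so
 *
 *	*num           = min(4MiB, 12 * 64KiB) = 768KiB
 *	credits->value = DIV_ROUND_UP(768KiB, 64KiB) = 12
 *
 * and in_flight is bumped by one for the whole MTU request.
 */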
298
299 static int
300 smb2_adjust_credits(struct TCP_Server_Info *server,
301 struct cifs_io_subrequest *subreq,
302 unsigned int /*enum smb3_rw_credits_trace*/ trace)
303 {
304 struct cifs_credits *credits = &subreq->credits;
305 int new_val = DIV_ROUND_UP(subreq->subreq.len - subreq->subreq.transferred,
306 SMB2_MAX_BUFFER_SIZE);
307 int scredits, in_flight;
308
309 if (!credits->value || credits->value == new_val)
310 return 0;
311
312 if (credits->value < new_val) {
313 trace_smb3_rw_credits(subreq->rreq->debug_id,
314 subreq->subreq.debug_index,
315 credits->value,
316 server->credits, server->in_flight,
317 new_val - credits->value,
318 cifs_trace_rw_credits_no_adjust_up);
319 trace_smb3_too_many_credits(server->current_mid,
320 server->conn_id, server->hostname, 0, credits->value - new_val, 0);
321 cifs_server_dbg(VFS, "R=%x[%x] request has less credits (%d) than required (%d)",
322 subreq->rreq->debug_id, subreq->subreq.debug_index,
323 credits->value, new_val);
324
325 return -EOPNOTSUPP;
326 }
327
328 spin_lock(&server->req_lock);
329
330 if (server->reconnect_instance != credits->instance) {
331 scredits = server->credits;
332 in_flight = server->in_flight;
333 spin_unlock(&server->req_lock);
334
335 trace_smb3_rw_credits(subreq->rreq->debug_id,
336 subreq->subreq.debug_index,
337 credits->value,
338 server->credits, server->in_flight,
339 new_val - credits->value,
340 cifs_trace_rw_credits_old_session);
341 trace_smb3_reconnect_detected(server->current_mid,
342 server->conn_id, server->hostname, scredits,
343 credits->value - new_val, in_flight);
344 cifs_server_dbg(VFS, "R=%x[%x] trying to return %d credits to old session\n",
345 subreq->rreq->debug_id, subreq->subreq.debug_index,
346 credits->value - new_val);
347 return -EAGAIN;
348 }
349
350 trace_smb3_rw_credits(subreq->rreq->debug_id,
351 subreq->subreq.debug_index,
352 credits->value,
353 server->credits, server->in_flight,
354 new_val - credits->value, trace);
355 server->credits += credits->value - new_val;
356 scredits = server->credits;
357 in_flight = server->in_flight;
358 spin_unlock(&server->req_lock);
359 wake_up(&server->request_q);
360
361 trace_smb3_adj_credits(server->current_mid,
362 server->conn_id, server->hostname, scredits,
363 credits->value - new_val, in_flight);
364 cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
365 __func__, credits->value - new_val, scredits);
366
367 credits->value = new_val;
368
369 return 0;
370 }
371
372 static __u64
373 smb2_get_next_mid(struct TCP_Server_Info *server)
374 {
375 __u64 mid;
376 /* for SMB2 we need the current value */
377 spin_lock(&server->mid_counter_lock);
378 mid = server->current_mid++;
379 spin_unlock(&server->mid_counter_lock);
380 return mid;
381 }
382
383 static void
384 smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
385 {
386 spin_lock(&server->mid_counter_lock);
387 if (server->current_mid >= val)
388 server->current_mid -= val;
389 spin_unlock(&server->mid_counter_lock);
390 }
391
392 static struct mid_q_entry *
393 __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
394 {
395 struct mid_q_entry *mid;
396 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
397 __u64 wire_mid = le64_to_cpu(shdr->MessageId);
398
399 if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
400 cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
401 return NULL;
402 }
403
404 spin_lock(&server->mid_queue_lock);
405 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
406 if ((mid->mid == wire_mid) &&
407 (mid->mid_state == MID_REQUEST_SUBMITTED) &&
408 (mid->command == shdr->Command)) {
409 kref_get(&mid->refcount);
410 if (dequeue) {
411 list_del_init(&mid->qhead);
412 mid->deleted_from_q = true;
413 }
414 spin_unlock(&server->mid_queue_lock);
415 return mid;
416 }
417 }
418 spin_unlock(&server->mid_queue_lock);
419 return NULL;
420 }
421
422 static struct mid_q_entry *
423 smb2_find_mid(struct TCP_Server_Info *server, char *buf)
424 {
425 return __smb2_find_mid(server, buf, false);
426 }
427
428 static struct mid_q_entry *
429 smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
430 {
431 return __smb2_find_mid(server, buf, true);
432 }
433
434 static void
435 smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
436 {
437 #ifdef CONFIG_CIFS_DEBUG2
438 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
439
440 cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
441 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
442 shdr->Id.SyncId.ProcessId);
443 if (!server->ops->check_message(buf, server->total_read, server)) {
444 cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
445 server->ops->calc_smb_size(buf));
446 }
447 #endif
448 }
449
450 static bool
451 smb2_need_neg(struct TCP_Server_Info *server)
452 {
453 return server->max_read == 0;
454 }
455
456 static int
457 smb2_negotiate(const unsigned int xid,
458 struct cifs_ses *ses,
459 struct TCP_Server_Info *server)
460 {
461 int rc;
462
463 spin_lock(&server->mid_counter_lock);
464 server->current_mid = 0;
465 spin_unlock(&server->mid_counter_lock);
466 rc = SMB2_negotiate(xid, ses, server);
467 return rc;
468 }
469
470 static inline unsigned int
471 prevent_zero_iosize(unsigned int size, const char *type)
472 {
473 if (size == 0) {
474 cifs_dbg(VFS, "SMB: Zero %ssize calculated, using minimum value %u\n",
475 type, CIFS_MIN_DEFAULT_IOSIZE);
476 return CIFS_MIN_DEFAULT_IOSIZE;
477 }
478 return size;
479 }
480
481 static unsigned int
482 smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
483 {
484 struct TCP_Server_Info *server = tcon->ses->server;
485 unsigned int wsize;
486
487 /* start with specified wsize, or default */
488 wsize = ctx->got_wsize ? ctx->vol_wsize : CIFS_DEFAULT_IOSIZE;
489 wsize = min_t(unsigned int, wsize, server->max_write);
490 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
491 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
492
493 return prevent_zero_iosize(wsize, "w");
494 }
495
496 static unsigned int
497 smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
498 {
499 struct TCP_Server_Info *server = tcon->ses->server;
500 unsigned int wsize;
501
502 /* start with specified wsize, or default */
503 wsize = ctx->got_wsize ? ctx->vol_wsize : SMB3_DEFAULT_IOSIZE;
504 wsize = min_t(unsigned int, wsize, server->max_write);
505 #ifdef CONFIG_CIFS_SMB_DIRECT
506 if (server->rdma) {
507 const struct smbdirect_socket_parameters *sp =
508 smbd_get_parameters(server->smbd_conn);
509
510 if (server->sign)
511 /*
512 * Account for SMB2 data transfer packet header and
513 * possible encryption header
514 */
515 wsize = min_t(unsigned int,
516 wsize,
517 sp->max_fragmented_send_size -
518 SMB2_READWRITE_PDU_HEADER_SIZE -
519 sizeof(struct smb2_transform_hdr));
520 else
521 wsize = min_t(unsigned int,
522 wsize, sp->max_read_write_size);
523 }
524 #endif
525 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
526 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
527
528 return prevent_zero_iosize(wsize, "w");
529 }
530
531 static unsigned int
532 smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
533 {
534 struct TCP_Server_Info *server = tcon->ses->server;
535 unsigned int rsize;
536
537 /* start with specified rsize, or default */
538 rsize = ctx->got_rsize ? ctx->vol_rsize : CIFS_DEFAULT_IOSIZE;
539 rsize = min_t(unsigned int, rsize, server->max_read);
540
541 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
542 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
543
544 return prevent_zero_iosize(rsize, "r");
545 }
546
547 static unsigned int
548 smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
549 {
550 struct TCP_Server_Info *server = tcon->ses->server;
551 unsigned int rsize;
552
553 /* start with specified rsize, or default */
554 rsize = ctx->got_rsize ? ctx->vol_rsize : SMB3_DEFAULT_IOSIZE;
555 rsize = min_t(unsigned int, rsize, server->max_read);
556 #ifdef CONFIG_CIFS_SMB_DIRECT
557 if (server->rdma) {
558 const struct smbdirect_socket_parameters *sp =
559 smbd_get_parameters(server->smbd_conn);
560
561 if (server->sign)
562 /*
563 * Account for SMB2 data transfer packet header and
564 * possible encryption header
565 */
566 rsize = min_t(unsigned int,
567 rsize,
568 sp->max_fragmented_recv_size -
569 SMB2_READWRITE_PDU_HEADER_SIZE -
570 sizeof(struct smb2_transform_hdr));
571 else
572 rsize = min_t(unsigned int,
573 rsize, sp->max_read_write_size);
574 }
575 #endif
576
577 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
578 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
579
580 return prevent_zero_iosize(rsize, "r");
581 }
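/*
 * Example (hedged; assumes the usual defaults of SMB3_DEFAULT_IOSIZE = 4MiB
 * and SMB2_MAX_BUFFER_SIZE = 64KiB): with no rsize= mount option and a server
 * that advertises an 8MiB max_read plus the LARGE_MTU capability, the
 * negotiated rsize is 4MiB; without LARGE_MTU it is clamped to 64KiB.
 */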
582
583 /*
584 * Compare two interfaces a and b.
585 * Return 0 if everything matches.
586 * Return 1 if a ranks higher (rdma capability is compared first, then rss,
587 * then link speed, with the socket address as the final tie-breaker); -1 otherwise.
588 */
589 static int
590 iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b)
591 {
592 int cmp_ret = 0;
593
594 WARN_ON(!a || !b);
595 if (a->rdma_capable == b->rdma_capable) {
596 if (a->rss_capable == b->rss_capable) {
597 if (a->speed == b->speed) {
598 cmp_ret = cifs_ipaddr_cmp((struct sockaddr *) &a->sockaddr,
599 (struct sockaddr *) &b->sockaddr);
600 if (!cmp_ret)
601 return 0;
602 else if (cmp_ret > 0)
603 return 1;
604 else
605 return -1;
606 } else if (a->speed > b->speed)
607 return 1;
608 else
609 return -1;
610 } else if (a->rss_capable > b->rss_capable)
611 return 1;
612 else
613 return -1;
614 } else if (a->rdma_capable > b->rdma_capable)
615 return 1;
616 else
617 return -1;
618 }
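/*
 * Ranking example (illustrative): with equal RDMA capability on both sides,
 * a 1Gbps RSS-capable interface still compares greater than a 100Gbps
 * non-RSS one, because RSS capability is checked before link speed; the
 * socket address only acts as the final tie-breaker.
 */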
619
620 static int
621 parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
622 size_t buf_len, struct cifs_ses *ses, bool in_mount)
623 {
624 struct network_interface_info_ioctl_rsp *p;
625 struct sockaddr_in *addr4;
626 struct sockaddr_in6 *addr6;
627 struct iface_info_ipv4 *p4;
628 struct iface_info_ipv6 *p6;
629 struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL;
630 struct cifs_server_iface tmp_iface;
631 ssize_t bytes_left;
632 size_t next = 0;
633 int nb_iface = 0;
634 int rc = 0, ret = 0;
635
636 bytes_left = buf_len;
637 p = buf;
638
639 spin_lock(&ses->iface_lock);
640 /* do not query too frequently, this time with lock held */
641 if (ses->iface_last_update &&
642 time_before(jiffies, ses->iface_last_update +
643 (SMB_INTERFACE_POLL_INTERVAL * HZ))) {
644 spin_unlock(&ses->iface_lock);
645 return 0;
646 }
647
648 /*
649 * Go through iface_list and mark them as inactive
650 */
651 list_for_each_entry_safe(iface, niface, &ses->iface_list,
652 iface_head)
653 iface->is_active = 0;
654
655 spin_unlock(&ses->iface_lock);
656
657 /*
658 * Samba server e.g. can return an empty interface list in some cases,
659 * which would only be a problem if we were requesting multichannel
660 */
661 if (bytes_left == 0) {
662 /* avoid spamming logs every 10 minutes, so log only during mount */
663 if ((ses->chan_max > 1) && in_mount)
664 cifs_dbg(VFS,
665 "multichannel not available\n"
666 "Empty network interface list returned by server %s\n",
667 ses->server->hostname);
668 rc = -EOPNOTSUPP;
669 ses->iface_last_update = jiffies;
670 goto out;
671 }
672
673 while (bytes_left >= (ssize_t)sizeof(*p)) {
674 memset(&tmp_iface, 0, sizeof(tmp_iface));
675 /* default to 1Gbps when link speed is unset */
676 tmp_iface.speed = le64_to_cpu(p->LinkSpeed) ?: 1000000000;
677 tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
678 tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
679
680 switch (p->Family) {
681 /*
682 * The kernel and wire socket structures have the same
683 * layout and use network byte order but make the
684 * conversion explicit in case either one changes.
685 */
686 case INTERNETWORK:
687 addr4 = (struct sockaddr_in *)&tmp_iface.sockaddr;
688 p4 = (struct iface_info_ipv4 *)p->Buffer;
689 addr4->sin_family = AF_INET;
690 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
691
692 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
693 addr4->sin_port = cpu_to_be16(CIFS_PORT);
694
695 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
696 &addr4->sin_addr);
697 break;
698 case INTERNETWORKV6:
699 addr6 = (struct sockaddr_in6 *)&tmp_iface.sockaddr;
700 p6 = (struct iface_info_ipv6 *)p->Buffer;
701 addr6->sin6_family = AF_INET6;
702 memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
703
704 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
705 addr6->sin6_flowinfo = 0;
706 addr6->sin6_scope_id = 0;
707 addr6->sin6_port = cpu_to_be16(CIFS_PORT);
708
709 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
710 &addr6->sin6_addr);
711 break;
712 default:
713 cifs_dbg(VFS,
714 "%s: skipping unsupported socket family\n",
715 __func__);
716 goto next_iface;
717 }
718
719 /*
720 * The iface_list is assumed to be sorted by speed.
721 * Check if the new interface exists in that list.
722 * NEVER change iface; it could be in use.
723 * Add a new one instead.
724 */
725 spin_lock(&ses->iface_lock);
726 list_for_each_entry_safe(iface, niface, &ses->iface_list,
727 iface_head) {
728 ret = iface_cmp(iface, &tmp_iface);
729 if (!ret) {
730 iface->is_active = 1;
731 spin_unlock(&ses->iface_lock);
732 goto next_iface;
733 } else if (ret < 0) {
734 /* all remaining ifaces are slower */
735 kref_get(&iface->refcount);
736 break;
737 }
738 }
739 spin_unlock(&ses->iface_lock);
740
741 /* no match. insert the entry in the list */
742 info = kmalloc(sizeof(struct cifs_server_iface),
743 GFP_KERNEL);
744 if (!info) {
745 rc = -ENOMEM;
746 goto out;
747 }
748 memcpy(info, &tmp_iface, sizeof(tmp_iface));
749
750 /* add this new entry to the list */
751 kref_init(&info->refcount);
752 info->is_active = 1;
753
754 cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, ses->iface_count);
755 cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
756 cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
757 le32_to_cpu(p->Capability));
758
759 spin_lock(&ses->iface_lock);
760 if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) {
761 list_add_tail(&info->iface_head, &iface->iface_head);
762 kref_put(&iface->refcount, release_iface);
763 } else
764 list_add_tail(&info->iface_head, &ses->iface_list);
765
766 ses->iface_count++;
767 spin_unlock(&ses->iface_lock);
768 next_iface:
769 nb_iface++;
770 next = le32_to_cpu(p->Next);
771 if (!next) {
772 bytes_left -= sizeof(*p);
773 break;
774 }
775 /* Validate that Next doesn't point beyond the buffer */
776 if (next > bytes_left) {
777 cifs_dbg(VFS, "%s: invalid Next pointer %zu > %zd\n",
778 __func__, next, bytes_left);
779 rc = -EINVAL;
780 goto out;
781 }
782 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
783 bytes_left -= next;
784 }
785
786 if (!nb_iface) {
787 cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
788 rc = -EINVAL;
789 goto out;
790 }
791
792 /* Azure rounds the buffer size up by 8, to a 16 byte boundary */
793 if ((bytes_left > 8) ||
794 (bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next)
795 + sizeof(p->Next) && p->Next))
796 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
797
798 ses->iface_last_update = jiffies;
799
800 out:
801 /*
802 * Go through the list again and put the inactive entries
803 */
804 spin_lock(&ses->iface_lock);
805 list_for_each_entry_safe(iface, niface, &ses->iface_list,
806 iface_head) {
807 if (!iface->is_active) {
808 list_del(&iface->iface_head);
809 kref_put(&iface->refcount, release_iface);
810 ses->iface_count--;
811 }
812 }
813 spin_unlock(&ses->iface_lock);
814
815 return rc;
816 }
817
818 int
819 SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount)
820 {
821 int rc;
822 unsigned int ret_data_len = 0;
823 struct network_interface_info_ioctl_rsp *out_buf = NULL;
824 struct cifs_ses *ses = tcon->ses;
825 struct TCP_Server_Info *pserver;
826
827 /* do not query too frequently */
828 if (ses->iface_last_update &&
829 time_before(jiffies, ses->iface_last_update +
830 (SMB_INTERFACE_POLL_INTERVAL * HZ)))
831 return 0;
832
833 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
834 FSCTL_QUERY_NETWORK_INTERFACE_INFO,
835 NULL /* no data input */, 0 /* no data input */,
836 CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
837 if (rc == -EOPNOTSUPP) {
838 cifs_dbg(FYI,
839 "server does not support query network interfaces\n");
840 ret_data_len = 0;
841 } else if (rc != 0) {
842 cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
843 goto out;
844 }
845
846 rc = parse_server_interfaces(out_buf, ret_data_len, ses, in_mount);
847 if (rc)
848 goto out;
849
850 /* check if iface is still active */
851 spin_lock(&ses->chan_lock);
852 pserver = ses->chans[0].server;
853 if (pserver && !cifs_chan_is_iface_active(ses, pserver)) {
854 spin_unlock(&ses->chan_lock);
855 cifs_chan_update_iface(ses, pserver);
856 spin_lock(&ses->chan_lock);
857 }
858 spin_unlock(&ses->chan_lock);
859
860 out:
861 kfree(out_buf);
862 return rc;
863 }
864
865 static void
866 smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
867 struct cifs_sb_info *cifs_sb)
868 {
869 int rc;
870 __le16 srch_path = 0; /* Null - open root of share */
871 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
872 struct cifs_open_parms oparms;
873 struct cifs_fid fid;
874 struct cached_fid *cfid = NULL;
875
876 oparms = (struct cifs_open_parms) {
877 .tcon = tcon,
878 .path = "",
879 .desired_access = FILE_READ_ATTRIBUTES,
880 .disposition = FILE_OPEN,
881 .create_options = cifs_create_options(cifs_sb, 0),
882 .fid = &fid,
883 };
884
885 rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
886 if (rc == 0)
887 memcpy(&fid, &cfid->fid, sizeof(struct cifs_fid));
888 else
889 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
890 NULL, NULL);
891 if (rc)
892 return;
893
894 SMB3_request_interfaces(xid, tcon, true /* called during mount */);
895
896 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
897 FS_ATTRIBUTE_INFORMATION);
898 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
899 FS_DEVICE_INFORMATION);
900 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
901 FS_VOLUME_INFORMATION);
902 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
903 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
904 if (cfid == NULL)
905 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
906 else
907 close_cached_dir(cfid);
908 }
909
910 static void
911 smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
912 struct cifs_sb_info *cifs_sb)
913 {
914 int rc;
915 __le16 srch_path = 0; /* Null - open root of share */
916 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
917 struct cifs_open_parms oparms;
918 struct cifs_fid fid;
919
920 oparms = (struct cifs_open_parms) {
921 .tcon = tcon,
922 .path = "",
923 .desired_access = FILE_READ_ATTRIBUTES,
924 .disposition = FILE_OPEN,
925 .create_options = cifs_create_options(cifs_sb, 0),
926 .fid = &fid,
927 };
928
929 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
930 NULL, NULL);
931 if (rc)
932 return;
933
934 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
935 FS_ATTRIBUTE_INFORMATION);
936 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
937 FS_DEVICE_INFORMATION);
938 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
939 }
940
941 static int
942 smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
943 struct cifs_sb_info *cifs_sb, const char *full_path)
944 {
945 __le16 *utf16_path;
946 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
947 int err_buftype = CIFS_NO_BUFFER;
948 struct cifs_open_parms oparms;
949 struct kvec err_iov = {};
950 struct cifs_fid fid;
951 struct cached_fid *cfid;
952 bool islink;
953 int rc, rc2;
954
955 rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
956 if (!rc) {
957 close_cached_dir(cfid);
958 return 0;
959 }
960
961 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
962 if (!utf16_path)
963 return -ENOMEM;
964
965 oparms = (struct cifs_open_parms) {
966 .tcon = tcon,
967 .path = full_path,
968 .desired_access = FILE_READ_ATTRIBUTES,
969 .disposition = FILE_OPEN,
970 .create_options = cifs_create_options(cifs_sb, 0),
971 .fid = &fid,
972 };
973
974 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
975 &err_iov, &err_buftype);
976 if (rc) {
977 struct smb2_hdr *hdr = err_iov.iov_base;
978
979 if (unlikely(!hdr || err_buftype == CIFS_NO_BUFFER))
980 goto out;
981
982 if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
983 rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
984 full_path, &islink);
985 if (rc2) {
986 rc = rc2;
987 goto out;
988 }
989 if (islink)
990 rc = -EREMOTE;
991 }
992 if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
993 (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
994 rc = -EOPNOTSUPP;
995 goto out;
996 }
997
998 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
999
1000 out:
1001 free_rsp_buf(err_buftype, err_iov.iov_base);
1002 kfree(utf16_path);
1003 return rc;
1004 }
1005
1006 static int smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
1007 struct cifs_sb_info *cifs_sb, const char *full_path,
1008 u64 *uniqueid, struct cifs_open_info_data *data)
1009 {
1010 *uniqueid = le64_to_cpu(data->fi.IndexNumber);
1011 return 0;
1012 }
1013
1014 static int smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
1015 struct cifsFileInfo *cfile, struct cifs_open_info_data *data)
1016 {
1017 struct cifs_fid *fid = &cfile->fid;
1018
1019 if (cfile->symlink_target) {
1020 data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
1021 if (!data->symlink_target)
1022 return -ENOMEM;
1023 }
1024 data->contains_posix_file_info = false;
1025 return SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid, &data->fi);
1026 }
1027
1028 #ifdef CONFIG_CIFS_XATTR
1029 static ssize_t
1030 move_smb2_ea_to_cifs(char *dst, size_t dst_size,
1031 struct smb2_file_full_ea_info *src, size_t src_size,
1032 const unsigned char *ea_name)
1033 {
1034 int rc = 0;
1035 unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
1036 char *name, *value;
1037 size_t buf_size = dst_size;
1038 size_t name_len, value_len, user_name_len;
1039
1040 while (src_size > 0) {
1041 name_len = (size_t)src->ea_name_length;
1042 value_len = (size_t)le16_to_cpu(src->ea_value_length);
1043
1044 if (name_len == 0)
1045 break;
1046
1047 if (src_size < 8 + name_len + 1 + value_len) {
1048 cifs_dbg(FYI, "EA entry goes beyond length of list\n");
1049 rc = -EIO;
1050 goto out;
1051 }
1052
1053 name = &src->ea_data[0];
1054 value = &src->ea_data[src->ea_name_length + 1];
1055
1056 if (ea_name) {
1057 if (ea_name_len == name_len &&
1058 memcmp(ea_name, name, name_len) == 0) {
1059 rc = value_len;
1060 if (dst_size == 0)
1061 goto out;
1062 if (dst_size < value_len) {
1063 rc = -ERANGE;
1064 goto out;
1065 }
1066 memcpy(dst, value, value_len);
1067 goto out;
1068 }
1069 } else {
1070 /* 'user.' plus a terminating null */
1071 user_name_len = 5 + 1 + name_len;
1072
1073 if (buf_size == 0) {
1074 /* skip copy - calc size only */
1075 rc += user_name_len;
1076 } else if (dst_size >= user_name_len) {
1077 dst_size -= user_name_len;
1078 memcpy(dst, "user.", 5);
1079 dst += 5;
1080 memcpy(dst, src->ea_data, name_len);
1081 dst += name_len;
1082 *dst = 0;
1083 ++dst;
1084 rc += user_name_len;
1085 } else {
1086 /* stop before overrun buffer */
1087 rc = -ERANGE;
1088 break;
1089 }
1090 }
1091
1092 if (!src->next_entry_offset)
1093 break;
1094
1095 if (src_size < le32_to_cpu(src->next_entry_offset)) {
1096 /* stop before overrun buffer */
1097 rc = -ERANGE;
1098 break;
1099 }
1100 src_size -= le32_to_cpu(src->next_entry_offset);
1101 src = (void *)((char *)src +
1102 le32_to_cpu(src->next_entry_offset));
1103 }
1104
1105 /* didn't find the named attribute */
1106 if (ea_name)
1107 rc = -ENODATA;
1108
1109 out:
1110 return (ssize_t)rc;
1111 }
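/*
 * Listing example (illustrative): a server-side EA named "Foo" is reported to
 * userspace as the xattr name "user.Foo" plus a terminating NUL, i.e.
 * user_name_len = 5 + 1 + 3 = 9 bytes; when a specific ea_name is requested,
 * the return value is the length of that EA's value instead.
 */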
1112
1113 static ssize_t
1114 smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
1115 const unsigned char *path, const unsigned char *ea_name,
1116 char *ea_data, size_t buf_size,
1117 struct cifs_sb_info *cifs_sb)
1118 {
1119 int rc;
1120 struct kvec rsp_iov = {NULL, 0};
1121 int buftype = CIFS_NO_BUFFER;
1122 struct smb2_query_info_rsp *rsp;
1123 struct smb2_file_full_ea_info *info = NULL;
1124
1125 rc = smb2_query_info_compound(xid, tcon, path,
1126 FILE_READ_EA,
1127 FILE_FULL_EA_INFORMATION,
1128 SMB2_O_INFO_FILE,
1129 CIFSMaxBufSize -
1130 MAX_SMB2_CREATE_RESPONSE_SIZE -
1131 MAX_SMB2_CLOSE_RESPONSE_SIZE,
1132 &rsp_iov, &buftype, cifs_sb);
1133 if (rc) {
1134 /*
1135 * If ea_name is NULL (listxattr) and there are no EAs,
1136 * return 0 as it's not an error. Otherwise, the specified
1137 * ea_name was not found.
1138 */
1139 if (!ea_name && rc == -ENODATA)
1140 rc = 0;
1141 goto qeas_exit;
1142 }
1143
1144 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
1145 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
1146 le32_to_cpu(rsp->OutputBufferLength),
1147 &rsp_iov,
1148 sizeof(struct smb2_file_full_ea_info));
1149 if (rc)
1150 goto qeas_exit;
1151
1152 info = (struct smb2_file_full_ea_info *)(
1153 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
1154 rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
1155 le32_to_cpu(rsp->OutputBufferLength), ea_name);
1156
1157 qeas_exit:
1158 free_rsp_buf(buftype, rsp_iov.iov_base);
1159 return rc;
1160 }
1161
1162 static int
1163 smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1164 const char *path, const char *ea_name, const void *ea_value,
1165 const __u16 ea_value_len, const struct nls_table *nls_codepage,
1166 struct cifs_sb_info *cifs_sb)
1167 {
1168 struct smb2_compound_vars *vars;
1169 struct cifs_ses *ses = tcon->ses;
1170 struct TCP_Server_Info *server;
1171 struct smb_rqst *rqst;
1172 struct kvec *rsp_iov;
1173 __le16 *utf16_path = NULL;
1174 int ea_name_len = strlen(ea_name);
1175 int flags = CIFS_CP_CREATE_CLOSE_OP;
1176 int len;
1177 int resp_buftype[3];
1178 struct cifs_open_parms oparms;
1179 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1180 struct cifs_fid fid;
1181 unsigned int size[1];
1182 void *data[1];
1183 struct smb2_file_full_ea_info *ea;
1184 struct smb2_query_info_rsp *rsp;
1185 int rc, used_len = 0;
1186 int retries = 0, cur_sleep = 1;
1187
1188 replay_again:
1189 /* reinitialize for possible replay */
1190 flags = CIFS_CP_CREATE_CLOSE_OP;
1191 oplock = SMB2_OPLOCK_LEVEL_NONE;
1192 server = cifs_pick_channel(ses);
1193
1194 if (smb3_encryption_required(tcon))
1195 flags |= CIFS_TRANSFORM_REQ;
1196
1197 if (ea_name_len > 255)
1198 return -EINVAL;
1199
1200 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1201 if (!utf16_path)
1202 return -ENOMEM;
1203
1204 ea = NULL;
1205 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1206 vars = kzalloc(sizeof(*vars), GFP_KERNEL);
1207 if (!vars) {
1208 rc = -ENOMEM;
1209 goto out_free_path;
1210 }
1211 rqst = vars->rqst;
1212 rsp_iov = vars->rsp_iov;
1213
1214 if (ses->server->ops->query_all_EAs) {
1215 if (!ea_value) {
1216 rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1217 ea_name, NULL, 0,
1218 cifs_sb);
1219 if (rc == -ENODATA)
1220 goto sea_exit;
1221 } else {
1222 /* If we are adding an attribute we should first check
1223 * if there will be enough space available to store
1224 * the new EA. If not we should not add it since we
1225 * would not be able to even read the EAs back.
1226 */
1227 rc = smb2_query_info_compound(xid, tcon, path,
1228 FILE_READ_EA,
1229 FILE_FULL_EA_INFORMATION,
1230 SMB2_O_INFO_FILE,
1231 CIFSMaxBufSize -
1232 MAX_SMB2_CREATE_RESPONSE_SIZE -
1233 MAX_SMB2_CLOSE_RESPONSE_SIZE,
1234 &rsp_iov[1], &resp_buftype[1], cifs_sb);
1235 if (rc == 0) {
1236 rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1237 used_len = le32_to_cpu(rsp->OutputBufferLength);
1238 }
1239 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1240 resp_buftype[1] = CIFS_NO_BUFFER;
1241 memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
1242 rc = 0;
1243
1244 /* Use a fudge factor of 256 bytes in case we collide
1245 * with a different set_EAs command.
1246 */
1247 if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1248 MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
1249 used_len + ea_name_len + ea_value_len + 1) {
1250 rc = -ENOSPC;
1251 goto sea_exit;
1252 }
1253 }
1254 }
1255
1256 /* Open */
1257 rqst[0].rq_iov = vars->open_iov;
1258 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1259
1260 oparms = (struct cifs_open_parms) {
1261 .tcon = tcon,
1262 .path = path,
1263 .desired_access = FILE_WRITE_EA,
1264 .disposition = FILE_OPEN,
1265 .create_options = cifs_create_options(cifs_sb, 0),
1266 .fid = &fid,
1267 .replay = !!(retries),
1268 };
1269
1270 rc = SMB2_open_init(tcon, server,
1271 &rqst[0], &oplock, &oparms, utf16_path);
1272 if (rc)
1273 goto sea_exit;
1274 smb2_set_next_command(tcon, &rqst[0]);
1275
1276
1277 /* Set Info */
1278 rqst[1].rq_iov = vars->si_iov;
1279 rqst[1].rq_nvec = 1;
1280
1281 len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
1282 ea = kzalloc(len, GFP_KERNEL);
1283 if (ea == NULL) {
1284 rc = -ENOMEM;
1285 goto sea_exit;
1286 }
1287
1288 ea->ea_name_length = ea_name_len;
1289 ea->ea_value_length = cpu_to_le16(ea_value_len);
1290 memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1291 memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1292
1293 size[0] = len;
1294 data[0] = ea;
1295
1296 rc = SMB2_set_info_init(tcon, server,
1297 &rqst[1], COMPOUND_FID,
1298 COMPOUND_FID, current->tgid,
1299 FILE_FULL_EA_INFORMATION,
1300 SMB2_O_INFO_FILE, 0, data, size);
1301 if (rc)
1302 goto sea_exit;
1303 smb2_set_next_command(tcon, &rqst[1]);
1304 smb2_set_related(&rqst[1]);
1305
1306 /* Close */
1307 rqst[2].rq_iov = &vars->close_iov;
1308 rqst[2].rq_nvec = 1;
1309 rc = SMB2_close_init(tcon, server,
1310 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
1311 if (rc)
1312 goto sea_exit;
1313 smb2_set_related(&rqst[2]);
1314
1315 if (retries) {
1316 smb2_set_replay(server, &rqst[0]);
1317 smb2_set_replay(server, &rqst[1]);
1318 smb2_set_replay(server, &rqst[2]);
1319 }
1320
1321 rc = compound_send_recv(xid, ses, server,
1322 flags, 3, rqst,
1323 resp_buftype, rsp_iov);
1324 /* no need to bump num_remote_opens because handle immediately closed */
1325
1326 sea_exit:
1327 kfree(ea);
1328 SMB2_open_free(&rqst[0]);
1329 SMB2_set_info_free(&rqst[1]);
1330 SMB2_close_free(&rqst[2]);
1331 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1332 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1333 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
1334 kfree(vars);
1335 out_free_path:
1336 kfree(utf16_path);
1337
1338 if (is_replayable_error(rc) &&
1339 smb2_should_replay(tcon, &retries, &cur_sleep))
1340 goto replay_again;
1341
1342 return rc;
1343 }
1344 #endif
1345
1346 static bool
1347 smb2_can_echo(struct TCP_Server_Info *server)
1348 {
1349 return server->echoes;
1350 }
1351
1352 static void
1353 smb2_clear_stats(struct cifs_tcon *tcon)
1354 {
1355 int i;
1356
1357 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1358 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1359 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1360 }
1361 }
1362
1363 static void
1364 smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
1365 {
1366 seq_puts(m, "\n\tShare Capabilities:");
1367 if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
1368 seq_puts(m, " DFS,");
1369 if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
1370 seq_puts(m, " CONTINUOUS AVAILABILITY,");
1371 if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
1372 seq_puts(m, " SCALEOUT,");
1373 if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
1374 seq_puts(m, " CLUSTER,");
1375 if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
1376 seq_puts(m, " ASYMMETRIC,");
1377 if (tcon->capabilities == 0)
1378 seq_puts(m, " None");
1379 if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
1380 seq_puts(m, " Aligned,");
1381 if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
1382 seq_puts(m, " Partition Aligned,");
1383 if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
1384 seq_puts(m, " SSD,");
1385 if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
1386 seq_puts(m, " TRIM-support,");
1387
1388 seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
1389 seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
1390 if (tcon->perf_sector_size)
1391 seq_printf(m, "\tOptimal sector size: 0x%x",
1392 tcon->perf_sector_size);
1393 seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
1394 }
1395
1396 static void
1397 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1398 {
1399 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1400 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
1401
1402 /*
1403 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1404 * totals (requests sent) since those SMBs are per-session not per tcon
1405 */
1406 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1407 (long long)(tcon->bytes_read),
1408 (long long)(tcon->bytes_written));
1409 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1410 atomic_read(&tcon->num_local_opens),
1411 atomic_read(&tcon->num_remote_opens));
1412 seq_printf(m, "\nTreeConnects: %d total %d failed",
1413 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1414 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
1415 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
1416 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1417 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
1418 seq_printf(m, "\nCreates: %d total %d failed",
1419 atomic_read(&sent[SMB2_CREATE_HE]),
1420 atomic_read(&failed[SMB2_CREATE_HE]));
1421 seq_printf(m, "\nCloses: %d total %d failed",
1422 atomic_read(&sent[SMB2_CLOSE_HE]),
1423 atomic_read(&failed[SMB2_CLOSE_HE]));
1424 seq_printf(m, "\nFlushes: %d total %d failed",
1425 atomic_read(&sent[SMB2_FLUSH_HE]),
1426 atomic_read(&failed[SMB2_FLUSH_HE]));
1427 seq_printf(m, "\nReads: %d total %d failed",
1428 atomic_read(&sent[SMB2_READ_HE]),
1429 atomic_read(&failed[SMB2_READ_HE]));
1430 seq_printf(m, "\nWrites: %d total %d failed",
1431 atomic_read(&sent[SMB2_WRITE_HE]),
1432 atomic_read(&failed[SMB2_WRITE_HE]));
1433 seq_printf(m, "\nLocks: %d total %d failed",
1434 atomic_read(&sent[SMB2_LOCK_HE]),
1435 atomic_read(&failed[SMB2_LOCK_HE]));
1436 seq_printf(m, "\nIOCTLs: %d total %d failed",
1437 atomic_read(&sent[SMB2_IOCTL_HE]),
1438 atomic_read(&failed[SMB2_IOCTL_HE]));
1439 seq_printf(m, "\nQueryDirectories: %d total %d failed",
1440 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1441 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
1442 seq_printf(m, "\nChangeNotifies: %d total %d failed",
1443 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1444 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
1445 seq_printf(m, "\nQueryInfos: %d total %d failed",
1446 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1447 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
1448 seq_printf(m, "\nSetInfos: %d total %d failed",
1449 atomic_read(&sent[SMB2_SET_INFO_HE]),
1450 atomic_read(&failed[SMB2_SET_INFO_HE]));
1451 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1452 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1453 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
1454 }
1455
1456 static void
1457 smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
1458 {
1459 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1460 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1461
1462 cfile->fid.persistent_fid = fid->persistent_fid;
1463 cfile->fid.volatile_fid = fid->volatile_fid;
1464 cfile->fid.access = fid->access;
1465 #ifdef CONFIG_CIFS_DEBUG2
1466 cfile->fid.mid = fid->mid;
1467 #endif /* CIFS_DEBUG2 */
1468 server->ops->set_oplock_level(cinode, oplock, fid->epoch,
1469 &fid->purge_cache);
1470 cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
1471 memcpy(cfile->fid.create_guid, fid->create_guid, 16);
1472 }
1473
1474 static int
1475 smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
1476 struct cifs_fid *fid)
1477 {
1478 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1479 }
1480
1481 static int
1482 smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
1483 struct cifsFileInfo *cfile)
1484 {
1485 struct smb2_file_network_open_info file_inf;
1486 struct inode *inode;
1487 int rc;
1488
1489 rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
1490 cfile->fid.volatile_fid, &file_inf);
1491 if (rc)
1492 return rc;
1493
1494 inode = d_inode(cfile->dentry);
1495
1496 spin_lock(&inode->i_lock);
1497 CIFS_I(inode)->time = jiffies;
1498
1499 /* Creation time should not need to be updated on close */
1500 if (file_inf.LastWriteTime)
1501 inode_set_mtime_to_ts(inode,
1502 cifs_NTtimeToUnix(file_inf.LastWriteTime));
1503 if (file_inf.ChangeTime)
1504 inode_set_ctime_to_ts(inode,
1505 cifs_NTtimeToUnix(file_inf.ChangeTime));
1506 if (file_inf.LastAccessTime)
1507 inode_set_atime_to_ts(inode,
1508 cifs_NTtimeToUnix(file_inf.LastAccessTime));
1509
1510 /*
1511 * i_blocks is not related to (i_size / i_blksize);
1512 * it is always counted in 512-byte (2**9) units,
1513 * hence the shift by 9 when calculating num blocks.
1514 */
1515 if (le64_to_cpu(file_inf.AllocationSize) > 4096)
1516 inode->i_blocks =
1517 (512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;
1518
1519 /* End of file and Attributes should not have to be updated on close */
1520 spin_unlock(&inode->i_lock);
1521 return rc;
1522 }
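/*
 * Quick check of the i_blocks math above (illustrative): an AllocationSize of
 * 1MiB yields (512 - 1 + 1048576) >> 9 = 2048 blocks of 512 bytes.
 */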
1523
1524 static int
1525 SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
1526 u64 persistent_fid, u64 volatile_fid,
1527 struct copychunk_ioctl *pcchunk)
1528 {
1529 int rc;
1530 unsigned int ret_data_len;
1531 struct resume_key_req *res_key;
1532
1533 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1534 FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */,
1535 CIFSMaxBufSize, (char **)&res_key, &ret_data_len);
1536
1537 if (rc == -EOPNOTSUPP) {
1538 pr_warn_once("Server share %s does not support copy range\n", tcon->tree_name);
1539 goto req_res_key_exit;
1540 } else if (rc) {
1541 cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
1542 goto req_res_key_exit;
1543 }
1544 if (ret_data_len < sizeof(struct resume_key_req)) {
1545 cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
1546 rc = -EINVAL;
1547 goto req_res_key_exit;
1548 }
1549 memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
1550
1551 req_res_key_exit:
1552 kfree(res_key);
1553 return rc;
1554 }
1555
1556 static int
1557 smb2_ioctl_query_info(const unsigned int xid,
1558 struct cifs_tcon *tcon,
1559 struct cifs_sb_info *cifs_sb,
1560 __le16 *path, int is_dir,
1561 unsigned long p)
1562 {
1563 struct smb2_compound_vars *vars;
1564 struct smb_rqst *rqst;
1565 struct kvec *rsp_iov;
1566 struct cifs_ses *ses = tcon->ses;
1567 struct TCP_Server_Info *server;
1568 char __user *arg = (char __user *)p;
1569 struct smb_query_info qi;
1570 struct smb_query_info __user *pqi;
1571 int rc = 0;
1572 int flags = CIFS_CP_CREATE_CLOSE_OP;
1573 struct smb2_query_info_rsp *qi_rsp = NULL;
1574 struct smb2_ioctl_rsp *io_rsp = NULL;
1575 void *buffer = NULL;
1576 int resp_buftype[3];
1577 struct cifs_open_parms oparms;
1578 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1579 struct cifs_fid fid;
1580 unsigned int size[2];
1581 void *data[2];
1582 int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
1583 void (*free_req1_func)(struct smb_rqst *r);
1584 int retries = 0, cur_sleep = 1;
1585
1586 replay_again:
1587 /* reinitialize for possible replay */
1588 flags = CIFS_CP_CREATE_CLOSE_OP;
1589 oplock = SMB2_OPLOCK_LEVEL_NONE;
1590 server = cifs_pick_channel(ses);
1591
1592 vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
1593 if (vars == NULL)
1594 return -ENOMEM;
1595 rqst = &vars->rqst[0];
1596 rsp_iov = &vars->rsp_iov[0];
1597
1598 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1599
1600 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) {
1601 rc = -EFAULT;
1602 goto free_vars;
1603 }
1604 if (qi.output_buffer_length > 1024) {
1605 rc = -EINVAL;
1606 goto free_vars;
1607 }
1608
1609 if (!ses || !server) {
1610 rc = -EIO;
1611 goto free_vars;
1612 }
1613
1614 if (smb3_encryption_required(tcon))
1615 flags |= CIFS_TRANSFORM_REQ;
1616
1617 if (qi.output_buffer_length) {
1618 buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length);
1619 if (IS_ERR(buffer)) {
1620 rc = PTR_ERR(buffer);
1621 goto free_vars;
1622 }
1623 }
1624
1625 /* Open */
1626 rqst[0].rq_iov = &vars->open_iov[0];
1627 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1628
1629 oparms = (struct cifs_open_parms) {
1630 .tcon = tcon,
1631 .disposition = FILE_OPEN,
1632 .create_options = cifs_create_options(cifs_sb, create_options),
1633 .fid = &fid,
1634 .replay = !!(retries),
1635 };
1636
1637 if (qi.flags & PASSTHRU_FSCTL) {
1638 switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1639 case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1640 oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
1641 break;
1642 case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
1643 oparms.desired_access = GENERIC_ALL;
1644 break;
1645 case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
1646 oparms.desired_access = GENERIC_READ;
1647 break;
1648 case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
1649 oparms.desired_access = GENERIC_WRITE;
1650 break;
1651 }
1652 } else if (qi.flags & PASSTHRU_SET_INFO) {
1653 oparms.desired_access = GENERIC_WRITE;
1654 } else {
1655 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
1656 }
1657
1658 rc = SMB2_open_init(tcon, server,
1659 &rqst[0], &oplock, &oparms, path);
1660 if (rc)
1661 goto free_output_buffer;
1662 smb2_set_next_command(tcon, &rqst[0]);
1663
1664 /* Query */
1665 if (qi.flags & PASSTHRU_FSCTL) {
1666 /* Can eventually relax perm check since server enforces too */
1667 if (!capable(CAP_SYS_ADMIN)) {
1668 rc = -EPERM;
1669 goto free_open_req;
1670 }
1671 rqst[1].rq_iov = &vars->io_iov[0];
1672 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1673
1674 rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
1675 qi.info_type, buffer, qi.output_buffer_length,
1676 CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1677 MAX_SMB2_CLOSE_RESPONSE_SIZE);
1678 free_req1_func = SMB2_ioctl_free;
1679 } else if (qi.flags == PASSTHRU_SET_INFO) {
1680 /* Can eventually relax perm check since server enforces too */
1681 if (!capable(CAP_SYS_ADMIN)) {
1682 rc = -EPERM;
1683 goto free_open_req;
1684 }
1685 if (qi.output_buffer_length < 8) {
1686 rc = -EINVAL;
1687 goto free_open_req;
1688 }
1689 rqst[1].rq_iov = vars->si_iov;
1690 rqst[1].rq_nvec = 1;
1691
1692 /* MS-FSCC 2.4.13 FileEndOfFileInformation */
1693 size[0] = 8;
1694 data[0] = buffer;
1695
1696 rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
1697 current->tgid, FILE_END_OF_FILE_INFORMATION,
1698 SMB2_O_INFO_FILE, 0, data, size);
1699 free_req1_func = SMB2_set_info_free;
1700 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
1701 rqst[1].rq_iov = &vars->qi_iov;
1702 rqst[1].rq_nvec = 1;
1703
1704 rc = SMB2_query_info_init(tcon, server,
1705 &rqst[1], COMPOUND_FID,
1706 COMPOUND_FID, qi.file_info_class,
1707 qi.info_type, qi.additional_information,
1708 qi.input_buffer_length,
1709 qi.output_buffer_length, buffer);
1710 free_req1_func = SMB2_query_info_free;
1711 } else { /* unknown flags */
1712 cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
1713 qi.flags);
1714 rc = -EINVAL;
1715 }
1716
1717 if (rc)
1718 goto free_open_req;
1719 smb2_set_next_command(tcon, &rqst[1]);
1720 smb2_set_related(&rqst[1]);
1721
1722 /* Close */
1723 rqst[2].rq_iov = &vars->close_iov;
1724 rqst[2].rq_nvec = 1;
1725
1726 rc = SMB2_close_init(tcon, server,
1727 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
1728 if (rc)
1729 goto free_req_1;
1730 smb2_set_related(&rqst[2]);
1731
1732 if (retries) {
1733 smb2_set_replay(server, &rqst[0]);
1734 smb2_set_replay(server, &rqst[1]);
1735 smb2_set_replay(server, &rqst[2]);
1736 }
1737
1738 rc = compound_send_recv(xid, ses, server,
1739 flags, 3, rqst,
1740 resp_buftype, rsp_iov);
1741 if (rc)
1742 goto out;
1743
1744 /* No need to bump num_remote_opens since handle immediately closed */
1745 if (qi.flags & PASSTHRU_FSCTL) {
1746 pqi = (struct smb_query_info __user *)arg;
1747 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1748 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1749 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
1750 if (qi.input_buffer_length > 0 &&
1751 le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
1752 > rsp_iov[1].iov_len) {
1753 rc = -EFAULT;
1754 goto out;
1755 }
1756
1757 if (copy_to_user(&pqi->input_buffer_length,
1758 &qi.input_buffer_length,
1759 sizeof(qi.input_buffer_length))) {
1760 rc = -EFAULT;
1761 goto out;
1762 }
1763
1764 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1765 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
1766 qi.input_buffer_length))
1767 rc = -EFAULT;
1768 } else {
1769 pqi = (struct smb_query_info __user *)arg;
1770 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1771 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1772 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
1773 if (copy_to_user(&pqi->input_buffer_length,
1774 &qi.input_buffer_length,
1775 sizeof(qi.input_buffer_length))) {
1776 rc = -EFAULT;
1777 goto out;
1778 }
1779
1780 if (copy_to_user(pqi + 1, qi_rsp->Buffer,
1781 qi.input_buffer_length))
1782 rc = -EFAULT;
1783 }
1784
1785 out:
1786 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1787 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1788 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
1789 SMB2_close_free(&rqst[2]);
1790 free_req_1:
1791 free_req1_func(&rqst[1]);
1792 free_open_req:
1793 SMB2_open_free(&rqst[0]);
1794 free_output_buffer:
1795 kfree(buffer);
1796 free_vars:
1797 kfree(vars);
1798
1799 if (is_replayable_error(rc) &&
1800 smb2_should_replay(tcon, &retries, &cur_sleep))
1801 goto replay_again;
1802
1803 return rc;
1804 }
1805
1806 /**
1807 * calc_chunk_count - calculates the number of chunks to be filled in the Chunks[]
1808 * array of struct copychunk_ioctl
1809 *
1810 * @tcon: destination file tcon
1811 * @bytes_left: how many bytes are left to copy
1812 *
1813 * Return: maximum number of chunks with which Chunks[] can be filled.
1814 */
1815 static inline u32
1816 calc_chunk_count(struct cifs_tcon *tcon, u64 bytes_left)
1817 {
1818 u32 max_chunks = READ_ONCE(tcon->max_chunks);
1819 u32 max_bytes_copy = READ_ONCE(tcon->max_bytes_copy);
1820 u32 max_bytes_chunk = READ_ONCE(tcon->max_bytes_chunk);
1821 u64 need;
1822 u32 allowed;
1823
1824 if (!max_bytes_chunk || !max_bytes_copy || !max_chunks)
1825 return 0;
1826
1827 /* chunks needed for the remaining bytes */
1828 need = DIV_ROUND_UP_ULL(bytes_left, max_bytes_chunk);
1829 /* chunks allowed per cc request */
1830 allowed = DIV_ROUND_UP(max_bytes_copy, max_bytes_chunk);
1831
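/*
 * Worked example (illustrative limits only): with max_chunks = 256,
 * max_bytes_chunk = 1 MiB and max_bytes_copy = 16 MiB, copying 100 MiB
 * needs 100 chunks but only 16 fit in one request (16 MiB / 1 MiB), so
 * 16 is returned and the caller loops over multiple requests.
 */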
1832 return (u32)umin(need, umin(max_chunks, allowed));
1833 }
1834
1835 /**
1836 * smb2_copychunk_range - server-side copy of data range
1837 *
1838 * @xid: transaction id
1839 * @src_file: source file
1840 * @dst_file: destination file
1841 * @src_off: source file byte offset
1842 * @len: number of bytes to copy
1843 * @dst_off: destination file byte offset
1844 *
1845 * Obtains a resume key for @src_file and issues FSCTL_SRV_COPYCHUNK_WRITE
1846 * IOCTLs, splitting the request into chunks limited by tcon->max_*.
1847 *
1848 * Return: @len on success; negative errno on failure.
1849 */
1850 static ssize_t
1851 smb2_copychunk_range(const unsigned int xid,
1852 struct cifsFileInfo *src_file,
1853 struct cifsFileInfo *dst_file,
1854 u64 src_off,
1855 u64 len,
1856 u64 dst_off)
1857 {
1858 int rc = 0;
1859 unsigned int ret_data_len = 0;
1860 struct copychunk_ioctl *cc_req = NULL;
1861 struct copychunk_ioctl_rsp *cc_rsp = NULL;
1862 struct cifs_tcon *tcon;
1863 struct copychunk *chunk;
1864 u32 chunks, chunk_count, chunk_bytes;
1865 u32 copy_bytes, copy_bytes_left;
1866 u32 chunks_written, bytes_written;
1867 u64 total_bytes_left = len;
1868 u64 src_off_prev, dst_off_prev;
1869 u32 retries = 0;
1870
1871 tcon = tlink_tcon(dst_file->tlink);
1872
1873 trace_smb3_copychunk_enter(xid, src_file->fid.volatile_fid,
1874 dst_file->fid.volatile_fid, tcon->tid,
1875 tcon->ses->Suid, src_off, dst_off, len);
1876
1877 retry:
1878 chunk_count = calc_chunk_count(tcon, total_bytes_left);
1879 if (!chunk_count) {
1880 rc = -EOPNOTSUPP;
1881 goto out;
1882 }
1883
1884 cc_req = kzalloc(struct_size(cc_req, Chunks, chunk_count), GFP_KERNEL);
1885 if (!cc_req) {
1886 rc = -ENOMEM;
1887 goto out;
1888 }
1889
1890 /* Request a key from the server to identify the source of the copy */
1891 rc = SMB2_request_res_key(xid,
1892 tlink_tcon(src_file->tlink),
1893 src_file->fid.persistent_fid,
1894 src_file->fid.volatile_fid,
1895 cc_req);
1896
1897 /* Note: request_res_key sets res_key null only if rc != 0 */
1898 if (rc)
1899 goto out;
1900
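/*
 * Each pass below builds one FSCTL_SRV_COPYCHUNK_WRITE request covering
 * at most max_bytes_copy bytes split into max_bytes_chunk sized pieces.
 * On a short write the offsets are rewound by the missing delta; if the
 * server rejects the sizes, it advertises smaller limits in the response
 * and the request is rebuilt (at most twice) with those limits.
 */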
1901 while (total_bytes_left > 0) {
1902
1903 /* Store previous offsets to allow rewind */
1904 src_off_prev = src_off;
1905 dst_off_prev = dst_off;
1906
1907 chunks = 0;
1908 copy_bytes = 0;
1909 copy_bytes_left = umin(total_bytes_left, tcon->max_bytes_copy);
1910 while (copy_bytes_left > 0 && chunks < chunk_count) {
1911 chunk = &cc_req->Chunks[chunks++];
1912
1913 chunk->SourceOffset = cpu_to_le64(src_off);
1914 chunk->TargetOffset = cpu_to_le64(dst_off);
1915
1916 chunk_bytes = umin(copy_bytes_left, tcon->max_bytes_chunk);
1917
1918 chunk->Length = cpu_to_le32(chunk_bytes);
1919 /* Buffer is zeroed, no need to set chunk->Reserved = 0 */
1920
1921 src_off += chunk_bytes;
1922 dst_off += chunk_bytes;
1923
1924 copy_bytes_left -= chunk_bytes;
1925 copy_bytes += chunk_bytes;
1926 }
1927
1928 cc_req->ChunkCount = cpu_to_le32(chunks);
1929 /* Buffer is zeroed, no need to set cc_req->Reserved = 0 */
1930
1931 /* Request server copy to target from src identified by key */
1932 kfree(cc_rsp);
1933 cc_rsp = NULL;
1934 rc = SMB2_ioctl(xid, tcon, dst_file->fid.persistent_fid,
1935 dst_file->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
1936 (char *)cc_req, struct_size(cc_req, Chunks, chunks),
1937 CIFSMaxBufSize, (char **)&cc_rsp, &ret_data_len);
1938
1939 if (rc && rc != -EINVAL)
1940 goto out;
1941
1942 if (unlikely(ret_data_len != sizeof(*cc_rsp))) {
1943 cifs_tcon_dbg(VFS, "Copychunk invalid response: size %u/%zu\n",
1944 ret_data_len, sizeof(*cc_rsp));
1945 rc = -EIO;
1946 goto out;
1947 }
1948
1949 bytes_written = le32_to_cpu(cc_rsp->TotalBytesWritten);
1950 chunks_written = le32_to_cpu(cc_rsp->ChunksWritten);
1951 chunk_bytes = le32_to_cpu(cc_rsp->ChunkBytesWritten);
1952
1953 if (rc == 0) {
1954 /* Check if server claimed to write more than we asked */
1955 if (unlikely(!bytes_written || bytes_written > copy_bytes ||
1956 !chunks_written || chunks_written > chunks)) {
1957 cifs_tcon_dbg(VFS, "Copychunk invalid response: bytes written %u/%u, chunks written %u/%u\n",
1958 bytes_written, copy_bytes, chunks_written, chunks);
1959 rc = -EIO;
1960 goto out;
1961 }
1962
1963 /* Partial write: rewind */
1964 if (bytes_written < copy_bytes) {
1965 u32 delta = copy_bytes - bytes_written;
1966
1967 src_off -= delta;
1968 dst_off -= delta;
1969 }
1970
1971 total_bytes_left -= bytes_written;
1972 continue;
1973 }
1974
1975 /*
1976 * Check if server is not asking us to reduce size.
1977 *
1978 * Note: As per MS-SMB2 2.2.32.1, the values returned
1979 * in cc_rsp are not strictly lower than what existed
1980 * before.
1981 */
1982 if (bytes_written < tcon->max_bytes_copy) {
1983 cifs_tcon_dbg(FYI, "Copychunk MaxBytesCopy updated: %u -> %u\n",
1984 tcon->max_bytes_copy, bytes_written);
1985 tcon->max_bytes_copy = bytes_written;
1986 }
1987
1988 if (chunks_written < tcon->max_chunks) {
1989 cifs_tcon_dbg(FYI, "Copychunk MaxChunks updated: %u -> %u\n",
1990 tcon->max_chunks, chunks_written);
1991 tcon->max_chunks = chunks_written;
1992 }
1993
1994 if (chunk_bytes < tcon->max_bytes_chunk) {
1995 cifs_tcon_dbg(FYI, "Copychunk MaxBytesChunk updated: %u -> %u\n",
1996 tcon->max_bytes_chunk, chunk_bytes);
1997 tcon->max_bytes_chunk = chunk_bytes;
1998 }
1999
2000 /* reset to last offsets */
2001 if (retries++ < 2) {
2002 src_off = src_off_prev;
2003 dst_off = dst_off_prev;
2004 kfree(cc_req);
2005 cc_req = NULL;
2006 goto retry;
2007 }
2008
2009 break;
2010 }
2011
2012 out:
2013 kfree(cc_req);
2014 kfree(cc_rsp);
2015 if (rc) {
2016 trace_smb3_copychunk_err(xid, src_file->fid.volatile_fid,
2017 dst_file->fid.volatile_fid, tcon->tid,
2018 tcon->ses->Suid, src_off, dst_off, len, rc);
2019 return rc;
2020 } else {
2021 trace_smb3_copychunk_done(xid, src_file->fid.volatile_fid,
2022 dst_file->fid.volatile_fid, tcon->tid,
2023 tcon->ses->Suid, src_off, dst_off, len);
2024 return len;
2025 }
2026 }
2027
2028 static int
2029 smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
2030 struct cifs_fid *fid)
2031 {
2032 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2033 }
2034
2035 static unsigned int
2036 smb2_read_data_offset(char *buf)
2037 {
2038 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
2039
2040 return rsp->DataOffset;
2041 }
2042
2043 static unsigned int
2044 smb2_read_data_length(char *buf, bool in_remaining)
2045 {
2046 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
2047
2048 if (in_remaining)
2049 return le32_to_cpu(rsp->DataRemaining);
2050
2051 return le32_to_cpu(rsp->DataLength);
2052 }
2053
2054
2055 static int
2056 smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
2057 struct cifs_io_parms *parms, unsigned int *bytes_read,
2058 char **buf, int *buf_type)
2059 {
2060 parms->persistent_fid = pfid->persistent_fid;
2061 parms->volatile_fid = pfid->volatile_fid;
2062 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
2063 }
2064
2065 static int
2066 smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
2067 struct cifs_io_parms *parms, unsigned int *written,
2068 struct kvec *iov, unsigned long nr_segs)
2069 {
2070
2071 parms->persistent_fid = pfid->persistent_fid;
2072 parms->volatile_fid = pfid->volatile_fid;
2073 return SMB2_write(xid, parms, written, iov, nr_segs);
2074 }
2075
2076 /* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
2077 static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
2078 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
2079 {
2080 struct cifsInodeInfo *cifsi;
2081 int rc;
2082
2083 cifsi = CIFS_I(inode);
2084
2085 /* if file already sparse don't bother setting sparse again */
2086 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
2087 return true; /* already sparse */
2088
2089 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
2090 return true; /* already not sparse */
2091
2092 /*
2093 * We can't check for sparse support on the share the usual way, via
2094 * the FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share,
2095 * since Samba does not set that flag on the share yet still supports
2096 * the set-sparse FSCTL and reports the sparse attribute correctly in
2097 * the file attributes. If setting sparse fails, mark the server as
2098 * not supporting sparse files for this share, to avoid repeatedly
2099 * sending the unsupported fsctl to the server when the file is
2100 * repeatedly extended.
2101 */
2102 if (tcon->broken_sparse_sup)
2103 return false;
2104
2105 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2106 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
2107 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
2108 if (rc) {
2109 tcon->broken_sparse_sup = true;
2110 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
2111 return false;
2112 }
2113
2114 if (setsparse)
2115 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
2116 else
2117 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
2118
2119 return true;
2120 }
2121
2122 static int
2123 smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
2124 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
2125 {
2126 struct inode *inode;
2127
2128 /*
2129 * If extending the file by more than one page, make it sparse. Many Linux
2130 * filesystems make files sparse by default when extending via ftruncate.
2131 */
2132 inode = d_inode(cfile->dentry);
2133
2134 if (!set_alloc && (size > inode->i_size + 8192)) {
2135 __u8 set_sparse = 1;
2136
2137 /* whether set sparse succeeds or not, extend the file */
2138 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
2139 }
2140
2141 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
2142 cfile->fid.volatile_fid, cfile->pid, size);
2143 }
2144
2145 static int
2146 smb2_duplicate_extents(const unsigned int xid,
2147 struct cifsFileInfo *srcfile,
2148 struct cifsFileInfo *trgtfile, u64 src_off,
2149 u64 len, u64 dest_off)
2150 {
2151 int rc;
2152 unsigned int ret_data_len;
2153 struct inode *inode;
2154 struct duplicate_extents_to_file dup_ext_buf;
2155 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
2156
2157 /* server filesystems advertise duplicate extent support with this flag */
2158 if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
2159 FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
2160 return -EOPNOTSUPP;
2161
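/*
 * Build the FSCTL_DUPLICATE_EXTENTS_TO_FILE payload: the source is named
 * by its open handles and the server clones @len bytes from @src_off to
 * @dest_off in the target without sending the data over the wire
 * (block-refcounting filesystems such as ReFS share the extents).
 */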
2162 dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
2163 dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
2164 dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
2165 dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
2166 dup_ext_buf.ByteCount = cpu_to_le64(len);
2167 cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
2168 src_off, dest_off, len);
2169 trace_smb3_clone_enter(xid, srcfile->fid.volatile_fid,
2170 trgtfile->fid.volatile_fid, tcon->tid,
2171 tcon->ses->Suid, src_off, dest_off, len);
2172 inode = d_inode(trgtfile->dentry);
2173 if (inode->i_size < dest_off + len) {
2174 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
2175 if (rc)
2176 goto duplicate_extents_out;
2177
2178 /*
2179 * Although we could also set a plausible allocation size (i_blocks)
2180 * here in addition to the file size, with reflink the target file
2181 * is likely sparse. Its allocation size will be queried on the next
2182 * revalidate, but it is important to make sure the file's cached
2183 * size is updated immediately.
2184 */
2185 netfs_resize_file(netfs_inode(inode), dest_off + len, true);
2186 cifs_setsize(inode, dest_off + len);
2187 }
2188 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
2189 trgtfile->fid.volatile_fid,
2190 FSCTL_DUPLICATE_EXTENTS_TO_FILE,
2191 (char *)&dup_ext_buf,
2192 sizeof(struct duplicate_extents_to_file),
2193 CIFSMaxBufSize, NULL,
2194 &ret_data_len);
2195
2196 if (ret_data_len > 0)
2197 cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
2198
2199 duplicate_extents_out:
2200 if (rc)
2201 trace_smb3_clone_err(xid, srcfile->fid.volatile_fid,
2202 trgtfile->fid.volatile_fid,
2203 tcon->tid, tcon->ses->Suid, src_off,
2204 dest_off, len, rc);
2205 else
2206 trace_smb3_clone_done(xid, srcfile->fid.volatile_fid,
2207 trgtfile->fid.volatile_fid, tcon->tid,
2208 tcon->ses->Suid, src_off, dest_off, len);
2209 return rc;
2210 }
2211
2212 static int
2213 smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
2214 struct cifsFileInfo *cfile)
2215 {
2216 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
2217 cfile->fid.volatile_fid);
2218 }
2219
2220 static int
2221 smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
2222 struct cifsFileInfo *cfile)
2223 {
2224 struct fsctl_set_integrity_information_req integr_info;
2225 unsigned int ret_data_len;
2226
2227 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
2228 integr_info.Flags = 0;
2229 integr_info.Reserved = 0;
2230
2231 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2232 cfile->fid.volatile_fid,
2233 FSCTL_SET_INTEGRITY_INFORMATION,
2234 (char *)&integr_info,
2235 sizeof(struct fsctl_set_integrity_information_req),
2236 CIFSMaxBufSize, NULL,
2237 &ret_data_len);
2238
2239 }
2240
2241 /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
2242 #define GMT_TOKEN_SIZE 50
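/*
 * For example, the (illustrative) token "@GMT-2024.01.15-10.30.00" is
 * 24 characters, i.e. 48 bytes in UTF-16, plus a two byte null
 * terminator, which gives the 50 bytes above.
 */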
2243
2244 #define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
2245
2246 /*
2247 * Input buffer contains (empty) struct smb_snapshot array with size filled in
2248 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
2249 */
2250 static int
2251 smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
2252 struct cifsFileInfo *cfile, void __user *ioc_buf)
2253 {
2254 char *retbuf = NULL;
2255 unsigned int ret_data_len = 0;
2256 int rc;
2257 u32 max_response_size;
2258 struct smb_snapshot_array snapshot_in;
2259
2260 /*
2261 * On the first query to enumerate the list of snapshots available
2262 * for this volume, the buffer begins with 0 (the number of snapshots
2263 * that can be returned is zero, since at that point we do not know
2264 * how big the buffer needs to be). On the second query,
2265 * ret_data_len is set to the number of snapshots, so we know to use
2266 * a larger maximum response size (see below).
2267 */
2268 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
2269 return -EFAULT;
2270
2271 /*
2272 * Note that for snapshot queries, servers like Azure expect the first
2273 * query to be of minimal size (it is just used to get the number/size
2274 * of previous versions), so the response size must be specified as
2275 * EXACTLY sizeof(struct snapshot_array), which is 16 when rounded up
2276 * to a multiple of eight bytes.
2277 */
2278 if (ret_data_len == 0)
2279 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
2280 else
2281 max_response_size = CIFSMaxBufSize;
2282
2283 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2284 cfile->fid.volatile_fid,
2285 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
2286 NULL, 0 /* no input data */, max_response_size,
2287 (char **)&retbuf,
2288 &ret_data_len);
2289 cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
2290 rc, ret_data_len);
2291 if (rc)
2292 return rc;
2293
2294 if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
2295 /* Fixup buffer */
2296 if (copy_from_user(&snapshot_in, ioc_buf,
2297 sizeof(struct smb_snapshot_array))) {
2298 rc = -EFAULT;
2299 kfree(retbuf);
2300 return rc;
2301 }
2302
2303 /*
2304 * Check for the minimum size, i.e. not large enough to fit even one
2305 * GMT token (snapshot). On the first ioctl some users may pass in a
2306 * smaller size (or zero) simply to get the size of the array, so the
2307 * user space caller can allocate sufficient memory and retry the
2308 * ioctl with an array size large enough to hold all of the snapshot
2309 * GMT tokens on the second try.
2310 */
2311 if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
2312 ret_data_len = sizeof(struct smb_snapshot_array);
2313
2314 /*
2315 * We return struct SRV_SNAPSHOT_ARRAY, followed by
2316 * the snapshot array (of 50 byte GMT tokens) each
2317 * representing an available previous version of the data
2318 */
2319 if (ret_data_len > (snapshot_in.snapshot_array_size +
2320 sizeof(struct smb_snapshot_array)))
2321 ret_data_len = snapshot_in.snapshot_array_size +
2322 sizeof(struct smb_snapshot_array);
2323
2324 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
2325 rc = -EFAULT;
2326 }
2327
2328 kfree(retbuf);
2329 return rc;
2330 }
2331
2332
2333
2334 static int
2335 smb3_notify(const unsigned int xid, struct file *pfile,
2336 void __user *ioc_buf, bool return_changes)
2337 {
2338 struct smb3_notify_info notify;
2339 struct smb3_notify_info __user *pnotify_buf;
2340 struct dentry *dentry = pfile->f_path.dentry;
2341 struct inode *inode = file_inode(pfile);
2342 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2343 struct cifs_open_parms oparms;
2344 struct cifs_fid fid;
2345 struct cifs_tcon *tcon;
2346 const unsigned char *path;
2347 char *returned_ioctl_info = NULL;
2348 void *page = alloc_dentry_path();
2349 __le16 *utf16_path = NULL;
2350 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2351 int rc = 0;
2352 __u32 ret_len = 0;
2353
2354 path = build_path_from_dentry(dentry, page);
2355 if (IS_ERR(path)) {
2356 rc = PTR_ERR(path);
2357 goto notify_exit;
2358 }
2359
2360 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2361 if (utf16_path == NULL) {
2362 rc = -ENOMEM;
2363 goto notify_exit;
2364 }
2365
2366 if (return_changes) {
2367 if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify_info))) {
2368 rc = -EFAULT;
2369 goto notify_exit;
2370 }
2371 } else {
2372 if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
2373 rc = -EFAULT;
2374 goto notify_exit;
2375 }
2376 notify.data_len = 0;
2377 }
2378
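/*
 * Open the target by path, issue the change notify with the caller's
 * completion filter, then close the handle. When return_changes is set,
 * the returned change information is copied back into the caller's
 * smb3_notify_info buffer, truncated to data_len.
 */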
2379 tcon = cifs_sb_master_tcon(cifs_sb);
2380 oparms = (struct cifs_open_parms) {
2381 .tcon = tcon,
2382 .path = path,
2383 .desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
2384 .disposition = FILE_OPEN,
2385 .create_options = cifs_create_options(cifs_sb, 0),
2386 .fid = &fid,
2387 };
2388
2389 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
2390 NULL);
2391 if (rc)
2392 goto notify_exit;
2393
2394 rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
2395 notify.watch_tree, notify.completion_filter,
2396 notify.data_len, &returned_ioctl_info, &ret_len);
2397
2398 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2399
2400 cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);
2401 if (return_changes && (ret_len > 0) && (notify.data_len > 0)) {
2402 if (ret_len > notify.data_len)
2403 ret_len = notify.data_len;
2404 pnotify_buf = (struct smb3_notify_info __user *)ioc_buf;
2405 if (copy_to_user(pnotify_buf->notify_data, returned_ioctl_info, ret_len))
2406 rc = -EFAULT;
2407 else if (copy_to_user(&pnotify_buf->data_len, &ret_len, sizeof(ret_len)))
2408 rc = -EFAULT;
2409 }
2410 kfree(returned_ioctl_info);
2411 notify_exit:
2412 free_dentry_path(page);
2413 kfree(utf16_path);
2414 return rc;
2415 }
2416
2417 static int
2418 smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2419 const char *path, struct cifs_sb_info *cifs_sb,
2420 struct cifs_fid *fid, __u16 search_flags,
2421 struct cifs_search_info *srch_inf)
2422 {
2423 __le16 *utf16_path;
2424 struct smb_rqst rqst[2];
2425 struct kvec rsp_iov[2];
2426 int resp_buftype[2];
2427 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
2428 struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
2429 int rc, flags = 0;
2430 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2431 struct cifs_open_parms oparms;
2432 struct smb2_query_directory_rsp *qd_rsp = NULL;
2433 struct smb2_create_rsp *op_rsp = NULL;
2434 struct TCP_Server_Info *server;
2435 int retries = 0, cur_sleep = 1;
2436
2437 replay_again:
2438 /* reinitialize for possible replay */
2439 flags = 0;
2440 oplock = SMB2_OPLOCK_LEVEL_NONE;
2441 server = cifs_pick_channel(tcon->ses);
2442
2443 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2444 if (!utf16_path)
2445 return -ENOMEM;
2446
2447 if (smb3_encryption_required(tcon))
2448 flags |= CIFS_TRANSFORM_REQ;
2449
2450 memset(rqst, 0, sizeof(rqst));
2451 resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
2452 memset(rsp_iov, 0, sizeof(rsp_iov));
2453
2454 /* Open */
2455 memset(&open_iov, 0, sizeof(open_iov));
2456 rqst[0].rq_iov = open_iov;
2457 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2458
2459 oparms = (struct cifs_open_parms) {
2460 .tcon = tcon,
2461 .path = path,
2462 .desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
2463 .disposition = FILE_OPEN,
2464 .create_options = cifs_create_options(cifs_sb, 0),
2465 .fid = fid,
2466 .replay = !!(retries),
2467 };
2468
2469 rc = SMB2_open_init(tcon, server,
2470 &rqst[0], &oplock, &oparms, utf16_path);
2471 if (rc)
2472 goto qdf_free;
2473 smb2_set_next_command(tcon, &rqst[0]);
2474
2475 /* Query directory */
2476 srch_inf->entries_in_buffer = 0;
2477 srch_inf->index_of_last_entry = 2;
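/*
 * Note: starting index_of_last_entry at 2 likely leaves room for the
 * "." and ".." entries that readdir fills in locally before any entries
 * returned by the server are added.
 */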
2478
2479 memset(&qd_iov, 0, sizeof(qd_iov));
2480 rqst[1].rq_iov = qd_iov;
2481 rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
2482
2483 rc = SMB2_query_directory_init(xid, tcon, server,
2484 &rqst[1],
2485 COMPOUND_FID, COMPOUND_FID,
2486 0, srch_inf->info_level);
2487 if (rc)
2488 goto qdf_free;
2489
2490 smb2_set_related(&rqst[1]);
2491
2492 if (retries) {
2493 smb2_set_replay(server, &rqst[0]);
2494 smb2_set_replay(server, &rqst[1]);
2495 }
2496
2497 rc = compound_send_recv(xid, tcon->ses, server,
2498 flags, 2, rqst,
2499 resp_buftype, rsp_iov);
2500
2501 /* If the open failed there is nothing to do */
2502 op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
2503 if (op_rsp == NULL || op_rsp->hdr.Status != STATUS_SUCCESS) {
2504 cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
2505 goto qdf_free;
2506 }
2507 fid->persistent_fid = op_rsp->PersistentFileId;
2508 fid->volatile_fid = op_rsp->VolatileFileId;
2509
2510 /* Anything other than ENODATA means a genuine error */
2511 if (rc && rc != -ENODATA) {
2512 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2513 cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
2514 trace_smb3_query_dir_err(xid, fid->persistent_fid,
2515 tcon->tid, tcon->ses->Suid, 0, 0, rc);
2516 goto qdf_free;
2517 }
2518
2519 atomic_inc(&tcon->num_remote_opens);
2520
2521 qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
2522 if (qd_rsp->hdr.Status == STATUS_NO_MORE_FILES) {
2523 trace_smb3_query_dir_done(xid, fid->persistent_fid,
2524 tcon->tid, tcon->ses->Suid, 0, 0);
2525 srch_inf->endOfSearch = true;
2526 rc = 0;
2527 goto qdf_free;
2528 }
2529
2530 rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
2531 srch_inf);
2532 if (rc) {
2533 trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
2534 tcon->ses->Suid, 0, 0, rc);
2535 goto qdf_free;
2536 }
2537 resp_buftype[1] = CIFS_NO_BUFFER;
2538
2539 trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
2540 tcon->ses->Suid, 0, srch_inf->entries_in_buffer);
2541
2542 qdf_free:
2543 kfree(utf16_path);
2544 SMB2_open_free(&rqst[0]);
2545 SMB2_query_directory_free(&rqst[1]);
2546 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2547 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
2548
2549 if (is_replayable_error(rc) &&
2550 smb2_should_replay(tcon, &retries, &cur_sleep))
2551 goto replay_again;
2552
2553 return rc;
2554 }
2555
2556 static int
2557 smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2558 struct cifs_fid *fid, __u16 search_flags,
2559 struct cifs_search_info *srch_inf)
2560 {
2561 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2562 fid->volatile_fid, 0, srch_inf);
2563 }
2564
2565 static int
2566 smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2567 struct cifs_fid *fid)
2568 {
2569 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2570 }
2571
2572 /*
2573 * If we negotiated the SMB2 protocol and got STATUS_PENDING, update
2574 * the number of credits and return true. Otherwise return false.
2575 */
2576 static bool
2577 smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
2578 {
2579 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2580 int scredits, in_flight;
2581
2582 if (shdr->Status != STATUS_PENDING)
2583 return false;
2584
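/*
 * An interim STATUS_PENDING response can still grant credits; add them
 * back and wake up waiters so other requests are not starved while the
 * final response is outstanding.
 */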
2585 if (shdr->CreditRequest) {
2586 spin_lock(&server->req_lock);
2587 server->credits += le16_to_cpu(shdr->CreditRequest);
2588 scredits = server->credits;
2589 in_flight = server->in_flight;
2590 spin_unlock(&server->req_lock);
2591 wake_up(&server->request_q);
2592
2593 trace_smb3_pend_credits(server->current_mid,
2594 server->conn_id, server->hostname, scredits,
2595 le16_to_cpu(shdr->CreditRequest), in_flight);
2596 cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
2597 __func__, le16_to_cpu(shdr->CreditRequest), scredits);
2598 }
2599
2600 return true;
2601 }
2602
2603 static bool
2604 smb2_is_session_expired(char *buf)
2605 {
2606 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2607
2608 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2609 shdr->Status != STATUS_USER_SESSION_DELETED)
2610 return false;
2611
2612 trace_smb3_ses_expired(le32_to_cpu(shdr->Id.SyncId.TreeId),
2613 le64_to_cpu(shdr->SessionId),
2614 le16_to_cpu(shdr->Command),
2615 le64_to_cpu(shdr->MessageId));
2616 cifs_dbg(FYI, "Session expired or deleted\n");
2617
2618 return true;
2619 }
2620
2621 static bool
2622 smb2_is_status_io_timeout(char *buf)
2623 {
2624 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2625
2626 if (shdr->Status == STATUS_IO_TIMEOUT)
2627 return true;
2628 else
2629 return false;
2630 }
2631
2632 static bool
2633 smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
2634 {
2635 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2636 struct TCP_Server_Info *pserver;
2637 struct cifs_ses *ses;
2638 struct cifs_tcon *tcon;
2639
2640 if (shdr->Status != STATUS_NETWORK_NAME_DELETED)
2641 return false;
2642
2643 /* If server is a channel, select the primary channel */
2644 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
2645
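/*
 * Walk every session on the primary channel and flag the tcon whose
 * TreeId matches this response for reconnect, since the share was
 * removed (or renamed) on the server.
 */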
2646 spin_lock(&cifs_tcp_ses_lock);
2647 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
2648 if (cifs_ses_exiting(ses))
2649 continue;
2650 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
2651 if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
2652 spin_lock(&tcon->tc_lock);
2653 tcon->need_reconnect = true;
2654 spin_unlock(&tcon->tc_lock);
2655 spin_unlock(&cifs_tcp_ses_lock);
2656 pr_warn_once("Server share %s deleted.\n",
2657 tcon->tree_name);
2658 return true;
2659 }
2660 }
2661 }
2662 spin_unlock(&cifs_tcp_ses_lock);
2663
2664 return false;
2665 }
2666
2667 static int
2668 smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
2669 __u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
2670 {
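/*
 * Acknowledge the break with whichever mechanism is in use: a lease
 * break ack when the server granted leases, otherwise an oplock break
 * ack, keeping level II if reads are still cached.
 */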
2671 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2672 return SMB2_lease_break(0, tcon, cinode->lease_key,
2673 smb2_get_lease_state(cinode));
2674
2675 return SMB2_oplock_break(0, tcon, persistent_fid, volatile_fid,
2676 CIFS_CACHE_READ(cinode) ? 1 : 0);
2677 }
2678
2679 void
2680 smb2_set_replay(struct TCP_Server_Info *server, struct smb_rqst *rqst)
2681 {
2682 struct smb2_hdr *shdr;
2683
2684 if (server->dialect < SMB30_PROT_ID)
2685 return;
2686
2687 shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
2688 if (shdr == NULL) {
2689 cifs_dbg(FYI, "shdr NULL in smb2_set_replay\n");
2690 return;
2691 }
2692 shdr->Flags |= SMB2_FLAGS_REPLAY_OPERATION;
2693 }
2694
2695 void
2696 smb2_set_related(struct smb_rqst *rqst)
2697 {
2698 struct smb2_hdr *shdr;
2699
2700 shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
2701 if (shdr == NULL) {
2702 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2703 return;
2704 }
2705 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2706 }
2707
2708 char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2709
2710 void
2711 smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
2712 {
2713 struct smb2_hdr *shdr;
2714 struct cifs_ses *ses = tcon->ses;
2715 struct TCP_Server_Info *server = ses->server;
2716 unsigned long len = smb_rqst_len(server, rqst);
2717 int num_padding;
2718
2719 shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
2720 if (shdr == NULL) {
2721 cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
2722 return;
2723 }
2724
2725 /* SMB headers in a compound are 8 byte aligned. */
2726 if (IS_ALIGNED(len, 8))
2727 goto out;
2728
2729 num_padding = 8 - (len & 7);
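/*
 * Worked example: a 73 byte request has len & 7 == 1, so 7 bytes of
 * padding are added and the next SMB2 header starts at offset 80.
 */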
2730 if (smb3_encryption_required(tcon)) {
2731 int i;
2732
2733 /*
2734 * Flatten request into a single buffer with required padding as
2735 * the encryption layer can't handle the padding iovs.
2736 */
2737 for (i = 1; i < rqst->rq_nvec; i++) {
2738 memcpy(rqst->rq_iov[0].iov_base +
2739 rqst->rq_iov[0].iov_len,
2740 rqst->rq_iov[i].iov_base,
2741 rqst->rq_iov[i].iov_len);
2742 rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
2743 }
2744 memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
2745 0, num_padding);
2746 rqst->rq_iov[0].iov_len += num_padding;
2747 rqst->rq_nvec = 1;
2748 } else {
2749 rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
2750 rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
2751 rqst->rq_nvec++;
2752 }
2753 len += num_padding;
2754 out:
2755 shdr->NextCommand = cpu_to_le32(len);
2756 }
2757
2758 /*
2759 * Helper function for exponential backoff; checks whether the request is replayable.
2760 */
2761 bool smb2_should_replay(struct cifs_tcon *tcon,
2762 int *pretries,
2763 int *pcur_sleep)
2764 {
2765 if (!pretries || !pcur_sleep)
2766 return false;
2767
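/*
 * Exponential backoff: callers start with *pcur_sleep = 1 ms, and the
 * delay doubles on each replay (1, 2, 4, 8, ... ms) up to the
 * CIFS_MAX_SLEEP cap.
 */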
2768 if (tcon->retry || (*pretries)++ < tcon->ses->server->retrans) {
2769 msleep(*pcur_sleep);
2770 (*pcur_sleep) = ((*pcur_sleep) << 1);
2771 if ((*pcur_sleep) > CIFS_MAX_SLEEP)
2772 (*pcur_sleep) = CIFS_MAX_SLEEP;
2773 return true;
2774 }
2775
2776 return false;
2777 }
2778
2779 /*
2780 * Passes the query info response back to the caller on success.
2781 * The caller needs to free it with free_rsp_buf().
2782 */
2783 int
2784 smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
2785 const char *path, u32 desired_access,
2786 u32 class, u32 type, u32 output_len,
2787 struct kvec *rsp, int *buftype,
2788 struct cifs_sb_info *cifs_sb)
2789 {
2790 struct smb2_compound_vars *vars;
2791 struct cifs_ses *ses = tcon->ses;
2792 struct TCP_Server_Info *server;
2793 int flags = CIFS_CP_CREATE_CLOSE_OP;
2794 struct smb_rqst *rqst;
2795 int resp_buftype[3];
2796 struct kvec *rsp_iov;
2797 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2798 struct cifs_open_parms oparms;
2799 struct cifs_fid fid;
2800 int rc;
2801 __le16 *utf16_path;
2802 struct cached_fid *cfid;
2803 int retries = 0, cur_sleep = 1;
2804
2805 replay_again:
2806 /* reinitialize for possible replay */
2807 cfid = NULL;
2808 flags = CIFS_CP_CREATE_CLOSE_OP;
2809 oplock = SMB2_OPLOCK_LEVEL_NONE;
2810 server = cifs_pick_channel(ses);
2811
2812 if (!path)
2813 path = "";
2814 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2815 if (!utf16_path)
2816 return -ENOMEM;
2817
2818 if (smb3_encryption_required(tcon))
2819 flags |= CIFS_TRANSFORM_REQ;
2820
2821 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
2822 vars = kzalloc(sizeof(*vars), GFP_KERNEL);
2823 if (!vars) {
2824 rc = -ENOMEM;
2825 goto out_free_path;
2826 }
2827 rqst = vars->rqst;
2828 rsp_iov = vars->rsp_iov;
2829
2830 /*
2831 * We can only call this for things we know are directories.
2832 */
2833 if (!strcmp(path, ""))
2834 open_cached_dir(xid, tcon, path, cifs_sb, false,
2835 &cfid); /* cfid null if open dir failed */
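/*
 * If a cached handle for the directory was found, the compound below
 * collapses to a single query against the cached fid and the open and
 * close requests are skipped entirely.
 */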
2836
2837 rqst[0].rq_iov = vars->open_iov;
2838 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2839
2840 oparms = (struct cifs_open_parms) {
2841 .tcon = tcon,
2842 .path = path,
2843 .desired_access = desired_access,
2844 .disposition = FILE_OPEN,
2845 .create_options = cifs_create_options(cifs_sb, 0),
2846 .fid = &fid,
2847 .replay = !!(retries),
2848 };
2849
2850 rc = SMB2_open_init(tcon, server,
2851 &rqst[0], &oplock, &oparms, utf16_path);
2852 if (rc)
2853 goto qic_exit;
2854 smb2_set_next_command(tcon, &rqst[0]);
2855
2856 rqst[1].rq_iov = &vars->qi_iov;
2857 rqst[1].rq_nvec = 1;
2858
2859 if (cfid) {
2860 rc = SMB2_query_info_init(tcon, server,
2861 &rqst[1],
2862 cfid->fid.persistent_fid,
2863 cfid->fid.volatile_fid,
2864 class, type, 0,
2865 output_len, 0,
2866 NULL);
2867 } else {
2868 rc = SMB2_query_info_init(tcon, server,
2869 &rqst[1],
2870 COMPOUND_FID,
2871 COMPOUND_FID,
2872 class, type, 0,
2873 output_len, 0,
2874 NULL);
2875 }
2876 if (rc)
2877 goto qic_exit;
2878 if (!cfid) {
2879 smb2_set_next_command(tcon, &rqst[1]);
2880 smb2_set_related(&rqst[1]);
2881 }
2882
2883 rqst[2].rq_iov = &vars->close_iov;
2884 rqst[2].rq_nvec = 1;
2885
2886 rc = SMB2_close_init(tcon, server,
2887 &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
2888 if (rc)
2889 goto qic_exit;
2890 smb2_set_related(&rqst[2]);
2891
2892 if (retries) {
2893 if (!cfid) {
2894 smb2_set_replay(server, &rqst[0]);
2895 smb2_set_replay(server, &rqst[2]);
2896 }
2897 smb2_set_replay(server, &rqst[1]);
2898 }
2899
2900 if (cfid) {
2901 rc = compound_send_recv(xid, ses, server,
2902 flags, 1, &rqst[1],
2903 &resp_buftype[1], &rsp_iov[1]);
2904 } else {
2905 rc = compound_send_recv(xid, ses, server,
2906 flags, 3, rqst,
2907 resp_buftype, rsp_iov);
2908 }
2909 if (rc) {
2910 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
2911 if (rc == -EREMCHG) {
2912 tcon->need_reconnect = true;
2913 pr_warn_once("server share %s deleted\n",
2914 tcon->tree_name);
2915 }
2916 goto qic_exit;
2917 }
2918 *rsp = rsp_iov[1];
2919 *buftype = resp_buftype[1];
2920
2921 qic_exit:
2922 SMB2_open_free(&rqst[0]);
2923 SMB2_query_info_free(&rqst[1]);
2924 SMB2_close_free(&rqst[2]);
2925 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2926 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
2927 if (cfid)
2928 close_cached_dir(cfid);
2929 kfree(vars);
2930 out_free_path:
2931 kfree(utf16_path);
2932
2933 if (is_replayable_error(rc) &&
2934 smb2_should_replay(tcon, &retries, &cur_sleep))
2935 goto replay_again;
2936
2937 return rc;
2938 }
2939
2940 static int
2941 smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2942 const char *path, struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
2943 {
2944 struct smb2_query_info_rsp *rsp;
2945 struct smb2_fs_full_size_info *info = NULL;
2946 struct kvec rsp_iov = {NULL, 0};
2947 int buftype = CIFS_NO_BUFFER;
2948 int rc;
2949
2950
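/*
 * Compound an open of @path with an FS_FULL_SIZE_INFORMATION query and
 * a close, then translate the returned sizes into the kstatfs fields.
 */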
2951 rc = smb2_query_info_compound(xid, tcon, path,
2952 FILE_READ_ATTRIBUTES,
2953 FS_FULL_SIZE_INFORMATION,
2954 SMB2_O_INFO_FILESYSTEM,
2955 sizeof(struct smb2_fs_full_size_info),
2956 &rsp_iov, &buftype, cifs_sb);
2957 if (rc)
2958 goto qfs_exit;
2959
2960 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
2961 buf->f_type = SMB2_SUPER_MAGIC;
2962 info = (struct smb2_fs_full_size_info *)(
2963 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2964 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2965 le32_to_cpu(rsp->OutputBufferLength),
2966 &rsp_iov,
2967 sizeof(struct smb2_fs_full_size_info));
2968 if (!rc)
2969 smb2_copy_fs_info_to_kstatfs(info, buf);
2970
2971 qfs_exit:
2972 trace_smb3_qfs_done(xid, tcon->tid, tcon->ses->Suid, tcon->tree_name, rc);
2973 free_rsp_buf(buftype, rsp_iov.iov_base);
2974 return rc;
2975 }
2976
2977 static int
2978 smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2979 const char *path, struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
2980 {
2981 int rc;
2982 __le16 *utf16_path = NULL;
2983 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2984 struct cifs_open_parms oparms;
2985 struct cifs_fid fid;
2986
2987 if (!tcon->posix_extensions)
2988 return smb2_queryfs(xid, tcon, path, cifs_sb, buf);
2989
2990 oparms = (struct cifs_open_parms) {
2991 .tcon = tcon,
2992 .path = path,
2993 .desired_access = FILE_READ_ATTRIBUTES,
2994 .disposition = FILE_OPEN,
2995 .create_options = cifs_create_options(cifs_sb, 0),
2996 .fid = &fid,
2997 };
2998
2999 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
3000 if (utf16_path == NULL)
3001 return -ENOMEM;
3002
3003 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
3004 NULL, NULL);
3005 kfree(utf16_path);
3006 if (rc)
3007 return rc;
3008
3009 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
3010 fid.volatile_fid, buf);
3011 buf->f_type = SMB2_SUPER_MAGIC;
3012 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3013 return rc;
3014 }
3015
3016 static bool
3017 smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
3018 {
3019 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
3020 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
3021 }
3022
3023 static int
3024 smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
3025 __u64 length, __u32 type, int lock, int unlock, bool wait)
3026 {
3027 if (unlock && !lock)
3028 type = SMB2_LOCKFLAG_UNLOCK;
3029 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
3030 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
3031 current->tgid, length, offset, type, wait);
3032 }
3033
3034 static void
3035 smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
3036 {
3037 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
3038 }
3039
3040 static void
3041 smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
3042 {
3043 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
3044 }
3045
3046 static void
3047 smb2_new_lease_key(struct cifs_fid *fid)
3048 {
3049 generate_random_uuid(fid->lease_key);
3050 }
3051
3052 static int
3053 smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
3054 const char *search_name,
3055 struct dfs_info3_param **target_nodes,
3056 unsigned int *num_of_nodes,
3057 const struct nls_table *nls_codepage, int remap)
3058 {
3059 int rc;
3060 __le16 *utf16_path = NULL;
3061 int utf16_path_len = 0;
3062 struct cifs_tcon *tcon;
3063 struct fsctl_get_dfs_referral_req *dfs_req = NULL;
3064 struct get_dfs_referral_rsp *dfs_rsp = NULL;
3065 u32 dfs_req_size = 0, dfs_rsp_size = 0;
3066 int retry_once = 0;
3067
3068 cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
3069
3070 /*
3071 * Try to use the IPC tcon, otherwise just use any tcon on the session.
3072 */
3073 tcon = ses->tcon_ipc;
3074 if (tcon == NULL) {
3075 spin_lock(&cifs_tcp_ses_lock);
3076 tcon = list_first_entry_or_null(&ses->tcon_list,
3077 struct cifs_tcon,
3078 tcon_list);
3079 if (tcon) {
3080 tcon->tc_count++;
3081 trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
3082 netfs_trace_tcon_ref_get_dfs_refer);
3083 }
3084 spin_unlock(&cifs_tcp_ses_lock);
3085 }
3086
3087 if (tcon == NULL) {
3088 cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
3089 ses);
3090 rc = -ENOTCONN;
3091 goto out;
3092 }
3093
3094 utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
3095 &utf16_path_len,
3096 nls_codepage, remap);
3097 if (!utf16_path) {
3098 rc = -ENOMEM;
3099 goto out;
3100 }
3101
3102 dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
3103 dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
3104 if (!dfs_req) {
3105 rc = -ENOMEM;
3106 goto out;
3107 }
3108
3109 /* Highest DFS referral version understood */
3110 dfs_req->MaxReferralLevel = DFS_VERSION;
3111
3112 /* Path to resolve in a UTF-16 null-terminated string */
3113 memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
3114
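/*
 * Issue the referral request, retrying once on a retryable error
 * unless a fatal signal is pending.
 */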
3115 for (;;) {
3116 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
3117 FSCTL_DFS_GET_REFERRALS,
3118 (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
3119 (char **)&dfs_rsp, &dfs_rsp_size);
3120 if (fatal_signal_pending(current)) {
3121 rc = -EINTR;
3122 break;
3123 }
3124 if (!is_retryable_error(rc) || retry_once++)
3125 break;
3126 usleep_range(512, 2048);
3127 }
3128
3129 if (!rc && !dfs_rsp)
3130 rc = -EIO;
3131 if (rc) {
3132 if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
3133 cifs_tcon_dbg(FYI, "%s: ioctl error: rc=%d\n", __func__, rc);
3134 goto out;
3135 }
3136
3137 rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
3138 num_of_nodes, target_nodes,
3139 nls_codepage, remap, search_name,
3140 true /* is_unicode */);
3141 if (rc && rc != -ENOENT) {
3142 cifs_tcon_dbg(VFS, "%s: failed to parse DFS referral %s: %d\n",
3143 __func__, search_name, rc);
3144 }
3145
3146 out:
3147 if (tcon && !tcon->ipc) {
3148 /* ipc tcons are not refcounted */
3149 spin_lock(&cifs_tcp_ses_lock);
3150 tcon->tc_count--;
3151 trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
3152 netfs_trace_tcon_ref_dec_dfs_refer);
3153 /* tc_count can never go negative */
3154 WARN_ON(tcon->tc_count < 0);
3155 spin_unlock(&cifs_tcp_ses_lock);
3156 }
3157 kfree(utf16_path);
3158 kfree(dfs_req);
3159 kfree(dfs_rsp);
3160 return rc;
3161 }
3162
3163 static struct smb_ntsd *
3164 get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
3165 const struct cifs_fid *cifsfid, u32 *pacllen, u32 info)
3166 {
3167 struct smb_ntsd *pntsd = NULL;
3168 unsigned int xid;
3169 int rc = -EOPNOTSUPP;
3170 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3171
3172 if (IS_ERR(tlink))
3173 return ERR_CAST(tlink);
3174
3175 xid = get_xid();
3176 cifs_dbg(FYI, "trying to get acl\n");
3177
3178 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
3179 cifsfid->volatile_fid, (void **)&pntsd, pacllen,
3180 info);
3181 free_xid(xid);
3182
3183 cifs_put_tlink(tlink);
3184
3185 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3186 if (rc)
3187 return ERR_PTR(rc);
3188 return pntsd;
3189
3190 }
3191
3192 static struct smb_ntsd *
3193 get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
3194 const char *path, u32 *pacllen, u32 info)
3195 {
3196 struct smb_ntsd *pntsd = NULL;
3197 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3198 unsigned int xid;
3199 int rc;
3200 struct cifs_tcon *tcon;
3201 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3202 struct cifs_fid fid;
3203 struct cifs_open_parms oparms;
3204 __le16 *utf16_path;
3205
3206 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
3207 if (IS_ERR(tlink))
3208 return ERR_CAST(tlink);
3209
3210 tcon = tlink_tcon(tlink);
3211 xid = get_xid();
3212
3213 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
3214 if (!utf16_path) {
3215 rc = -ENOMEM;
3216 goto put_tlink;
3217 }
3218
3219 oparms = (struct cifs_open_parms) {
3220 .tcon = tcon,
3221 .path = path,
3222 .desired_access = READ_CONTROL,
3223 .disposition = FILE_OPEN,
3224 /*
3225 * When querying an ACL, even if the file is a symlink
3226 * we want to open the source not the target, and so
3227 * the protocol requires that the client specify this
3228 * flag when opening a reparse point
3229 */
3230 .create_options = cifs_create_options(cifs_sb, 0) |
3231 OPEN_REPARSE_POINT,
3232 .fid = &fid,
3233 };
3234
3235 if (info & SACL_SECINFO)
3236 oparms.desired_access |= SYSTEM_SECURITY;
3237
3238 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
3239 NULL);
3240 kfree(utf16_path);
3241 if (!rc) {
3242 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
3243 fid.volatile_fid, (void **)&pntsd, pacllen,
3244 info);
3245 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3246 }
3247
3248 put_tlink:
3249 cifs_put_tlink(tlink);
3250 free_xid(xid);
3251
3252 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3253 if (rc)
3254 return ERR_PTR(rc);
3255 return pntsd;
3256 }
3257
3258 static int
3259 set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
3260 struct inode *inode, const char *path, int aclflag)
3261 {
3262 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3263 unsigned int xid;
3264 int rc, access_flags = 0;
3265 struct cifs_tcon *tcon;
3266 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3267 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3268 struct cifs_fid fid;
3269 struct cifs_open_parms oparms;
3270 __le16 *utf16_path;
3271
3272 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
3273 if (IS_ERR(tlink))
3274 return PTR_ERR(tlink);
3275
3276 tcon = tlink_tcon(tlink);
3277 xid = get_xid();
3278
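/*
 * Map which parts of the security descriptor are being changed onto the
 * access rights needed on the open: owner/group changes need
 * WRITE_OWNER, SACL changes need SYSTEM_SECURITY, and DACL changes need
 * WRITE_DAC.
 */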
3279 if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP)
3280 access_flags |= WRITE_OWNER;
3281 if (aclflag & CIFS_ACL_SACL)
3282 access_flags |= SYSTEM_SECURITY;
3283 if (aclflag & CIFS_ACL_DACL)
3284 access_flags |= WRITE_DAC;
3285
3286 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
3287 if (!utf16_path) {
3288 rc = -ENOMEM;
3289 goto put_tlink;
3290 }
3291
3292 oparms = (struct cifs_open_parms) {
3293 .tcon = tcon,
3294 .desired_access = access_flags,
3295 .create_options = cifs_create_options(cifs_sb, 0),
3296 .disposition = FILE_OPEN,
3297 .path = path,
3298 .fid = &fid,
3299 };
3300
3301 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
3302 NULL, NULL);
3303 kfree(utf16_path);
3304 if (!rc) {
3305 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
3306 fid.volatile_fid, pnntsd, acllen, aclflag);
3307 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3308 }
3309
3310 put_tlink:
3311 cifs_put_tlink(tlink);
3312 free_xid(xid);
3313 return rc;
3314 }
3315
3316 /* Retrieve an ACL from the server */
3317 static struct smb_ntsd *
3318 get_smb2_acl(struct cifs_sb_info *cifs_sb,
3319 struct inode *inode, const char *path,
3320 u32 *pacllen, u32 info)
3321 {
3322 struct smb_ntsd *pntsd = NULL;
3323 struct cifsFileInfo *open_file = NULL;
3324
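/*
 * Prefer an already-open readable handle when we have one; SACL queries
 * always go by path since they need an open with SYSTEM_SECURITY
 * access, which a normal cached handle was not opened with.
 */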
3325 if (inode && !(info & SACL_SECINFO))
3326 open_file = find_readable_file(CIFS_I(inode), true);
3327 if (!open_file || (info & SACL_SECINFO))
3328 return get_smb2_acl_by_path(cifs_sb, path, pacllen, info);
3329
3330 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
3331 cifsFileInfo_put(open_file);
3332 return pntsd;
3333 }
3334
3335 static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
3336 loff_t offset, loff_t len, unsigned int xid)
3337 {
3338 struct cifsFileInfo *cfile = file->private_data;
3339 struct file_zero_data_information fsctl_buf;
3340
3341 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
3342
3343 fsctl_buf.FileOffset = cpu_to_le64(offset);
3344 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3345
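/*
 * FSCTL_SET_ZERO_DATA zeroes [FileOffset, BeyondFinalZero); on a sparse
 * file the server may deallocate the range rather than writing zeroes.
 */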
3346 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3347 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
3348 (char *)&fsctl_buf,
3349 sizeof(struct file_zero_data_information),
3350 0, NULL, NULL);
3351 }
3352
3353 static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
3354 unsigned long long offset, unsigned long long len,
3355 bool keep_size)
3356 {
3357 struct cifs_ses *ses = tcon->ses;
3358 struct inode *inode = file_inode(file);
3359 struct cifsInodeInfo *cifsi = CIFS_I(inode);
3360 struct cifsFileInfo *cfile = file->private_data;
3361 struct netfs_inode *ictx = netfs_inode(inode);
3362 unsigned long long i_size, new_size, remote_size;
3363 long rc;
3364 unsigned int xid;
3365
3366 xid = get_xid();
3367
3368 trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
3369 ses->Suid, offset, len);
3370
3371 filemap_invalidate_lock(inode->i_mapping);
3372
3373 i_size = i_size_read(inode);
3374 remote_size = ictx->remote_i_size;
3375 if (offset + len >= remote_size && offset < i_size) {
3376 unsigned long long top = umin(offset + len, i_size);
3377
3378 rc = filemap_write_and_wait_range(inode->i_mapping, offset, top - 1);
3379 if (rc < 0)
3380 goto zero_range_exit;
3381 }
3382
3383 /*
3384 * We zero the range through an ioctl, so we need to remove the page
3385 * cache first, otherwise the data may be inconsistent with the server.
3386 */
3387 truncate_pagecache_range(inode, offset, offset + len - 1);
3388 netfs_wait_for_outstanding_io(inode);
3389
3390 /* if file not oplocked can't be sure whether asking to extend size */
3391 rc = -EOPNOTSUPP;
3392 if (keep_size == false && !CIFS_CACHE_READ(cifsi))
3393 goto zero_range_exit;
3394
3395 rc = smb3_zero_data(file, tcon, offset, len, xid);
3396 if (rc < 0)
3397 goto zero_range_exit;
3398
3399 /*
3400 * do we also need to change the size of the file?
3401 */
3402 new_size = offset + len;
3403 if (keep_size == false && (unsigned long long)i_size_read(inode) < new_size) {
3404 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3405 cfile->fid.volatile_fid, cfile->pid, new_size);
3406 if (rc >= 0) {
3407 truncate_setsize(inode, new_size);
3408 netfs_resize_file(&cifsi->netfs, new_size, true);
3409 if (offset < cifsi->netfs.zero_point)
3410 cifsi->netfs.zero_point = offset;
3411 fscache_resize_cookie(cifs_inode_cookie(inode), new_size);
3412 }
3413 }
3414
3415 zero_range_exit:
3416 filemap_invalidate_unlock(inode->i_mapping);
3417 free_xid(xid);
3418 if (rc)
3419 trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
3420 ses->Suid, offset, len, rc);
3421 else
3422 trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
3423 ses->Suid, offset, len);
3424 return rc;
3425 }
3426
3427 static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
3428 loff_t offset, loff_t len)
3429 {
3430 struct inode *inode = file_inode(file);
3431 struct cifsFileInfo *cfile = file->private_data;
3432 struct file_zero_data_information fsctl_buf;
3433 unsigned long long end = offset + len, i_size, remote_i_size;
3434 long rc;
3435 unsigned int xid;
3436 __u8 set_sparse = 1;
3437
3438 xid = get_xid();
3439
3440 /* Need to make file sparse, if not already, before freeing range. */
3441 /* Consider adding equivalent for compressed since it could also work */
3442 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
3443 rc = -EOPNOTSUPP;
3444 goto out;
3445 }
3446
3447 filemap_invalidate_lock(inode->i_mapping);
3448 /*
3449 * We implement the hole punch through an ioctl, so we need to remove the
3450 * page cache first, otherwise the data may be inconsistent with the server.
3451 */
3452 truncate_pagecache_range(inode, offset, offset + len - 1);
3453 netfs_wait_for_outstanding_io(inode);
3454
3455 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
3456
3457 fsctl_buf.FileOffset = cpu_to_le64(offset);
3458 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3459
3460 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3461 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
3462 (char *)&fsctl_buf,
3463 sizeof(struct file_zero_data_information),
3464 CIFSMaxBufSize, NULL, NULL);
3465
3466 if (rc)
3467 goto unlock;
3468
3469 /* If there's dirty data in the buffer that would extend the EOF if it
3470 * were written, then we need to move the EOF marker over to the lower
3471 * of the high end of the hole and the proposed EOF. The problem is
3472 * that if we locally hole-punch the tail of the dirty data, the proposed
3473 * EOF update will end up in the wrong place.
3474 */
3475 i_size = i_size_read(inode);
3476 remote_i_size = netfs_inode(inode)->remote_i_size;
3477 if (end > remote_i_size && i_size > remote_i_size) {
3478 unsigned long long extend_to = umin(end, i_size);
3479 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3480 cfile->fid.volatile_fid, cfile->pid, extend_to);
3481 if (rc >= 0)
3482 netfs_inode(inode)->remote_i_size = extend_to;
3483 }
3484
3485 unlock:
3486 filemap_invalidate_unlock(inode->i_mapping);
3487 out:
3488 free_xid(xid);
3489 return rc;
3490 }
3491
3492 static int smb3_simple_fallocate_write_range(unsigned int xid,
3493 struct cifs_tcon *tcon,
3494 struct cifsFileInfo *cfile,
3495 loff_t off, loff_t len,
3496 char *buf)
3497 {
3498 struct cifs_io_parms io_parms = {0};
3499 int nbytes;
3500 int rc = 0;
3501 struct kvec iov[2];
3502
3503 io_parms.netfid = cfile->fid.netfid;
3504 io_parms.pid = current->tgid;
3505 io_parms.tcon = tcon;
3506 io_parms.persistent_fid = cfile->fid.persistent_fid;
3507 io_parms.volatile_fid = cfile->fid.volatile_fid;
3508
3509 while (len) {
3510 io_parms.offset = off;
3511 io_parms.length = len;
3512 if (io_parms.length > SMB2_MAX_BUFFER_SIZE)
3513 io_parms.length = SMB2_MAX_BUFFER_SIZE;
3514 /* iov[0] is reserved for smb header */
3515 iov[1].iov_base = buf;
3516 iov[1].iov_len = io_parms.length;
3517 rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
3518 if (rc)
3519 break;
3520 if (nbytes > len)
3521 return -EINVAL;
3522 buf += nbytes;
3523 off += nbytes;
3524 len -= nbytes;
3525 }
3526 return rc;
3527 }
3528
3529 static int smb3_simple_fallocate_range(unsigned int xid,
3530 struct cifs_tcon *tcon,
3531 struct cifsFileInfo *cfile,
3532 loff_t off, loff_t len)
3533 {
3534 struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data;
3535 u32 out_data_len;
3536 char *buf = NULL;
3537 loff_t l;
3538 int rc;
3539
3540 in_data.file_offset = cpu_to_le64(off);
3541 in_data.length = cpu_to_le64(len);
3542 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3543 cfile->fid.volatile_fid,
3544 FSCTL_QUERY_ALLOCATED_RANGES,
3545 (char *)&in_data, sizeof(in_data),
3546 1024 * sizeof(struct file_allocated_range_buffer),
3547 (char **)&out_data, &out_data_len);
3548 if (rc)
3549 goto out;
3550
3551 buf = kzalloc(1024 * 1024, GFP_KERNEL);
3552 if (buf == NULL) {
3553 rc = -ENOMEM;
3554 goto out;
3555 }
3556
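/*
 * Walk the allocated-range list returned by the server: write zeroes over
 * the holes and skip the ranges that are already allocated.
 */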
3557 tmp_data = out_data;
3558 while (len) {
3559 /*
3560 * The rest of the region is unmapped so write it all.
3561 */
3562 if (out_data_len == 0) {
3563 rc = smb3_simple_fallocate_write_range(xid, tcon,
3564 cfile, off, len, buf);
3565 goto out;
3566 }
3567
3568 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3569 rc = -EINVAL;
3570 goto out;
3571 }
3572
3573 if (off < le64_to_cpu(tmp_data->file_offset)) {
3574 /*
3575 * We are at a hole. Write until the end of the region
3576 * or until the next allocated data,
3577 * whichever comes first.
3578 */
3579 l = le64_to_cpu(tmp_data->file_offset) - off;
3580 if (len < l)
3581 l = len;
3582 rc = smb3_simple_fallocate_write_range(xid, tcon,
3583 cfile, off, l, buf);
3584 if (rc)
3585 goto out;
3586 off = off + l;
3587 len = len - l;
3588 if (len == 0)
3589 goto out;
3590 }
3591 /*
3592 * We are at a section of allocated data, just skip forward
3593 * until the end of the data or the end of the region
3594 * we are supposed to fallocate, whichever comes first.
3595 */
3596 l = le64_to_cpu(tmp_data->length);
3597 if (len < l)
3598 l = len;
3599 off += l;
3600 len -= l;
3601
3602 tmp_data = &tmp_data[1];
3603 out_data_len -= sizeof(struct file_allocated_range_buffer);
3604 }
3605
3606 out:
3607 kfree(out_data);
3608 kfree(buf);
3609 return rc;
3610 }
3611
3612
3613 static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
3614 loff_t off, loff_t len, bool keep_size)
3615 {
3616 struct inode *inode;
3617 struct cifsInodeInfo *cifsi;
3618 struct cifsFileInfo *cfile = file->private_data;
3619 long rc = -EOPNOTSUPP;
3620 unsigned int xid;
3621 loff_t new_eof;
3622
3623 xid = get_xid();
3624
3625 inode = d_inode(cfile->dentry);
3626 cifsi = CIFS_I(inode);
3627
3628 trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
3629 tcon->ses->Suid, off, len);
3630 /* if file not oplocked can't be sure whether asking to extend size */
3631 if (!CIFS_CACHE_READ(cifsi))
3632 if (keep_size == false) {
3633 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3634 tcon->tid, tcon->ses->Suid, off, len, rc);
3635 free_xid(xid);
3636 return rc;
3637 }
3638
3639 /*
3640 * Extending the file
3641 */
3642 if ((keep_size == false) && i_size_read(inode) < off + len) {
3643 rc = inode_newsize_ok(inode, off + len);
3644 if (rc)
3645 goto out;
3646
3647 if (cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)
3648 smb2_set_sparse(xid, tcon, cfile, inode, false);
3649
3650 new_eof = off + len;
3651 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3652 cfile->fid.volatile_fid, cfile->pid, new_eof);
3653 if (rc == 0) {
3654 netfs_resize_file(&cifsi->netfs, new_eof, true);
3655 cifs_setsize(inode, new_eof);
3656 }
3657 goto out;
3658 }
3659
3660 /*
3661 * Files are non-sparse by default, so falloc may be a no-op.
3662 * Must check if the file is sparse. If not sparse, and since we are not
3663 * extending, there is no need to do anything since the file is already allocated.
3664 */
3665 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
3666 rc = 0;
3667 goto out;
3668 }
3669
3670 if (keep_size == true) {
3671 /*
3672 * We can not preallocate pages beyond the end of the file
3673 * in SMB2
3674 */
3675 if (off >= i_size_read(inode)) {
3676 rc = 0;
3677 goto out;
3678 }
3679 /*
3680 * For fallocates that are partially beyond the end of file,
3681 * clamp len so we only fallocate up to the end of file.
3682 */
3683 if (off + len > i_size_read(inode)) {
3684 len = i_size_read(inode) - off;
3685 }
3686 }
3687
3688 if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
3689 /*
3690 * At this point, we are trying to fallocate an internal
3691 * region of a sparse file. Since SMB2 does not have a
3692 * fallocate command we have two options on how to emulate this.
3693 * We can either turn the entire file non-sparse, which
3694 * we only do if the fallocate covers virtually
3695 * the whole file, or we can overwrite the region with zeroes
3696 * using SMB2_write, which could be prohibitively expensive
3697 * if len is large.
3698 */
3699 /*
3700 * We are only trying to fallocate a small region, so
3701 * just overwrite it with zeroes.
3702 */
3703 if (len <= 1024 * 1024) {
3704 rc = smb3_simple_fallocate_range(xid, tcon, cfile,
3705 off, len);
3706 goto out;
3707 }
3708
3709 /*
3710 * Check if the falloc starts within the first few pages of the file
3711 * and ends within a few pages of the end of the file, to
3712 * ensure that most of the file is being forced to be
3713 * fallocated now. If so, then setting the whole file non-sparse
3714 * (i.e. potentially making a few extra pages at the beginning
3715 * or end of the file non-sparse via set_sparse) is harmless.
3716 */
3717 if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
3718 rc = -EOPNOTSUPP;
3719 goto out;
3720 }
3721 }
3722
3723 smb2_set_sparse(xid, tcon, cfile, inode, false);
3724 rc = 0;
3725
3726 out:
3727 if (rc)
3728 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
3729 tcon->ses->Suid, off, len, rc);
3730 else
3731 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
3732 tcon->ses->Suid, off, len);
3733
3734 free_xid(xid);
3735 return rc;
3736 }
3737
3738 static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
3739 loff_t off, loff_t len)
3740 {
3741 int rc;
3742 unsigned int xid;
3743 struct inode *inode = file_inode(file);
3744 struct cifsInodeInfo *cifsi = CIFS_I(inode);
3745 struct cifsFileInfo *cfile = file->private_data;
3746 struct netfs_inode *ictx = &cifsi->netfs;
3747 loff_t old_eof, new_eof;
3748
3749 xid = get_xid();
3750
3751 old_eof = i_size_read(inode);
3752 if ((off >= old_eof) ||
3753 off + len >= old_eof) {
3754 rc = -EINVAL;
3755 goto out;
3756 }
3757
3758 filemap_invalidate_lock(inode->i_mapping);
3759 rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof - 1);
3760 if (rc < 0)
3761 goto out_2;
3762
3763 truncate_pagecache_range(inode, off, old_eof);
3764 ictx->zero_point = old_eof;
3765 netfs_wait_for_outstanding_io(inode);
3766
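/*
 * Use a server-side copy to shift everything beyond the collapsed region
 * down by 'len' bytes, then shrink the EOF to match.
 */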
3767 rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
3768 old_eof - off - len, off);
3769 if (rc < 0)
3770 goto out_2;
3771
3772 new_eof = old_eof - len;
3773 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3774 cfile->fid.volatile_fid, cfile->pid, new_eof);
3775 if (rc < 0)
3776 goto out_2;
3777
3778 rc = 0;
3779
3780 truncate_setsize(inode, new_eof);
3781 netfs_resize_file(&cifsi->netfs, new_eof, true);
3782 ictx->zero_point = new_eof;
3783 fscache_resize_cookie(cifs_inode_cookie(inode), new_eof);
3784 out_2:
3785 filemap_invalidate_unlock(inode->i_mapping);
3786 out:
3787 free_xid(xid);
3788 return rc;
3789 }
3790
3791 static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
3792 loff_t off, loff_t len)
3793 {
3794 int rc;
3795 unsigned int xid;
3796 struct cifsFileInfo *cfile = file->private_data;
3797 struct inode *inode = file_inode(file);
3798 struct cifsInodeInfo *cifsi = CIFS_I(inode);
3799 __u64 count, old_eof, new_eof;
3800
3801 xid = get_xid();
3802
3803 old_eof = i_size_read(inode);
3804 if (off >= old_eof) {
3805 rc = -EINVAL;
3806 goto out;
3807 }
3808
3809 count = old_eof - off;
3810 new_eof = old_eof + len;
3811
3812 filemap_invalidate_lock(inode->i_mapping);
3813 rc = filemap_write_and_wait_range(inode->i_mapping, off, new_eof - 1);
3814 if (rc < 0)
3815 goto out_2;
3816 truncate_pagecache_range(inode, off, old_eof);
3817 netfs_wait_for_outstanding_io(inode);
3818
3819 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3820 cfile->fid.volatile_fid, cfile->pid, new_eof);
3821 if (rc < 0)
3822 goto out_2;
3823
3824 truncate_setsize(inode, new_eof);
3825 netfs_resize_file(&cifsi->netfs, i_size_read(inode), true);
3826 fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode));
3827
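/* Server-side copy the old tail up by 'len' bytes, then zero the newly inserted range. */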
3828 rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
3829 if (rc < 0)
3830 goto out_2;
3831 cifsi->netfs.zero_point = new_eof;
3832
3833 rc = smb3_zero_data(file, tcon, off, len, xid);
3834 if (rc < 0)
3835 goto out_2;
3836
3837 rc = 0;
3838 out_2:
3839 filemap_invalidate_unlock(inode->i_mapping);
3840 out:
3841 free_xid(xid);
3842 return rc;
3843 }
3844
3845 static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
3846 {
3847 struct cifsFileInfo *wrcfile, *cfile = file->private_data;
3848 struct cifsInodeInfo *cifsi;
3849 struct inode *inode;
3850 int rc = 0;
3851 struct file_allocated_range_buffer in_data, *out_data = NULL;
3852 u32 out_data_len;
3853 unsigned int xid;
3854
3855 if (whence != SEEK_HOLE && whence != SEEK_DATA)
3856 return generic_file_llseek(file, offset, whence);
3857
3858 inode = d_inode(cfile->dentry);
3859 cifsi = CIFS_I(inode);
3860
3861 if (offset < 0 || offset >= i_size_read(inode))
3862 return -ENXIO;
3863
3864 xid = get_xid();
3865 /*
3866 * We need to be sure that all dirty pages are written as they
3867 * might fill holes on the server.
3868 * Note that we also MUST flush any written pages since at least
3869 * some servers (Windows2016) will not reflect recent writes in
3870 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
3871 */
3872 wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
3873 if (wrcfile) {
3874 filemap_write_and_wait(inode->i_mapping);
3875 smb2_flush_file(xid, tcon, &wrcfile->fid);
3876 cifsFileInfo_put(wrcfile);
3877 }
3878
3879 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
3880 if (whence == SEEK_HOLE)
3881 offset = i_size_read(inode);
3882 goto lseek_exit;
3883 }
3884
3885 in_data.file_offset = cpu_to_le64(offset);
3886 in_data.length = cpu_to_le64(i_size_read(inode));
3887
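/*
 * Ask the server for the first allocated range at or after 'offset'; one
 * returned entry is enough to resolve SEEK_DATA/SEEK_HOLE (-E2BIG just
 * means the server had more ranges than we asked for).
 */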
3888 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3889 cfile->fid.volatile_fid,
3890 FSCTL_QUERY_ALLOCATED_RANGES,
3891 (char *)&in_data, sizeof(in_data),
3892 sizeof(struct file_allocated_range_buffer),
3893 (char **)&out_data, &out_data_len);
3894 if (rc == -E2BIG)
3895 rc = 0;
3896 if (rc)
3897 goto lseek_exit;
3898
3899 if (whence == SEEK_HOLE && out_data_len == 0)
3900 goto lseek_exit;
3901
3902 if (whence == SEEK_DATA && out_data_len == 0) {
3903 rc = -ENXIO;
3904 goto lseek_exit;
3905 }
3906
3907 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3908 rc = -EINVAL;
3909 goto lseek_exit;
3910 }
3911 if (whence == SEEK_DATA) {
3912 offset = le64_to_cpu(out_data->file_offset);
3913 goto lseek_exit;
3914 }
3915 if (offset < le64_to_cpu(out_data->file_offset))
3916 goto lseek_exit;
3917
3918 offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);
3919
3920 lseek_exit:
3921 free_xid(xid);
3922 kfree(out_data);
3923 if (!rc)
3924 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3925 else
3926 return rc;
3927 }
3928
3929 static int smb3_fiemap(struct cifs_tcon *tcon,
3930 struct cifsFileInfo *cfile,
3931 struct fiemap_extent_info *fei, u64 start, u64 len)
3932 {
3933 unsigned int xid;
3934 struct file_allocated_range_buffer in_data, *out_data;
3935 u32 out_data_len;
3936 int i, num, rc, flags, last_blob;
3937 u64 next;
3938
3939 rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0);
3940 if (rc)
3941 return rc;
3942
3943 xid = get_xid();
3944 again:
3945 in_data.file_offset = cpu_to_le64(start);
3946 in_data.length = cpu_to_le64(len);
3947
3948 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3949 cfile->fid.volatile_fid,
3950 FSCTL_QUERY_ALLOCATED_RANGES,
3951 (char *)&in_data, sizeof(in_data),
3952 1024 * sizeof(struct file_allocated_range_buffer),
3953 (char **)&out_data, &out_data_len);
3954 if (rc == -E2BIG) {
3955 last_blob = 0;
3956 rc = 0;
3957 } else
3958 last_blob = 1;
3959 if (rc)
3960 goto out;
3961
3962 if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
3963 rc = -EINVAL;
3964 goto out;
3965 }
3966 if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
3967 rc = -EINVAL;
3968 goto out;
3969 }
3970
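/* Translate each returned allocated range into a fiemap extent. */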
3971 num = out_data_len / sizeof(struct file_allocated_range_buffer);
3972 for (i = 0; i < num; i++) {
3973 flags = 0;
3974 if (i == num - 1 && last_blob)
3975 flags |= FIEMAP_EXTENT_LAST;
3976
3977 rc = fiemap_fill_next_extent(fei,
3978 le64_to_cpu(out_data[i].file_offset),
3979 le64_to_cpu(out_data[i].file_offset),
3980 le64_to_cpu(out_data[i].length),
3981 flags);
3982 if (rc < 0)
3983 goto out;
3984 if (rc == 1) {
3985 rc = 0;
3986 goto out;
3987 }
3988 }
3989
3990 if (!last_blob) {
3991 next = le64_to_cpu(out_data[num - 1].file_offset) +
3992 le64_to_cpu(out_data[num - 1].length);
3993 len = len - (next - start);
3994 start = next;
3995 goto again;
3996 }
3997
3998 out:
3999 free_xid(xid);
4000 kfree(out_data);
4001 return rc;
4002 }
4003
4004 static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
4005 loff_t off, loff_t len)
4006 {
4007 /* KEEP_SIZE already checked for by do_fallocate */
4008 if (mode & FALLOC_FL_PUNCH_HOLE)
4009 return smb3_punch_hole(file, tcon, off, len);
4010 else if (mode & FALLOC_FL_ZERO_RANGE) {
4011 if (mode & FALLOC_FL_KEEP_SIZE)
4012 return smb3_zero_range(file, tcon, off, len, true);
4013 return smb3_zero_range(file, tcon, off, len, false);
4014 } else if (mode == FALLOC_FL_KEEP_SIZE)
4015 return smb3_simple_falloc(file, tcon, off, len, true);
4016 else if (mode == FALLOC_FL_COLLAPSE_RANGE)
4017 return smb3_collapse_range(file, tcon, off, len);
4018 else if (mode == FALLOC_FL_INSERT_RANGE)
4019 return smb3_insert_range(file, tcon, off, len);
4020 else if (mode == 0)
4021 return smb3_simple_falloc(file, tcon, off, len, false);
4022
4023 return -EOPNOTSUPP;
4024 }
4025
4026 static void
4027 smb2_downgrade_oplock(struct TCP_Server_Info *server,
4028 struct cifsInodeInfo *cinode, __u32 oplock,
4029 __u16 epoch, bool *purge_cache)
4030 {
4031 server->ops->set_oplock_level(cinode, oplock, 0, NULL);
4032 }
4033
4034 static void
4035 smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4036 __u16 epoch, bool *purge_cache);
4037
4038 static void
4039 smb3_downgrade_oplock(struct TCP_Server_Info *server,
4040 struct cifsInodeInfo *cinode, __u32 oplock,
4041 __u16 epoch, bool *purge_cache)
4042 {
4043 unsigned int old_state = cinode->oplock;
4044 __u16 old_epoch = cinode->epoch;
4045 unsigned int new_state;
4046
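/* Only apply the new lease state if the epoch has moved forward. */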
4047 if (epoch > old_epoch) {
4048 smb21_set_oplock_level(cinode, oplock, 0, NULL);
4049 cinode->epoch = epoch;
4050 }
4051
4052 new_state = cinode->oplock;
4053 *purge_cache = false;
4054
4055 if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
4056 (new_state & CIFS_CACHE_READ_FLG) == 0)
4057 *purge_cache = true;
4058 else if (old_state == new_state && (epoch - old_epoch > 1))
4059 *purge_cache = true;
4060 }
4061
4062 static void
4063 smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4064 __u16 epoch, bool *purge_cache)
4065 {
4066 oplock &= 0xFF;
4067 cinode->lease_granted = false;
4068 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
4069 return;
4070 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
4071 cinode->oplock = CIFS_CACHE_RHW_FLG;
4072 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
4073 &cinode->netfs.inode);
4074 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
4075 cinode->oplock = CIFS_CACHE_RW_FLG;
4076 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
4077 &cinode->netfs.inode);
4078 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
4079 cinode->oplock = CIFS_CACHE_READ_FLG;
4080 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
4081 &cinode->netfs.inode);
4082 } else
4083 cinode->oplock = 0;
4084 }
4085
4086 static void
4087 smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4088 __u16 epoch, bool *purge_cache)
4089 {
4090 char message[5] = {0};
4091 unsigned int new_oplock = 0;
4092
4093 oplock &= 0xFF;
4094 cinode->lease_granted = true;
4095 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
4096 return;
4097
4098 /* Check if the server granted an oplock rather than a lease */
4099 if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
4100 return smb2_set_oplock_level(cinode, oplock, epoch,
4101 purge_cache);
4102
4103 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
4104 new_oplock |= CIFS_CACHE_READ_FLG;
4105 strcat(message, "R");
4106 }
4107 if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
4108 new_oplock |= CIFS_CACHE_HANDLE_FLG;
4109 strcat(message, "H");
4110 }
4111 if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
4112 new_oplock |= CIFS_CACHE_WRITE_FLG;
4113 strcat(message, "W");
4114 }
4115 if (!new_oplock)
4116 strscpy(message, "None");
4117
4118 cinode->oplock = new_oplock;
4119 cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
4120 &cinode->netfs.inode);
4121 }
4122
4123 static void
4124 smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4125 __u16 epoch, bool *purge_cache)
4126 {
4127 unsigned int old_oplock = cinode->oplock;
4128
4129 smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
4130
4131 if (purge_cache) {
4132 *purge_cache = false;
4133 if (old_oplock == CIFS_CACHE_READ_FLG) {
4134 if (cinode->oplock == CIFS_CACHE_READ_FLG &&
4135 (epoch - cinode->epoch > 0))
4136 *purge_cache = true;
4137 else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
4138 (epoch - cinode->epoch > 1))
4139 *purge_cache = true;
4140 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
4141 (epoch - cinode->epoch > 1))
4142 *purge_cache = true;
4143 else if (cinode->oplock == 0 &&
4144 (epoch - cinode->epoch > 0))
4145 *purge_cache = true;
4146 } else if (old_oplock == CIFS_CACHE_RH_FLG) {
4147 if (cinode->oplock == CIFS_CACHE_RH_FLG &&
4148 (epoch - cinode->epoch > 0))
4149 *purge_cache = true;
4150 else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
4151 (epoch - cinode->epoch > 1))
4152 *purge_cache = true;
4153 }
4154 cinode->epoch = epoch;
4155 }
4156 }
4157
4158 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
4159 static bool
4160 smb2_is_read_op(__u32 oplock)
4161 {
4162 return oplock == SMB2_OPLOCK_LEVEL_II;
4163 }
4164 #endif /* CIFS_ALLOW_INSECURE_LEGACY */
4165
4166 static bool
4167 smb21_is_read_op(__u32 oplock)
4168 {
4169 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
4170 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
4171 }
4172
4173 static __le32
4174 map_oplock_to_lease(u8 oplock)
4175 {
4176 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
4177 return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE;
4178 else if (oplock == SMB2_OPLOCK_LEVEL_II)
4179 return SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE;
4180 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
4181 return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE |
4182 SMB2_LEASE_WRITE_CACHING_LE;
4183 return 0;
4184 }
4185
4186 static char *
4187 smb2_create_lease_buf(u8 *lease_key, u8 oplock, u8 *parent_lease_key, __le32 flags)
4188 {
4189 struct create_lease *buf;
4190
4191 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
4192 if (!buf)
4193 return NULL;
4194
4195 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
4196 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
4197
4198 buf->ccontext.DataOffset = cpu_to_le16(offsetof
4199 (struct create_lease, lcontext));
4200 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
4201 buf->ccontext.NameOffset = cpu_to_le16(offsetof
4202 (struct create_lease, Name));
4203 buf->ccontext.NameLength = cpu_to_le16(4);
4204 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
4205 buf->Name[0] = 'R';
4206 buf->Name[1] = 'q';
4207 buf->Name[2] = 'L';
4208 buf->Name[3] = 's';
4209 return (char *)buf;
4210 }
4211
4212 static char *
4213 smb3_create_lease_buf(u8 *lease_key, u8 oplock, u8 *parent_lease_key, __le32 flags)
4214 {
4215 struct create_lease_v2 *buf;
4216
4217 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
4218 if (!buf)
4219 return NULL;
4220
4221 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
4222 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
4223 buf->lcontext.LeaseFlags = flags;
4224 if (flags & SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE)
4225 memcpy(&buf->lcontext.ParentLeaseKey, parent_lease_key, SMB2_LEASE_KEY_SIZE);
4226
4227 buf->ccontext.DataOffset = cpu_to_le16(offsetof
4228 (struct create_lease_v2, lcontext));
4229 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
4230 buf->ccontext.NameOffset = cpu_to_le16(offsetof
4231 (struct create_lease_v2, Name));
4232 buf->ccontext.NameLength = cpu_to_le16(4);
4233 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
4234 buf->Name[0] = 'R';
4235 buf->Name[1] = 'q';
4236 buf->Name[2] = 'L';
4237 buf->Name[3] = 's';
4238 return (char *)buf;
4239 }
4240
4241 static __u8
4242 smb2_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key)
4243 {
4244 struct create_lease *lc = (struct create_lease *)buf;
4245
4246 *epoch = 0; /* not used */
4247 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
4248 return SMB2_OPLOCK_LEVEL_NOCHANGE;
4249 return le32_to_cpu(lc->lcontext.LeaseState);
4250 }
4251
4252 static __u8
4253 smb3_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key)
4254 {
4255 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
4256
4257 *epoch = le16_to_cpu(lc->lcontext.Epoch);
4258 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
4259 return SMB2_OPLOCK_LEVEL_NOCHANGE;
4260 if (lease_key)
4261 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
4262 return le32_to_cpu(lc->lcontext.LeaseState);
4263 }
4264
4265 static unsigned int
4266 smb2_wp_retry_size(struct inode *inode)
4267 {
4268 return min_t(unsigned int, CIFS_SB(inode->i_sb)->ctx->wsize,
4269 SMB2_MAX_BUFFER_SIZE);
4270 }
4271
4272 static bool
4273 smb2_dir_needs_close(struct cifsFileInfo *cfile)
4274 {
4275 return !cfile->invalidHandle;
4276 }
4277
4278 static void
4279 fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
4280 struct smb_rqst *old_rq, __le16 cipher_type)
4281 {
4282 struct smb2_hdr *shdr =
4283 (struct smb2_hdr *)old_rq->rq_iov[0].iov_base;
4284
4285 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
4286 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
4287 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
4288 tr_hdr->Flags = cpu_to_le16(0x01);
4289 if ((cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
4290 (cipher_type == SMB2_ENCRYPTION_AES256_GCM))
4291 get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
4292 else
4293 get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
4294 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
4295 }
4296
4297 static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
4298 int num_rqst, const u8 *sig, u8 **iv,
4299 struct aead_request **req, struct sg_table *sgt,
4300 unsigned int *num_sgs)
4301 {
4302 unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
4303 unsigned int iv_size = crypto_aead_ivsize(tfm);
4304 unsigned int len;
4305 u8 *p;
4306
4307 *num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig);
4308 if (IS_ERR_VALUE((long)(int)*num_sgs))
4309 return ERR_PTR(*num_sgs);
4310
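/*
 * Lay out the IV, the aead_request and the scatterlist array in a single
 * allocation, each aligned to its own requirement.
 */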
4311 len = iv_size;
4312 len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
4313 len = ALIGN(len, crypto_tfm_ctx_alignment());
4314 len += req_size;
4315 len = ALIGN(len, __alignof__(struct scatterlist));
4316 len += array_size(*num_sgs, sizeof(struct scatterlist));
4317
4318 p = kzalloc(len, GFP_NOFS);
4319 if (!p)
4320 return ERR_PTR(-ENOMEM);
4321
4322 *iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1);
4323 *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
4324 crypto_tfm_ctx_alignment());
4325 sgt->sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
4326 __alignof__(struct scatterlist));
4327 return p;
4328 }
4329
4330 static void *smb2_get_aead_req(struct crypto_aead *tfm, struct smb_rqst *rqst,
4331 int num_rqst, const u8 *sig, u8 **iv,
4332 struct aead_request **req, struct scatterlist **sgl)
4333 {
4334 struct sg_table sgtable = {};
4335 unsigned int skip, num_sgs, i, j;
4336 ssize_t rc;
4337 void *p;
4338
4339 p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable, &num_sgs);
4340 if (IS_ERR(p))
4341 return ERR_CAST(p);
4342
4343 sg_init_marker(sgtable.sgl, num_sgs);
4344
4345 /*
4346 * The first rqst has a transform header where the
4347 * first 20 bytes are not part of the encrypted blob.
4348 */
4349 skip = 20;
4350
4351 for (i = 0; i < num_rqst; i++) {
4352 struct iov_iter *iter = &rqst[i].rq_iter;
4353 size_t count = iov_iter_count(iter);
4354
4355 for (j = 0; j < rqst[i].rq_nvec; j++) {
4356 cifs_sg_set_buf(&sgtable,
4357 rqst[i].rq_iov[j].iov_base + skip,
4358 rqst[i].rq_iov[j].iov_len - skip);
4359
4360 /* See the above comment on the 'skip' assignment */
4361 skip = 0;
4362 }
4363 sgtable.orig_nents = sgtable.nents;
4364
4365 rc = extract_iter_to_sg(iter, count, &sgtable,
4366 num_sgs - sgtable.nents, 0);
4367 iov_iter_revert(iter, rc);
4368 sgtable.orig_nents = sgtable.nents;
4369 }
4370
4371 cifs_sg_set_buf(&sgtable, sig, SMB2_SIGNATURE_SIZE);
4372 sg_mark_end(&sgtable.sgl[sgtable.nents - 1]);
4373 *sgl = sgtable.sgl;
4374 return p;
4375 }
4376
4377 static int
4378 smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
4379 {
4380 struct TCP_Server_Info *pserver;
4381 struct cifs_ses *ses;
4382 u8 *ses_enc_key;
4383
4384 /* If server is a channel, select the primary channel */
4385 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
4386
4387 spin_lock(&cifs_tcp_ses_lock);
4388 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
4389 if (ses->Suid == ses_id) {
4390 spin_lock(&ses->ses_lock);
4391 ses_enc_key = enc ? ses->smb3encryptionkey :
4392 ses->smb3decryptionkey;
4393 memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
4394 spin_unlock(&ses->ses_lock);
4395 spin_unlock(&cifs_tcp_ses_lock);
4396 return 0;
4397 }
4398 }
4399 spin_unlock(&cifs_tcp_ses_lock);
4400
4401 trace_smb3_ses_not_found(ses_id);
4402
4403 return -EAGAIN;
4404 }
4405 /*
4406 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
4407 * iov[0] - transform header (associate data),
4408 * iov[1-N] - SMB2 header and pages - data to encrypt.
4409 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
4410 * untouched.
4411 */
4412 static int
4413 crypt_message(struct TCP_Server_Info *server, int num_rqst,
4414 struct smb_rqst *rqst, int enc, struct crypto_aead *tfm)
4415 {
4416 struct smb2_transform_hdr *tr_hdr =
4417 (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
4418 unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
4419 int rc = 0;
4420 struct scatterlist *sg;
4421 u8 sign[SMB2_SIGNATURE_SIZE] = {};
4422 u8 key[SMB3_ENC_DEC_KEY_SIZE];
4423 struct aead_request *req;
4424 u8 *iv;
4425 DECLARE_CRYPTO_WAIT(wait);
4426 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4427 void *creq;
4428
4429 rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
4430 if (rc) {
4431 cifs_server_dbg(FYI, "%s: Could not get %scryption key. sid: 0x%llx\n", __func__,
4432 enc ? "en" : "de", le64_to_cpu(tr_hdr->SessionId));
4433 return rc;
4434 }
4435
4436 if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
4437 (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
4438 rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
4439 else
4440 rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
4441
4442 if (rc) {
4443 cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
4444 return rc;
4445 }
4446
4447 rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
4448 if (rc) {
4449 cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
4450 return rc;
4451 }
4452
4453 creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg);
4454 if (IS_ERR(creq))
4455 return PTR_ERR(creq);
4456
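/*
 * When decrypting, the signature from the transform header is the AEAD
 * tag and is counted as part of the data to be processed.
 */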
4457 if (!enc) {
4458 memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
4459 crypt_len += SMB2_SIGNATURE_SIZE;
4460 }
4461
4462 if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
4463 (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
4464 memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
4465 else {
4466 iv[0] = 3;
4467 memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
4468 }
4469
4470 aead_request_set_tfm(req, tfm);
4471 aead_request_set_crypt(req, sg, sg, crypt_len, iv);
4472 aead_request_set_ad(req, assoc_data_len);
4473
4474 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
4475 crypto_req_done, &wait);
4476
4477 rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
4478 : crypto_aead_decrypt(req), &wait);
4479
4480 if (!rc && enc)
4481 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
4482
4483 kfree_sensitive(creq);
4484 return rc;
4485 }
4486
4487 /*
4488 * Clear a read buffer, discarding the folios which have the 1st mark set.
4489 */
4490 static void cifs_clear_folioq_buffer(struct folio_queue *buffer)
4491 {
4492 struct folio_queue *folioq;
4493
4494 while ((folioq = buffer)) {
4495 for (int s = 0; s < folioq_count(folioq); s++)
4496 if (folioq_is_marked(folioq, s))
4497 folio_put(folioq_folio(folioq, s));
4498 buffer = folioq->next;
4499 kfree(folioq);
4500 }
4501 }
4502
4503 /*
4504 * Allocate buffer space into a folio queue.
4505 */
4506 static struct folio_queue *cifs_alloc_folioq_buffer(ssize_t size)
4507 {
4508 struct folio_queue *buffer = NULL, *tail = NULL, *p;
4509 struct folio *folio;
4510 unsigned int slot;
4511
4512 do {
4513 if (!tail || folioq_full(tail)) {
4514 p = kmalloc(sizeof(*p), GFP_NOFS);
4515 if (!p)
4516 goto nomem;
4517 folioq_init(p, 0);
4518 if (tail) {
4519 tail->next = p;
4520 p->prev = tail;
4521 } else {
4522 buffer = p;
4523 }
4524 tail = p;
4525 }
4526
4527 folio = folio_alloc(GFP_KERNEL|__GFP_HIGHMEM, 0);
4528 if (!folio)
4529 goto nomem;
4530
4531 slot = folioq_append_mark(tail, folio);
4532 size -= folioq_folio_size(tail, slot);
4533 } while (size > 0);
4534
4535 return buffer;
4536
4537 nomem:
4538 cifs_clear_folioq_buffer(buffer);
4539 return NULL;
4540 }
4541
4542 /*
4543 * Copy data from an iterator to the folios in a folio queue buffer.
4544 */
4545 static bool cifs_copy_iter_to_folioq(struct iov_iter *iter, size_t size,
4546 struct folio_queue *buffer)
4547 {
4548 for (; buffer; buffer = buffer->next) {
4549 for (int s = 0; s < folioq_count(buffer); s++) {
4550 struct folio *folio = folioq_folio(buffer, s);
4551 size_t part = folioq_folio_size(buffer, s);
4552
4553 part = umin(part, size);
4554
4555 if (copy_folio_from_iter(folio, 0, part, iter) != part)
4556 return false;
4557 size -= part;
4558 }
4559 }
4560 return true;
4561 }
4562
4563 void
4564 smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
4565 {
4566 for (int i = 0; i < num_rqst; i++)
4567 cifs_clear_folioq_buffer(rqst[i].rq_buffer);
4568 }
4569
4570 /*
4571 * This function will initialize new_rq and encrypt the content.
4572 * The first entry, new_rq[0], only contains a single iov which contains
4573 * a smb2_transform_hdr and is pre-allocated by the caller.
4574 * This function then populates new_rq[1+] with the content from old_rq[0+].
4575 *
4576 * The end result is an array of smb_rqst structures where the first structure
4577 * only contains a single iov for the transform header which we then can pass
4578 * to crypt_message().
4579 *
4580 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
4581 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
4582 */
4583 static int
4584 smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
4585 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
4586 {
4587 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
4588 unsigned int orig_len = 0;
4589 int rc = -ENOMEM;
4590
4591 for (int i = 1; i < num_rqst; i++) {
4592 struct smb_rqst *old = &old_rq[i - 1];
4593 struct smb_rqst *new = &new_rq[i];
4594 struct folio_queue *buffer = NULL;
4595 size_t size = iov_iter_count(&old->rq_iter);
4596
4597 orig_len += smb_rqst_len(server, old);
4598 new->rq_iov = old->rq_iov;
4599 new->rq_nvec = old->rq_nvec;
4600
4601 if (size > 0) {
4602 buffer = cifs_alloc_folioq_buffer(size);
4603 if (!buffer)
4604 goto err_free;
4605
4606 new->rq_buffer = buffer;
4607 iov_iter_folio_queue(&new->rq_iter, ITER_SOURCE,
4608 buffer, 0, 0, size);
4609
4610 if (!cifs_copy_iter_to_folioq(&old->rq_iter, size, buffer)) {
4611 rc = -EIO;
4612 goto err_free;
4613 }
4614 }
4615 }
4616
4617 /* fill the 1st iov with a transform header */
4618 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
4619
4620 rc = crypt_message(server, num_rqst, new_rq, 1, server->secmech.enc);
4621 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
4622 if (rc)
4623 goto err_free;
4624
4625 return rc;
4626
4627 err_free:
4628 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
4629 return rc;
4630 }
4631
4632 static int
4633 smb3_is_transform_hdr(void *buf)
4634 {
4635 struct smb2_transform_hdr *trhdr = buf;
4636
4637 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
4638 }
4639
4640 static int
4641 decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
4642 unsigned int buf_data_size, struct iov_iter *iter,
4643 bool is_offloaded)
4644 {
4645 struct crypto_aead *tfm;
4646 struct smb_rqst rqst = {NULL};
4647 struct kvec iov[2];
4648 size_t iter_size = 0;
4649 int rc;
4650
4651 iov[0].iov_base = buf;
4652 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
4653 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
4654 iov[1].iov_len = buf_data_size;
4655
4656 rqst.rq_iov = iov;
4657 rqst.rq_nvec = 2;
4658 if (iter) {
4659 rqst.rq_iter = *iter;
4660 iter_size = iov_iter_count(iter);
4661 }
4662
4663 if (is_offloaded) {
4664 if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
4665 (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
4666 tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
4667 else
4668 tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
4669 if (IS_ERR(tfm)) {
4670 rc = PTR_ERR(tfm);
4671 cifs_server_dbg(VFS, "%s: Failed alloc decrypt TFM, rc=%d\n", __func__, rc);
4672
4673 return rc;
4674 }
4675 } else {
4676 rc = smb3_crypto_aead_allocate(server);
4677 if (unlikely(rc))
4678 return rc;
4679 tfm = server->secmech.dec;
4680 }
4681
4682 rc = crypt_message(server, 1, &rqst, 0, tfm);
4683 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
4684
4685 if (is_offloaded)
4686 crypto_free_aead(tfm);
4687
4688 if (rc)
4689 return rc;
4690
4691 memmove(buf, iov[1].iov_base, buf_data_size);
4692
4693 if (!is_offloaded)
4694 server->total_read = buf_data_size + iter_size;
4695
4696 return rc;
4697 }
4698
4699 static int
4700 cifs_copy_folioq_to_iter(struct folio_queue *folioq, size_t data_size,
4701 size_t skip, struct iov_iter *iter)
4702 {
4703 for (; folioq; folioq = folioq->next) {
4704 for (int s = 0; s < folioq_count(folioq); s++) {
4705 struct folio *folio = folioq_folio(folioq, s);
4706 size_t fsize = folio_size(folio);
4707 size_t n, len = umin(fsize - skip, data_size);
4708
4709 n = copy_folio_to_iter(folio, skip, len, iter);
4710 if (n != len) {
4711 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
4712 return -EIO;
4713 }
4714 data_size -= n;
4715 skip = 0;
4716 }
4717 }
4718
4719 return 0;
4720 }
4721
4722 static int
4723 handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
4724 char *buf, unsigned int buf_len, struct folio_queue *buffer,
4725 unsigned int buffer_len, bool is_offloaded)
4726 {
4727 unsigned int data_offset;
4728 unsigned int data_len;
4729 unsigned int cur_off;
4730 unsigned int cur_page_idx;
4731 unsigned int pad_len;
4732 struct cifs_io_subrequest *rdata = mid->callback_data;
4733 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
4734 size_t copied;
4735 bool use_rdma_mr = false;
4736
4737 if (shdr->Command != SMB2_READ) {
4738 cifs_server_dbg(VFS, "only big read responses are supported\n");
4739 return -EOPNOTSUPP;
4740 }
4741
4742 if (server->ops->is_session_expired &&
4743 server->ops->is_session_expired(buf)) {
4744 if (!is_offloaded)
4745 cifs_reconnect(server, true);
4746 return -1;
4747 }
4748
4749 if (server->ops->is_status_pending &&
4750 server->ops->is_status_pending(buf, server))
4751 return -1;
4752
4753 /* set up first two iov to get credits */
4754 rdata->iov[0].iov_base = buf;
4755 rdata->iov[0].iov_len = 0;
4756 rdata->iov[1].iov_base = buf;
4757 rdata->iov[1].iov_len =
4758 min_t(unsigned int, buf_len, server->vals->read_rsp_size);
4759 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
4760 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
4761 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
4762 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
4763
4764 rdata->result = server->ops->map_error(buf, true);
4765 if (rdata->result != 0) {
4766 cifs_dbg(FYI, "%s: server returned error %d\n",
4767 __func__, rdata->result);
4768 /* normal error on read response */
4769 if (is_offloaded)
4770 mid->mid_state = MID_RESPONSE_RECEIVED;
4771 else
4772 dequeue_mid(mid, false);
4773 return 0;
4774 }
4775
4776 data_offset = server->ops->read_data_offset(buf);
4777 #ifdef CONFIG_CIFS_SMB_DIRECT
4778 use_rdma_mr = rdata->mr;
4779 #endif
4780 data_len = server->ops->read_data_length(buf, use_rdma_mr);
4781
4782 if (data_offset < server->vals->read_rsp_size) {
4783 /*
4784 * win2k8 sometimes sends an offset of 0 when the read
4785 * is beyond the EOF. Treat it as if the data starts just after
4786 * the header.
4787 */
4788 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
4789 __func__, data_offset);
4790 data_offset = server->vals->read_rsp_size;
4791 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
4792 /* data_offset is beyond the end of smallbuf */
4793 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
4794 __func__, data_offset);
4795 rdata->result = -EIO;
4796 if (is_offloaded)
4797 mid->mid_state = MID_RESPONSE_MALFORMED;
4798 else
4799 dequeue_mid(mid, rdata->result);
4800 return 0;
4801 }
4802
4803 pad_len = data_offset - server->vals->read_rsp_size;
4804
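/*
 * The payload lives either entirely in the folio buffer (large reads) or
 * entirely within buf (small reads); anything split between the two is
 * treated as a malformed response.
 */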
4805 if (buf_len <= data_offset) {
4806 /* read response payload is in pages */
4807 cur_page_idx = pad_len / PAGE_SIZE;
4808 cur_off = pad_len % PAGE_SIZE;
4809
4810 if (cur_page_idx != 0) {
4811 /* data offset is beyond the 1st page of response */
4812 cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
4813 __func__, data_offset);
4814 rdata->result = -EIO;
4815 if (is_offloaded)
4816 mid->mid_state = MID_RESPONSE_MALFORMED;
4817 else
4818 dequeue_mid(mid, rdata->result);
4819 return 0;
4820 }
4821
4822 if (data_len > buffer_len - pad_len) {
4823 /* data_len is corrupt -- discard frame */
4824 rdata->result = -EIO;
4825 if (is_offloaded)
4826 mid->mid_state = MID_RESPONSE_MALFORMED;
4827 else
4828 dequeue_mid(mid, rdata->result);
4829 return 0;
4830 }
4831
4832 /* Copy the data to the output I/O iterator. */
4833 rdata->result = cifs_copy_folioq_to_iter(buffer, buffer_len,
4834 cur_off, &rdata->subreq.io_iter);
4835 if (rdata->result != 0) {
4836 if (is_offloaded)
4837 mid->mid_state = MID_RESPONSE_MALFORMED;
4838 else
4839 dequeue_mid(mid, rdata->result);
4840 return 0;
4841 }
4842 rdata->got_bytes = buffer_len;
4843
4844 } else if (buf_len >= data_offset + data_len) {
4845 /* read response payload is in buf */
4846 WARN_ONCE(buffer, "read data can be either in buf or in buffer");
4847 copied = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter);
4848 if (copied == 0)
4849 return -EIO;
4850 rdata->got_bytes = copied;
4851 } else {
4852 /* read response payload cannot be in both buf and pages */
4853 WARN_ONCE(1, "buf can not contain only a part of read data");
4854 rdata->result = -EIO;
4855 if (is_offloaded)
4856 mid->mid_state = MID_RESPONSE_MALFORMED;
4857 else
4858 dequeue_mid(mid, rdata->result);
4859 return 0;
4860 }
4861
4862 if (is_offloaded)
4863 mid->mid_state = MID_RESPONSE_RECEIVED;
4864 else
4865 dequeue_mid(mid, false);
4866 return 0;
4867 }
4868
4869 struct smb2_decrypt_work {
4870 struct work_struct decrypt;
4871 struct TCP_Server_Info *server;
4872 struct folio_queue *buffer;
4873 char *buf;
4874 unsigned int len;
4875 };
4876
4877
4878 static void smb2_decrypt_offload(struct work_struct *work)
4879 {
4880 struct smb2_decrypt_work *dw = container_of(work,
4881 struct smb2_decrypt_work, decrypt);
4882 int rc;
4883 struct mid_q_entry *mid;
4884 struct iov_iter iter;
4885
4886 iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, dw->len);
4887 rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
4888 &iter, true);
4889 if (rc) {
4890 cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
4891 goto free_pages;
4892 }
4893
4894 dw->server->lstrp = jiffies;
4895 mid = smb2_find_dequeue_mid(dw->server, dw->buf);
4896 if (mid == NULL)
4897 cifs_dbg(FYI, "mid not found\n");
4898 else {
4899 mid->decrypted = true;
4900 rc = handle_read_data(dw->server, mid, dw->buf,
4901 dw->server->vals->read_rsp_size,
4902 dw->buffer, dw->len,
4903 true);
4904 if (rc >= 0) {
4905 #ifdef CONFIG_CIFS_STATS2
4906 mid->when_received = jiffies;
4907 #endif
4908 if (dw->server->ops->is_network_name_deleted)
4909 dw->server->ops->is_network_name_deleted(dw->buf,
4910 dw->server);
4911
4912 mid_execute_callback(mid);
4913 } else {
4914 spin_lock(&dw->server->srv_lock);
4915 if (dw->server->tcpStatus == CifsNeedReconnect) {
4916 spin_lock(&dw->server->mid_queue_lock);
4917 mid->mid_state = MID_RETRY_NEEDED;
4918 spin_unlock(&dw->server->mid_queue_lock);
4919 spin_unlock(&dw->server->srv_lock);
4920 mid_execute_callback(mid);
4921 } else {
4922 spin_lock(&dw->server->mid_queue_lock);
4923 mid->mid_state = MID_REQUEST_SUBMITTED;
4924 mid->deleted_from_q = false;
4925 list_add_tail(&mid->qhead,
4926 &dw->server->pending_mid_q);
4927 spin_unlock(&dw->server->mid_queue_lock);
4928 spin_unlock(&dw->server->srv_lock);
4929 }
4930 }
4931 release_mid(mid);
4932 }
4933
4934 free_pages:
4935 cifs_clear_folioq_buffer(dw->buffer);
4936 cifs_small_buf_release(dw->buf);
4937 kfree(dw);
4938 }
4939
4940
4941 static int
4942 receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
4943 int *num_mids)
4944 {
4945 char *buf = server->smallbuf;
4946 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4947 struct iov_iter iter;
4948 unsigned int len;
4949 unsigned int buflen = server->pdu_size;
4950 int rc;
4951 struct smb2_decrypt_work *dw;
4952
4953 dw = kzalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
4954 if (!dw)
4955 return -ENOMEM;
4956 INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
4957 dw->server = server;
4958
4959 *num_mids = 1;
4960 len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
4961 sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
4962
4963 rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
4964 if (rc < 0)
4965 goto free_dw;
4966 server->total_read += rc;
4967
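/*
 * OriginalMessageSize covers the decrypted SMB2 read response plus its
 * payload; everything past the read response header goes into folios.
 */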
4968 len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
4969 server->vals->read_rsp_size;
4970 dw->len = len;
4971 len = round_up(dw->len, PAGE_SIZE);
4972
4973 rc = -ENOMEM;
4974 dw->buffer = cifs_alloc_folioq_buffer(len);
4975 if (!dw->buffer)
4976 goto discard_data;
4977
4978 iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, len);
4979
4980 /* Read the data into the buffer and clear excess bufferage. */
4981 rc = cifs_read_iter_from_socket(server, &iter, dw->len);
4982 if (rc < 0)
4983 goto discard_data;
4984
4985 server->total_read += rc;
4986 if (rc < len) {
4987 struct iov_iter tmp = iter;
4988
4989 iov_iter_advance(&tmp, rc);
4990 iov_iter_zero(len - rc, &tmp);
4991 }
4992 iov_iter_truncate(&iter, dw->len);
4993
4994 rc = cifs_discard_remaining_data(server);
4995 if (rc)
4996 goto free_pages;
4997
4998 /*
4999 * For large reads, offload decryption to a different thread for better
5000 * performance, since decryption can be expensive and this lets us use more cores.
5001 */
5002
5003 if ((server->min_offload) && (server->in_flight > 1) &&
5004 (server->pdu_size >= server->min_offload)) {
5005 dw->buf = server->smallbuf;
5006 server->smallbuf = (char *)cifs_small_buf_get();
5007
5008 queue_work(decrypt_wq, &dw->decrypt);
5009 *num_mids = 0; /* worker thread takes care of finding mid */
5010 return -1;
5011 }
5012
5013 rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
5014 &iter, false);
5015 if (rc)
5016 goto free_pages;
5017
5018 *mid = smb2_find_mid(server, buf);
5019 if (*mid == NULL) {
5020 cifs_dbg(FYI, "mid not found\n");
5021 } else {
5022 cifs_dbg(FYI, "mid found\n");
5023 (*mid)->decrypted = true;
5024 rc = handle_read_data(server, *mid, buf,
5025 server->vals->read_rsp_size,
5026 dw->buffer, dw->len, false);
5027 if (rc >= 0) {
5028 if (server->ops->is_network_name_deleted) {
5029 server->ops->is_network_name_deleted(buf,
5030 server);
5031 }
5032 }
5033 }
5034
5035 free_pages:
5036 cifs_clear_folioq_buffer(dw->buffer);
5037 free_dw:
5038 kfree(dw);
5039 return rc;
5040 discard_data:
5041 cifs_discard_remaining_data(server);
5042 goto free_pages;
5043 }
5044
5045 static int
5046 receive_encrypted_standard(struct TCP_Server_Info *server,
5047 struct mid_q_entry **mids, char **bufs,
5048 int *num_mids)
5049 {
5050 int ret, length;
5051 char *buf = server->smallbuf;
5052 struct smb2_hdr *shdr;
5053 unsigned int pdu_length = server->pdu_size;
5054 unsigned int buf_size;
5055 unsigned int next_cmd;
5056 struct mid_q_entry *mid_entry;
5057 int next_is_large;
5058 char *next_buffer = NULL;
5059
5060 *num_mids = 0;
5061
5062 /* switch to large buffer if too big for a small one */
5063 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
5064 server->large_buf = true;
5065 memcpy(server->bigbuf, buf, server->total_read);
5066 buf = server->bigbuf;
5067 }
5068
5069 /* now read the rest */
5070 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
5071 pdu_length - HEADER_SIZE(server) + 1);
5072 if (length < 0)
5073 return length;
5074 server->total_read += length;
5075
5076 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
5077 length = decrypt_raw_data(server, buf, buf_size, NULL, false);
5078 if (length)
5079 return length;
5080
5081 next_is_large = server->large_buf;
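/* Walk each PDU in the (possibly compounded) decrypted buffer. */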
5082 one_more:
5083 shdr = (struct smb2_hdr *)buf;
5084 next_cmd = le32_to_cpu(shdr->NextCommand);
5085 if (next_cmd) {
5086 if (WARN_ON_ONCE(next_cmd > pdu_length))
5087 return -1;
5088 if (next_is_large)
5089 next_buffer = (char *)cifs_buf_get();
5090 else
5091 next_buffer = (char *)cifs_small_buf_get();
5092 if (!next_buffer) {
5093 cifs_server_dbg(VFS, "No memory for (large) SMB response\n");
5094 return -1;
5095 }
5096 memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd);
5097 }
5098
5099 mid_entry = smb2_find_mid(server, buf);
5100 if (mid_entry == NULL)
5101 cifs_dbg(FYI, "mid not found\n");
5102 else {
5103 cifs_dbg(FYI, "mid found\n");
5104 mid_entry->decrypted = true;
5105 mid_entry->resp_buf_size = server->pdu_size;
5106 }
5107
5108 if (*num_mids >= MAX_COMPOUND) {
5109 cifs_server_dbg(VFS, "too many PDUs in compound\n");
5110 return -1;
5111 }
5112 bufs[*num_mids] = buf;
5113 mids[(*num_mids)++] = mid_entry;
5114
5115 if (mid_entry && mid_entry->handle)
5116 ret = mid_entry->handle(server, mid_entry);
5117 else
5118 ret = cifs_handle_standard(server, mid_entry);
5119
5120 if (ret == 0 && next_cmd) {
5121 pdu_length -= next_cmd;
5122 server->large_buf = next_is_large;
5123 if (next_is_large)
5124 server->bigbuf = buf = next_buffer;
5125 else
5126 server->smallbuf = buf = next_buffer;
5127 goto one_more;
5128 } else if (ret != 0) {
5129 /*
5130 * ret != 0 here means that we didn't get to handle_mid() thus
5131 * server->smallbuf and server->bigbuf are still valid. We need
5132 * to free next_buffer because it is not going to be used
5133 * anywhere.
5134 */
5135 if (next_is_large)
5136 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
5137 else
5138 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
5139 }
5140
5141 return ret;
5142 }
5143
5144 static int
5145 smb3_receive_transform(struct TCP_Server_Info *server,
5146 struct mid_q_entry **mids, char **bufs, int *num_mids)
5147 {
5148 char *buf = server->smallbuf;
5149 unsigned int pdu_length = server->pdu_size;
5150 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
5151 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
5152
5153 if (pdu_length < sizeof(struct smb2_transform_hdr) +
5154 sizeof(struct smb2_hdr)) {
5155 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
5156 pdu_length);
5157 cifs_reconnect(server, true);
5158 return -ECONNABORTED;
5159 }
5160
5161 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
5162 cifs_server_dbg(VFS, "Transform message is broken\n");
5163 cifs_reconnect(server, true);
5164 return -ECONNABORTED;
5165 }
5166
5167 /* TODO: add support for compounds containing READ. */
5168 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
5169 return receive_encrypted_read(server, &mids[0], num_mids);
5170 }
5171
5172 return receive_encrypted_standard(server, mids, bufs, num_mids);
5173 }
5174
5175 int
5176 smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
5177 {
5178 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
5179
5180 return handle_read_data(server, mid, buf, server->pdu_size,
5181 NULL, 0, false);
5182 }
5183
5184 static int smb2_next_header(struct TCP_Server_Info *server, char *buf,
5185 unsigned int *noff)
5186 {
5187 struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
5188 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
5189
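/*
 * For an encrypted PDU the next message starts after the transform header
 * plus OriginalMessageSize; otherwise use the compound chain's NextCommand.
 */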
5190 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
5191 *noff = le32_to_cpu(t_hdr->OriginalMessageSize);
5192 if (unlikely(check_add_overflow(*noff, sizeof(*t_hdr), noff)))
5193 return -EINVAL;
5194 } else {
5195 *noff = le32_to_cpu(hdr->NextCommand);
5196 }
5197 if (unlikely(*noff && *noff < MID_HEADER_SIZE(server)))
5198 return -EINVAL;
5199 return 0;
5200 }
5201
5202 int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
5203 struct dentry *dentry, struct cifs_tcon *tcon,
5204 const char *full_path, umode_t mode, dev_t dev,
5205 const char *symname)
5206 {
5207 struct TCP_Server_Info *server = tcon->ses->server;
5208 struct cifs_open_parms oparms;
5209 struct cifs_open_info_data idata;
5210 struct cifs_io_parms io_parms = {};
5211 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
5212 struct cifs_fid fid;
5213 unsigned int bytes_written;
5214 u8 type[8];
5215 int type_len = 0;
5216 struct {
5217 __le64 major;
5218 __le64 minor;
5219 } __packed pdev = {};
5220 __le16 *symname_utf16 = NULL;
5221 u8 *data = NULL;
5222 int data_len = 0;
5223 struct kvec iov[3];
5224 __u32 oplock = server->oplocks ? REQ_OPLOCK : 0;
5225 int rc;
5226
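/*
 * SFU-emulated special files are regular files marked ATTR_SYSTEM whose
 * contents begin with an 8-byte type tag ("IntxCHR", "IntxBLK", "IntxLNK",
 * "LnxSOCK" or "LnxFIFO") followed by type-specific data: two little-endian
 * 64-bit values (major, minor) for device nodes, the UTF-16 target path for
 * symlinks, and the zeroed pdev placeholder for sockets and fifos.
 */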
5227 switch (mode & S_IFMT) {
5228 case S_IFCHR:
5229 type_len = 8;
5230 memcpy(type, "IntxCHR\0", type_len);
5231 pdev.major = cpu_to_le64(MAJOR(dev));
5232 pdev.minor = cpu_to_le64(MINOR(dev));
5233 data = (u8 *)&pdev;
5234 data_len = sizeof(pdev);
5235 break;
5236 case S_IFBLK:
5237 type_len = 8;
5238 memcpy(type, "IntxBLK\0", type_len);
5239 pdev.major = cpu_to_le64(MAJOR(dev));
5240 pdev.minor = cpu_to_le64(MINOR(dev));
5241 data = (u8 *)&pdev;
5242 data_len = sizeof(pdev);
5243 break;
5244 case S_IFLNK:
5245 type_len = 8;
5246 memcpy(type, "IntxLNK\1", type_len);
5247 symname_utf16 = cifs_strndup_to_utf16(symname, strlen(symname),
5248 &data_len, cifs_sb->local_nls,
5249 NO_MAP_UNI_RSVD);
5250 if (!symname_utf16) {
5251 rc = -ENOMEM;
5252 goto out;
5253 }
5254 data_len -= 2; /* symlink target is stored without the trailing wide NUL */
5255 data = (u8 *)symname_utf16;
5256 break;
5257 case S_IFSOCK:
5258 type_len = 8;
5259 strscpy(type, "LnxSOCK");
5260 data = (u8 *)&pdev;
5261 data_len = sizeof(pdev);
5262 break;
5263 case S_IFIFO:
5264 type_len = 8;
5265 strscpy(type, "LnxFIFO");
5266 data = (u8 *)&pdev;
5267 data_len = sizeof(pdev);
5268 break;
5269 default:
5270 rc = -EPERM;
5271 goto out;
5272 }
5273
5274 oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, GENERIC_WRITE,
5275 FILE_CREATE, CREATE_NOT_DIR |
5276 CREATE_OPTION_SPECIAL, ACL_NO_MODE);
5277 oparms.fid = &fid;
5278 idata.contains_posix_file_info = false;
5279 rc = server->ops->open(xid, &oparms, &oplock, &idata);
5280 if (rc)
5281 goto out;
5282
5283 /*
5284 * Check if the server honored the ATTR_SYSTEM flag set via the
5285 * CREATE_OPTION_SPECIAL create option. If not, the server does not support
5286 * ATTR_SYSTEM and the newly created file is not SFU compatible, so the call failed.
5287 */
5288 if (!(le32_to_cpu(idata.fi.Attributes) & ATTR_SYSTEM)) {
5289 rc = -EOPNOTSUPP;
5290 goto out_close;
5291 }
5292
5293 if (type_len + data_len > 0) {
5294 io_parms.pid = current->tgid;
5295 io_parms.tcon = tcon;
5296 io_parms.length = type_len + data_len;
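/*
 * iov[0] is reserved for the request header that the write path fills in,
 * so the type/data payload starts at iov[1] and only ARRAY_SIZE(iov) - 1
 * vectors are passed to sync_write() below.
 */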
5297 iov[1].iov_base = type;
5298 iov[1].iov_len = type_len;
5299 iov[2].iov_base = data;
5300 iov[2].iov_len = data_len;
5301
5302 rc = server->ops->sync_write(xid, &fid, &io_parms,
5303 &bytes_written,
5304 iov, ARRAY_SIZE(iov)-1);
5305 }
5306
5307 out_close:
5308 server->ops->close(xid, tcon, &fid);
5309
5310 /*
5311 * If CREATE was successful but either setting ATTR_SYSTEM or writing the
5312 * type/data information failed, remove the intermediate object created by
5313 * CREATE. Otherwise an intermediate empty object would be left behind on
5314 * the server.
5315 */
5316 if (rc)
5317 server->ops->unlink(xid, tcon, full_path, cifs_sb, NULL);
5318
5319 out:
5320 kfree(symname_utf16);
5321 return rc;
5322 }
5323
5324 int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
5325 struct dentry *dentry, struct cifs_tcon *tcon,
5326 const char *full_path, umode_t mode, dev_t dev)
5327 {
5328 struct inode *new = NULL;
5329 int rc;
5330
5331 rc = __cifs_sfu_make_node(xid, inode, dentry, tcon,
5332 full_path, mode, dev, NULL);
5333 if (rc)
5334 return rc;
5335
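/*
 * Re-query the metadata of the just-created node through the appropriate
 * path (SMB3.1.1 POSIX extensions, legacy UNIX extensions, or the generic
 * query) and instantiate the dentry with the resulting inode.
 */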
5336 if (tcon->posix_extensions) {
5337 rc = smb311_posix_get_inode_info(&new, full_path, NULL,
5338 inode->i_sb, xid);
5339 } else if (tcon->unix_ext) {
5340 rc = cifs_get_inode_info_unix(&new, full_path,
5341 inode->i_sb, xid);
5342 } else {
5343 rc = cifs_get_inode_info(&new, full_path, NULL,
5344 inode->i_sb, xid, NULL);
5345 }
5346 if (!rc)
5347 d_instantiate(dentry, new);
5348 return rc;
5349 }
5350
5351 static int smb2_make_node(unsigned int xid, struct inode *inode,
5352 struct dentry *dentry, struct cifs_tcon *tcon,
5353 const char *full_path, umode_t mode, dev_t dev)
5354 {
5355 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
5356 int rc = -EOPNOTSUPP;
5357
5358 /*
5359 * Check if mounted with the 'sfu' mount option.
5360 * SFU emulation should work with all servers, but only
5361 * supports block and char devices, sockets and fifos,
5362 * and was used by default in earlier versions of Windows.
5363 */
5364 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
5365 rc = cifs_sfu_make_node(xid, inode, dentry, tcon,
5366 full_path, mode, dev);
5367 } else if (CIFS_REPARSE_SUPPORT(tcon)) {
5368 rc = mknod_reparse(xid, inode, dentry, tcon,
5369 full_path, mode, dev);
5370 }
5371 return rc;
5372 }
5373
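/*
 * Per-dialect dispatch tables. SMB2.0 uses the generic credit handling
 * (cifs_wait_mtu_credits, no adjust_credits); SMB2.1 switches to the
 * SMB2-specific MTU credit handling and adds snapshot/notify support;
 * SMB3.0 additionally wires up encryption (transform init/receive), SMB3
 * signing key generation, validate_negotiate, duplicate_extents and
 * fallocate; SMB3.1.1 further adds posix_mkdir, the 3.1.1 signing key and
 * smb311_queryfs, and does not use validate_negotiate.
 */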
5374 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
5375 struct smb_version_operations smb20_operations = {
5376 .compare_fids = smb2_compare_fids,
5377 .setup_request = smb2_setup_request,
5378 .setup_async_request = smb2_setup_async_request,
5379 .check_receive = smb2_check_receive,
5380 .add_credits = smb2_add_credits,
5381 .set_credits = smb2_set_credits,
5382 .get_credits_field = smb2_get_credits_field,
5383 .get_credits = smb2_get_credits,
5384 .wait_mtu_credits = cifs_wait_mtu_credits,
5385 .get_next_mid = smb2_get_next_mid,
5386 .revert_current_mid = smb2_revert_current_mid,
5387 .read_data_offset = smb2_read_data_offset,
5388 .read_data_length = smb2_read_data_length,
5389 .map_error = map_smb2_to_linux_error,
5390 .find_mid = smb2_find_mid,
5391 .check_message = smb2_check_message,
5392 .dump_detail = smb2_dump_detail,
5393 .clear_stats = smb2_clear_stats,
5394 .print_stats = smb2_print_stats,
5395 .is_oplock_break = smb2_is_valid_oplock_break,
5396 .handle_cancelled_mid = smb2_handle_cancelled_mid,
5397 .downgrade_oplock = smb2_downgrade_oplock,
5398 .need_neg = smb2_need_neg,
5399 .negotiate = smb2_negotiate,
5400 .negotiate_wsize = smb2_negotiate_wsize,
5401 .negotiate_rsize = smb2_negotiate_rsize,
5402 .sess_setup = SMB2_sess_setup,
5403 .logoff = SMB2_logoff,
5404 .tree_connect = SMB2_tcon,
5405 .tree_disconnect = SMB2_tdis,
5406 .qfs_tcon = smb2_qfs_tcon,
5407 .is_path_accessible = smb2_is_path_accessible,
5408 .can_echo = smb2_can_echo,
5409 .echo = SMB2_echo,
5410 .query_path_info = smb2_query_path_info,
5411 .query_reparse_point = smb2_query_reparse_point,
5412 .get_srv_inum = smb2_get_srv_inum,
5413 .query_file_info = smb2_query_file_info,
5414 .set_path_size = smb2_set_path_size,
5415 .set_file_size = smb2_set_file_size,
5416 .set_file_info = smb2_set_file_info,
5417 .set_compression = smb2_set_compression,
5418 .mkdir = smb2_mkdir,
5419 .mkdir_setinfo = smb2_mkdir_setinfo,
5420 .rmdir = smb2_rmdir,
5421 .unlink = smb2_unlink,
5422 .rename = smb2_rename_path,
5423 .create_hardlink = smb2_create_hardlink,
5424 .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
5425 .query_mf_symlink = smb3_query_mf_symlink,
5426 .create_mf_symlink = smb3_create_mf_symlink,
5427 .create_reparse_inode = smb2_create_reparse_inode,
5428 .open = smb2_open_file,
5429 .set_fid = smb2_set_fid,
5430 .close = smb2_close_file,
5431 .flush = smb2_flush_file,
5432 .async_readv = smb2_async_readv,
5433 .async_writev = smb2_async_writev,
5434 .sync_read = smb2_sync_read,
5435 .sync_write = smb2_sync_write,
5436 .query_dir_first = smb2_query_dir_first,
5437 .query_dir_next = smb2_query_dir_next,
5438 .close_dir = smb2_close_dir,
5439 .calc_smb_size = smb2_calc_size,
5440 .is_status_pending = smb2_is_status_pending,
5441 .is_session_expired = smb2_is_session_expired,
5442 .oplock_response = smb2_oplock_response,
5443 .queryfs = smb2_queryfs,
5444 .mand_lock = smb2_mand_lock,
5445 .mand_unlock_range = smb2_unlock_range,
5446 .push_mand_locks = smb2_push_mandatory_locks,
5447 .get_lease_key = smb2_get_lease_key,
5448 .set_lease_key = smb2_set_lease_key,
5449 .new_lease_key = smb2_new_lease_key,
5450 .is_read_op = smb2_is_read_op,
5451 .set_oplock_level = smb2_set_oplock_level,
5452 .create_lease_buf = smb2_create_lease_buf,
5453 .parse_lease_buf = smb2_parse_lease_buf,
5454 .copychunk_range = smb2_copychunk_range,
5455 .wp_retry_size = smb2_wp_retry_size,
5456 .dir_needs_close = smb2_dir_needs_close,
5457 .get_dfs_refer = smb2_get_dfs_refer,
5458 .select_sectype = smb2_select_sectype,
5459 #ifdef CONFIG_CIFS_XATTR
5460 .query_all_EAs = smb2_query_eas,
5461 .set_EA = smb2_set_ea,
5462 #endif /* CIFS_XATTR */
5463 .get_acl = get_smb2_acl,
5464 .get_acl_by_fid = get_smb2_acl_by_fid,
5465 .set_acl = set_smb2_acl,
5466 .next_header = smb2_next_header,
5467 .ioctl_query_info = smb2_ioctl_query_info,
5468 .make_node = smb2_make_node,
5469 .fiemap = smb3_fiemap,
5470 .llseek = smb3_llseek,
5471 .is_status_io_timeout = smb2_is_status_io_timeout,
5472 .is_network_name_deleted = smb2_is_network_name_deleted,
5473 .rename_pending_delete = smb2_rename_pending_delete,
5474 };
5475 #endif /* CIFS_ALLOW_INSECURE_LEGACY */
5476
5477 struct smb_version_operations smb21_operations = {
5478 .compare_fids = smb2_compare_fids,
5479 .setup_request = smb2_setup_request,
5480 .setup_async_request = smb2_setup_async_request,
5481 .check_receive = smb2_check_receive,
5482 .add_credits = smb2_add_credits,
5483 .set_credits = smb2_set_credits,
5484 .get_credits_field = smb2_get_credits_field,
5485 .get_credits = smb2_get_credits,
5486 .wait_mtu_credits = smb2_wait_mtu_credits,
5487 .adjust_credits = smb2_adjust_credits,
5488 .get_next_mid = smb2_get_next_mid,
5489 .revert_current_mid = smb2_revert_current_mid,
5490 .read_data_offset = smb2_read_data_offset,
5491 .read_data_length = smb2_read_data_length,
5492 .map_error = map_smb2_to_linux_error,
5493 .find_mid = smb2_find_mid,
5494 .check_message = smb2_check_message,
5495 .dump_detail = smb2_dump_detail,
5496 .clear_stats = smb2_clear_stats,
5497 .print_stats = smb2_print_stats,
5498 .is_oplock_break = smb2_is_valid_oplock_break,
5499 .handle_cancelled_mid = smb2_handle_cancelled_mid,
5500 .downgrade_oplock = smb2_downgrade_oplock,
5501 .need_neg = smb2_need_neg,
5502 .negotiate = smb2_negotiate,
5503 .negotiate_wsize = smb2_negotiate_wsize,
5504 .negotiate_rsize = smb2_negotiate_rsize,
5505 .sess_setup = SMB2_sess_setup,
5506 .logoff = SMB2_logoff,
5507 .tree_connect = SMB2_tcon,
5508 .tree_disconnect = SMB2_tdis,
5509 .qfs_tcon = smb2_qfs_tcon,
5510 .is_path_accessible = smb2_is_path_accessible,
5511 .can_echo = smb2_can_echo,
5512 .echo = SMB2_echo,
5513 .query_path_info = smb2_query_path_info,
5514 .query_reparse_point = smb2_query_reparse_point,
5515 .get_srv_inum = smb2_get_srv_inum,
5516 .query_file_info = smb2_query_file_info,
5517 .set_path_size = smb2_set_path_size,
5518 .set_file_size = smb2_set_file_size,
5519 .set_file_info = smb2_set_file_info,
5520 .set_compression = smb2_set_compression,
5521 .mkdir = smb2_mkdir,
5522 .mkdir_setinfo = smb2_mkdir_setinfo,
5523 .rmdir = smb2_rmdir,
5524 .unlink = smb2_unlink,
5525 .rename = smb2_rename_path,
5526 .create_hardlink = smb2_create_hardlink,
5527 .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
5528 .query_mf_symlink = smb3_query_mf_symlink,
5529 .create_mf_symlink = smb3_create_mf_symlink,
5530 .create_reparse_inode = smb2_create_reparse_inode,
5531 .open = smb2_open_file,
5532 .set_fid = smb2_set_fid,
5533 .close = smb2_close_file,
5534 .flush = smb2_flush_file,
5535 .async_readv = smb2_async_readv,
5536 .async_writev = smb2_async_writev,
5537 .sync_read = smb2_sync_read,
5538 .sync_write = smb2_sync_write,
5539 .query_dir_first = smb2_query_dir_first,
5540 .query_dir_next = smb2_query_dir_next,
5541 .close_dir = smb2_close_dir,
5542 .calc_smb_size = smb2_calc_size,
5543 .is_status_pending = smb2_is_status_pending,
5544 .is_session_expired = smb2_is_session_expired,
5545 .oplock_response = smb2_oplock_response,
5546 .queryfs = smb2_queryfs,
5547 .mand_lock = smb2_mand_lock,
5548 .mand_unlock_range = smb2_unlock_range,
5549 .push_mand_locks = smb2_push_mandatory_locks,
5550 .get_lease_key = smb2_get_lease_key,
5551 .set_lease_key = smb2_set_lease_key,
5552 .new_lease_key = smb2_new_lease_key,
5553 .is_read_op = smb21_is_read_op,
5554 .set_oplock_level = smb21_set_oplock_level,
5555 .create_lease_buf = smb2_create_lease_buf,
5556 .parse_lease_buf = smb2_parse_lease_buf,
5557 .copychunk_range = smb2_copychunk_range,
5558 .wp_retry_size = smb2_wp_retry_size,
5559 .dir_needs_close = smb2_dir_needs_close,
5560 .enum_snapshots = smb3_enum_snapshots,
5561 .notify = smb3_notify,
5562 .get_dfs_refer = smb2_get_dfs_refer,
5563 .select_sectype = smb2_select_sectype,
5564 #ifdef CONFIG_CIFS_XATTR
5565 .query_all_EAs = smb2_query_eas,
5566 .set_EA = smb2_set_ea,
5567 #endif /* CIFS_XATTR */
5568 .get_acl = get_smb2_acl,
5569 .get_acl_by_fid = get_smb2_acl_by_fid,
5570 .set_acl = set_smb2_acl,
5571 .next_header = smb2_next_header,
5572 .ioctl_query_info = smb2_ioctl_query_info,
5573 .make_node = smb2_make_node,
5574 .fiemap = smb3_fiemap,
5575 .llseek = smb3_llseek,
5576 .is_status_io_timeout = smb2_is_status_io_timeout,
5577 .is_network_name_deleted = smb2_is_network_name_deleted,
5578 .rename_pending_delete = smb2_rename_pending_delete,
5579 };
5580
5581 struct smb_version_operations smb30_operations = {
5582 .compare_fids = smb2_compare_fids,
5583 .setup_request = smb2_setup_request,
5584 .setup_async_request = smb2_setup_async_request,
5585 .check_receive = smb2_check_receive,
5586 .add_credits = smb2_add_credits,
5587 .set_credits = smb2_set_credits,
5588 .get_credits_field = smb2_get_credits_field,
5589 .get_credits = smb2_get_credits,
5590 .wait_mtu_credits = smb2_wait_mtu_credits,
5591 .adjust_credits = smb2_adjust_credits,
5592 .get_next_mid = smb2_get_next_mid,
5593 .revert_current_mid = smb2_revert_current_mid,
5594 .read_data_offset = smb2_read_data_offset,
5595 .read_data_length = smb2_read_data_length,
5596 .map_error = map_smb2_to_linux_error,
5597 .find_mid = smb2_find_mid,
5598 .check_message = smb2_check_message,
5599 .dump_detail = smb2_dump_detail,
5600 .clear_stats = smb2_clear_stats,
5601 .print_stats = smb2_print_stats,
5602 .dump_share_caps = smb2_dump_share_caps,
5603 .is_oplock_break = smb2_is_valid_oplock_break,
5604 .handle_cancelled_mid = smb2_handle_cancelled_mid,
5605 .downgrade_oplock = smb3_downgrade_oplock,
5606 .need_neg = smb2_need_neg,
5607 .negotiate = smb2_negotiate,
5608 .negotiate_wsize = smb3_negotiate_wsize,
5609 .negotiate_rsize = smb3_negotiate_rsize,
5610 .sess_setup = SMB2_sess_setup,
5611 .logoff = SMB2_logoff,
5612 .tree_connect = SMB2_tcon,
5613 .tree_disconnect = SMB2_tdis,
5614 .qfs_tcon = smb3_qfs_tcon,
5615 .query_server_interfaces = SMB3_request_interfaces,
5616 .is_path_accessible = smb2_is_path_accessible,
5617 .can_echo = smb2_can_echo,
5618 .echo = SMB2_echo,
5619 .query_path_info = smb2_query_path_info,
5620 /* WSL tags introduced long after smb2.1, enable for SMB3, 3.11 only */
5621 .query_reparse_point = smb2_query_reparse_point,
5622 .get_srv_inum = smb2_get_srv_inum,
5623 .query_file_info = smb2_query_file_info,
5624 .set_path_size = smb2_set_path_size,
5625 .set_file_size = smb2_set_file_size,
5626 .set_file_info = smb2_set_file_info,
5627 .set_compression = smb2_set_compression,
5628 .mkdir = smb2_mkdir,
5629 .mkdir_setinfo = smb2_mkdir_setinfo,
5630 .rmdir = smb2_rmdir,
5631 .unlink = smb2_unlink,
5632 .rename = smb2_rename_path,
5633 .create_hardlink = smb2_create_hardlink,
5634 .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
5635 .query_mf_symlink = smb3_query_mf_symlink,
5636 .create_mf_symlink = smb3_create_mf_symlink,
5637 .create_reparse_inode = smb2_create_reparse_inode,
5638 .open = smb2_open_file,
5639 .set_fid = smb2_set_fid,
5640 .close = smb2_close_file,
5641 .close_getattr = smb2_close_getattr,
5642 .flush = smb2_flush_file,
5643 .async_readv = smb2_async_readv,
5644 .async_writev = smb2_async_writev,
5645 .sync_read = smb2_sync_read,
5646 .sync_write = smb2_sync_write,
5647 .query_dir_first = smb2_query_dir_first,
5648 .query_dir_next = smb2_query_dir_next,
5649 .close_dir = smb2_close_dir,
5650 .calc_smb_size = smb2_calc_size,
5651 .is_status_pending = smb2_is_status_pending,
5652 .is_session_expired = smb2_is_session_expired,
5653 .oplock_response = smb2_oplock_response,
5654 .queryfs = smb2_queryfs,
5655 .mand_lock = smb2_mand_lock,
5656 .mand_unlock_range = smb2_unlock_range,
5657 .push_mand_locks = smb2_push_mandatory_locks,
5658 .get_lease_key = smb2_get_lease_key,
5659 .set_lease_key = smb2_set_lease_key,
5660 .new_lease_key = smb2_new_lease_key,
5661 .generate_signingkey = generate_smb30signingkey,
5662 .set_integrity = smb3_set_integrity,
5663 .is_read_op = smb21_is_read_op,
5664 .set_oplock_level = smb3_set_oplock_level,
5665 .create_lease_buf = smb3_create_lease_buf,
5666 .parse_lease_buf = smb3_parse_lease_buf,
5667 .copychunk_range = smb2_copychunk_range,
5668 .duplicate_extents = smb2_duplicate_extents,
5669 .validate_negotiate = smb3_validate_negotiate,
5670 .wp_retry_size = smb2_wp_retry_size,
5671 .dir_needs_close = smb2_dir_needs_close,
5672 .fallocate = smb3_fallocate,
5673 .enum_snapshots = smb3_enum_snapshots,
5674 .notify = smb3_notify,
5675 .init_transform_rq = smb3_init_transform_rq,
5676 .is_transform_hdr = smb3_is_transform_hdr,
5677 .receive_transform = smb3_receive_transform,
5678 .get_dfs_refer = smb2_get_dfs_refer,
5679 .select_sectype = smb2_select_sectype,
5680 #ifdef CONFIG_CIFS_XATTR
5681 .query_all_EAs = smb2_query_eas,
5682 .set_EA = smb2_set_ea,
5683 #endif /* CIFS_XATTR */
5684 .get_acl = get_smb2_acl,
5685 .get_acl_by_fid = get_smb2_acl_by_fid,
5686 .set_acl = set_smb2_acl,
5687 .next_header = smb2_next_header,
5688 .ioctl_query_info = smb2_ioctl_query_info,
5689 .make_node = smb2_make_node,
5690 .fiemap = smb3_fiemap,
5691 .llseek = smb3_llseek,
5692 .is_status_io_timeout = smb2_is_status_io_timeout,
5693 .is_network_name_deleted = smb2_is_network_name_deleted,
5694 .rename_pending_delete = smb2_rename_pending_delete,
5695 };
5696
5697 struct smb_version_operations smb311_operations = {
5698 .compare_fids = smb2_compare_fids,
5699 .setup_request = smb2_setup_request,
5700 .setup_async_request = smb2_setup_async_request,
5701 .check_receive = smb2_check_receive,
5702 .add_credits = smb2_add_credits,
5703 .set_credits = smb2_set_credits,
5704 .get_credits_field = smb2_get_credits_field,
5705 .get_credits = smb2_get_credits,
5706 .wait_mtu_credits = smb2_wait_mtu_credits,
5707 .adjust_credits = smb2_adjust_credits,
5708 .get_next_mid = smb2_get_next_mid,
5709 .revert_current_mid = smb2_revert_current_mid,
5710 .read_data_offset = smb2_read_data_offset,
5711 .read_data_length = smb2_read_data_length,
5712 .map_error = map_smb2_to_linux_error,
5713 .find_mid = smb2_find_mid,
5714 .check_message = smb2_check_message,
5715 .dump_detail = smb2_dump_detail,
5716 .clear_stats = smb2_clear_stats,
5717 .print_stats = smb2_print_stats,
5718 .dump_share_caps = smb2_dump_share_caps,
5719 .is_oplock_break = smb2_is_valid_oplock_break,
5720 .handle_cancelled_mid = smb2_handle_cancelled_mid,
5721 .downgrade_oplock = smb3_downgrade_oplock,
5722 .need_neg = smb2_need_neg,
5723 .negotiate = smb2_negotiate,
5724 .negotiate_wsize = smb3_negotiate_wsize,
5725 .negotiate_rsize = smb3_negotiate_rsize,
5726 .sess_setup = SMB2_sess_setup,
5727 .logoff = SMB2_logoff,
5728 .tree_connect = SMB2_tcon,
5729 .tree_disconnect = SMB2_tdis,
5730 .qfs_tcon = smb3_qfs_tcon,
5731 .query_server_interfaces = SMB3_request_interfaces,
5732 .is_path_accessible = smb2_is_path_accessible,
5733 .can_echo = smb2_can_echo,
5734 .echo = SMB2_echo,
5735 .query_path_info = smb2_query_path_info,
5736 .query_reparse_point = smb2_query_reparse_point,
5737 .get_srv_inum = smb2_get_srv_inum,
5738 .query_file_info = smb2_query_file_info,
5739 .set_path_size = smb2_set_path_size,
5740 .set_file_size = smb2_set_file_size,
5741 .set_file_info = smb2_set_file_info,
5742 .set_compression = smb2_set_compression,
5743 .mkdir = smb2_mkdir,
5744 .mkdir_setinfo = smb2_mkdir_setinfo,
5745 .posix_mkdir = smb311_posix_mkdir,
5746 .rmdir = smb2_rmdir,
5747 .unlink = smb2_unlink,
5748 .rename = smb2_rename_path,
5749 .create_hardlink = smb2_create_hardlink,
5750 .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
5751 .query_mf_symlink = smb3_query_mf_symlink,
5752 .create_mf_symlink = smb3_create_mf_symlink,
5753 .create_reparse_inode = smb2_create_reparse_inode,
5754 .open = smb2_open_file,
5755 .set_fid = smb2_set_fid,
5756 .close = smb2_close_file,
5757 .close_getattr = smb2_close_getattr,
5758 .flush = smb2_flush_file,
5759 .async_readv = smb2_async_readv,
5760 .async_writev = smb2_async_writev,
5761 .sync_read = smb2_sync_read,
5762 .sync_write = smb2_sync_write,
5763 .query_dir_first = smb2_query_dir_first,
5764 .query_dir_next = smb2_query_dir_next,
5765 .close_dir = smb2_close_dir,
5766 .calc_smb_size = smb2_calc_size,
5767 .is_status_pending = smb2_is_status_pending,
5768 .is_session_expired = smb2_is_session_expired,
5769 .oplock_response = smb2_oplock_response,
5770 .queryfs = smb311_queryfs,
5771 .mand_lock = smb2_mand_lock,
5772 .mand_unlock_range = smb2_unlock_range,
5773 .push_mand_locks = smb2_push_mandatory_locks,
5774 .get_lease_key = smb2_get_lease_key,
5775 .set_lease_key = smb2_set_lease_key,
5776 .new_lease_key = smb2_new_lease_key,
5777 .generate_signingkey = generate_smb311signingkey,
5778 .set_integrity = smb3_set_integrity,
5779 .is_read_op = smb21_is_read_op,
5780 .set_oplock_level = smb3_set_oplock_level,
5781 .create_lease_buf = smb3_create_lease_buf,
5782 .parse_lease_buf = smb3_parse_lease_buf,
5783 .copychunk_range = smb2_copychunk_range,
5784 .duplicate_extents = smb2_duplicate_extents,
5785 /* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
5786 .wp_retry_size = smb2_wp_retry_size,
5787 .dir_needs_close = smb2_dir_needs_close,
5788 .fallocate = smb3_fallocate,
5789 .enum_snapshots = smb3_enum_snapshots,
5790 .notify = smb3_notify,
5791 .init_transform_rq = smb3_init_transform_rq,
5792 .is_transform_hdr = smb3_is_transform_hdr,
5793 .receive_transform = smb3_receive_transform,
5794 .get_dfs_refer = smb2_get_dfs_refer,
5795 .select_sectype = smb2_select_sectype,
5796 #ifdef CONFIG_CIFS_XATTR
5797 .query_all_EAs = smb2_query_eas,
5798 .set_EA = smb2_set_ea,
5799 #endif /* CIFS_XATTR */
5800 .get_acl = get_smb2_acl,
5801 .get_acl_by_fid = get_smb2_acl_by_fid,
5802 .set_acl = set_smb2_acl,
5803 .next_header = smb2_next_header,
5804 .ioctl_query_info = smb2_ioctl_query_info,
5805 .make_node = smb2_make_node,
5806 .fiemap = smb3_fiemap,
5807 .llseek = smb3_llseek,
5808 .is_status_io_timeout = smb2_is_status_io_timeout,
5809 .is_network_name_deleted = smb2_is_network_name_deleted,
5810 .rename_pending_delete = smb2_rename_pending_delete,
5811 };
5812
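/*
 * Per-dialect protocol constants. These tables differ mainly in the
 * advertised version string, protocol id and requested capabilities
 * (which must be zero before the SMB3 dialects), and in the create lease
 * context size: SMB2.0/2.1 use create_lease while the SMB3 dialects use
 * create_lease_v2.
 */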
5813 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
5814 struct smb_version_values smb20_values = {
5815 .version_string = SMB20_VERSION_STRING,
5816 .protocol_id = SMB20_PROT_ID,
5817 .req_capabilities = 0, /* MBZ */
5818 .large_lock_type = 0,
5819 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5820 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
5821 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5822 .header_size = sizeof(struct smb2_hdr),
5823 .header_preamble_size = 0,
5824 .max_header_size = MAX_SMB2_HDR_SIZE,
5825 .read_rsp_size = sizeof(struct smb2_read_rsp),
5826 .lock_cmd = SMB2_LOCK,
5827 .cap_unix = 0,
5828 .cap_nt_find = SMB2_NT_FIND,
5829 .cap_large_files = SMB2_LARGE_FILES,
5830 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5831 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5832 .create_lease_size = sizeof(struct create_lease),
5833 };
5834 #endif /* ALLOW_INSECURE_LEGACY */
5835
5836 struct smb_version_values smb21_values = {
5837 .version_string = SMB21_VERSION_STRING,
5838 .protocol_id = SMB21_PROT_ID,
5839 .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
5840 .large_lock_type = 0,
5841 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5842 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
5843 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5844 .header_size = sizeof(struct smb2_hdr),
5845 .header_preamble_size = 0,
5846 .max_header_size = MAX_SMB2_HDR_SIZE,
5847 .read_rsp_size = sizeof(struct smb2_read_rsp),
5848 .lock_cmd = SMB2_LOCK,
5849 .cap_unix = 0,
5850 .cap_nt_find = SMB2_NT_FIND,
5851 .cap_large_files = SMB2_LARGE_FILES,
5852 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5853 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5854 .create_lease_size = sizeof(struct create_lease),
5855 };
5856
5857 struct smb_version_values smb3any_values = {
5858 .version_string = SMB3ANY_VERSION_STRING,
5859 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
5860 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5861 .large_lock_type = 0,
5862 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5863 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
5864 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5865 .header_size = sizeof(struct smb2_hdr),
5866 .header_preamble_size = 0,
5867 .max_header_size = MAX_SMB2_HDR_SIZE,
5868 .read_rsp_size = sizeof(struct smb2_read_rsp),
5869 .lock_cmd = SMB2_LOCK,
5870 .cap_unix = 0,
5871 .cap_nt_find = SMB2_NT_FIND,
5872 .cap_large_files = SMB2_LARGE_FILES,
5873 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5874 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5875 .create_lease_size = sizeof(struct create_lease_v2),
5876 };
5877
5878 struct smb_version_values smbdefault_values = {
5879 .version_string = SMBDEFAULT_VERSION_STRING,
5880 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
5881 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5882 .large_lock_type = 0,
5883 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5884 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
5885 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5886 .header_size = sizeof(struct smb2_hdr),
5887 .header_preamble_size = 0,
5888 .max_header_size = MAX_SMB2_HDR_SIZE,
5889 .read_rsp_size = sizeof(struct smb2_read_rsp),
5890 .lock_cmd = SMB2_LOCK,
5891 .cap_unix = 0,
5892 .cap_nt_find = SMB2_NT_FIND,
5893 .cap_large_files = SMB2_LARGE_FILES,
5894 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5895 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5896 .create_lease_size = sizeof(struct create_lease_v2),
5897 };
5898
5899 struct smb_version_values smb30_values = {
5900 .version_string = SMB30_VERSION_STRING,
5901 .protocol_id = SMB30_PROT_ID,
5902 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5903 .large_lock_type = 0,
5904 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5905 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
5906 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5907 .header_size = sizeof(struct smb2_hdr),
5908 .header_preamble_size = 0,
5909 .max_header_size = MAX_SMB2_HDR_SIZE,
5910 .read_rsp_size = sizeof(struct smb2_read_rsp),
5911 .lock_cmd = SMB2_LOCK,
5912 .cap_unix = 0,
5913 .cap_nt_find = SMB2_NT_FIND,
5914 .cap_large_files = SMB2_LARGE_FILES,
5915 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5916 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5917 .create_lease_size = sizeof(struct create_lease_v2),
5918 };
5919
5920 struct smb_version_values smb302_values = {
5921 .version_string = SMB302_VERSION_STRING,
5922 .protocol_id = SMB302_PROT_ID,
5923 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5924 .large_lock_type = 0,
5925 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5926 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
5927 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5928 .header_size = sizeof(struct smb2_hdr),
5929 .header_preamble_size = 0,
5930 .max_header_size = MAX_SMB2_HDR_SIZE,
5931 .read_rsp_size = sizeof(struct smb2_read_rsp),
5932 .lock_cmd = SMB2_LOCK,
5933 .cap_unix = 0,
5934 .cap_nt_find = SMB2_NT_FIND,
5935 .cap_large_files = SMB2_LARGE_FILES,
5936 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5937 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5938 .create_lease_size = sizeof(struct create_lease_v2),
5939 };
5940
5941 struct smb_version_values smb311_values = {
5942 .version_string = SMB311_VERSION_STRING,
5943 .protocol_id = SMB311_PROT_ID,
5944 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5945 .large_lock_type = 0,
5946 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5947 .shared_lock_type = SMB2_LOCKFLAG_SHARED,
5948 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5949 .header_size = sizeof(struct smb2_hdr),
5950 .header_preamble_size = 0,
5951 .max_header_size = MAX_SMB2_HDR_SIZE,
5952 .read_rsp_size = sizeof(struct smb2_read_rsp),
5953 .lock_cmd = SMB2_LOCK,
5954 .cap_unix = 0,
5955 .cap_nt_find = SMB2_NT_FIND,
5956 .cap_large_files = SMB2_LARGE_FILES,
5957 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5958 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5959 .create_lease_size = sizeof(struct create_lease_v2),
5960 };
5961