1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
6 */
7
8 #include <linux/pagemap.h>
9 #include <linux/vfs.h>
10 #include <linux/falloc.h>
11 #include <linux/scatterlist.h>
12 #include <linux/uuid.h>
13 #include <linux/sort.h>
14 #include <crypto/aead.h>
15 #include <linux/fiemap.h>
16 #include <linux/folio_queue.h>
17 #include <uapi/linux/magic.h>
18 #include "cifsfs.h"
19 #include "cifsglob.h"
20 #include "cifsproto.h"
21 #include "smb2proto.h"
22 #include "smb2pdu.h"
23 #include "cifs_debug.h"
24 #include "cifs_unicode.h"
25 #include "../common/smb2status.h"
26 #include "smb2glob.h"
27 #include "cifs_ioctl.h"
28 #include "smbdirect.h"
29 #include "fscache.h"
30 #include "fs_context.h"
31 #include "cached_dir.h"
32 #include "reparse.h"
33
34 /* Change credits for different ops and return the total number of credits */
35 static int
change_conf(struct TCP_Server_Info * server)36 change_conf(struct TCP_Server_Info *server)
37 {
38 server->credits += server->echo_credits + server->oplock_credits;
39 if (server->credits > server->max_credits)
40 server->credits = server->max_credits;
41 server->oplock_credits = server->echo_credits = 0;
42 switch (server->credits) {
43 case 0:
44 return 0;
45 case 1:
46 server->echoes = false;
47 server->oplocks = false;
48 break;
49 case 2:
50 server->echoes = true;
51 server->oplocks = false;
52 server->echo_credits = 1;
53 break;
54 default:
55 server->echoes = true;
56 if (enable_oplocks) {
57 server->oplocks = true;
58 server->oplock_credits = 1;
59 } else
60 server->oplocks = false;
61
62 server->echo_credits = 1;
63 }
64 server->credits -= server->echo_credits + server->oplock_credits;
65 return server->credits + server->echo_credits + server->oplock_credits;
66 }
67
68 static void
smb2_add_credits(struct TCP_Server_Info * server,struct cifs_credits * credits,const int optype)69 smb2_add_credits(struct TCP_Server_Info *server,
70 struct cifs_credits *credits, const int optype)
71 {
72 int *val, rc = -1;
73 int scredits, in_flight;
74 unsigned int add = credits->value;
75 unsigned int instance = credits->instance;
76 bool reconnect_detected = false;
77 bool reconnect_with_invalid_credits = false;
78
79 spin_lock(&server->req_lock);
80 val = server->ops->get_credits_field(server, optype);
81
82 /* eg found case where write overlapping reconnect messed up credits */
83 if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
84 reconnect_with_invalid_credits = true;
85
86 if ((instance == 0) || (instance == server->reconnect_instance))
87 *val += add;
88 else
89 reconnect_detected = true;
90
91 if (*val > 65000) {
92 *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
93 pr_warn_once("server overflowed SMB3 credits\n");
94 trace_smb3_overflow_credits(server->current_mid,
95 server->conn_id, server->hostname, *val,
96 add, server->in_flight);
97 }
98 if (credits->in_flight_check > 1) {
99 pr_warn_once("rreq R=%08x[%x] Credits not in flight\n",
100 credits->rreq_debug_id, credits->rreq_debug_index);
101 } else {
102 credits->in_flight_check = 2;
103 }
104 if (WARN_ON_ONCE(server->in_flight == 0)) {
105 pr_warn_once("rreq R=%08x[%x] Zero in_flight\n",
106 credits->rreq_debug_id, credits->rreq_debug_index);
107 trace_smb3_rw_credits(credits->rreq_debug_id,
108 credits->rreq_debug_index,
109 credits->value,
110 server->credits, server->in_flight, 0,
111 cifs_trace_rw_credits_zero_in_flight);
112 }
113 server->in_flight--;
114
115 /*
116 * Rebalance credits when an op drains in_flight. For session setup,
117 * do this only when the total accumulated credits are high enough (>2)
118 * so that a newly established secondary channel can reserve credits for
119 * echoes and oplocks. We expect this to happen at the end of the final
120 * session setup response.
121 */
122 if (server->in_flight == 0 &&
123 ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) &&
124 ((optype & CIFS_OP_MASK) != CIFS_SESS_OP))
125 rc = change_conf(server);
126 else if (server->in_flight == 0 &&
127 ((optype & CIFS_OP_MASK) == CIFS_SESS_OP) && *val > 2)
128 rc = change_conf(server);
129 /*
130 * Sometimes server returns 0 credits on oplock break ack - we need to
131 * rebalance credits in this case.
132 */
133 else if (server->in_flight > 0 && server->oplock_credits == 0 &&
134 server->oplocks) {
135 if (server->credits > 1) {
136 server->credits--;
137 server->oplock_credits++;
138 }
139 } else if ((server->in_flight > 0) && (server->oplock_credits > 3) &&
140 ((optype & CIFS_OP_MASK) == CIFS_OBREAK_OP))
141 /* if now have too many oplock credits, rebalance so don't starve normal ops */
142 change_conf(server);
143
144 scredits = *val;
145 in_flight = server->in_flight;
146 spin_unlock(&server->req_lock);
147 wake_up(&server->request_q);
148
149 if (reconnect_detected) {
150 trace_smb3_reconnect_detected(server->current_mid,
151 server->conn_id, server->hostname, scredits, add, in_flight);
152
153 cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
154 add, instance);
155 }
156
157 if (reconnect_with_invalid_credits) {
158 trace_smb3_reconnect_with_invalid_credits(server->current_mid,
159 server->conn_id, server->hostname, scredits, add, in_flight);
160 cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
161 optype, scredits, add);
162 }
163
164 spin_lock(&server->srv_lock);
165 if (server->tcpStatus == CifsNeedReconnect
166 || server->tcpStatus == CifsExiting) {
167 spin_unlock(&server->srv_lock);
168 return;
169 }
170 spin_unlock(&server->srv_lock);
171
172 switch (rc) {
173 case -1:
174 /* change_conf hasn't been executed */
175 break;
176 case 0:
177 cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
178 break;
179 case 1:
180 cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
181 break;
182 case 2:
183 cifs_dbg(FYI, "disabling oplocks\n");
184 break;
185 default:
186 /* change_conf rebalanced credits for different types */
187 break;
188 }
189
190 trace_smb3_add_credits(server->current_mid,
191 server->conn_id, server->hostname, scredits, add, in_flight);
192 cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
193 }
194
195 static void
smb2_set_credits(struct TCP_Server_Info * server,const int val)196 smb2_set_credits(struct TCP_Server_Info *server, const int val)
197 {
198 int scredits, in_flight;
199
200 spin_lock(&server->req_lock);
201 server->credits = val;
202 if (val == 1) {
203 server->reconnect_instance++;
204 /*
205 * ChannelSequence updated for all channels in primary channel so that consistent
206 * across SMB3 requests sent on any channel. See MS-SMB2 3.2.4.1 and 3.2.7.1
207 */
208 if (SERVER_IS_CHAN(server))
209 server->primary_server->channel_sequence_num++;
210 else
211 server->channel_sequence_num++;
212 }
213 scredits = server->credits;
214 in_flight = server->in_flight;
215 spin_unlock(&server->req_lock);
216
217 trace_smb3_set_credits(server->current_mid,
218 server->conn_id, server->hostname, scredits, val, in_flight);
219 cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);
220
221 /* don't log while holding the lock */
222 if (val == 1)
223 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
224 }
225
226 static int *
smb2_get_credits_field(struct TCP_Server_Info * server,const int optype)227 smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
228 {
229 switch (optype) {
230 case CIFS_ECHO_OP:
231 return &server->echo_credits;
232 case CIFS_OBREAK_OP:
233 return &server->oplock_credits;
234 default:
235 return &server->credits;
236 }
237 }
238
239 static unsigned int
smb2_get_credits(struct mid_q_entry * mid)240 smb2_get_credits(struct mid_q_entry *mid)
241 {
242 return mid->credits_received;
243 }
244
245 static int
smb2_wait_mtu_credits(struct TCP_Server_Info * server,size_t size,size_t * num,struct cifs_credits * credits)246 smb2_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
247 size_t *num, struct cifs_credits *credits)
248 {
249 int rc = 0;
250 unsigned int scredits, in_flight;
251
252 spin_lock(&server->req_lock);
253 while (1) {
254 spin_unlock(&server->req_lock);
255
256 spin_lock(&server->srv_lock);
257 if (server->tcpStatus == CifsExiting) {
258 spin_unlock(&server->srv_lock);
259 return -ENOENT;
260 }
261 spin_unlock(&server->srv_lock);
262
263 spin_lock(&server->req_lock);
264 if (server->credits <= 0) {
265 spin_unlock(&server->req_lock);
266 cifs_num_waiters_inc(server);
267 rc = wait_event_killable(server->request_q,
268 has_credits(server, &server->credits, 1));
269 cifs_num_waiters_dec(server);
270 if (rc)
271 return rc;
272 spin_lock(&server->req_lock);
273 } else {
274 scredits = server->credits;
275 /* can deadlock with reopen */
276 if (scredits <= 8) {
277 *num = SMB2_MAX_BUFFER_SIZE;
278 credits->value = 0;
279 credits->instance = 0;
280 break;
281 }
282
283 /* leave some credits for reopen and other ops */
284 scredits -= 8;
285 *num = min_t(unsigned int, size,
286 scredits * SMB2_MAX_BUFFER_SIZE);
287
288 credits->value =
289 DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
290 credits->instance = server->reconnect_instance;
291 server->credits -= credits->value;
292 server->in_flight++;
293 if (server->in_flight > server->max_in_flight)
294 server->max_in_flight = server->in_flight;
295 break;
296 }
297 }
298 scredits = server->credits;
299 in_flight = server->in_flight;
300 spin_unlock(&server->req_lock);
301
302 trace_smb3_wait_credits(server->current_mid,
303 server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
304 cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
305 __func__, credits->value, scredits);
306
307 return rc;
308 }
309
310 static int
smb2_adjust_credits(struct TCP_Server_Info * server,struct cifs_io_subrequest * subreq,unsigned int trace)311 smb2_adjust_credits(struct TCP_Server_Info *server,
312 struct cifs_io_subrequest *subreq,
313 unsigned int /*enum smb3_rw_credits_trace*/ trace)
314 {
315 struct cifs_credits *credits = &subreq->credits;
316 int new_val = DIV_ROUND_UP(subreq->subreq.len - subreq->subreq.transferred,
317 SMB2_MAX_BUFFER_SIZE);
318 int scredits, in_flight;
319
320 if (!credits->value || credits->value == new_val)
321 return 0;
322
323 if (credits->value < new_val) {
324 trace_smb3_rw_credits(subreq->rreq->debug_id,
325 subreq->subreq.debug_index,
326 credits->value,
327 server->credits, server->in_flight,
328 new_val - credits->value,
329 cifs_trace_rw_credits_no_adjust_up);
330 trace_smb3_too_many_credits(server->current_mid,
331 server->conn_id, server->hostname, 0, credits->value - new_val, 0);
332 cifs_server_dbg(VFS, "R=%x[%x] request has less credits (%d) than required (%d)",
333 subreq->rreq->debug_id, subreq->subreq.debug_index,
334 credits->value, new_val);
335
336 return -EOPNOTSUPP;
337 }
338
339 spin_lock(&server->req_lock);
340
341 if (server->reconnect_instance != credits->instance) {
342 scredits = server->credits;
343 in_flight = server->in_flight;
344 spin_unlock(&server->req_lock);
345
346 trace_smb3_rw_credits(subreq->rreq->debug_id,
347 subreq->subreq.debug_index,
348 credits->value,
349 server->credits, server->in_flight,
350 new_val - credits->value,
351 cifs_trace_rw_credits_old_session);
352 trace_smb3_reconnect_detected(server->current_mid,
353 server->conn_id, server->hostname, scredits,
354 credits->value - new_val, in_flight);
355 cifs_server_dbg(VFS, "R=%x[%x] trying to return %d credits to old session\n",
356 subreq->rreq->debug_id, subreq->subreq.debug_index,
357 credits->value - new_val);
358 return -EAGAIN;
359 }
360
361 trace_smb3_rw_credits(subreq->rreq->debug_id,
362 subreq->subreq.debug_index,
363 credits->value,
364 server->credits, server->in_flight,
365 new_val - credits->value, trace);
366 server->credits += credits->value - new_val;
367 scredits = server->credits;
368 in_flight = server->in_flight;
369 spin_unlock(&server->req_lock);
370 wake_up(&server->request_q);
371
372 trace_smb3_adj_credits(server->current_mid,
373 server->conn_id, server->hostname, scredits,
374 credits->value - new_val, in_flight);
375 cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
376 __func__, credits->value - new_val, scredits);
377
378 credits->value = new_val;
379
380 return 0;
381 }
382
383 static __u64
smb2_get_next_mid(struct TCP_Server_Info * server)384 smb2_get_next_mid(struct TCP_Server_Info *server)
385 {
386 __u64 mid;
387 /* for SMB2 we need the current value */
388 spin_lock(&server->mid_counter_lock);
389 mid = server->current_mid++;
390 spin_unlock(&server->mid_counter_lock);
391 return mid;
392 }
393
394 static void
smb2_revert_current_mid(struct TCP_Server_Info * server,const unsigned int val)395 smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
396 {
397 spin_lock(&server->mid_counter_lock);
398 if (server->current_mid >= val)
399 server->current_mid -= val;
400 spin_unlock(&server->mid_counter_lock);
401 }
402
403 static struct mid_q_entry *
__smb2_find_mid(struct TCP_Server_Info * server,char * buf,bool dequeue)404 __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
405 {
406 struct mid_q_entry *mid;
407 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
408 __u64 wire_mid = le64_to_cpu(shdr->MessageId);
409
410 if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
411 cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
412 return NULL;
413 }
414
415 spin_lock(&server->mid_queue_lock);
416 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
417 if ((mid->mid == wire_mid) &&
418 (mid->mid_state == MID_REQUEST_SUBMITTED) &&
419 (mid->command == shdr->Command)) {
420 smb_get_mid(mid);
421 if (dequeue) {
422 list_del_init(&mid->qhead);
423 mid->deleted_from_q = true;
424 }
425 spin_unlock(&server->mid_queue_lock);
426 return mid;
427 }
428 }
429 spin_unlock(&server->mid_queue_lock);
430 return NULL;
431 }
432
433 static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info * server,char * buf)434 smb2_find_mid(struct TCP_Server_Info *server, char *buf)
435 {
436 return __smb2_find_mid(server, buf, false);
437 }
438
439 static struct mid_q_entry *
smb2_find_dequeue_mid(struct TCP_Server_Info * server,char * buf)440 smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
441 {
442 return __smb2_find_mid(server, buf, true);
443 }
444
445 static void
smb2_dump_detail(void * buf,size_t buf_len,struct TCP_Server_Info * server)446 smb2_dump_detail(void *buf, size_t buf_len, struct TCP_Server_Info *server)
447 {
448 #ifdef CONFIG_CIFS_DEBUG2
449 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
450
451 cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
452 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
453 shdr->Id.SyncId.ProcessId);
454 if (!server->ops->check_message(buf, buf_len, server->total_read, server)) {
455 cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
456 server->ops->calc_smb_size(buf));
457 }
458 #endif
459 }
460
461 static bool
smb2_need_neg(struct TCP_Server_Info * server)462 smb2_need_neg(struct TCP_Server_Info *server)
463 {
464 return server->max_read == 0;
465 }
466
467 static int
smb2_negotiate(const unsigned int xid,struct cifs_ses * ses,struct TCP_Server_Info * server)468 smb2_negotiate(const unsigned int xid,
469 struct cifs_ses *ses,
470 struct TCP_Server_Info *server)
471 {
472 int rc;
473
474 spin_lock(&server->mid_counter_lock);
475 server->current_mid = 0;
476 spin_unlock(&server->mid_counter_lock);
477 rc = SMB2_negotiate(xid, ses, server);
478 return rc;
479 }
480
481 static inline unsigned int
prevent_zero_iosize(unsigned int size,const char * type)482 prevent_zero_iosize(unsigned int size, const char *type)
483 {
484 if (size == 0) {
485 cifs_dbg(VFS, "SMB: Zero %ssize calculated, using minimum value %u\n",
486 type, CIFS_MIN_DEFAULT_IOSIZE);
487 return CIFS_MIN_DEFAULT_IOSIZE;
488 }
489 return size;
490 }
491
492 static unsigned int
smb2_negotiate_wsize(struct cifs_tcon * tcon,struct smb3_fs_context * ctx)493 smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
494 {
495 struct TCP_Server_Info *server = tcon->ses->server;
496 unsigned int wsize;
497
498 /* start with specified wsize, or default */
499 wsize = ctx->got_wsize ? ctx->vol_wsize : CIFS_DEFAULT_IOSIZE;
500 wsize = min_t(unsigned int, wsize, server->max_write);
501 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
502 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
503
504 return prevent_zero_iosize(wsize, "w");
505 }
506
507 static unsigned int
smb3_negotiate_wsize(struct cifs_tcon * tcon,struct smb3_fs_context * ctx)508 smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
509 {
510 struct TCP_Server_Info *server = tcon->ses->server;
511 unsigned int wsize;
512
513 /* start with specified wsize, or default */
514 wsize = ctx->got_wsize ? ctx->vol_wsize : SMB3_DEFAULT_IOSIZE;
515 wsize = min_t(unsigned int, wsize, server->max_write);
516 #ifdef CONFIG_CIFS_SMB_DIRECT
517 if (server->rdma) {
518 const struct smbdirect_socket_parameters *sp =
519 smbd_get_parameters(server->smbd_conn);
520
521 if (server->sign)
522 /*
523 * Account for SMB2 data transfer packet header and
524 * possible encryption header
525 */
526 wsize = min_t(unsigned int,
527 wsize,
528 sp->max_fragmented_send_size -
529 SMB2_READWRITE_PDU_HEADER_SIZE -
530 sizeof(struct smb2_transform_hdr));
531 else
532 wsize = min_t(unsigned int,
533 wsize, sp->max_read_write_size);
534 }
535 #endif
536 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
537 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
538
539 return prevent_zero_iosize(wsize, "w");
540 }
541
542 static unsigned int
smb2_negotiate_rsize(struct cifs_tcon * tcon,struct smb3_fs_context * ctx)543 smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
544 {
545 struct TCP_Server_Info *server = tcon->ses->server;
546 unsigned int rsize;
547
548 /* start with specified rsize, or default */
549 rsize = ctx->got_rsize ? ctx->vol_rsize : CIFS_DEFAULT_IOSIZE;
550 rsize = min_t(unsigned int, rsize, server->max_read);
551
552 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
553 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
554
555 return prevent_zero_iosize(rsize, "r");
556 }
557
558 static unsigned int
smb3_negotiate_rsize(struct cifs_tcon * tcon,struct smb3_fs_context * ctx)559 smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
560 {
561 struct TCP_Server_Info *server = tcon->ses->server;
562 unsigned int rsize;
563
564 /* start with specified rsize, or default */
565 rsize = ctx->got_rsize ? ctx->vol_rsize : SMB3_DEFAULT_IOSIZE;
566 rsize = min_t(unsigned int, rsize, server->max_read);
567 #ifdef CONFIG_CIFS_SMB_DIRECT
568 if (server->rdma) {
569 const struct smbdirect_socket_parameters *sp =
570 smbd_get_parameters(server->smbd_conn);
571
572 if (server->sign)
573 /*
574 * Account for SMB2 data transfer packet header and
575 * possible encryption header
576 */
577 rsize = min_t(unsigned int,
578 rsize,
579 sp->max_fragmented_recv_size -
580 SMB2_READWRITE_PDU_HEADER_SIZE -
581 sizeof(struct smb2_transform_hdr));
582 else
583 rsize = min_t(unsigned int,
584 rsize, sp->max_read_write_size);
585 }
586 #endif
587
588 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
589 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
590
591 return prevent_zero_iosize(rsize, "r");
592 }
593
594 /*
595 * compare two interfaces a and b
596 * return 0 if everything matches.
597 * return 1 if a is rdma capable, or rss capable, or has higher link speed
598 * return -1 otherwise.
599 */
600 static int
iface_cmp(struct cifs_server_iface * a,struct cifs_server_iface * b)601 iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b)
602 {
603 int cmp_ret = 0;
604
605 WARN_ON(!a || !b);
606 if (a->rdma_capable == b->rdma_capable) {
607 if (a->rss_capable == b->rss_capable) {
608 if (a->speed == b->speed) {
609 cmp_ret = cifs_ipaddr_cmp((struct sockaddr *) &a->sockaddr,
610 (struct sockaddr *) &b->sockaddr);
611 if (!cmp_ret)
612 return 0;
613 else if (cmp_ret > 0)
614 return 1;
615 else
616 return -1;
617 } else if (a->speed > b->speed)
618 return 1;
619 else
620 return -1;
621 } else if (a->rss_capable > b->rss_capable)
622 return 1;
623 else
624 return -1;
625 } else if (a->rdma_capable > b->rdma_capable)
626 return 1;
627 else
628 return -1;
629 }
630
631 static int
parse_server_interfaces(struct network_interface_info_ioctl_rsp * buf,size_t buf_len,struct cifs_ses * ses,bool in_mount)632 parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
633 size_t buf_len, struct cifs_ses *ses, bool in_mount)
634 {
635 struct network_interface_info_ioctl_rsp *p;
636 struct sockaddr_in *addr4;
637 struct sockaddr_in6 *addr6;
638 struct smb_sockaddr_in *p4;
639 struct smb_sockaddr_in6 *p6;
640 struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL;
641 struct cifs_server_iface tmp_iface;
642 __be16 port;
643 ssize_t bytes_left;
644 size_t next = 0;
645 int nb_iface = 0;
646 int rc = 0, ret = 0;
647
648 bytes_left = buf_len;
649 p = buf;
650
651 spin_lock(&ses->iface_lock);
652
653 /*
654 * Go through iface_list and mark them as inactive
655 */
656 list_for_each_entry_safe(iface, niface, &ses->iface_list,
657 iface_head)
658 iface->is_active = 0;
659
660 spin_unlock(&ses->iface_lock);
661
662 /*
663 * Samba server e.g. can return an empty interface list in some cases,
664 * which would only be a problem if we were requesting multichannel
665 */
666 if (bytes_left == 0) {
667 /* avoid spamming logs every 10 minutes, so log only in mount */
668 if ((ses->chan_max > 1) && in_mount)
669 cifs_dbg(VFS,
670 "multichannel not available\n"
671 "Empty network interface list returned by server %s\n",
672 ses->server->hostname);
673 rc = -EOPNOTSUPP;
674 goto out;
675 }
676
677 spin_lock(&ses->server->srv_lock);
678 if (ses->server->dstaddr.ss_family == AF_INET)
679 port = ((struct sockaddr_in *)&ses->server->dstaddr)->sin_port;
680 else if (ses->server->dstaddr.ss_family == AF_INET6)
681 port = ((struct sockaddr_in6 *)&ses->server->dstaddr)->sin6_port;
682 else
683 port = cpu_to_be16(CIFS_PORT);
684 spin_unlock(&ses->server->srv_lock);
685
686 while (bytes_left >= (ssize_t)sizeof(*p)) {
687 memset(&tmp_iface, 0, sizeof(tmp_iface));
688 /* default to 1Gbps when link speed is unset */
689 tmp_iface.speed = le64_to_cpu(p->LinkSpeed) ?: 1000000000;
690 tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
691 tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
692
693 switch (p->Family) {
694 /*
695 * The kernel and wire socket structures have the same
696 * layout and use network byte order but make the
697 * conversion explicit in case either one changes.
698 */
699 case INTERNETWORK:
700 addr4 = (struct sockaddr_in *)&tmp_iface.sockaddr;
701 p4 = (struct smb_sockaddr_in *)p->Buffer;
702 addr4->sin_family = AF_INET;
703 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
704
705 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
706 addr4->sin_port = port;
707
708 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
709 &addr4->sin_addr);
710 break;
711 case INTERNETWORKV6:
712 addr6 = (struct sockaddr_in6 *)&tmp_iface.sockaddr;
713 p6 = (struct smb_sockaddr_in6 *)p->Buffer;
714 addr6->sin6_family = AF_INET6;
715 memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
716
717 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
718 addr6->sin6_flowinfo = 0;
719 addr6->sin6_scope_id = 0;
720 addr6->sin6_port = port;
721
722 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
723 &addr6->sin6_addr);
724 break;
725 default:
726 cifs_dbg(VFS,
727 "%s: skipping unsupported socket family\n",
728 __func__);
729 goto next_iface;
730 }
731
732 /*
733 * The iface_list is assumed to be sorted by speed.
734 * Check if the new interface exists in that list.
735 * NEVER change iface. it could be in use.
736 * Add a new one instead
737 */
738 spin_lock(&ses->iface_lock);
739 list_for_each_entry_safe(iface, niface, &ses->iface_list,
740 iface_head) {
741 ret = iface_cmp(iface, &tmp_iface);
742 if (!ret) {
743 iface->is_active = 1;
744 spin_unlock(&ses->iface_lock);
745 goto next_iface;
746 } else if (ret < 0) {
747 /* all remaining ifaces are slower */
748 kref_get(&iface->refcount);
749 break;
750 }
751 }
752 spin_unlock(&ses->iface_lock);
753
754 /* no match. insert the entry in the list */
755 info = kmalloc_obj(struct cifs_server_iface);
756 if (!info) {
757 rc = -ENOMEM;
758 goto out;
759 }
760 memcpy(info, &tmp_iface, sizeof(tmp_iface));
761
762 /* add this new entry to the list */
763 kref_init(&info->refcount);
764 info->is_active = 1;
765
766 cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, ses->iface_count);
767 cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
768 cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
769 le32_to_cpu(p->Capability));
770
771 spin_lock(&ses->iface_lock);
772 if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) {
773 list_add_tail(&info->iface_head, &iface->iface_head);
774 kref_put(&iface->refcount, release_iface);
775 } else
776 list_add_tail(&info->iface_head, &ses->iface_list);
777
778 ses->iface_count++;
779 spin_unlock(&ses->iface_lock);
780 next_iface:
781 nb_iface++;
782 next = le32_to_cpu(p->Next);
783 if (!next) {
784 bytes_left -= sizeof(*p);
785 break;
786 }
787 /* Validate that Next doesn't point beyond the buffer */
788 if (next > bytes_left) {
789 cifs_dbg(VFS, "%s: invalid Next pointer %zu > %zd\n",
790 __func__, next, bytes_left);
791 rc = -EINVAL;
792 goto out;
793 }
794 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
795 bytes_left -= next;
796 }
797
798 if (!nb_iface) {
799 cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
800 rc = -EINVAL;
801 goto out;
802 }
803
804 /* Azure rounds the buffer size up 8, to a 16 byte boundary */
805 if ((bytes_left > 8) ||
806 (bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next)
807 + sizeof(p->Next) && p->Next))
808 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
809
810 out:
811 /*
812 * Go through the list again and put the inactive entries
813 */
814 spin_lock(&ses->iface_lock);
815 list_for_each_entry_safe(iface, niface, &ses->iface_list,
816 iface_head) {
817 if (!iface->is_active) {
818 list_del(&iface->iface_head);
819 kref_put(&iface->refcount, release_iface);
820 ses->iface_count--;
821 }
822 }
823 spin_unlock(&ses->iface_lock);
824
825 return rc;
826 }
827
828 int
SMB3_request_interfaces(const unsigned int xid,struct cifs_tcon * tcon,bool in_mount)829 SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount)
830 {
831 int rc;
832 unsigned int ret_data_len = 0;
833 struct network_interface_info_ioctl_rsp *out_buf = NULL;
834 struct cifs_ses *ses = tcon->ses;
835 struct TCP_Server_Info *pserver;
836
837 /* do not query too frequently */
838 spin_lock(&ses->iface_lock);
839 if (ses->iface_last_update &&
840 time_before(jiffies, ses->iface_last_update +
841 (SMB_INTERFACE_POLL_INTERVAL * HZ))) {
842 spin_unlock(&ses->iface_lock);
843 return 0;
844 }
845
846 ses->iface_last_update = jiffies;
847
848 spin_unlock(&ses->iface_lock);
849
850 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
851 FSCTL_QUERY_NETWORK_INTERFACE_INFO,
852 NULL /* no data input */, 0 /* no data input */,
853 CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
854 if (rc == -EOPNOTSUPP) {
855 cifs_dbg(FYI,
856 "server does not support query network interfaces\n");
857 ret_data_len = 0;
858 } else if (rc != 0) {
859 cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
860 goto out;
861 }
862
863 rc = parse_server_interfaces(out_buf, ret_data_len, ses, in_mount);
864 if (rc)
865 goto out;
866
867 /* check if iface is still active */
868 spin_lock(&ses->chan_lock);
869 pserver = ses->chans[0].server;
870 if (pserver && !cifs_chan_is_iface_active(ses, pserver)) {
871 spin_unlock(&ses->chan_lock);
872 cifs_chan_update_iface(ses, pserver);
873 spin_lock(&ses->chan_lock);
874 }
875 spin_unlock(&ses->chan_lock);
876
877 out:
878 kfree(out_buf);
879 return rc;
880 }
881
882 static void
smb3_qfs_tcon(const unsigned int xid,struct cifs_tcon * tcon,struct cifs_sb_info * cifs_sb)883 smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
884 struct cifs_sb_info *cifs_sb)
885 {
886 int rc;
887 __le16 srch_path = 0; /* Null - open root of share */
888 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
889 struct cifs_open_parms oparms;
890 struct cifs_fid fid;
891 struct cached_fid *cfid = NULL;
892
893 oparms = (struct cifs_open_parms) {
894 .tcon = tcon,
895 .path = "",
896 .desired_access = FILE_READ_ATTRIBUTES,
897 .disposition = FILE_OPEN,
898 .create_options = cifs_create_options(cifs_sb, 0),
899 .fid = &fid,
900 };
901
902 rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
903 if (rc == 0)
904 memcpy(&fid, &cfid->fid, sizeof(struct cifs_fid));
905 else
906 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
907 NULL, NULL);
908 if (rc)
909 return;
910
911 SMB3_request_interfaces(xid, tcon, true /* called during mount */);
912
913 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
914 FS_ATTRIBUTE_INFORMATION);
915 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
916 FS_DEVICE_INFORMATION);
917 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
918 FS_VOLUME_INFORMATION);
919 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
920 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
921 if (cfid == NULL)
922 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
923 else
924 close_cached_dir(cfid);
925 }
926
927 static void
smb2_qfs_tcon(const unsigned int xid,struct cifs_tcon * tcon,struct cifs_sb_info * cifs_sb)928 smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
929 struct cifs_sb_info *cifs_sb)
930 {
931 int rc;
932 __le16 srch_path = 0; /* Null - open root of share */
933 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
934 struct cifs_open_parms oparms;
935 struct cifs_fid fid;
936
937 oparms = (struct cifs_open_parms) {
938 .tcon = tcon,
939 .path = "",
940 .desired_access = FILE_READ_ATTRIBUTES,
941 .disposition = FILE_OPEN,
942 .create_options = cifs_create_options(cifs_sb, 0),
943 .fid = &fid,
944 };
945
946 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
947 NULL, NULL);
948 if (rc)
949 return;
950
951 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
952 FS_ATTRIBUTE_INFORMATION);
953 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
954 FS_DEVICE_INFORMATION);
955 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
956 }
957
/*
 * Check whether @full_path exists on the server by attempting a
 * metadata-only open (FILE_READ_ATTRIBUTES).  Returns 0 if the path is
 * accessible, -EREMOTE if it turns out to be a DFS link, or a negative
 * errno on failure.
 */
static int
smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
			struct cifs_sb_info *cifs_sb, const char *full_path)
{
	__le16 *utf16_path;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	int err_buftype = CIFS_NO_BUFFER;
	struct cifs_open_parms oparms;
	struct kvec err_iov = {};
	struct cifs_fid fid;
	struct cached_fid *cfid;
	bool islink;
	int rc, rc2;

	/* If we already hold a cached dir handle for this path, it exists */
	rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
	if (!rc) {
		close_cached_dir(cfid);
		return 0;
	}

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = full_path,
		.desired_access = FILE_READ_ATTRIBUTES,
		.disposition = FILE_OPEN,
		.create_options = cifs_create_options(cifs_sb, 0),
		.fid = &fid,
	};

	/* Keep the error response frame so the NT status can be inspected */
	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
		       &err_iov, &err_buftype);
	if (rc) {
		struct smb2_hdr *hdr = err_iov.iov_base;

		if (unlikely(!hdr || err_buftype == CIFS_NO_BUFFER))
			goto out;

		/*
		 * STATUS_OBJECT_NAME_INVALID may really mean the path
		 * crosses a DFS link; remap such cases to -EREMOTE.
		 */
		if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
			rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
							     full_path, &islink);
			if (rc2) {
				rc = rc2;
				goto out;
			}
			if (islink)
				rc = -EREMOTE;
		}
		/* DFS disabled on this mount: cannot follow the link */
		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
		    (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_DFS))
			rc = -EOPNOTSUPP;
		goto out;
	}

	rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);

out:
	free_rsp_buf(err_buftype, err_iov.iov_base);
	kfree(utf16_path);
	return rc;
}
1022
/*
 * Report the server-assigned unique file id.  The IndexNumber was
 * already retrieved as part of the earlier query that filled @data,
 * so this only converts it to host endianness.
 */
static int smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
			     struct cifs_sb_info *cifs_sb, const char *full_path,
			     u64 *uniqueid, struct cifs_open_info_data *data)
{
	u64 inum = le64_to_cpu(data->fi.IndexNumber);

	*uniqueid = inum;
	return 0;
}
1030
smb2_query_file_info(const unsigned int xid,struct cifs_tcon * tcon,struct cifsFileInfo * cfile,struct cifs_open_info_data * data)1031 static int smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
1032 struct cifsFileInfo *cfile, struct cifs_open_info_data *data)
1033 {
1034 struct cifs_fid *fid = &cfile->fid;
1035
1036 if (cfile->symlink_target) {
1037 data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
1038 if (!data->symlink_target)
1039 return -ENOMEM;
1040 }
1041 data->contains_posix_file_info = false;
1042 return SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid, &data->fi);
1043 }
1044
1045 #ifdef CONFIG_CIFS_XATTR
/*
 * Translate an SMB2 FILE_FULL_EA_INFORMATION list into the Linux xattr
 * representation.
 *
 * Two modes:
 *  - @ea_name != NULL (getxattr): copy that one EA's value into @dst and
 *    return its length (or just the length if @dst_size == 0);
 *    -ENODATA if the name is not in the list, -ERANGE if @dst is too small.
 *  - @ea_name == NULL (listxattr): emit each name as "user.<name>\0" into
 *    @dst and return the total bytes needed/written (@dst_size == 0 means
 *    size-calculation only).
 *
 * Every length field read from the wire is validated against the bytes
 * remaining in @src_size before being trusted.
 */
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	size_t buf_size = dst_size;
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		name_len = (size_t)src->ea_name_length;
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		if (name_len == 0)
			break;

		/* 8 = fixed header; +1 for the NUL between name and value */
		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = smb_EIO2(smb_eio_trace_ea_overrun,
				      src_size, 8 + name_len + 1 + value_len);
			goto out;
		}

		name = &src->ea_data[0];
		value = &src->ea_data[src->ea_name_length + 1];

		if (ea_name) {
			/* getxattr: looking for one specific attribute */
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		/* next_entry_offset == 0 marks the final list entry */
		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}
1130
/*
 * Fetch the EA list for @path via a compound open/query/close and convert
 * it to Linux xattr form with move_smb2_ea_to_cifs().  If @ea_name is NULL
 * this implements listxattr; otherwise it returns the named EA's value.
 */
static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	       const unsigned char *path, const unsigned char *ea_name,
	       char *ea_data, size_t buf_size,
	       struct cifs_sb_info *cifs_sb)
{
	int rc;
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	struct smb2_query_info_rsp *rsp;
	struct smb2_file_full_ea_info *info = NULL;

	rc = smb2_query_info_compound(xid, tcon, path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov, &buftype, cifs_sb);
	if (rc) {
		/*
		 * If ea_name is NULL (listxattr) and there are no EAs,
		 * return 0 as it's not an error. Otherwise, the specified
		 * ea_name was not found.
		 */
		if (!ea_name && rc == -ENODATA)
			rc = 0;
		goto qeas_exit;
	}

	/* Verify the returned buffer offset/length lie within the response */
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_file_full_ea_info));
	if (rc)
		goto qeas_exit;

	info = (struct smb2_file_full_ea_info *)(
			le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
				  le32_to_cpu(rsp->OutputBufferLength), ea_name);

 qeas_exit:
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
1179
/*
 * Set (or delete, when @ea_value is NULL) an extended attribute on @path
 * using a compound open/set-info/close request.  Before adding an EA, the
 * current EA list size is queried so we refuse (-ENOSPC) additions that
 * would make the list unreadable later.  On replayable errors the whole
 * compound is rebuilt and resent via the replay_again loop.
 */
static int
smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
	    const char *path, const char *ea_name, const void *ea_value,
	    const __u16 ea_value_len, const struct nls_table *nls_codepage,
	    struct cifs_sb_info *cifs_sb)
{
	struct smb2_compound_vars *vars;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	struct smb_rqst *rqst;
	struct kvec *rsp_iov;
	__le16 *utf16_path = NULL;
	int ea_name_len = strlen(ea_name);
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	int len;
	int resp_buftype[3];
	struct cifs_open_parms oparms;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	unsigned int size[1];
	void *data[1];
	struct smb2_file_full_ea_info *ea;
	struct smb2_query_info_rsp *rsp;
	int rc, used_len = 0;
	int retries = 0, cur_sleep = 0;

replay_again:
	/* reinitialize for possible replay */
	used_len = 0;
	flags = CIFS_CP_CREATE_CLOSE_OP;
	oplock = SMB2_OPLOCK_LEVEL_NONE;
	server = cifs_pick_channel(ses);

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	/* EA name length is a single byte on the wire */
	if (ea_name_len > 255)
		return -EINVAL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	ea = NULL;
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	vars = kzalloc_obj(*vars);
	if (!vars) {
		rc = -ENOMEM;
		goto out_free_path;
	}
	rqst = vars->rqst;
	rsp_iov = vars->rsp_iov;

	if (ses->server->ops->query_all_EAs) {
		if (!ea_value) {
			/*
			 * Deleting: only send the request if the EA exists,
			 * otherwise report -ENODATA right away.
			 */
			rc = ses->server->ops->query_all_EAs(xid, tcon, path,
							     ea_name, NULL, 0,
							     cifs_sb);
			if (rc == -ENODATA)
				goto sea_exit;
		} else {
			/* If we are adding a attribute we should first check
			 * if there will be enough space available to store
			 * the new EA. If not we should not add it since we
			 * would not be able to even read the EAs back.
			 */
			rc = smb2_query_info_compound(xid, tcon, path,
						      FILE_READ_EA,
						      FILE_FULL_EA_INFORMATION,
						      SMB2_O_INFO_FILE,
						      CIFSMaxBufSize -
						      MAX_SMB2_CREATE_RESPONSE_SIZE -
						      MAX_SMB2_CLOSE_RESPONSE_SIZE,
						      &rsp_iov[1], &resp_buftype[1], cifs_sb);
			if (rc == 0) {
				rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
				used_len = le32_to_cpu(rsp->OutputBufferLength);
			}
			/* rsp_iov[1] is reused below for the compound send */
			free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
			resp_buftype[1] = CIFS_NO_BUFFER;
			memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
			rc = 0;

			/* Use a fudge factor of 256 bytes in case we collide
			 * with a different set_EAs command.
			 */
			if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
			    MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
			    used_len + ea_name_len + ea_value_len + 1) {
				rc = -ENOSPC;
				goto sea_exit;
			}
		}
	}

	/* Open */
	rqst[0].rq_iov = vars->open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.desired_access = FILE_WRITE_EA,
		.disposition = FILE_OPEN,
		.create_options = cifs_create_options(cifs_sb, 0),
		.fid = &fid,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto sea_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* Set Info */
	rqst[1].rq_iov = vars->si_iov;
	rqst[1].rq_nvec = 1;

	/* header + name + NUL + value; value_len == 0 deletes the EA */
	len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
	ea = kzalloc(len, GFP_KERNEL);
	if (ea == NULL) {
		rc = -ENOMEM;
		goto sea_exit;
	}

	ea->ea_name_length = ea_name_len;
	ea->ea_value_length = cpu_to_le16(ea_value_len);
	memcpy(ea->ea_data, ea_name, ea_name_len + 1);
	memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);

	size[0] = len;
	data[0] = ea;

	rc = SMB2_set_info_init(tcon, server,
				&rqst[1], COMPOUND_FID,
				COMPOUND_FID, current->tgid,
				FILE_FULL_EA_INFORMATION,
				SMB2_O_INFO_FILE, 0, data, size);
	if (rc)
		goto sea_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* Close */
	rqst[2].rq_iov = &vars->close_iov;
	rqst[2].rq_nvec = 1;
	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto sea_exit;
	smb2_set_related(&rqst[2]);

	if (retries) {
		/* Back-off before retry */
		if (cur_sleep)
			msleep(cur_sleep);
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
		smb2_set_replay(server, &rqst[2]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);
	/* no need to bump num_remote_opens because handle immediately closed */

sea_exit:
	kfree(ea);
	SMB2_open_free(&rqst[0]);
	SMB2_set_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	kfree(vars);
out_free_path:
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}
1366 #endif
1367
1368 static bool
smb2_can_echo(struct TCP_Server_Info * server)1369 smb2_can_echo(struct TCP_Server_Info *server)
1370 {
1371 return server->echoes;
1372 }
1373
1374 static void
smb2_clear_stats(struct cifs_tcon * tcon)1375 smb2_clear_stats(struct cifs_tcon *tcon)
1376 {
1377 int i;
1378
1379 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1380 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1381 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1382 }
1383 }
1384
1385 static void
smb2_dump_share_caps(struct seq_file * m,struct cifs_tcon * tcon)1386 smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
1387 {
1388 seq_puts(m, "\n\tShare Capabilities:");
1389 if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
1390 seq_puts(m, " DFS,");
1391 if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
1392 seq_puts(m, " CONTINUOUS AVAILABILITY,");
1393 if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
1394 seq_puts(m, " SCALEOUT,");
1395 if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
1396 seq_puts(m, " CLUSTER,");
1397 if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
1398 seq_puts(m, " ASYMMETRIC,");
1399 if (tcon->capabilities == 0)
1400 seq_puts(m, " None");
1401 if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
1402 seq_puts(m, " Aligned,");
1403 if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
1404 seq_puts(m, " Partition Aligned,");
1405 if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
1406 seq_puts(m, " SSD,");
1407 if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
1408 seq_puts(m, " TRIM-support,");
1409
1410 seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
1411 seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
1412 if (tcon->perf_sector_size)
1413 seq_printf(m, "\tOptimal sector size: 0x%x",
1414 tcon->perf_sector_size);
1415 seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
1416 }
1417
1418 static void
smb2_print_stats(struct seq_file * m,struct cifs_tcon * tcon)1419 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1420 {
1421 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1422 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
1423
1424 /*
1425 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1426 * totals (requests sent) since those SMBs are per-session not per tcon
1427 */
1428 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1429 (long long)(tcon->bytes_read),
1430 (long long)(tcon->bytes_written));
1431 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1432 atomic_read(&tcon->num_local_opens),
1433 atomic_read(&tcon->num_remote_opens));
1434 seq_printf(m, "\nTreeConnects: %d total %d failed",
1435 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1436 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
1437 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
1438 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1439 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
1440 seq_printf(m, "\nCreates: %d total %d failed",
1441 atomic_read(&sent[SMB2_CREATE_HE]),
1442 atomic_read(&failed[SMB2_CREATE_HE]));
1443 seq_printf(m, "\nCloses: %d total %d failed",
1444 atomic_read(&sent[SMB2_CLOSE_HE]),
1445 atomic_read(&failed[SMB2_CLOSE_HE]));
1446 seq_printf(m, "\nFlushes: %d total %d failed",
1447 atomic_read(&sent[SMB2_FLUSH_HE]),
1448 atomic_read(&failed[SMB2_FLUSH_HE]));
1449 seq_printf(m, "\nReads: %d total %d failed",
1450 atomic_read(&sent[SMB2_READ_HE]),
1451 atomic_read(&failed[SMB2_READ_HE]));
1452 seq_printf(m, "\nWrites: %d total %d failed",
1453 atomic_read(&sent[SMB2_WRITE_HE]),
1454 atomic_read(&failed[SMB2_WRITE_HE]));
1455 seq_printf(m, "\nLocks: %d total %d failed",
1456 atomic_read(&sent[SMB2_LOCK_HE]),
1457 atomic_read(&failed[SMB2_LOCK_HE]));
1458 seq_printf(m, "\nIOCTLs: %d total %d failed",
1459 atomic_read(&sent[SMB2_IOCTL_HE]),
1460 atomic_read(&failed[SMB2_IOCTL_HE]));
1461 seq_printf(m, "\nQueryDirectories: %d total %d failed",
1462 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1463 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
1464 seq_printf(m, "\nChangeNotifies: %d total %d failed",
1465 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1466 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
1467 seq_printf(m, "\nQueryInfos: %d total %d failed",
1468 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1469 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
1470 seq_printf(m, "\nSetInfos: %d total %d failed",
1471 atomic_read(&sent[SMB2_SET_INFO_HE]),
1472 atomic_read(&failed[SMB2_SET_INFO_HE]));
1473 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1474 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1475 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
1476 }
1477
/*
 * Record the newly opened server handle in @cfile and apply the granted
 * oplock level.  Caller must hold cinode->open_file_lock.
 */
static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	lockdep_assert_held(&cinode->open_file_lock);

	cfile->fid.persistent_fid = fid->persistent_fid;
	cfile->fid.volatile_fid = fid->volatile_fid;
	cfile->fid.access = fid->access;
#ifdef CONFIG_CIFS_DEBUG2
	cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
	/* Dialect-specific hook; may request cache purge via purge_cache */
	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
				      &fid->purge_cache);
	/* brlocks can only be cached while we hold a write-caching lease */
	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}
1497
1498 static int
smb2_close_file(const unsigned int xid,struct cifs_tcon * tcon,struct cifs_fid * fid)1499 smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
1500 struct cifs_fid *fid)
1501 {
1502 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1503 }
1504
/*
 * Close the file handle and use the attributes the server returns in the
 * close response to refresh the inode's timestamps and block count,
 * avoiding a separate query round trip.
 */
static int
smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile)
{
	struct smb2_file_network_open_info file_inf;
	struct inode *inode;
	u64 asize;
	int rc;

	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
			  cfile->fid.volatile_fid, &file_inf);
	if (rc)
		return rc;

	inode = d_inode(cfile->dentry);

	spin_lock(&inode->i_lock);
	/* Mark the cached metadata as fresh as of now */
	CIFS_I(inode)->time = jiffies;

	/* Creation time should not need to be updated on close */
	if (file_inf.LastWriteTime)
		inode_set_mtime_to_ts(inode,
				      cifs_NTtimeToUnix(file_inf.LastWriteTime));
	if (file_inf.ChangeTime)
		inode_set_ctime_to_ts(inode,
				      cifs_NTtimeToUnix(file_inf.ChangeTime));
	if (file_inf.LastAccessTime)
		inode_set_atime_to_ts(inode,
				      cifs_NTtimeToUnix(file_inf.LastAccessTime));

	/*
	 * Only trust allocation sizes above one page; smaller values are
	 * presumably ignored as unreliable — TODO confirm rationale.
	 */
	asize = le64_to_cpu(file_inf.AllocationSize);
	if (asize > 4096)
		inode->i_blocks = CIFS_INO_BLOCKS(asize);

	/* End of file and Attributes should not have to be updated on close */
	spin_unlock(&inode->i_lock);
	return rc;
}
1543
/*
 * Ask the server for a resume key identifying the source file of a
 * server-side copy (FSCTL_SRV_REQUEST_RESUME_KEY) and store it in
 * @pcchunk->SourceKey.  Returns 0 on success; -EOPNOTSUPP if the share
 * does not support copy range.
 */
static int
SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid,
		     struct copychunk_ioctl_req *pcchunk)
{
	int rc;
	unsigned int ret_data_len;
	struct resume_key_ioctl_rsp *res_key;

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */,
			CIFSMaxBufSize, (char **)&res_key, &ret_data_len);

	if (rc == -EOPNOTSUPP) {
		pr_warn_once("Server share %s does not support copy range\n", tcon->tree_name);
		goto req_res_key_exit;
	} else if (rc) {
		cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
		goto req_res_key_exit;
	}
	/* Response must be large enough to contain the full resume key */
	if (ret_data_len < sizeof(struct resume_key_ioctl_rsp)) {
		cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
		rc = -EINVAL;
		goto req_res_key_exit;
	}
	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);

req_res_key_exit:
	kfree(res_key);
	return rc;
}
1575
/*
 * Passthrough query interface: build a compound
 * open / (ioctl | set-info | query-info) / close request from a
 * userspace-supplied struct smb_query_info at @p (optionally followed by
 * an input buffer) and copy the server's answer back to userspace.
 * Replayable errors cause the whole compound to be rebuilt and resent.
 */
static int
smb2_ioctl_query_info(const unsigned int xid,
		      struct cifs_tcon *tcon,
		      struct cifs_sb_info *cifs_sb,
		      __le16 *path, int is_dir,
		      unsigned long p)
{
	struct smb2_compound_vars *vars;
	struct smb_rqst *rqst;
	struct kvec *rsp_iov;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	char __user *arg = (char __user *)p;
	struct smb_query_info qi;
	struct smb_query_info __user *pqi;
	int rc = 0;
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	struct smb2_ioctl_rsp *io_rsp = NULL;
	void *buffer = NULL;
	int resp_buftype[3];
	struct cifs_open_parms oparms;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	unsigned int size[2];
	void *data[2];
	int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
	void (*free_req1_func)(struct smb_rqst *r);
	int retries = 0, cur_sleep = 0;

replay_again:
	/* reinitialize for possible replay */
	buffer = NULL;
	flags = CIFS_CP_CREATE_CLOSE_OP;
	oplock = SMB2_OPLOCK_LEVEL_NONE;
	server = cifs_pick_channel(ses);

	/* NOTE(review): GFP_ATOMIC here, unlike GFP_KERNEL elsewhere — confirm */
	vars = kzalloc_obj(*vars, GFP_ATOMIC);
	if (vars == NULL)
		return -ENOMEM;
	rqst = &vars->rqst[0];
	rsp_iov = &vars->rsp_iov[0];

	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;

	if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) {
		rc = -EFAULT;
		goto free_vars;
	}
	/* Cap the user-controlled buffer size before allocating */
	if (qi.output_buffer_length > 1024) {
		rc = -EINVAL;
		goto free_vars;
	}

	if (!ses || !server) {
		rc = smb_EIO(smb_eio_trace_null_pointers);
		goto free_vars;
	}

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	/* Input data, if any, immediately follows the smb_query_info header */
	if (qi.output_buffer_length) {
		buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length);
		if (IS_ERR(buffer)) {
			rc = PTR_ERR(buffer);
			goto free_vars;
		}
	}

	/* Open */
	rqst[0].rq_iov = &vars->open_iov[0];
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.disposition = FILE_OPEN,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.fid = &fid,
		.replay = !!(retries),
	};

	/* Pick the minimal open access mask the requested operation needs */
	if (qi.flags & PASSTHRU_FSCTL) {
		switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
		case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
			oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
			oparms.desired_access = GENERIC_ALL;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
			oparms.desired_access = GENERIC_READ;
			break;
		case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
			oparms.desired_access = GENERIC_WRITE;
			break;
		}
	} else if (qi.flags & PASSTHRU_SET_INFO) {
		oparms.desired_access = GENERIC_WRITE;
	} else {
		oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
	}

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, path);
	if (rc)
		goto free_output_buffer;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query */
	if (qi.flags & PASSTHRU_FSCTL) {
		/* Can eventually relax perm check since server enforces too */
		if (!capable(CAP_SYS_ADMIN)) {
			rc = -EPERM;
			goto free_open_req;
		}
		rqst[1].rq_iov = &vars->io_iov[0];
		rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

		rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
				     qi.info_type, buffer, qi.output_buffer_length,
				     CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
				     MAX_SMB2_CLOSE_RESPONSE_SIZE);
		free_req1_func = SMB2_ioctl_free;
	} else if (qi.flags == PASSTHRU_SET_INFO) {
		/* Can eventually relax perm check since server enforces too */
		if (!capable(CAP_SYS_ADMIN)) {
			rc = -EPERM;
			goto free_open_req;
		}
		if (qi.output_buffer_length < 8) {
			rc = -EINVAL;
			goto free_open_req;
		}
		rqst[1].rq_iov = vars->si_iov;
		rqst[1].rq_nvec = 1;

		/* MS-FSCC 2.4.13 FileEndOfFileInformation */
		size[0] = 8;
		data[0] = buffer;

		rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
					current->tgid, FILE_END_OF_FILE_INFORMATION,
					SMB2_O_INFO_FILE, 0, data, size);
		free_req1_func = SMB2_set_info_free;
	} else if (qi.flags == PASSTHRU_QUERY_INFO) {
		rqst[1].rq_iov = &vars->qi_iov;
		rqst[1].rq_nvec = 1;

		rc = SMB2_query_info_init(tcon, server,
					  &rqst[1], COMPOUND_FID,
					  COMPOUND_FID, qi.file_info_class,
					  qi.info_type, qi.additional_information,
					  qi.input_buffer_length,
					  qi.output_buffer_length, buffer);
		free_req1_func = SMB2_query_info_free;
	} else { /* unknown flags */
		cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
			      qi.flags);
		rc = -EINVAL;
	}

	if (rc)
		goto free_open_req;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* Close */
	rqst[2].rq_iov = &vars->close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto free_req_1;
	smb2_set_related(&rqst[2]);

	if (retries) {
		/* Back-off before retry */
		if (cur_sleep)
			msleep(cur_sleep);
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
		smb2_set_replay(server, &rqst[2]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc)
		goto out;

	/* No need to bump num_remote_opens since handle immediately closed */
	if (qi.flags & PASSTHRU_FSCTL) {
		pqi = (struct smb_query_info __user *)arg;
		io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
		if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
			qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
		/* Refuse to copy past the end of the received response */
		if (qi.input_buffer_length > 0 &&
		    le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
		    > rsp_iov[1].iov_len) {
			rc = -EFAULT;
			goto out;
		}

		if (copy_to_user(&pqi->input_buffer_length,
				 &qi.input_buffer_length,
				 sizeof(qi.input_buffer_length))) {
			rc = -EFAULT;
			goto out;
		}

		if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
				 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
				 qi.input_buffer_length))
			rc = -EFAULT;
	} else {
		pqi = (struct smb_query_info __user *)arg;
		qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
		if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
			qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
		/* Refuse to copy past the end of the received response */
		if (qi.input_buffer_length > 0 &&
		    struct_size(qi_rsp, Buffer, qi.input_buffer_length) >
		    rsp_iov[1].iov_len) {
			rc = -EFAULT;
			goto out;
		}
		if (copy_to_user(&pqi->input_buffer_length,
				 &qi.input_buffer_length,
				 sizeof(qi.input_buffer_length))) {
			rc = -EFAULT;
			goto out;
		}

		if (copy_to_user(pqi + 1, qi_rsp->Buffer,
				 qi.input_buffer_length))
			rc = -EFAULT;
	}

out:
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	SMB2_close_free(&rqst[2]);
free_req_1:
	free_req1_func(&rqst[1]);
free_open_req:
	SMB2_open_free(&rqst[0]);
free_output_buffer:
	kfree(buffer);
free_vars:
	kfree(vars);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}
1835
1836 /**
1837 * calc_chunk_count - calculates the number chunks to be filled in the Chunks[]
1838 * array of struct copychunk_ioctl
1839 *
1840 * @tcon: destination file tcon
1841 * @bytes_left: how many bytes are left to copy
1842 *
1843 * Return: maximum number of chunks with which Chunks[] can be filled.
1844 */
1845 static inline u32
calc_chunk_count(struct cifs_tcon * tcon,u64 bytes_left)1846 calc_chunk_count(struct cifs_tcon *tcon, u64 bytes_left)
1847 {
1848 u32 max_chunks = READ_ONCE(tcon->max_chunks);
1849 u32 max_bytes_copy = READ_ONCE(tcon->max_bytes_copy);
1850 u32 max_bytes_chunk = READ_ONCE(tcon->max_bytes_chunk);
1851 u64 need;
1852 u32 allowed;
1853
1854 if (!max_bytes_chunk || !max_bytes_copy || !max_chunks)
1855 return 0;
1856
1857 /* chunks needed for the remaining bytes */
1858 need = DIV_ROUND_UP_ULL(bytes_left, max_bytes_chunk);
1859 /* chunks allowed per cc request */
1860 allowed = DIV_ROUND_UP(max_bytes_copy, max_bytes_chunk);
1861
1862 return (u32)umin(need, umin(max_chunks, allowed));
1863 }
1864
1865 /**
1866 * smb2_copychunk_range - server-side copy of data range
1867 *
1868 * @xid: transaction id
1869 * @src_file: source file
1870 * @dst_file: destination file
1871 * @src_off: source file byte offset
1872 * @len: number of bytes to copy
1873 * @dst_off: destination file byte offset
1874 *
1875 * Obtains a resume key for @src_file and issues FSCTL_SRV_COPYCHUNK_WRITE
1876 * IOCTLs, splitting the request into chunks limited by tcon->max_*.
1877 *
1878 * Return: @len on success; negative errno on failure.
1879 */
1880 static ssize_t
smb2_copychunk_range(const unsigned int xid,struct cifsFileInfo * src_file,struct cifsFileInfo * dst_file,u64 src_off,u64 len,u64 dst_off)1881 smb2_copychunk_range(const unsigned int xid,
1882 struct cifsFileInfo *src_file,
1883 struct cifsFileInfo *dst_file,
1884 u64 src_off,
1885 u64 len,
1886 u64 dst_off)
1887 {
1888 int rc = 0;
1889 unsigned int ret_data_len = 0;
1890 struct copychunk_ioctl_req *cc_req = NULL;
1891 struct copychunk_ioctl_rsp *cc_rsp = NULL;
1892 struct cifs_tcon *tcon;
1893 struct srv_copychunk *chunk;
1894 u32 chunks, chunk_count, chunk_bytes;
1895 u32 copy_bytes, copy_bytes_left;
1896 u32 chunks_written, bytes_written;
1897 u64 total_bytes_left = len;
1898 u64 src_off_prev, dst_off_prev;
1899 u32 retries = 0;
1900
1901 tcon = tlink_tcon(dst_file->tlink);
1902
1903 trace_smb3_copychunk_enter(xid, src_file->fid.volatile_fid,
1904 dst_file->fid.volatile_fid, tcon->tid,
1905 tcon->ses->Suid, src_off, dst_off, len);
1906
1907 retry:
1908 chunk_count = calc_chunk_count(tcon, total_bytes_left);
1909 if (!chunk_count) {
1910 rc = -EOPNOTSUPP;
1911 goto out;
1912 }
1913
1914 cc_req = kzalloc_flex(*cc_req, Chunks, chunk_count);
1915 if (!cc_req) {
1916 rc = -ENOMEM;
1917 goto out;
1918 }
1919
1920 /* Request a key from the server to identify the source of the copy */
1921 rc = SMB2_request_res_key(xid,
1922 tlink_tcon(src_file->tlink),
1923 src_file->fid.persistent_fid,
1924 src_file->fid.volatile_fid,
1925 cc_req);
1926
1927 /* Note: request_res_key sets res_key null only if rc != 0 */
1928 if (rc)
1929 goto out;
1930
1931 while (total_bytes_left > 0) {
1932
1933 /* Store previous offsets to allow rewind */
1934 src_off_prev = src_off;
1935 dst_off_prev = dst_off;
1936
1937 /*
1938 * __counted_by_le(ChunkCount): set to allocated chunks before
1939 * populating Chunks[]
1940 */
1941 cc_req->ChunkCount = cpu_to_le32(chunk_count);
1942
1943 chunks = 0;
1944 copy_bytes = 0;
1945 copy_bytes_left = umin(total_bytes_left, tcon->max_bytes_copy);
1946 while (copy_bytes_left > 0 && chunks < chunk_count) {
1947 chunk = &cc_req->Chunks[chunks++];
1948
1949 chunk->SourceOffset = cpu_to_le64(src_off);
1950 chunk->TargetOffset = cpu_to_le64(dst_off);
1951
1952 chunk_bytes = umin(copy_bytes_left, tcon->max_bytes_chunk);
1953
1954 chunk->Length = cpu_to_le32(chunk_bytes);
1955 /* Buffer is zeroed, no need to set chunk->Reserved = 0 */
1956
1957 src_off += chunk_bytes;
1958 dst_off += chunk_bytes;
1959
1960 copy_bytes_left -= chunk_bytes;
1961 copy_bytes += chunk_bytes;
1962 }
1963
1964 cc_req->ChunkCount = cpu_to_le32(chunks);
1965 /* Buffer is zeroed, no need to set cc_req->Reserved = 0 */
1966
1967 /* Request server copy to target from src identified by key */
1968 kfree(cc_rsp);
1969 cc_rsp = NULL;
1970 rc = SMB2_ioctl(xid, tcon, dst_file->fid.persistent_fid,
1971 dst_file->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
1972 (char *)cc_req, struct_size(cc_req, Chunks, chunks),
1973 CIFSMaxBufSize, (char **)&cc_rsp, &ret_data_len);
1974
1975 if (rc && rc != -EINVAL)
1976 goto out;
1977
1978 if (unlikely(ret_data_len != sizeof(*cc_rsp))) {
1979 cifs_tcon_dbg(VFS, "Copychunk invalid response: size %u/%zu\n",
1980 ret_data_len, sizeof(*cc_rsp));
1981 rc = smb_EIO1(smb_eio_trace_copychunk_inv_rsp, ret_data_len);
1982 goto out;
1983 }
1984
1985 bytes_written = le32_to_cpu(cc_rsp->TotalBytesWritten);
1986 chunks_written = le32_to_cpu(cc_rsp->ChunksWritten);
1987 chunk_bytes = le32_to_cpu(cc_rsp->ChunkBytesWritten);
1988
1989 if (rc == 0) {
1990 /* Check if server claimed to write more than we asked */
1991 if (unlikely(!bytes_written || bytes_written > copy_bytes)) {
1992 cifs_tcon_dbg(VFS, "Copychunk invalid response: bytes written %u/%u\n",
1993 bytes_written, copy_bytes);
1994 rc = smb_EIO2(smb_eio_trace_copychunk_overcopy_b,
1995 bytes_written, copy_bytes);
1996 goto out;
1997 }
1998 if (unlikely(!chunks_written || chunks_written > chunks)) {
1999 cifs_tcon_dbg(VFS, "Copychunk invalid response: chunks written %u/%u\n",
2000 chunks_written, chunks);
2001 rc = smb_EIO2(smb_eio_trace_copychunk_overcopy_c,
2002 chunks_written, chunks);
2003 goto out;
2004 }
2005
2006 /* Partial write: rewind */
2007 if (bytes_written < copy_bytes) {
2008 u32 delta = copy_bytes - bytes_written;
2009
2010 src_off -= delta;
2011 dst_off -= delta;
2012 }
2013
2014 total_bytes_left -= bytes_written;
2015 continue;
2016 }
2017
2018 /*
2019 * Check if server is not asking us to reduce size.
2020 *
2021 * Note: As per MS-SMB2 2.2.32.1, the values returned
2022 * in cc_rsp are not strictly lower than what existed
2023 * before.
2024 */
2025 if (bytes_written < tcon->max_bytes_copy) {
2026 cifs_tcon_dbg(FYI, "Copychunk MaxBytesCopy updated: %u -> %u\n",
2027 tcon->max_bytes_copy, bytes_written);
2028 tcon->max_bytes_copy = bytes_written;
2029 }
2030
2031 if (chunks_written < tcon->max_chunks) {
2032 cifs_tcon_dbg(FYI, "Copychunk MaxChunks updated: %u -> %u\n",
2033 tcon->max_chunks, chunks_written);
2034 tcon->max_chunks = chunks_written;
2035 }
2036
2037 if (chunk_bytes < tcon->max_bytes_chunk) {
2038 cifs_tcon_dbg(FYI, "Copychunk MaxBytesChunk updated: %u -> %u\n",
2039 tcon->max_bytes_chunk, chunk_bytes);
2040 tcon->max_bytes_chunk = chunk_bytes;
2041 }
2042
2043 /* reset to last offsets */
2044 if (retries++ < 2) {
2045 src_off = src_off_prev;
2046 dst_off = dst_off_prev;
2047 kfree(cc_req);
2048 cc_req = NULL;
2049 goto retry;
2050 }
2051
2052 break;
2053 }
2054
2055 out:
2056 kfree(cc_req);
2057 kfree(cc_rsp);
2058 if (rc) {
2059 trace_smb3_copychunk_err(xid, src_file->fid.volatile_fid,
2060 dst_file->fid.volatile_fid, tcon->tid,
2061 tcon->ses->Suid, src_off, dst_off, len, rc);
2062 return rc;
2063 } else {
2064 trace_smb3_copychunk_done(xid, src_file->fid.volatile_fid,
2065 dst_file->fid.volatile_fid, tcon->tid,
2066 tcon->ses->Suid, src_off, dst_off, len);
2067 return len;
2068 }
2069 }
2070
2071 static int
smb2_flush_file(const unsigned int xid,struct cifs_tcon * tcon,struct cifs_fid * fid)2072 smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
2073 struct cifs_fid *fid)
2074 {
2075 return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2076 }
2077
2078 static unsigned int
smb2_read_data_offset(char * buf)2079 smb2_read_data_offset(char *buf)
2080 {
2081 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
2082
2083 return rsp->DataOffset;
2084 }
2085
2086 static unsigned int
smb2_read_data_length(char * buf,bool in_remaining)2087 smb2_read_data_length(char *buf, bool in_remaining)
2088 {
2089 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
2090
2091 if (in_remaining)
2092 return le32_to_cpu(rsp->DataRemaining);
2093
2094 return le32_to_cpu(rsp->DataLength);
2095 }
2096
2097
2098 static int
smb2_sync_read(const unsigned int xid,struct cifs_fid * pfid,struct cifs_io_parms * parms,unsigned int * bytes_read,char ** buf,int * buf_type)2099 smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
2100 struct cifs_io_parms *parms, unsigned int *bytes_read,
2101 char **buf, int *buf_type)
2102 {
2103 parms->persistent_fid = pfid->persistent_fid;
2104 parms->volatile_fid = pfid->volatile_fid;
2105 return SMB2_read(xid, parms, bytes_read, buf, buf_type);
2106 }
2107
2108 static int
smb2_sync_write(const unsigned int xid,struct cifs_fid * pfid,struct cifs_io_parms * parms,unsigned int * written,struct kvec * iov,unsigned long nr_segs)2109 smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
2110 struct cifs_io_parms *parms, unsigned int *written,
2111 struct kvec *iov, unsigned long nr_segs)
2112 {
2113
2114 parms->persistent_fid = pfid->persistent_fid;
2115 parms->volatile_fid = pfid->volatile_fid;
2116 return SMB2_write(xid, parms, written, iov, nr_segs);
2117 }
2118
2119 /* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
smb2_set_sparse(const unsigned int xid,struct cifs_tcon * tcon,struct cifsFileInfo * cfile,struct inode * inode,__u8 setsparse)2120 static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
2121 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
2122 {
2123 struct cifsInodeInfo *cifsi;
2124 int rc;
2125
2126 cifsi = CIFS_I(inode);
2127
2128 /* if file already sparse don't bother setting sparse again */
2129 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
2130 return true; /* already sparse */
2131
2132 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
2133 return true; /* already not sparse */
2134
2135 /*
2136 * Can't check for sparse support on share the usual way via the
2137 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
2138 * since Samba server doesn't set the flag on the share, yet
2139 * supports the set sparse FSCTL and returns sparse correctly
2140 * in the file attributes. If we fail setting sparse though we
2141 * mark that server does not support sparse files for this share
2142 * to avoid repeatedly sending the unsupported fsctl to server
2143 * if the file is repeatedly extended.
2144 */
2145 if (tcon->broken_sparse_sup)
2146 return false;
2147
2148 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2149 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
2150 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
2151 if (rc) {
2152 tcon->broken_sparse_sup = true;
2153 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
2154 return false;
2155 }
2156
2157 if (setsparse)
2158 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
2159 else
2160 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
2161
2162 return true;
2163 }
2164
2165 static int
smb2_set_file_size(const unsigned int xid,struct cifs_tcon * tcon,struct cifsFileInfo * cfile,__u64 size,bool set_alloc)2166 smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
2167 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
2168 {
2169 struct inode *inode;
2170
2171 /*
2172 * If extending file more than one page make sparse. Many Linux fs
2173 * make files sparse by default when extending via ftruncate
2174 */
2175 inode = d_inode(cfile->dentry);
2176
2177 if (!set_alloc && (size > inode->i_size + 8192)) {
2178 __u8 set_sparse = 1;
2179
2180 /* whether set sparse succeeds or not, extend the file */
2181 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
2182 }
2183
2184 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
2185 cfile->fid.volatile_fid, cfile->pid, size);
2186 }
2187
/*
 * Clone @len bytes at @src_off of @srcfile to @dest_off of @trgtfile via
 * FSCTL_DUPLICATE_EXTENTS_TO_FILE (server-side block cloning, e.g. ReFS).
 * Returns 0 on success or a negative errno.
 */
static int
smb2_duplicate_extents(const unsigned int xid,
			struct cifsFileInfo *srcfile,
			struct cifsFileInfo *trgtfile, u64 src_off,
			u64 len, u64 dest_off)
{
	int rc;
	unsigned int ret_data_len;
	struct inode *inode;
	struct duplicate_extents_to_file dup_ext_buf;
	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);

	/* servers advertise duplicate extent support with this fs attribute flag */
	if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
		FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
		return -EOPNOTSUPP;

	/* Source handle plus the byte range to clone into the target */
	dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
	dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
	dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
	dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
	dup_ext_buf.ByteCount = cpu_to_le64(len);
	cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
		src_off, dest_off, len);
	trace_smb3_clone_enter(xid, srcfile->fid.volatile_fid,
			       trgtfile->fid.volatile_fid, tcon->tid,
			       tcon->ses->Suid, src_off, dest_off, len);
	inode = d_inode(trgtfile->dentry);
	if (inode->i_size < dest_off + len) {
		/* Grow the target first so the cloned range fits */
		rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
		if (rc)
			goto duplicate_extents_out;

		/* Keep the locally cached size in sync with the server */
		netfs_resize_file(netfs_inode(inode), dest_off + len, true);
		cifs_setsize(inode, dest_off + len);
	}
	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
			trgtfile->fid.volatile_fid,
			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
			(char *)&dup_ext_buf,
			sizeof(struct duplicate_extents_to_file),
			CIFSMaxBufSize, NULL,
			&ret_data_len);

	/* this FSCTL is defined to return no payload; log if one shows up */
	if (ret_data_len > 0)
		cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");

duplicate_extents_out:
	if (rc)
		trace_smb3_clone_err(xid, srcfile->fid.volatile_fid,
				     trgtfile->fid.volatile_fid,
				     tcon->tid, tcon->ses->Suid, src_off,
				     dest_off, len, rc);
	else
		trace_smb3_clone_done(xid, srcfile->fid.volatile_fid,
				      trgtfile->fid.volatile_fid, tcon->tid,
				      tcon->ses->Suid, src_off, dest_off, len);
	return rc;
}
2246
2247 static int
smb2_set_compression(const unsigned int xid,struct cifs_tcon * tcon,struct cifsFileInfo * cfile)2248 smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
2249 struct cifsFileInfo *cfile)
2250 {
2251 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
2252 cfile->fid.volatile_fid);
2253 }
2254
2255 static int
smb3_set_integrity(const unsigned int xid,struct cifs_tcon * tcon,struct cifsFileInfo * cfile)2256 smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
2257 struct cifsFileInfo *cfile)
2258 {
2259 struct fsctl_set_integrity_information_req integr_info;
2260 unsigned int ret_data_len;
2261
2262 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
2263 integr_info.Flags = 0;
2264 integr_info.Reserved = 0;
2265
2266 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2267 cfile->fid.volatile_fid,
2268 FSCTL_SET_INTEGRITY_INFORMATION,
2269 (char *)&integr_info,
2270 sizeof(struct fsctl_set_integrity_information_req),
2271 CIFSMaxBufSize, NULL,
2272 &ret_data_len);
2273
2274 }
2275
2276 /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
2277 #define GMT_TOKEN_SIZE 50
2278
2279 #define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
2280
2281 /*
2282 * Input buffer contains (empty) struct smb_snapshot array with size filled in
2283 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
2284 */
static int
smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile, void __user *ioc_buf)
{
	char *retbuf = NULL;
	unsigned int ret_data_len = 0;
	int rc;
	u32 max_response_size;
	struct smb_snapshot_array snapshot_in;

	/*
	 * On the first query to enumerate the list of snapshots available
	 * for this volume the buffer begins with 0 (number of snapshots
	 * which can be returned is zero since at that point we do not know
	 * how big the buffer needs to be). On the second query,
	 * it (ret_data_len) is set to number of snapshots so we can
	 * know to set the maximum response size larger (see below).
	 */
	if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
		return -EFAULT;

	/*
	 * Note that for snapshot queries that servers like Azure expect that
	 * the first query be minimal size (and just used to get the number/size
	 * of previous versions) so response size must be specified as EXACTLY
	 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
	 * of eight bytes.
	 */
	if (ret_data_len == 0)
		max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
	else
		max_response_size = CIFSMaxBufSize;

	/* SMB2_ioctl allocates retbuf and reports its length in ret_data_len */
	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_SRV_ENUMERATE_SNAPSHOTS,
			NULL, 0 /* no input data */, max_response_size,
			(char **)&retbuf,
			&ret_data_len);
	cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
			rc, ret_data_len);
	if (rc)
		return rc;

	if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
		/* Fixup buffer */
		if (copy_from_user(&snapshot_in, ioc_buf,
		    sizeof(struct smb_snapshot_array))) {
			rc = -EFAULT;
			kfree(retbuf);
			return rc;
		}

		/*
		 * Check for min size, ie not large enough to fit even one GMT
		 * token (snapshot). On the first ioctl some users may pass in
		 * smaller size (or zero) to simply get the size of the array
		 * so the user space caller can allocate sufficient memory
		 * and retry the ioctl again with larger array size sufficient
		 * to hold all of the snapshot GMT tokens on the second try.
		 */
		if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
			ret_data_len = sizeof(struct smb_snapshot_array);

		/*
		 * We return struct SRV_SNAPSHOT_ARRAY, followed by
		 * the snapshot array (of 50 byte GMT tokens) each
		 * representing an available previous version of the data
		 */
		if (ret_data_len > (snapshot_in.snapshot_array_size +
					sizeof(struct smb_snapshot_array)))
			ret_data_len = snapshot_in.snapshot_array_size +
					sizeof(struct smb_snapshot_array);

		/* Copy only as much as the caller's buffer can hold */
		if (copy_to_user(ioc_buf, retbuf, ret_data_len))
			rc = -EFAULT;
	}

	kfree(retbuf);
	return rc;
}
2366
2367
2368
/*
 * Implement the SMB3 change-notify ioctl for @pfile: open the target by
 * path, issue SMB2 CHANGE_NOTIFY with the caller-supplied filter, close
 * the handle, and (when @return_changes is set) copy the returned change
 * records back to the user buffer.
 */
static int
smb3_notify(const unsigned int xid, struct file *pfile,
	    void __user *ioc_buf, bool return_changes)
{
	struct smb3_notify_info notify;
	struct smb3_notify_info __user *pnotify_buf;
	struct dentry *dentry = pfile->f_path.dentry;
	struct inode *inode = file_inode(pfile);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct cifs_tcon *tcon;
	const unsigned char *path;
	char *returned_ioctl_info = NULL;
	void *page = alloc_dentry_path();
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	int rc = 0;
	__u32 ret_len = 0;

	path = build_path_from_dentry(dentry, page);
	if (IS_ERR(path)) {
		rc = PTR_ERR(path);
		goto notify_exit;
	}

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (utf16_path == NULL) {
		rc = -ENOMEM;
		goto notify_exit;
	}

	/*
	 * smb3_notify_info is a superset of smb3_notify (it adds data_len
	 * and the output area), so copy only as much as the ioctl variant
	 * provides; plain notify carries no output buffer (data_len = 0).
	 */
	if (return_changes) {
		if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify_info))) {
			rc = -EFAULT;
			goto notify_exit;
		}
	} else {
		if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
			rc = -EFAULT;
			goto notify_exit;
		}
		notify.data_len = 0;
	}

	tcon = cifs_sb_master_tcon(cifs_sb);
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
		.disposition = FILE_OPEN,
		.create_options = cifs_create_options(cifs_sb, 0),
		.fid = &fid,
	};

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
		       NULL);
	if (rc)
		goto notify_exit;

	/* Blocks until a change occurs or the request is interrupted */
	rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
				notify.watch_tree, notify.completion_filter,
				notify.data_len, &returned_ioctl_info, &ret_len);

	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);

	cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);
	if (return_changes && (ret_len > 0) && (notify.data_len > 0)) {
		/* Truncate to the caller-provided output buffer size */
		if (ret_len > notify.data_len)
			ret_len = notify.data_len;
		pnotify_buf = (struct smb3_notify_info __user *)ioc_buf;
		if (copy_to_user(pnotify_buf->notify_data, returned_ioctl_info, ret_len))
			rc = -EFAULT;
		else if (copy_to_user(&pnotify_buf->data_len, &ret_len, sizeof(ret_len)))
			rc = -EFAULT;
	}
	kfree(returned_ioctl_info);
notify_exit:
	free_dentry_path(page);
	kfree(utf16_path);
	return rc;
}
2451
/*
 * Start a directory enumeration: compound an SMB2 CREATE (open of @path)
 * with a QUERY_DIRECTORY in a single round trip, parse the first batch of
 * entries into @srch_inf, and return the open handle in @fid. On a
 * replayable error the whole compound is retried with exponential backoff.
 */
static int
smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
		     const char *path, struct cifs_sb_info *cifs_sb,
		     struct cifs_fid *fid, __u16 search_flags,
		     struct cifs_search_info *srch_inf)
{
	__le16 *utf16_path;
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	int resp_buftype[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
	int rc, flags = 0;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct smb2_query_directory_rsp *qd_rsp = NULL;
	struct smb2_create_rsp *op_rsp = NULL;
	struct TCP_Server_Info *server;
	int retries = 0, cur_sleep = 0;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_NONE;
	server = cifs_pick_channel(tcon->ses);

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
		.disposition = FILE_OPEN,
		.create_options = cifs_create_options(cifs_sb, 0),
		.fid = fid,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qdf_free;
	smb2_set_next_command(tcon, &rqst[0]);

	/* Query directory */
	srch_inf->entries_in_buffer = 0;
	srch_inf->index_of_last_entry = 2;

	memset(&qd_iov, 0, sizeof(qd_iov));
	rqst[1].rq_iov = qd_iov;
	rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;

	/* COMPOUND_FID: the fid is taken from the preceding CREATE response */
	rc = SMB2_query_directory_init(xid, tcon, server,
				       &rqst[1],
				       COMPOUND_FID, COMPOUND_FID,
				       0, srch_inf->info_level);
	if (rc)
		goto qdf_free;

	smb2_set_related(&rqst[1]);

	if (retries) {
		/* Back-off before retry */
		if (cur_sleep)
			msleep(cur_sleep);
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
	}

	rc = compound_send_recv(xid, tcon->ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);

	/* If the open failed there is nothing to do */
	op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	if (op_rsp == NULL || op_rsp->hdr.Status != STATUS_SUCCESS) {
		cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
		goto qdf_free;
	}
	fid->persistent_fid = op_rsp->PersistentFileId;
	fid->volatile_fid = op_rsp->VolatileFileId;

	/* Anything else than ENODATA means a genuine error */
	if (rc && rc != -ENODATA) {
		/* Open succeeded but query failed: close the handle again */
		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
		cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
		trace_smb3_query_dir_err(xid, fid->persistent_fid,
					 tcon->tid, tcon->ses->Suid, 0, 0, rc);
		goto qdf_free;
	}

	atomic_inc(&tcon->num_remote_opens);

	qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
	if (qd_rsp->hdr.Status == STATUS_NO_MORE_FILES) {
		/* Empty directory: success, but nothing to enumerate */
		trace_smb3_query_dir_done(xid, fid->persistent_fid,
					  tcon->tid, tcon->ses->Suid, 0, 0);
		srch_inf->endOfSearch = true;
		rc = 0;
		goto qdf_free;
	}

	rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
					srch_inf);
	if (rc) {
		trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
					 tcon->ses->Suid, 0, 0, rc);
		goto qdf_free;
	}
	/* srch_inf now owns the response buffer; prevent the free below */
	resp_buftype[1] = CIFS_NO_BUFFER;

	trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
				  tcon->ses->Suid, 0, srch_inf->entries_in_buffer);

qdf_free:
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_query_directory_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}
2593
2594 static int
smb2_query_dir_next(const unsigned int xid,struct cifs_tcon * tcon,struct cifs_fid * fid,__u16 search_flags,struct cifs_search_info * srch_inf)2595 smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2596 struct cifs_fid *fid, __u16 search_flags,
2597 struct cifs_search_info *srch_inf)
2598 {
2599 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2600 fid->volatile_fid, 0, srch_inf);
2601 }
2602
2603 static int
smb2_close_dir(const unsigned int xid,struct cifs_tcon * tcon,struct cifs_fid * fid)2604 smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2605 struct cifs_fid *fid)
2606 {
2607 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2608 }
2609
2610 /*
2611 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
2612 * the number of credits and return true. Otherwise - return false.
2613 */
2614 static bool
smb2_is_status_pending(char * buf,struct TCP_Server_Info * server)2615 smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
2616 {
2617 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2618 int scredits, in_flight;
2619
2620 if (shdr->Status != STATUS_PENDING)
2621 return false;
2622
2623 if (shdr->CreditRequest) {
2624 spin_lock(&server->req_lock);
2625 server->credits += le16_to_cpu(shdr->CreditRequest);
2626 scredits = server->credits;
2627 in_flight = server->in_flight;
2628 spin_unlock(&server->req_lock);
2629 wake_up(&server->request_q);
2630
2631 trace_smb3_pend_credits(server->current_mid,
2632 server->conn_id, server->hostname, scredits,
2633 le16_to_cpu(shdr->CreditRequest), in_flight);
2634 cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
2635 __func__, le16_to_cpu(shdr->CreditRequest), scredits);
2636 }
2637
2638 return true;
2639 }
2640
2641 static bool
smb2_is_session_expired(char * buf)2642 smb2_is_session_expired(char *buf)
2643 {
2644 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2645
2646 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2647 shdr->Status != STATUS_USER_SESSION_DELETED)
2648 return false;
2649
2650 trace_smb3_ses_expired(le32_to_cpu(shdr->Id.SyncId.TreeId),
2651 le64_to_cpu(shdr->SessionId),
2652 le16_to_cpu(shdr->Command),
2653 le64_to_cpu(shdr->MessageId));
2654 cifs_dbg(FYI, "Session expired or deleted\n");
2655
2656 return true;
2657 }
2658
2659 static bool
smb2_is_status_io_timeout(char * buf)2660 smb2_is_status_io_timeout(char *buf)
2661 {
2662 struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2663
2664 if (shdr->Status == STATUS_IO_TIMEOUT)
2665 return true;
2666 else
2667 return false;
2668 }
2669
/*
 * On STATUS_NETWORK_NAME_DELETED, find the tcon matching the response's
 * TreeId and mark it for reconnect. Returns true if a matching tcon was
 * found and flagged, false otherwise.
 */
static bool
smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
{
	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	if (shdr->Status != STATUS_NETWORK_NAME_DELETED)
		return false;

	/* If server is a channel, select the primary channel */
	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;

	/* Walk all sessions/tcons of the primary server under the global lock */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
		if (cifs_ses_exiting(ses))
			continue;
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
				spin_lock(&tcon->tc_lock);
				tcon->need_reconnect = true;
				spin_unlock(&tcon->tc_lock);
				/* drop the global lock before the (sleepable) warn */
				spin_unlock(&cifs_tcp_ses_lock);
				pr_warn_once("Server share %s deleted.\n",
					     tcon->tree_name);
				return true;
			}
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);

	return false;
}
2704
/*
 * Acknowledge an oplock/lease break from the server.
 *
 * On leasing-capable connections, reply with a lease-break ack carrying the
 * lease state derived from @oplock; otherwise send a plain oplock break ack,
 * keeping a level II (read) oplock only when we still cache reads or the
 * mount forces read-only caching.
 */
static int smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
				__u64 volatile_fid, __u16 net_fid,
				struct cifsInodeInfo *cinode, unsigned int oplock)
{
	/* NOTE(review): assumes CIFS_SB()/cifs_sb_flags() accept a
	 * cifsInodeInfo here — confirm against their declarations. */
	unsigned int sbflags = cifs_sb_flags(CIFS_SB(cinode));
	__u8 op;

	if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
		return SMB2_lease_break(0, tcon, cinode->lease_key,
					smb2_get_lease_state(cinode, oplock));

	/* op == 1 retains a level II oplock; op == 0 gives up the oplock */
	op = !!((oplock & CIFS_CACHE_READ_FLG) || (sbflags & CIFS_MOUNT_RO_CACHE));
	return SMB2_oplock_break(0, tcon, persistent_fid, volatile_fid, op);
}
2719
2720 void
smb2_set_replay(struct TCP_Server_Info * server,struct smb_rqst * rqst)2721 smb2_set_replay(struct TCP_Server_Info *server, struct smb_rqst *rqst)
2722 {
2723 struct smb2_hdr *shdr;
2724
2725 if (server->dialect < SMB30_PROT_ID)
2726 return;
2727
2728 shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
2729 if (shdr == NULL) {
2730 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2731 return;
2732 }
2733 shdr->Flags |= SMB2_FLAGS_REPLAY_OPERATION;
2734 }
2735
2736 void
smb2_set_related(struct smb_rqst * rqst)2737 smb2_set_related(struct smb_rqst *rqst)
2738 {
2739 struct smb2_hdr *shdr;
2740
2741 shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
2742 if (shdr == NULL) {
2743 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2744 return;
2745 }
2746 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2747 }
2748
/* Zero bytes used to pad compounded requests to 8-byte alignment on the wire */
char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2750
/*
 * Finalize @rqst as a non-final member of a compound: pad its length to an
 * 8-byte boundary and record that padded length in the header's NextCommand
 * field so the server can locate the following request.
 */
void
smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
{
	struct smb2_hdr *shdr;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	unsigned long len = smb_rqst_len(server, rqst);
	int num_padding;

	shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
	if (shdr == NULL) {
		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
		return;
	}

	/* SMB headers in a compound are 8 byte aligned. */
	if (IS_ALIGNED(len, 8))
		goto out;

	num_padding = 8 - (len & 7);
	if (smb3_encryption_required(tcon)) {
		int i;

		/*
		 * Flatten request into a single buffer with required padding as
		 * the encryption layer can't handle the padding iovs.
		 */
		for (i = 1; i < rqst->rq_nvec; i++) {
			/* append iov[i] directly after iov[0]'s current end */
			memcpy(rqst->rq_iov[0].iov_base +
					rqst->rq_iov[0].iov_len,
					rqst->rq_iov[i].iov_base,
					rqst->rq_iov[i].iov_len);
			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
		}
		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
				0, num_padding);
		rqst->rq_iov[0].iov_len += num_padding;
		rqst->rq_nvec = 1;
	} else {
		/* unencrypted: tack on a shared, static zero-padding iov */
		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
		rqst->rq_nvec++;
	}
	len += num_padding;
out:
	shdr->NextCommand = cpu_to_le32(len);
}
2798
2799 /*
2800 * helper function for exponential backoff and check if replayable
2801 */
smb2_should_replay(struct cifs_tcon * tcon,int * pretries,int * pcur_sleep)2802 bool smb2_should_replay(struct cifs_tcon *tcon,
2803 int *pretries,
2804 int *pcur_sleep)
2805 {
2806 if (!pretries || !pcur_sleep)
2807 return false;
2808
2809 if (tcon->retry || (*pretries)++ < tcon->ses->server->retrans) {
2810 /* Update sleep time for exponential backoff */
2811 if (!(*pcur_sleep))
2812 (*pcur_sleep) = 1;
2813 else {
2814 (*pcur_sleep) = ((*pcur_sleep) << 1);
2815 if ((*pcur_sleep) > CIFS_MAX_SLEEP)
2816 (*pcur_sleep) = CIFS_MAX_SLEEP;
2817 }
2818 return true;
2819 }
2820
2821 return false;
2822 }
2823
/*
 * Query file information for @path with a compounded open/query/close
 * round trip. When @path is the share root and a cached directory handle
 * is available, only the query is sent, reusing the cached fid.
 *
 * Passes the query info response back to the caller on success in @rsp;
 * the caller must free it with free_rsp_buf() using @buftype.
 */
int
smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
			 const char *path, u32 desired_access,
			 u32 class, u32 type, u32 output_len,
			 struct kvec *rsp, int *buftype,
			 struct cifs_sb_info *cifs_sb)
{
	struct smb2_compound_vars *vars;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server;
	int flags = CIFS_CP_CREATE_CLOSE_OP;
	struct smb_rqst *rqst;
	int resp_buftype[3];
	struct kvec *rsp_iov;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	int rc;
	__le16 *utf16_path;
	struct cached_fid *cfid;
	int retries = 0, cur_sleep = 0;

replay_again:
	/* reinitialize for possible replay */
	cfid = NULL;
	flags = CIFS_CP_CREATE_CLOSE_OP;
	oplock = SMB2_OPLOCK_LEVEL_NONE;
	server = cifs_pick_channel(ses);

	if (!path)
		path = "";
	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	vars = kzalloc_obj(*vars);
	if (!vars) {
		rc = -ENOMEM;
		goto out_free_path;
	}
	rqst = vars->rqst;
	rsp_iov = vars->rsp_iov;

	/*
	 * We can only call this for things we know are directories.
	 */
	if (!strcmp(path, ""))
		open_cached_dir(xid, tcon, path, cifs_sb, false,
				&cfid); /* cfid null if open dir failed */

	rqst[0].rq_iov = vars->open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.desired_access = desired_access,
		.disposition = FILE_OPEN,
		.create_options = cifs_create_options(cifs_sb, 0),
		.fid = &fid,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	rqst[1].rq_iov = &vars->qi_iov;
	rqst[1].rq_nvec = 1;

	/* With a cached fid, query that handle; otherwise use COMPOUND_FID */
	if (cfid) {
		rc = SMB2_query_info_init(tcon, server,
					  &rqst[1],
					  cfid->fid.persistent_fid,
					  cfid->fid.volatile_fid,
					  class, type, 0,
					  output_len, 0,
					  NULL);
	} else {
		rc = SMB2_query_info_init(tcon, server,
					  &rqst[1],
					  COMPOUND_FID,
					  COMPOUND_FID,
					  class, type, 0,
					  output_len, 0,
					  NULL);
	}
	if (rc)
		goto qic_exit;
	if (!cfid) {
		smb2_set_next_command(tcon, &rqst[1]);
		smb2_set_related(&rqst[1]);
	}

	rqst[2].rq_iov = &vars->close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, server,
			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
	if (rc)
		goto qic_exit;
	smb2_set_related(&rqst[2]);

	if (retries) {
		/* Back-off before retry */
		if (cur_sleep)
			msleep(cur_sleep);
		if (!cfid) {
			smb2_set_replay(server, &rqst[0]);
			smb2_set_replay(server, &rqst[2]);
		}
		smb2_set_replay(server, &rqst[1]);
	}

	/* Cached fid: send only the query; otherwise the full 3-op compound */
	if (cfid) {
		rc = compound_send_recv(xid, ses, server,
					flags, 1, &rqst[1],
					&resp_buftype[1], &rsp_iov[1]);
	} else {
		rc = compound_send_recv(xid, ses, server,
					flags, 3, rqst,
					resp_buftype, rsp_iov);
	}
	if (rc) {
		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto qic_exit;
	}
	/* Hand the query response buffer (rsp_iov[1]) over to the caller */
	*rsp = rsp_iov[1];
	*buftype = resp_buftype[1];

qic_exit:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	if (cfid)
		close_cached_dir(cfid);
	kfree(vars);
out_free_path:
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}
2987
2988 static int
smb2_queryfs(const unsigned int xid,struct cifs_tcon * tcon,const char * path,struct cifs_sb_info * cifs_sb,struct kstatfs * buf)2989 smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2990 const char *path, struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
2991 {
2992 struct smb2_query_info_rsp *rsp;
2993 struct smb2_fs_full_size_info *info = NULL;
2994 struct kvec rsp_iov = {NULL, 0};
2995 int buftype = CIFS_NO_BUFFER;
2996 int rc;
2997
2998
2999 rc = smb2_query_info_compound(xid, tcon, path,
3000 FILE_READ_ATTRIBUTES,
3001 FS_FULL_SIZE_INFORMATION,
3002 SMB2_O_INFO_FILESYSTEM,
3003 sizeof(struct smb2_fs_full_size_info),
3004 &rsp_iov, &buftype, cifs_sb);
3005 if (rc)
3006 goto qfs_exit;
3007
3008 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
3009 buf->f_type = SMB2_SUPER_MAGIC;
3010 info = (struct smb2_fs_full_size_info *)(
3011 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
3012 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
3013 le32_to_cpu(rsp->OutputBufferLength),
3014 &rsp_iov,
3015 sizeof(struct smb2_fs_full_size_info));
3016 if (!rc)
3017 smb2_copy_fs_info_to_kstatfs(info, buf);
3018
3019 qfs_exit:
3020 trace_smb3_qfs_done(xid, tcon->tid, tcon->ses->Suid, tcon->tree_name, rc);
3021 free_rsp_buf(buftype, rsp_iov.iov_base);
3022 return rc;
3023 }
3024
3025 static int
smb311_queryfs(const unsigned int xid,struct cifs_tcon * tcon,const char * path,struct cifs_sb_info * cifs_sb,struct kstatfs * buf)3026 smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
3027 const char *path, struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
3028 {
3029 int rc;
3030 __le16 *utf16_path = NULL;
3031 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3032 struct cifs_open_parms oparms;
3033 struct cifs_fid fid;
3034
3035 if (!tcon->posix_extensions)
3036 return smb2_queryfs(xid, tcon, path, cifs_sb, buf);
3037
3038 oparms = (struct cifs_open_parms) {
3039 .tcon = tcon,
3040 .path = path,
3041 .desired_access = FILE_READ_ATTRIBUTES,
3042 .disposition = FILE_OPEN,
3043 .create_options = cifs_create_options(cifs_sb, 0),
3044 .fid = &fid,
3045 };
3046
3047 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
3048 if (utf16_path == NULL)
3049 return -ENOMEM;
3050
3051 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
3052 NULL, NULL);
3053 kfree(utf16_path);
3054 if (rc)
3055 return rc;
3056
3057 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
3058 fid.volatile_fid, buf);
3059 buf->f_type = SMB2_SUPER_MAGIC;
3060 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3061 return rc;
3062 }
3063
3064 static bool
smb2_compare_fids(struct cifsFileInfo * ob1,struct cifsFileInfo * ob2)3065 smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
3066 {
3067 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
3068 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
3069 }
3070
3071 static int
smb2_mand_lock(const unsigned int xid,struct cifsFileInfo * cfile,__u64 offset,__u64 length,__u32 type,int lock,int unlock,bool wait)3072 smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
3073 __u64 length, __u32 type, int lock, int unlock, bool wait)
3074 {
3075 if (unlock && !lock)
3076 type = SMB2_LOCKFLAG_UNLOCK;
3077 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
3078 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
3079 current->tgid, length, offset, type, wait);
3080 }
3081
3082 static void
smb2_get_lease_key(struct inode * inode,struct cifs_fid * fid)3083 smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
3084 {
3085 memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
3086 }
3087
3088 static void
smb2_set_lease_key(struct inode * inode,struct cifs_fid * fid)3089 smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
3090 {
3091 memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
3092 }
3093
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
	/* Generate a fresh random 128-bit lease key for a new open. */
	generate_random_uuid(fid->lease_key);
}
3099
/*
 * Issue an FSCTL_DFS_GET_REFERRALS ioctl for @search_name and parse the
 * referral response into @target_nodes / @num_of_nodes (caller frees the
 * returned dfs_info3_param array).  Prefers the IPC tcon; otherwise borrows
 * (and refcounts) the first tcon on the session.
 */
static int
smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
		   const char *search_name,
		   struct dfs_info3_param **target_nodes,
		   unsigned int *num_of_nodes,
		   const struct nls_table *nls_codepage, int remap)
{
	int rc;
	__le16 *utf16_path = NULL;
	int utf16_path_len = 0;
	struct cifs_tcon *tcon;
	struct fsctl_get_dfs_referral_req *dfs_req = NULL;
	struct get_dfs_referral_rsp *dfs_rsp = NULL;
	u32 dfs_req_size = 0, dfs_rsp_size = 0;
	int retry_once = 0;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);

	/*
	 * Try to use the IPC tcon, otherwise just use any
	 */
	tcon = ses->tcon_ipc;
	if (tcon == NULL) {
		spin_lock(&cifs_tcp_ses_lock);
		tcon = list_first_entry_or_null(&ses->tcon_list,
						struct cifs_tcon,
						tcon_list);
		if (tcon) {
			/* pin the borrowed tcon so it can't go away under us */
			spin_lock(&tcon->tc_lock);
			tcon->tc_count++;
			spin_unlock(&tcon->tc_lock);
			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
					    netfs_trace_tcon_ref_get_dfs_refer);
		}
		spin_unlock(&cifs_tcp_ses_lock);
	}

	if (tcon == NULL) {
		cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
			 ses);
		rc = -ENOTCONN;
		goto out;
	}

	utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
					   &utf16_path_len,
					   nls_codepage, remap);
	if (!utf16_path) {
		rc = -ENOMEM;
		goto out;
	}

	/* request = fixed header + UTF-16 path appended in-line */
	dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
	dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
	if (!dfs_req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Highest DFS referral version understood */
	dfs_req->MaxReferralLevel = DFS_VERSION;

	/* Path to resolve in an UTF-16 null-terminated string */
	memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);

	/* retry at most once on a retryable error, unless fatally signalled */
	for (;;) {
		rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
				FSCTL_DFS_GET_REFERRALS,
				(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
				(char **)&dfs_rsp, &dfs_rsp_size);
		if (fatal_signal_pending(current)) {
			rc = -EINTR;
			break;
		}
		if (!is_retryable_error(rc) || retry_once++)
			break;
		usleep_range(512, 2048);
	}

	/* success with no response buffer is a server protocol violation */
	if (!rc && !dfs_rsp)
		rc = smb_EIO(smb_eio_trace_dfsref_no_rsp);
	if (rc) {
		if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
			cifs_tcon_dbg(FYI, "%s: ioctl error: rc=%d\n", __func__, rc);
		goto out;
	}

	rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
				 num_of_nodes, target_nodes,
				 nls_codepage, remap, search_name,
				 true /* is_unicode */);
	if (rc && rc != -ENOENT) {
		cifs_tcon_dbg(VFS, "%s: failed to parse DFS referral %s: %d\n",
			      __func__, search_name, rc);
	}

 out:
	if (tcon && !tcon->ipc) {
		/* ipc tcons are not refcounted */
		cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_dfs_refer);
	}
	kfree(utf16_path);
	kfree(dfs_req);
	kfree(dfs_rsp);
	return rc;
}
3206
3207 static struct smb_ntsd *
get_smb2_acl_by_fid(struct cifs_sb_info * cifs_sb,const struct cifs_fid * cifsfid,u32 * pacllen,u32 info)3208 get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
3209 const struct cifs_fid *cifsfid, u32 *pacllen, u32 info)
3210 {
3211 struct smb_ntsd *pntsd = NULL;
3212 unsigned int xid;
3213 int rc = -EOPNOTSUPP;
3214 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3215
3216 if (IS_ERR(tlink))
3217 return ERR_CAST(tlink);
3218
3219 xid = get_xid();
3220 cifs_dbg(FYI, "trying to get acl\n");
3221
3222 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
3223 cifsfid->volatile_fid, (void **)&pntsd, pacllen,
3224 info);
3225 free_xid(xid);
3226
3227 cifs_put_tlink(tlink);
3228
3229 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3230 if (rc)
3231 return ERR_PTR(rc);
3232 return pntsd;
3233
3234 }
3235
/*
 * Fetch the security descriptor (ACL) for @path by opening it with
 * READ_CONTROL (plus SYSTEM_SECURITY when a SACL is requested), querying
 * the ACL, and closing the handle.  Returns a kmalloc'ed smb_ntsd (caller
 * frees) or ERR_PTR() on failure.
 */
static struct smb_ntsd *
get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
		     const char *path, u32 *pacllen, u32 info)
{
	struct smb_ntsd *pntsd = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	unsigned int xid;
	int rc;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	__le16 *utf16_path;

	cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	tcon = tlink_tcon(tlink);
	xid = get_xid();

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path) {
		rc = -ENOMEM;
		goto put_tlink;
	}

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.desired_access = READ_CONTROL,
		.disposition = FILE_OPEN,
		/*
		 * When querying an ACL, even if the file is a symlink
		 * we want to open the source not the target, and so
		 * the protocol requires that the client specify this
		 * flag when opening a reparse point
		 */
		.create_options = cifs_create_options(cifs_sb, 0) |
				  OPEN_REPARSE_POINT,
		.fid = &fid,
	};

	/* reading the SACL additionally requires SYSTEM_SECURITY access */
	if (info & SACL_SECINFO)
		oparms.desired_access |= SYSTEM_SECURITY;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
		       NULL);
	kfree(utf16_path);
	if (!rc) {
		rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
				    fid.volatile_fid, (void **)&pntsd, pacllen,
				    info);
		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	}

put_tlink:
	cifs_put_tlink(tlink);
	free_xid(xid);

	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}
3301
/*
 * Write a security descriptor @pnntsd of @acllen bytes to @path.
 * @aclflag selects which parts are being set (owner/group/DACL/SACL) and
 * determines the access rights requested on the open.  Returns 0 or a
 * negative errno.
 */
static int
set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
	     struct inode *inode, const char *path, int aclflag)
{
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	unsigned int xid;
	int rc, access_flags = 0;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	__le16 *utf16_path;

	cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	tcon = tlink_tcon(tlink);
	xid = get_xid();

	/* map the parts being set to the open access rights they require */
	if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP)
		access_flags |= WRITE_OWNER;
	if (aclflag & CIFS_ACL_SACL)
		access_flags |= SYSTEM_SECURITY;
	if (aclflag & CIFS_ACL_DACL)
		access_flags |= WRITE_DAC;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path) {
		rc = -ENOMEM;
		goto put_tlink;
	}

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.desired_access = access_flags,
		.create_options = cifs_create_options(cifs_sb, 0),
		.disposition = FILE_OPEN,
		.path = path,
		.fid = &fid,
	};

	/* open, set the ACL on the handle, then close */
	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
		       NULL, NULL);
	kfree(utf16_path);
	if (!rc) {
		rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
			    fid.volatile_fid, pnntsd, acllen, aclflag);
		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	}

put_tlink:
	cifs_put_tlink(tlink);
	free_xid(xid);
	return rc;
}
3359
3360 /* Retrieve an ACL from the server */
3361 static struct smb_ntsd *
get_smb2_acl(struct cifs_sb_info * cifs_sb,struct inode * inode,const char * path,u32 * pacllen,u32 info)3362 get_smb2_acl(struct cifs_sb_info *cifs_sb,
3363 struct inode *inode, const char *path,
3364 u32 *pacllen, u32 info)
3365 {
3366 struct smb_ntsd *pntsd = NULL;
3367 struct cifsFileInfo *open_file = NULL;
3368
3369 if (inode && !(info & SACL_SECINFO))
3370 open_file = find_readable_file(CIFS_I(inode), FIND_FSUID_ONLY);
3371 if (!open_file || (info & SACL_SECINFO))
3372 return get_smb2_acl_by_path(cifs_sb, path, pacllen, info);
3373
3374 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
3375 cifsFileInfo_put(open_file);
3376 return pntsd;
3377 }
3378
smb3_zero_data(struct file * file,struct cifs_tcon * tcon,loff_t offset,loff_t len,unsigned int xid)3379 static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
3380 loff_t offset, loff_t len, unsigned int xid)
3381 {
3382 struct cifsFileInfo *cfile = file->private_data;
3383 struct file_zero_data_information fsctl_buf;
3384
3385 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
3386
3387 fsctl_buf.FileOffset = cpu_to_le64(offset);
3388 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3389
3390 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3391 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
3392 (char *)&fsctl_buf,
3393 sizeof(struct file_zero_data_information),
3394 0, NULL, NULL);
3395 }
3396
smb3_zero_range(struct file * file,struct cifs_tcon * tcon,unsigned long long offset,unsigned long long len,bool keep_size)3397 static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
3398 unsigned long long offset, unsigned long long len,
3399 bool keep_size)
3400 {
3401 struct cifs_ses *ses = tcon->ses;
3402 struct inode *inode = file_inode(file);
3403 struct cifsInodeInfo *cifsi = CIFS_I(inode);
3404 struct cifsFileInfo *cfile = file->private_data;
3405 struct netfs_inode *ictx = netfs_inode(inode);
3406 unsigned long long i_size, new_size, remote_size;
3407 long rc;
3408 unsigned int xid;
3409
3410 xid = get_xid();
3411
3412 trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
3413 ses->Suid, offset, len);
3414
3415 filemap_invalidate_lock(inode->i_mapping);
3416
3417 i_size = i_size_read(inode);
3418 remote_size = ictx->remote_i_size;
3419 if (offset + len >= remote_size && offset < i_size) {
3420 unsigned long long top = umin(offset + len, i_size);
3421
3422 rc = filemap_write_and_wait_range(inode->i_mapping, offset, top - 1);
3423 if (rc < 0)
3424 goto zero_range_exit;
3425 }
3426
3427 /*
3428 * We zero the range through ioctl, so we need remove the page caches
3429 * first, otherwise the data may be inconsistent with the server.
3430 */
3431 truncate_pagecache_range(inode, offset, offset + len - 1);
3432 netfs_wait_for_outstanding_io(inode);
3433
3434 /* if file not oplocked can't be sure whether asking to extend size */
3435 rc = -EOPNOTSUPP;
3436 if (keep_size == false && !CIFS_CACHE_READ(cifsi))
3437 goto zero_range_exit;
3438
3439 rc = smb3_zero_data(file, tcon, offset, len, xid);
3440 if (rc < 0)
3441 goto zero_range_exit;
3442
3443 /*
3444 * do we also need to change the size of the file?
3445 */
3446 new_size = offset + len;
3447 if (keep_size == false && (unsigned long long)i_size_read(inode) < new_size) {
3448 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3449 cfile->fid.volatile_fid, cfile->pid, new_size);
3450 if (rc >= 0) {
3451 truncate_setsize(inode, new_size);
3452 netfs_resize_file(&cifsi->netfs, new_size, true);
3453 if (offset < cifsi->netfs.zero_point)
3454 cifsi->netfs.zero_point = offset;
3455 fscache_resize_cookie(cifs_inode_cookie(inode), new_size);
3456 }
3457 }
3458
3459 zero_range_exit:
3460 filemap_invalidate_unlock(inode->i_mapping);
3461 free_xid(xid);
3462 if (rc)
3463 trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
3464 ses->Suid, offset, len, rc);
3465 else
3466 trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
3467 ses->Suid, offset, len);
3468 return rc;
3469 }
3470
smb3_punch_hole(struct file * file,struct cifs_tcon * tcon,loff_t offset,loff_t len)3471 static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
3472 loff_t offset, loff_t len)
3473 {
3474 struct inode *inode = file_inode(file);
3475 struct cifsFileInfo *cfile = file->private_data;
3476 struct file_zero_data_information fsctl_buf;
3477 unsigned long long end = offset + len, i_size, remote_i_size;
3478 long rc;
3479 unsigned int xid;
3480 __u8 set_sparse = 1;
3481
3482 xid = get_xid();
3483
3484 /* Need to make file sparse, if not already, before freeing range. */
3485 /* Consider adding equivalent for compressed since it could also work */
3486 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
3487 rc = -EOPNOTSUPP;
3488 goto out;
3489 }
3490
3491 filemap_invalidate_lock(inode->i_mapping);
3492 /*
3493 * We implement the punch hole through ioctl, so we need remove the page
3494 * caches first, otherwise the data may be inconsistent with the server.
3495 */
3496 truncate_pagecache_range(inode, offset, offset + len - 1);
3497 netfs_wait_for_outstanding_io(inode);
3498
3499 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
3500
3501 fsctl_buf.FileOffset = cpu_to_le64(offset);
3502 fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3503
3504 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3505 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
3506 (char *)&fsctl_buf,
3507 sizeof(struct file_zero_data_information),
3508 CIFSMaxBufSize, NULL, NULL);
3509
3510 if (rc)
3511 goto unlock;
3512
3513 /* If there's dirty data in the buffer that would extend the EOF if it
3514 * were written, then we need to move the EOF marker over to the lower
3515 * of the high end of the hole and the proposed EOF. The problem is
3516 * that we locally hole-punch the tail of the dirty data, the proposed
3517 * EOF update will end up in the wrong place.
3518 */
3519 i_size = i_size_read(inode);
3520 remote_i_size = netfs_inode(inode)->remote_i_size;
3521 if (end > remote_i_size && i_size > remote_i_size) {
3522 unsigned long long extend_to = umin(end, i_size);
3523 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3524 cfile->fid.volatile_fid, cfile->pid, extend_to);
3525 if (rc >= 0)
3526 netfs_inode(inode)->remote_i_size = extend_to;
3527 }
3528
3529 unlock:
3530 filemap_invalidate_unlock(inode->i_mapping);
3531 out:
3532 free_xid(xid);
3533 return rc;
3534 }
3535
smb3_simple_fallocate_write_range(unsigned int xid,struct cifs_tcon * tcon,struct cifsFileInfo * cfile,loff_t off,loff_t len,char * buf)3536 static int smb3_simple_fallocate_write_range(unsigned int xid,
3537 struct cifs_tcon *tcon,
3538 struct cifsFileInfo *cfile,
3539 loff_t off, loff_t len,
3540 char *buf)
3541 {
3542 struct cifs_io_parms io_parms = {0};
3543 int nbytes;
3544 int rc = 0;
3545 struct kvec iov[2];
3546
3547 io_parms.netfid = cfile->fid.netfid;
3548 io_parms.pid = current->tgid;
3549 io_parms.tcon = tcon;
3550 io_parms.persistent_fid = cfile->fid.persistent_fid;
3551 io_parms.volatile_fid = cfile->fid.volatile_fid;
3552
3553 while (len) {
3554 io_parms.offset = off;
3555 io_parms.length = len;
3556 if (io_parms.length > SMB2_MAX_BUFFER_SIZE)
3557 io_parms.length = SMB2_MAX_BUFFER_SIZE;
3558 /* iov[0] is reserved for smb header */
3559 iov[1].iov_base = buf;
3560 iov[1].iov_len = io_parms.length;
3561 rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
3562 if (rc)
3563 break;
3564 if (nbytes > len)
3565 return -EINVAL;
3566 buf += nbytes;
3567 off += nbytes;
3568 len -= nbytes;
3569 }
3570 return rc;
3571 }
3572
smb3_simple_fallocate_range(unsigned int xid,struct cifs_tcon * tcon,struct cifsFileInfo * cfile,loff_t off,loff_t len)3573 static int smb3_simple_fallocate_range(unsigned int xid,
3574 struct cifs_tcon *tcon,
3575 struct cifsFileInfo *cfile,
3576 loff_t off, loff_t len)
3577 {
3578 struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data;
3579 u32 out_data_len;
3580 char *buf = NULL;
3581 loff_t l;
3582 int rc;
3583
3584 in_data.file_offset = cpu_to_le64(off);
3585 in_data.length = cpu_to_le64(len);
3586 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3587 cfile->fid.volatile_fid,
3588 FSCTL_QUERY_ALLOCATED_RANGES,
3589 (char *)&in_data, sizeof(in_data),
3590 1024 * sizeof(struct file_allocated_range_buffer),
3591 (char **)&out_data, &out_data_len);
3592 if (rc)
3593 goto out;
3594
3595 buf = kzalloc(1024 * 1024, GFP_KERNEL);
3596 if (buf == NULL) {
3597 rc = -ENOMEM;
3598 goto out;
3599 }
3600
3601 tmp_data = out_data;
3602 while (len) {
3603 /*
3604 * The rest of the region is unmapped so write it all.
3605 */
3606 if (out_data_len == 0) {
3607 rc = smb3_simple_fallocate_write_range(xid, tcon,
3608 cfile, off, len, buf);
3609 goto out;
3610 }
3611
3612 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3613 rc = -EINVAL;
3614 goto out;
3615 }
3616
3617 if (off < le64_to_cpu(tmp_data->file_offset)) {
3618 /*
3619 * We are at a hole. Write until the end of the region
3620 * or until the next allocated data,
3621 * whichever comes next.
3622 */
3623 l = le64_to_cpu(tmp_data->file_offset) - off;
3624 if (len < l)
3625 l = len;
3626 rc = smb3_simple_fallocate_write_range(xid, tcon,
3627 cfile, off, l, buf);
3628 if (rc)
3629 goto out;
3630 off = off + l;
3631 len = len - l;
3632 if (len == 0)
3633 goto out;
3634 }
3635 /*
3636 * We are at a section of allocated data, just skip forward
3637 * until the end of the data or the end of the region
3638 * we are supposed to fallocate, whichever comes first.
3639 */
3640 l = le64_to_cpu(tmp_data->length);
3641 if (len < l)
3642 l = len;
3643 off += l;
3644 len -= l;
3645
3646 tmp_data = &tmp_data[1];
3647 out_data_len -= sizeof(struct file_allocated_range_buffer);
3648 }
3649
3650 out:
3651 kfree(out_data);
3652 kfree(buf);
3653 return rc;
3654 }
3655
3656
smb3_simple_falloc(struct file * file,struct cifs_tcon * tcon,loff_t off,loff_t len,bool keep_size)3657 static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
3658 loff_t off, loff_t len, bool keep_size)
3659 {
3660 struct inode *inode;
3661 struct cifsInodeInfo *cifsi;
3662 struct cifsFileInfo *cfile = file->private_data;
3663 long rc = -EOPNOTSUPP;
3664 unsigned int xid;
3665 loff_t new_eof;
3666
3667 xid = get_xid();
3668
3669 inode = d_inode(cfile->dentry);
3670 cifsi = CIFS_I(inode);
3671
3672 trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
3673 tcon->ses->Suid, off, len);
3674 /* if file not oplocked can't be sure whether asking to extend size */
3675 if (!CIFS_CACHE_READ(cifsi))
3676 if (keep_size == false) {
3677 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3678 tcon->tid, tcon->ses->Suid, off, len, rc);
3679 free_xid(xid);
3680 return rc;
3681 }
3682
3683 /*
3684 * Extending the file
3685 */
3686 if ((keep_size == false) && i_size_read(inode) < off + len) {
3687 rc = inode_newsize_ok(inode, off + len);
3688 if (rc)
3689 goto out;
3690
3691 if (cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)
3692 smb2_set_sparse(xid, tcon, cfile, inode, false);
3693
3694 new_eof = off + len;
3695 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3696 cfile->fid.volatile_fid, cfile->pid, new_eof);
3697 if (rc == 0) {
3698 netfs_resize_file(&cifsi->netfs, new_eof, true);
3699 cifs_setsize(inode, new_eof);
3700 }
3701 goto out;
3702 }
3703
3704 /*
3705 * Files are non-sparse by default so falloc may be a no-op
3706 * Must check if file sparse. If not sparse, and since we are not
3707 * extending then no need to do anything since file already allocated
3708 */
3709 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
3710 rc = 0;
3711 goto out;
3712 }
3713
3714 if (keep_size == true) {
3715 /*
3716 * We can not preallocate pages beyond the end of the file
3717 * in SMB2
3718 */
3719 if (off >= i_size_read(inode)) {
3720 rc = 0;
3721 goto out;
3722 }
3723 /*
3724 * For fallocates that are partially beyond the end of file,
3725 * clamp len so we only fallocate up to the end of file.
3726 */
3727 if (off + len > i_size_read(inode)) {
3728 len = i_size_read(inode) - off;
3729 }
3730 }
3731
3732 if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
3733 /*
3734 * At this point, we are trying to fallocate an internal
3735 * regions of a sparse file. Since smb2 does not have a
3736 * fallocate command we have two options on how to emulate this.
3737 * We can either turn the entire file to become non-sparse
3738 * which we only do if the fallocate is for virtually
3739 * the whole file, or we can overwrite the region with zeroes
3740 * using SMB2_write, which could be prohibitevly expensive
3741 * if len is large.
3742 */
3743 /*
3744 * We are only trying to fallocate a small region so
3745 * just write it with zero.
3746 */
3747 if (len <= 1024 * 1024) {
3748 rc = smb3_simple_fallocate_range(xid, tcon, cfile,
3749 off, len);
3750 goto out;
3751 }
3752
3753 /*
3754 * Check if falloc starts within first few pages of file
3755 * and ends within a few pages of the end of file to
3756 * ensure that most of file is being forced to be
3757 * fallocated now. If so then setting whole file sparse
3758 * ie potentially making a few extra pages at the beginning
3759 * or end of the file non-sparse via set_sparse is harmless.
3760 */
3761 if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
3762 rc = -EOPNOTSUPP;
3763 goto out;
3764 }
3765 }
3766
3767 smb2_set_sparse(xid, tcon, cfile, inode, false);
3768 rc = 0;
3769
3770 out:
3771 if (rc)
3772 trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
3773 tcon->ses->Suid, off, len, rc);
3774 else
3775 trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
3776 tcon->ses->Suid, off, len);
3777
3778 free_xid(xid);
3779 return rc;
3780 }
3781
smb3_collapse_range(struct file * file,struct cifs_tcon * tcon,loff_t off,loff_t len)3782 static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
3783 loff_t off, loff_t len)
3784 {
3785 int rc;
3786 unsigned int xid;
3787 struct inode *inode = file_inode(file);
3788 struct cifsInodeInfo *cifsi = CIFS_I(inode);
3789 struct cifsFileInfo *cfile = file->private_data;
3790 struct netfs_inode *ictx = &cifsi->netfs;
3791 loff_t old_eof, new_eof;
3792
3793 xid = get_xid();
3794
3795 old_eof = i_size_read(inode);
3796 if ((off >= old_eof) ||
3797 off + len >= old_eof) {
3798 rc = -EINVAL;
3799 goto out;
3800 }
3801
3802 filemap_invalidate_lock(inode->i_mapping);
3803 rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof - 1);
3804 if (rc < 0)
3805 goto out_2;
3806
3807 truncate_pagecache_range(inode, off, old_eof);
3808 ictx->zero_point = old_eof;
3809 netfs_wait_for_outstanding_io(inode);
3810
3811 rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
3812 old_eof - off - len, off);
3813 if (rc < 0)
3814 goto out_2;
3815
3816 new_eof = old_eof - len;
3817 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3818 cfile->fid.volatile_fid, cfile->pid, new_eof);
3819 if (rc < 0)
3820 goto out_2;
3821
3822 rc = 0;
3823
3824 truncate_setsize(inode, new_eof);
3825 netfs_resize_file(&cifsi->netfs, new_eof, true);
3826 ictx->zero_point = new_eof;
3827 fscache_resize_cookie(cifs_inode_cookie(inode), new_eof);
3828 out_2:
3829 filemap_invalidate_unlock(inode->i_mapping);
3830 out:
3831 free_xid(xid);
3832 return rc;
3833 }
3834
smb3_insert_range(struct file * file,struct cifs_tcon * tcon,loff_t off,loff_t len)3835 static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
3836 loff_t off, loff_t len)
3837 {
3838 int rc;
3839 unsigned int xid;
3840 struct cifsFileInfo *cfile = file->private_data;
3841 struct inode *inode = file_inode(file);
3842 struct cifsInodeInfo *cifsi = CIFS_I(inode);
3843 __u64 count, old_eof, new_eof;
3844
3845 xid = get_xid();
3846
3847 old_eof = i_size_read(inode);
3848 if (off >= old_eof) {
3849 rc = -EINVAL;
3850 goto out;
3851 }
3852
3853 count = old_eof - off;
3854 new_eof = old_eof + len;
3855
3856 filemap_invalidate_lock(inode->i_mapping);
3857 rc = filemap_write_and_wait_range(inode->i_mapping, off, new_eof - 1);
3858 if (rc < 0)
3859 goto out_2;
3860 truncate_pagecache_range(inode, off, old_eof);
3861 netfs_wait_for_outstanding_io(inode);
3862
3863 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3864 cfile->fid.volatile_fid, cfile->pid, new_eof);
3865 if (rc < 0)
3866 goto out_2;
3867
3868 truncate_setsize(inode, new_eof);
3869 netfs_resize_file(&cifsi->netfs, i_size_read(inode), true);
3870 fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode));
3871
3872 rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
3873 if (rc < 0)
3874 goto out_2;
3875 cifsi->netfs.zero_point = new_eof;
3876
3877 rc = smb3_zero_data(file, tcon, off, len, xid);
3878 if (rc < 0)
3879 goto out_2;
3880
3881 rc = 0;
3882 out_2:
3883 filemap_invalidate_unlock(inode->i_mapping);
3884 out:
3885 free_xid(xid);
3886 return rc;
3887 }
3888
smb3_llseek(struct file * file,struct cifs_tcon * tcon,loff_t offset,int whence)3889 static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
3890 {
3891 struct cifsFileInfo *wrcfile, *cfile = file->private_data;
3892 struct cifsInodeInfo *cifsi;
3893 struct inode *inode;
3894 int rc = 0;
3895 struct file_allocated_range_buffer in_data, *out_data = NULL;
3896 u32 out_data_len;
3897 unsigned int xid;
3898
3899 if (whence != SEEK_HOLE && whence != SEEK_DATA)
3900 return generic_file_llseek(file, offset, whence);
3901
3902 inode = d_inode(cfile->dentry);
3903 cifsi = CIFS_I(inode);
3904
3905 if (offset < 0 || offset >= i_size_read(inode))
3906 return -ENXIO;
3907
3908 xid = get_xid();
3909 /*
3910 * We need to be sure that all dirty pages are written as they
3911 * might fill holes on the server.
3912 * Note that we also MUST flush any written pages since at least
3913 * some servers (Windows2016) will not reflect recent writes in
3914 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
3915 */
3916 wrcfile = find_writable_file(cifsi, FIND_ANY);
3917 if (wrcfile) {
3918 filemap_write_and_wait(inode->i_mapping);
3919 smb2_flush_file(xid, tcon, &wrcfile->fid);
3920 cifsFileInfo_put(wrcfile);
3921 }
3922
3923 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
3924 if (whence == SEEK_HOLE)
3925 offset = i_size_read(inode);
3926 goto lseek_exit;
3927 }
3928
3929 in_data.file_offset = cpu_to_le64(offset);
3930 in_data.length = cpu_to_le64(i_size_read(inode));
3931
3932 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3933 cfile->fid.volatile_fid,
3934 FSCTL_QUERY_ALLOCATED_RANGES,
3935 (char *)&in_data, sizeof(in_data),
3936 sizeof(struct file_allocated_range_buffer),
3937 (char **)&out_data, &out_data_len);
3938 if (rc == -E2BIG)
3939 rc = 0;
3940 if (rc)
3941 goto lseek_exit;
3942
3943 if (whence == SEEK_HOLE && out_data_len == 0)
3944 goto lseek_exit;
3945
3946 if (whence == SEEK_DATA && out_data_len == 0) {
3947 rc = -ENXIO;
3948 goto lseek_exit;
3949 }
3950
3951 if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3952 rc = -EINVAL;
3953 goto lseek_exit;
3954 }
3955 if (whence == SEEK_DATA) {
3956 offset = le64_to_cpu(out_data->file_offset);
3957 goto lseek_exit;
3958 }
3959 if (offset < le64_to_cpu(out_data->file_offset))
3960 goto lseek_exit;
3961
3962 offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);
3963
3964 lseek_exit:
3965 free_xid(xid);
3966 kfree(out_data);
3967 if (!rc)
3968 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3969 else
3970 return rc;
3971 }
3972
/*
 * Implement FIEMAP by repeatedly issuing FSCTL_QUERY_ALLOCATED_RANGES.
 * A truncated response (-E2BIG) is processed and the query resumed past
 * the last returned range until the server reports the final blob.
 *
 * Fix: free the previous response blob before each retry — SMB2_ioctl()
 * allocates a fresh buffer through @out_data, so looping back to
 * "again" without freeing leaked one blob per extra round trip.
 */
static int smb3_fiemap(struct cifs_tcon *tcon,
		       struct cifsFileInfo *cfile,
		       struct fiemap_extent_info *fei, u64 start, u64 len)
{
	unsigned int xid;
	struct file_allocated_range_buffer in_data, *out_data = NULL;
	u32 out_data_len;
	int i, num, rc, flags, last_blob;
	u64 next;

	rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0);
	if (rc)
		return rc;

	xid = get_xid();
 again:
	/* Release the blob from the previous iteration, if any. */
	kfree(out_data);
	out_data = NULL;

	in_data.file_offset = cpu_to_le64(start);
	in_data.length = cpu_to_le64(len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_QUERY_ALLOCATED_RANGES,
			(char *)&in_data, sizeof(in_data),
			1024 * sizeof(struct file_allocated_range_buffer),
			(char **)&out_data, &out_data_len);
	if (rc == -E2BIG) {
		/* Partial response: process it, then query the remainder. */
		last_blob = 0;
		rc = 0;
	} else
		last_blob = 1;
	if (rc)
		goto out;

	/* The payload must be a whole number of range records. */
	if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto out;
	}
	if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto out;
	}

	num = out_data_len / sizeof(struct file_allocated_range_buffer);
	for (i = 0; i < num; i++) {
		flags = 0;
		if (i == num - 1 && last_blob)
			flags |= FIEMAP_EXTENT_LAST;

		/* Logical and physical offsets are reported identically. */
		rc = fiemap_fill_next_extent(fei,
				le64_to_cpu(out_data[i].file_offset),
				le64_to_cpu(out_data[i].file_offset),
				le64_to_cpu(out_data[i].length),
				flags);
		if (rc < 0)
			goto out;
		if (rc == 1) {
			/* Caller's extent array is full; stop successfully. */
			rc = 0;
			goto out;
		}
	}

	if (!last_blob) {
		/* Resume just past the last range we received. */
		next = le64_to_cpu(out_data[num - 1].file_offset) +
		       le64_to_cpu(out_data[num - 1].length);
		len = len - (next - start);
		start = next;
		goto again;
	}

 out:
	free_xid(xid);
	kfree(out_data);
	return rc;
}
4047
/*
 * Dispatch an fallocate() request to the matching SMB3 implementation.
 * Flag-style modes (punch hole, zero range) are tested as bits; the
 * remaining modes must match exactly.  Unsupported combinations return
 * -EOPNOTSUPP.
 */
static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
			   loff_t off, loff_t len)
{
	/* KEEP_SIZE already checked for by do_fallocate */
	if (mode & FALLOC_FL_PUNCH_HOLE)
		return smb3_punch_hole(file, tcon, off, len);

	if (mode & FALLOC_FL_ZERO_RANGE)
		return smb3_zero_range(file, tcon, off, len,
				       mode & FALLOC_FL_KEEP_SIZE);

	switch (mode) {
	case FALLOC_FL_KEEP_SIZE:
		return smb3_simple_falloc(file, tcon, off, len, true);
	case FALLOC_FL_COLLAPSE_RANGE:
		return smb3_collapse_range(file, tcon, off, len);
	case FALLOC_FL_INSERT_RANGE:
		return smb3_insert_range(file, tcon, off, len);
	case 0:
		return smb3_simple_falloc(file, tcon, off, len, false);
	}

	return -EOPNOTSUPP;
}
4069
/*
 * Downgrade a plain (non-lease) SMB2 oplock during a break: simply apply
 * the new level.  The epoch and purge decision only exist for the
 * lease-based SMB2.1+/SMB3 variants, hence 0/NULL here.
 */
static void
smb2_downgrade_oplock(struct TCP_Server_Info *server,
		      struct cifsInodeInfo *cinode, __u32 oplock,
		      __u16 epoch, bool *purge_cache)
{
	lockdep_assert_held(&cinode->open_file_lock);
	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
4078
4079 static void
4080 smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4081 __u16 epoch, bool *purge_cache);
4082
/*
 * Downgrade an SMB3 lease during a break and decide whether the page
 * cache must be purged: either we lost read caching, or the epoch
 * skipped ahead while the state looks unchanged (meaning we missed an
 * intermediate state change).
 */
static void
smb3_downgrade_oplock(struct TCP_Server_Info *server,
		      struct cifsInodeInfo *cinode, __u32 oplock,
		      __u16 epoch, bool *purge_cache)
{
	unsigned int old_state = cinode->oplock;
	__u16 old_epoch = cinode->epoch;
	unsigned int new_state;

	/* Only apply the break if it is newer than our current epoch. */
	if (epoch > old_epoch) {
		smb21_set_oplock_level(cinode, oplock, 0, NULL);
		cinode->epoch = epoch;
	}

	new_state = cinode->oplock;
	*purge_cache = false;

	/* Losing read caching invalidates our cached pages. */
	if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
	    (new_state & CIFS_CACHE_READ_FLG) == 0)
		*purge_cache = true;
	else if (old_state == new_state && (epoch - old_epoch > 1))
		*purge_cache = true;
}
4106
4107 static void
smb2_set_oplock_level(struct cifsInodeInfo * cinode,__u32 oplock,__u16 epoch,bool * purge_cache)4108 smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
4109 __u16 epoch, bool *purge_cache)
4110 {
4111 oplock &= 0xFF;
4112 cinode->lease_granted = false;
4113 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
4114 return;
4115 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
4116 WRITE_ONCE(cinode->oplock, CIFS_CACHE_RHW_FLG);
4117 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
4118 &cinode->netfs.inode);
4119 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
4120 WRITE_ONCE(cinode->oplock, CIFS_CACHE_RW_FLG);
4121 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
4122 &cinode->netfs.inode);
4123 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
4124 WRITE_ONCE(cinode->oplock, CIFS_CACHE_READ_FLG);
4125 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
4126 &cinode->netfs.inode);
4127 } else
4128 WRITE_ONCE(cinode->oplock, 0);
4129 }
4130
/*
 * Record the lease state granted by the server on @cinode and log a
 * short "RHW"-style summary.  If the server actually granted an oplock
 * rather than a lease (exclusive bit set), defer to the SMB2 handler.
 */
static void
smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		       __u16 epoch, bool *purge_cache)
{
	char message[5] = {0};	/* holds up to "RHW" or "None" + NUL */
	unsigned int new_oplock = 0;

	oplock &= 0xFF;
	cinode->lease_granted = true;
	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
		return;

	/* Check if the server granted an oplock rather than a lease */
	if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
		return smb2_set_oplock_level(cinode, oplock, epoch,
					     purge_cache);

	/* Translate each lease-state bit into a cache flag + summary char. */
	if (oplock & SMB2_LEASE_READ_CACHING_HE) {
		new_oplock |= CIFS_CACHE_READ_FLG;
		strcat(message, "R");
	}
	if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
		new_oplock |= CIFS_CACHE_HANDLE_FLG;
		strcat(message, "H");
	}
	if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
		new_oplock |= CIFS_CACHE_WRITE_FLG;
		strcat(message, "W");
	}
	if (!new_oplock)
		strscpy(message, "None");

	WRITE_ONCE(cinode->oplock, new_oplock);
	cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
		 &cinode->netfs.inode);
}
4167
/*
 * SMB3 variant: apply the granted lease state, then use the lease epoch
 * delta to decide whether cached data must be purged.  A larger epoch
 * jump than the transition itself accounts for means we missed an
 * intermediate state change and can no longer trust the cache.
 */
static void
smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		      __u16 epoch, bool *purge_cache)
{
	unsigned int old_oplock = READ_ONCE(cinode->oplock);
	unsigned int new_oplock;

	smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
	new_oplock = READ_ONCE(cinode->oplock);

	if (purge_cache) {
		*purge_cache = false;
		if (old_oplock == CIFS_CACHE_READ_FLG) {
			/* R->R / R->none: any bump hides a change;
			 * R->RH / R->RHW: one bump is the upgrade itself. */
			if (new_oplock == CIFS_CACHE_READ_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (new_oplock == CIFS_CACHE_RH_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (new_oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (new_oplock == 0 &&
				 (epoch - cinode->epoch > 0))
				*purge_cache = true;
		} else if (old_oplock == CIFS_CACHE_RH_FLG) {
			if (new_oplock == CIFS_CACHE_RH_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (new_oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
		}
		cinode->epoch = epoch;
	}
}
4204
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/* SMB2.0: only a level II oplock grants read-but-not-write caching. */
static bool
smb2_is_read_op(__u32 oplock)
{
	return oplock == SMB2_OPLOCK_LEVEL_II;
}
#endif /* CIFS_ALLOW_INSECURE_LEGACY */
4212
4213 static bool
smb21_is_read_op(__u32 oplock)4214 smb21_is_read_op(__u32 oplock)
4215 {
4216 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
4217 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
4218 }
4219
4220 static __le32
map_oplock_to_lease(u8 oplock)4221 map_oplock_to_lease(u8 oplock)
4222 {
4223 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
4224 return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE;
4225 else if (oplock == SMB2_OPLOCK_LEVEL_II)
4226 return SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE;
4227 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
4228 return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE |
4229 SMB2_LEASE_WRITE_CACHING_LE;
4230 return 0;
4231 }
4232
4233 static char *
smb2_create_lease_buf(u8 * lease_key,u8 oplock,u8 * parent_lease_key,__le32 flags)4234 smb2_create_lease_buf(u8 *lease_key, u8 oplock, u8 *parent_lease_key, __le32 flags)
4235 {
4236 struct create_lease *buf;
4237
4238 buf = kzalloc_obj(struct create_lease);
4239 if (!buf)
4240 return NULL;
4241
4242 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
4243 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
4244
4245 buf->ccontext.DataOffset = cpu_to_le16(offsetof
4246 (struct create_lease, lcontext));
4247 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
4248 buf->ccontext.NameOffset = cpu_to_le16(offsetof
4249 (struct create_lease, Name));
4250 buf->ccontext.NameLength = cpu_to_le16(4);
4251 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
4252 buf->Name[0] = 'R';
4253 buf->Name[1] = 'q';
4254 buf->Name[2] = 'L';
4255 buf->Name[3] = 's';
4256 return (char *)buf;
4257 }
4258
4259 static char *
smb3_create_lease_buf(u8 * lease_key,u8 oplock,u8 * parent_lease_key,__le32 flags)4260 smb3_create_lease_buf(u8 *lease_key, u8 oplock, u8 *parent_lease_key, __le32 flags)
4261 {
4262 struct create_lease_v2 *buf;
4263
4264 buf = kzalloc_obj(struct create_lease_v2);
4265 if (!buf)
4266 return NULL;
4267
4268 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
4269 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
4270 buf->lcontext.LeaseFlags = flags;
4271 if (flags & SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE)
4272 memcpy(&buf->lcontext.ParentLeaseKey, parent_lease_key, SMB2_LEASE_KEY_SIZE);
4273
4274 buf->ccontext.DataOffset = cpu_to_le16(offsetof
4275 (struct create_lease_v2, lcontext));
4276 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
4277 buf->ccontext.NameOffset = cpu_to_le16(offsetof
4278 (struct create_lease_v2, Name));
4279 buf->ccontext.NameLength = cpu_to_le16(4);
4280 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
4281 buf->Name[0] = 'R';
4282 buf->Name[1] = 'q';
4283 buf->Name[2] = 'L';
4284 buf->Name[3] = 's';
4285 return (char *)buf;
4286 }
4287
/*
 * Extract the granted lease state from a v1 lease create context.
 * The v1 context carries no epoch, and @lease_key is not read back
 * here (only the v2 parser does that).  Returns NOCHANGE while a break
 * is in progress.
 */
static __u8
smb2_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key)
{
	struct create_lease *lc = (struct create_lease *)buf;

	*epoch = 0; /* not used */
	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
		return SMB2_OPLOCK_LEVEL_NOCHANGE;
	return le32_to_cpu(lc->lcontext.LeaseState);
}
4298
/*
 * Extract the granted lease state from a v2 lease create context,
 * returning the server's epoch via @epoch and, when @lease_key is
 * non-NULL, copying back the lease key the server echoed.  Returns
 * NOCHANGE while a break is in progress.
 */
static __u8
smb3_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key)
{
	struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;

	*epoch = le16_to_cpu(lc->lcontext.Epoch);
	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
		return SMB2_OPLOCK_LEVEL_NOCHANGE;
	if (lease_key)
		memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
	return le32_to_cpu(lc->lcontext.LeaseState);
}
4311
4312 static unsigned int
smb2_wp_retry_size(struct inode * inode)4313 smb2_wp_retry_size(struct inode *inode)
4314 {
4315 return min_t(unsigned int, CIFS_SB(inode->i_sb)->ctx->wsize,
4316 SMB2_MAX_BUFFER_SIZE);
4317 }
4318
/*
 * A directory handle needs an on-the-wire close only while it is still
 * valid; an invalidated handle has no server-side state left to close.
 */
static bool
smb2_dir_needs_close(struct cifsFileInfo *cfile)
{
	return !cfile->invalidHandle;
}
4324
/*
 * Initialize the SMB3 transform header that prefixes an encrypted
 * message: the original (plaintext) length, the encrypted flag, a
 * freshly generated nonce sized for the negotiated cipher, and the
 * session id copied from the SMB2 header of the request being wrapped.
 */
static void
fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
		   struct smb_rqst *old_rq, __le16 cipher_type)
{
	struct smb2_hdr *shdr =
		(struct smb2_hdr *)old_rq->rq_iov[0].iov_base;

	memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
	tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
	tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
	tr_hdr->Flags = cpu_to_le16(0x01);	/* 0x01 == encrypted flag */
	/* GCM and CCM ciphers use different nonce lengths. */
	if ((cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
	    (cipher_type == SMB2_ENCRYPTION_AES256_GCM))
		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
	else
		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
	memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
}
4343
/*
 * Allocate one contiguous buffer holding the AEAD IV, the aead_request
 * and the scatterlist array, each suitably aligned, for en/decrypting
 * @num_rqst requests.  On success fills in *iv, *req, sgt->sgl and
 * *num_sgs and returns the raw allocation (free with kfree_sensitive());
 * returns an ERR_PTR on failure.
 */
static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
				 int num_rqst, const u8 *sig, u8 **iv,
				 struct aead_request **req, struct sg_table *sgt,
				 unsigned int *num_sgs)
{
	unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
	unsigned int iv_size = crypto_aead_ivsize(tfm);
	unsigned int len;
	u8 *p;

	/* cifs_get_num_sgs() may encode a negative errno in the count. */
	*num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig);
	if (IS_ERR_VALUE((long)(int)*num_sgs))
		return ERR_PTR(*num_sgs);

	/* Account for worst-case alignment padding between the regions. */
	len = iv_size;
	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
	len = ALIGN(len, crypto_tfm_ctx_alignment());
	len += req_size;
	len = ALIGN(len, __alignof__(struct scatterlist));
	len += array_size(*num_sgs, sizeof(struct scatterlist));

	p = kzalloc(len, GFP_NOFS);
	if (!p)
		return ERR_PTR(-ENOMEM);

	/* Carve the aligned sub-regions out of the single allocation. */
	*iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1);
	*req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
						crypto_tfm_ctx_alignment());
	sgt->sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
						   __alignof__(struct scatterlist));
	return p;
}
4376
/*
 * Build the scatterlist covering every byte to be en/decrypted across
 * @num_rqst requests: each request's kvecs, then its iterator data,
 * and finally the signature buffer.  Returns the backing allocation
 * (free with kfree_sensitive()) or an ERR_PTR; *iv, *req and *sgl point
 * into that allocation.
 */
static void *smb2_get_aead_req(struct crypto_aead *tfm, struct smb_rqst *rqst,
			       int num_rqst, const u8 *sig, u8 **iv,
			       struct aead_request **req, struct scatterlist **sgl)
{
	struct sg_table sgtable = {};
	unsigned int skip, num_sgs, i, j;
	ssize_t rc;
	void *p;

	p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable, &num_sgs);
	if (IS_ERR(p))
		return ERR_CAST(p);

	sg_init_marker(sgtable.sgl, num_sgs);

	/*
	 * The first rqst has a transform header where the
	 * first 20 bytes are not part of the encrypted blob.
	 */
	skip = 20;

	for (i = 0; i < num_rqst; i++) {
		struct iov_iter *iter = &rqst[i].rq_iter;
		size_t count = iov_iter_count(iter);

		for (j = 0; j < rqst[i].rq_nvec; j++) {
			cifs_sg_set_buf(&sgtable,
					rqst[i].rq_iov[j].iov_base + skip,
					rqst[i].rq_iov[j].iov_len - skip);

			/* See the above comment on the 'skip' assignment */
			skip = 0;
		}
		sgtable.orig_nents = sgtable.nents;

		/* Append the iterator's data, then rewind it for the caller. */
		rc = extract_iter_to_sg(iter, count, &sgtable,
					num_sgs - sgtable.nents, 0);
		iov_iter_revert(iter, rc);
		sgtable.orig_nents = sgtable.nents;
	}

	/* The AEAD tag/signature is the final scatterlist entry. */
	cifs_sg_set_buf(&sgtable, sig, SMB2_SIGNATURE_SIZE);
	sg_mark_end(&sgtable.sgl[sgtable.nents - 1]);
	*sgl = sgtable.sgl;
	return p;
}
4423
/*
 * Copy the SMB3 encryption (@enc != 0) or decryption key of the session
 * identified by @ses_id into @key.  Secondary channels search their
 * primary server's session list.  Returns 0 on success or -EAGAIN when
 * no matching session exists (e.g. during reconnect).
 */
static int
smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
{
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses;
	u8 *ses_enc_key;

	/* If server is a channel, select the primary channel */
	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
		if (ses->Suid == ses_id) {
			/* ses_lock guards the key material while we copy it. */
			spin_lock(&ses->ses_lock);
			ses_enc_key = enc ? ses->smb3encryptionkey :
				ses->smb3decryptionkey;
			memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
			spin_unlock(&ses->ses_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			return 0;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);

	trace_smb3_ses_not_found(ses_id);

	return -EAGAIN;
}
4452 /*
4453 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
4454 * iov[0] - transform header (associate data),
4455 * iov[1-N] - SMB2 header and pages - data to encrypt.
4456 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
4457 * untouched.
4458 */
static int
crypt_message(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int enc, struct crypto_aead *tfm)
{
	struct smb2_transform_hdr *tr_hdr =
		(struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
	/*
	 * AAD is the transform header minus its first 20 bytes
	 * (presumably ProtocolId + Signature — matches the skip in
	 * smb2_get_aead_req()).
	 */
	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
	int rc = 0;
	struct scatterlist *sg;
	u8 sign[SMB2_SIGNATURE_SIZE] = {};
	u8 key[SMB3_ENC_DEC_KEY_SIZE];
	struct aead_request *req;
	u8 *iv;
	DECLARE_CRYPTO_WAIT(wait);
	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
	void *creq;

	/* Look up the per-session key named by the transform header. */
	rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
	if (rc) {
		cifs_server_dbg(FYI, "%s: Could not get %scryption key. sid: 0x%llx\n", __func__,
			 enc ? "en" : "de", le64_to_cpu(tr_hdr->SessionId));
		return rc;
	}

	/* AES-256 ciphers use the longer key size. */
	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
		rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
	else
		rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);

	if (rc) {
		cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
		return rc;
	}

	rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
	if (rc) {
		cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
		return rc;
	}

	creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg);
	if (IS_ERR(creq))
		return PTR_ERR(creq);

	/* For decrypt, the tag to verify is the header's signature. */
	if (!enc) {
		memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
		crypt_len += SMB2_SIGNATURE_SIZE;
	}

	/* GCM uses the nonce directly; CCM prefixes a flag byte of 3. */
	if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
	    (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
		memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
	else {
		iv[0] = 3;
		memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
	}

	aead_request_set_tfm(req, tfm);
	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
	aead_request_set_ad(req, assoc_data_len);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
				: crypto_aead_decrypt(req), &wait);

	/* On encrypt, store the generated tag as the message signature. */
	if (!rc && enc)
		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);

	/* The buffer held key-derived state; scrub it on free. */
	kfree_sensitive(creq);
	return rc;
}
4533
4534 /*
4535 * Copy data from an iterator to the folios in a folio queue buffer.
4536 */
cifs_copy_iter_to_folioq(struct iov_iter * iter,size_t size,struct folio_queue * buffer)4537 static bool cifs_copy_iter_to_folioq(struct iov_iter *iter, size_t size,
4538 struct folio_queue *buffer)
4539 {
4540 for (; buffer; buffer = buffer->next) {
4541 for (int s = 0; s < folioq_count(buffer); s++) {
4542 struct folio *folio = folioq_folio(buffer, s);
4543 size_t part = folioq_folio_size(buffer, s);
4544
4545 part = umin(part, size);
4546
4547 if (copy_folio_from_iter(folio, 0, part, iter) != part)
4548 return false;
4549 size -= part;
4550 }
4551 }
4552 return true;
4553 }
4554
4555 void
smb3_free_compound_rqst(int num_rqst,struct smb_rqst * rqst)4556 smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
4557 {
4558 for (int i = 0; i < num_rqst; i++)
4559 netfs_free_folioq_buffer(rqst[i].rq_buffer);
4560 }
4561
4562 /*
4563 * This function will initialize new_rq and encrypt the content.
4564 * The first entry, new_rq[0], only contains a single iov which contains
4565 * a smb2_transform_hdr and is pre-allocated by the caller.
4566 * This function then populates new_rq[1+] with the content from olq_rq[0+].
4567 *
4568 * The end result is an array of smb_rqst structures where the first structure
4569 * only contains a single iov for the transform header which we then can pass
4570 * to crypt_message().
4571 *
4572 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
4573 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
4574 */
static int
smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
		       struct smb_rqst *new_rq, struct smb_rqst *old_rq)
{
	struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
	unsigned int orig_len = 0;
	int rc = -ENOMEM;

	/* Mirror each old request into new_rq[1..], copying iter payload
	 * into a freshly allocated folio-queue buffer so the ciphertext
	 * does not overwrite the caller's pages. */
	for (int i = 1; i < num_rqst; i++) {
		struct smb_rqst *old = &old_rq[i - 1];
		struct smb_rqst *new = &new_rq[i];
		struct folio_queue *buffer = NULL;
		size_t size = iov_iter_count(&old->rq_iter);

		orig_len += smb_rqst_len(server, old);
		new->rq_iov = old->rq_iov;
		new->rq_nvec = old->rq_nvec;

		if (size > 0) {
			size_t cur_size = 0;
			rc = netfs_alloc_folioq_buffer(NULL, &buffer, &cur_size,
						       size, GFP_NOFS);
			if (rc < 0)
				goto err_free;

			new->rq_buffer = buffer;
			iov_iter_folio_queue(&new->rq_iter, ITER_SOURCE,
					     buffer, 0, 0, size);

			if (!cifs_copy_iter_to_folioq(&old->rq_iter, size, buffer)) {
				rc = smb_EIO1(smb_eio_trace_tx_copy_iter_to_buf, size);
				goto err_free;
			}
		}
	}

	/* fill the 1st iov with a transform header */
	fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);

	rc = crypt_message(server, num_rqst, new_rq, 1, server->secmech.enc);
	cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
	if (rc)
		goto err_free;

	return rc;

err_free:
	/* Free any buffers already attached to new_rq[1..] on failure. */
	smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
	return rc;
}
4625
4626 static int
smb3_is_transform_hdr(void * buf)4627 smb3_is_transform_hdr(void *buf)
4628 {
4629 struct smb2_transform_hdr *trhdr = buf;
4630
4631 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
4632 }
4633
/*
 * Decrypt an encrypted SMB3 PDU in place.  @buf holds the transform
 * header followed by @buf_data_size bytes of ciphertext; @iter, if
 * non-NULL, supplies the additional payload (e.g. read data pages).
 * When @is_offloaded, a private AEAD tfm is allocated so decryption can
 * run off the demultiplex thread.  On success the decrypted SMB2
 * message is moved to the start of @buf.
 */
static int
decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
		 unsigned int buf_data_size, struct iov_iter *iter,
		 bool is_offloaded)
{
	struct crypto_aead *tfm;
	struct smb_rqst rqst = {NULL};
	struct kvec iov[2];
	size_t iter_size = 0;
	int rc;

	/* iov[0] = transform header (AAD), iov[1] = ciphertext. */
	iov[0].iov_base = buf;
	iov[0].iov_len = sizeof(struct smb2_transform_hdr);
	iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
	iov[1].iov_len = buf_data_size;

	rqst.rq_iov = iov;
	rqst.rq_nvec = 2;
	if (iter) {
		rqst.rq_iter = *iter;
		iter_size = iov_iter_count(iter);
	}

	if (is_offloaded) {
		/* Own tfm: the server's secmech may be in use elsewhere. */
		if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
		    (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
			tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
		else
			tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
		if (IS_ERR(tfm)) {
			rc = PTR_ERR(tfm);
			cifs_server_dbg(VFS, "%s: Failed alloc decrypt TFM, rc=%d\n", __func__, rc);

			return rc;
		}
	} else {
		rc = smb3_crypto_aead_allocate(server);
		if (unlikely(rc))
			return rc;
		tfm = server->secmech.dec;
	}

	rc = crypt_message(server, 1, &rqst, 0, tfm);
	cifs_dbg(FYI, "Decrypt message returned %d\n", rc);

	if (is_offloaded)
		crypto_free_aead(tfm);

	if (rc)
		return rc;

	/* Slide the plaintext over the transform header. */
	memmove(buf, iov[1].iov_base, buf_data_size);

	if (!is_offloaded)
		server->total_read = buf_data_size + iter_size;

	return rc;
}
4692
/*
 * Copy @data_size bytes out of a folio queue into @iter, skipping the
 * first @skip bytes of the first folio.  Returns 0 on success or a
 * negative error if the destination iterator could not take all bytes.
 */
static int
cifs_copy_folioq_to_iter(struct folio_queue *folioq, size_t data_size,
			 size_t skip, struct iov_iter *iter)
{
	for (; folioq; folioq = folioq->next) {
		for (int s = 0; s < folioq_count(folioq); s++) {
			struct folio *folio = folioq_folio(folioq, s);
			size_t fsize = folio_size(folio);
			size_t n, len = umin(fsize - skip, data_size);

			n = copy_folio_to_iter(folio, skip, len, iter);
			if (n != len) {
				cifs_dbg(VFS, "%s: something went wrong\n", __func__);
				return smb_EIO2(smb_eio_trace_rx_copy_to_iter,
						n, len);
			}
			data_size -= n;
			/* The skip only applies to the very first folio. */
			skip = 0;
		}
	}

	return 0;
}
4716
/*
 * Process a (possibly large/encrypted) SMB2 read response.  The header
 * and any inline payload are in @buf (@buf_len bytes); bulk payload, if
 * any, is in the folio queue @buffer (@buffer_len bytes).  The data is
 * copied into the subrequest's destination iterator and the mid is
 * completed — either by setting mid_state directly (@is_offloaded) or
 * via dequeue_mid().  Returns 0 when the response was consumed (even if
 * it carried an error), negative otherwise.
 */
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		 char *buf, unsigned int buf_len, struct folio_queue *buffer,
		 unsigned int buffer_len, bool is_offloaded)
{
	unsigned int data_offset;
	unsigned int data_len;
	unsigned int cur_off;
	unsigned int cur_page_idx;
	unsigned int pad_len;
	struct cifs_io_subrequest *rdata = mid->callback_data;
	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
	size_t copied;
	bool use_rdma_mr = false;

	if (shdr->Command != SMB2_READ) {
		cifs_server_dbg(VFS, "only big read responses are supported\n");
		return -EOPNOTSUPP;
	}

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		if (!is_offloaded)
			cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
			server->ops->is_status_pending(buf, server))
		return -1;

	/* set up first two iov to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = 0;
	rdata->iov[1].iov_base = buf;
	rdata->iov[1].iov_len =
		min_t(unsigned int, buf_len, server->vals->read_rsp_size);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	rdata->result = server->ops->map_error(buf, true);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_RECEIVED;
		else
			dequeue_mid(server, mid, false);
		return 0;
	}

	data_offset = server->ops->read_data_offset(buf);
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);

	if (data_offset < server->vals->read_rsp_size) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->vals->read_rsp_size;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = smb_EIO1(smb_eio_trace_rx_overlong, data_offset);
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_MALFORMED;
		else
			dequeue_mid(server, mid, rdata->result);
		return 0;
	}

	/* Padding between the response header and the payload, if any. */
	pad_len = data_offset - server->vals->read_rsp_size;

	if (buf_len <= data_offset) {
		/* read response payload is in pages */
		cur_page_idx = pad_len / PAGE_SIZE;
		cur_off = pad_len % PAGE_SIZE;

		if (cur_page_idx != 0) {
			/* data offset is beyond the 1st page of response */
			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
				 __func__, data_offset);
			rdata->result = smb_EIO1(smb_eio_trace_rx_overpage, data_offset);
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(server, mid, rdata->result);
			return 0;
		}

		if (data_len > buffer_len - pad_len) {
			/* data_len is corrupt -- discard frame */
			rdata->result = smb_EIO1(smb_eio_trace_rx_bad_datalen, data_len);
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(server, mid, rdata->result);
			return 0;
		}

		/* Copy the data to the output I/O iterator. */
		rdata->result = cifs_copy_folioq_to_iter(buffer, buffer_len,
							 cur_off, &rdata->subreq.io_iter);
		if (rdata->result != 0) {
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(server, mid, rdata->result);
			return 0;
		}
		rdata->got_bytes = buffer_len;

	} else if (buf_len >= data_offset + data_len) {
		/* read response payload is in buf */
		WARN_ONCE(buffer, "read data can be either in buf or in buffer");
		copied = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter);
		if (copied == 0)
			return smb_EIO2(smb_eio_trace_rx_copy_to_iter, copied, data_len);
		rdata->got_bytes = copied;
	} else {
		/* read response payload cannot be in both buf and pages */
		WARN_ONCE(1, "buf can not contain only a part of read data");
		rdata->result = smb_EIO(smb_eio_trace_rx_both_buf);
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_MALFORMED;
		else
			dequeue_mid(server, mid, rdata->result);
		return 0;
	}

	if (is_offloaded)
		mid->mid_state = MID_RESPONSE_RECEIVED;
	else
		dequeue_mid(server, mid, false);
	return 0;
}
4863
/*
 * Work item used to offload decryption of a large encrypted READ response
 * to the decrypt workqueue (queued by receive_encrypted_read(), run by
 * smb2_decrypt_offload()).
 */
struct smb2_decrypt_work {
	struct work_struct decrypt;	/* queued on decrypt_wq */
	struct TCP_Server_Info *server;
	struct folio_queue *buffer;	/* encrypted READ payload */
	char *buf;	/* transform + response headers (taken over from server->smallbuf) */
	unsigned int len;	/* payload length in @buffer, in bytes */
};
4871
4872
/*
 * Workqueue handler: decrypt a large READ response off the demultiplex
 * thread.  dw->buf holds the transform + read response headers, dw->buffer
 * holds the (still encrypted) payload of dw->len bytes.  Owns and frees
 * dw, dw->buf and dw->buffer on all paths.
 */
static void smb2_decrypt_offload(struct work_struct *work)
{
	struct smb2_decrypt_work *dw = container_of(work,
				struct smb2_decrypt_work, decrypt);
	int rc;
	struct mid_q_entry *mid;
	struct iov_iter iter;

	iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, dw->len);
	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
			      &iter, true);
	if (rc) {
		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
		goto free_pages;
	}

	dw->server->lstrp = jiffies;	/* record time of last server response */
	mid = smb2_find_dequeue_mid(dw->server, dw->buf);
	if (mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		mid->decrypted = true;
		rc = handle_read_data(dw->server, mid, dw->buf,
				      dw->server->vals->read_rsp_size,
				      dw->buffer, dw->len,
				      true);
		if (rc >= 0) {
#ifdef CONFIG_CIFS_STATS2
			mid->when_received = jiffies;
#endif
			if (dw->server->ops->is_network_name_deleted)
				dw->server->ops->is_network_name_deleted(dw->buf,
									 dw->server);

			mid_execute_callback(dw->server, mid);
		} else {
			/* note srv_lock is taken before mid_queue_lock */
			spin_lock(&dw->server->srv_lock);
			if (dw->server->tcpStatus == CifsNeedReconnect) {
				/* reconnect pending: fail the mid so it is retried */
				spin_lock(&dw->server->mid_queue_lock);
				mid->mid_state = MID_RETRY_NEEDED;
				spin_unlock(&dw->server->mid_queue_lock);
				spin_unlock(&dw->server->srv_lock);
				mid_execute_callback(dw->server, mid);
			} else {
				/* put the mid back on the pending queue */
				spin_lock(&dw->server->mid_queue_lock);
				mid->mid_state = MID_REQUEST_SUBMITTED;
				mid->deleted_from_q = false;
				list_add_tail(&mid->qhead,
					      &dw->server->pending_mid_q);
				spin_unlock(&dw->server->mid_queue_lock);
				spin_unlock(&dw->server->srv_lock);
			}
		}
		release_mid(dw->server, mid);
	}

free_pages:
	netfs_free_folioq_buffer(dw->buffer);
	cifs_small_buf_release(dw->buf);
	kfree(dw);
}
4934
4935
/*
 * Receive an encrypted frame that is too large to be anything but a READ
 * response: pull the transform + read response headers off the socket into
 * server->smallbuf, read the encrypted payload into a folio-queue buffer,
 * then either decrypt inline or hand the work to the decrypt workqueue.
 *
 * Returns 0 on success (with *mid set if found), -1 when ownership was
 * transferred to the offload worker (*num_mids is then 0), or a negative
 * errno on failure.
 */
static int
receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
		       int *num_mids)
{
	char *buf = server->smallbuf;
	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
	struct iov_iter iter;
	unsigned int len;
	unsigned int buflen = server->pdu_size;
	int rc;
	struct smb2_decrypt_work *dw;

	dw = kzalloc_obj(struct smb2_decrypt_work);
	if (!dw)
		return -ENOMEM;
	INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
	dw->server = server;

	*num_mids = 1;
	/* read the remainder of the transform hdr + read response header */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
		    sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;

	rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
	if (rc < 0)
		goto free_dw;
	server->total_read += rc;

	/* the decrypted frame must be able to hold at least a read response */
	if (le32_to_cpu(tr_hdr->OriginalMessageSize) <
	    server->vals->read_rsp_size) {
		cifs_server_dbg(VFS, "OriginalMessageSize %u too small for read response (%zu)\n",
				le32_to_cpu(tr_hdr->OriginalMessageSize),
				server->vals->read_rsp_size);
		rc = -EINVAL;
		goto discard_data;
	}
	/* payload length = decrypted frame minus the read response header */
	len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
		server->vals->read_rsp_size;
	dw->len = len;
	/* round up so the receive buffer is made of whole pages */
	len = round_up(dw->len, PAGE_SIZE);

	size_t cur_size = 0;
	rc = netfs_alloc_folioq_buffer(NULL, &dw->buffer, &cur_size, len, GFP_NOFS);
	if (rc < 0)
		goto discard_data;

	iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, len);

	/* Read the data into the buffer and clear excess bufferage. */
	rc = cifs_read_iter_from_socket(server, &iter, dw->len);
	if (rc < 0)
		goto discard_data;

	server->total_read += rc;
	if (rc < len) {
		/* zero the tail of the buffer beyond what was received */
		struct iov_iter tmp = iter;

		iov_iter_advance(&tmp, rc);
		iov_iter_zero(len - rc, &tmp);
	}
	iov_iter_truncate(&iter, dw->len);

	rc = cifs_discard_remaining_data(server);
	if (rc)
		goto free_pages;

	/*
	 * For large reads, offload to different thread for better performance,
	 * use more cores decrypting which can be expensive
	 */

	if ((server->min_offload) && (server->in_flight > 1) &&
	    (server->pdu_size >= server->min_offload)) {
		/* hand smallbuf over to the worker and replace it */
		dw->buf = server->smallbuf;
		server->smallbuf = (char *)cifs_small_buf_get();

		queue_work(decrypt_wq, &dw->decrypt);
		*num_mids = 0; /* worker thread takes care of finding mid */
		return -1;
	}

	rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
			      &iter, false);
	if (rc)
		goto free_pages;

	*mid = smb2_find_mid(server, buf);
	if (*mid == NULL) {
		cifs_dbg(FYI, "mid not found\n");
	} else {
		cifs_dbg(FYI, "mid found\n");
		(*mid)->decrypted = true;
		rc = handle_read_data(server, *mid, buf,
				      server->vals->read_rsp_size,
				      dw->buffer, dw->len, false);
		if (rc >= 0) {
			if (server->ops->is_network_name_deleted) {
				server->ops->is_network_name_deleted(buf,
								     server);
			}
		}
	}

free_pages:
	netfs_free_folioq_buffer(dw->buffer);
free_dw:
	kfree(dw);
	return rc;
discard_data:
	cifs_discard_remaining_data(server);
	goto free_pages;
}
5047
/*
 * Receive and decrypt an encrypted frame carrying one or more non-READ
 * responses.  The whole PDU is read into a single buffer and decrypted in
 * place, then each compounded response is carved out and dispatched, up to
 * MAX_COMPOUND of them.  bufs[]/mids[] are filled per response and
 * *num_mids counts how many were produced.
 */
static int
receive_encrypted_standard(struct TCP_Server_Info *server,
			   struct mid_q_entry **mids, char **bufs,
			   int *num_mids)
{
	int ret, length;
	char *buf = server->smallbuf;
	struct smb2_hdr *shdr;
	unsigned int pdu_length = server->pdu_size;
	unsigned int buf_size;
	unsigned int next_cmd;
	struct mid_q_entry *mid_entry;
	int next_is_large;
	char *next_buffer = NULL;

	*num_mids = 0;

	/* switch to large buffer if too big for a small one */
	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
		server->large_buf = true;
		memcpy(server->bigbuf, buf, server->total_read);
		buf = server->bigbuf;
	}

	/* now read the rest */
	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
				       pdu_length - HEADER_SIZE(server) + 1);
	if (length < 0)
		return length;
	server->total_read += length;

	/* decrypt in place; plaintext is the PDU minus the transform header */
	buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
	length = decrypt_raw_data(server, buf, buf_size, NULL, false);
	if (length)
		return length;

	next_is_large = server->large_buf;
one_more:
	shdr = (struct smb2_hdr *)buf;
	next_cmd = le32_to_cpu(shdr->NextCommand);
	if (next_cmd) {
		if (WARN_ON_ONCE(next_cmd > pdu_length))
			return -1;
		/*
		 * Copy the trailing compounded responses into a fresh buffer
		 * before the current buffer is handed off to the mid handler.
		 */
		if (next_is_large)
			next_buffer = (char *)cifs_buf_get();
		else
			next_buffer = (char *)cifs_small_buf_get();
		if (!next_buffer) {
			cifs_server_dbg(VFS, "No memory for (large) SMB response\n");
			return -1;
		}
		memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd);
	}

	mid_entry = smb2_find_mid(server, buf);
	if (mid_entry == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		cifs_dbg(FYI, "mid found\n");
		mid_entry->decrypted = true;
		mid_entry->resp_buf_size = server->pdu_size;
	}

	if (*num_mids >= MAX_COMPOUND) {
		cifs_server_dbg(VFS, "too many PDUs in compound\n");
		return -1;
	}
	bufs[*num_mids] = buf;
	mids[(*num_mids)++] = mid_entry;

	/* prefer the mid's own receive handler when it has one */
	if (mid_entry && mid_entry->handle)
		ret = mid_entry->handle(server, mid_entry);
	else
		ret = cifs_handle_standard(server, mid_entry);

	if (ret == 0 && next_cmd) {
		pdu_length -= next_cmd;
		server->large_buf = next_is_large;
		if (next_is_large)
			server->bigbuf = buf = next_buffer;
		else
			server->smallbuf = buf = next_buffer;
		goto one_more;
	} else if (ret != 0) {
		/*
		 * ret != 0 here means that we didn't get to handle_mid() thus
		 * server->smallbuf and server->bigbuf are still valid. We need
		 * to free next_buffer because it is not going to be used
		 * anywhere.
		 */
		if (next_is_large)
			free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
		else
			free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
	}

	return ret;
}
5146
5147 static int
smb3_receive_transform(struct TCP_Server_Info * server,struct mid_q_entry ** mids,char ** bufs,int * num_mids)5148 smb3_receive_transform(struct TCP_Server_Info *server,
5149 struct mid_q_entry **mids, char **bufs, int *num_mids)
5150 {
5151 char *buf = server->smallbuf;
5152 unsigned int pdu_length = server->pdu_size;
5153 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
5154 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
5155
5156 if (pdu_length < sizeof(struct smb2_transform_hdr) +
5157 sizeof(struct smb2_hdr)) {
5158 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
5159 pdu_length);
5160 cifs_reconnect(server, true);
5161 return -ECONNABORTED;
5162 }
5163
5164 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
5165 cifs_server_dbg(VFS, "Transform message is broken\n");
5166 cifs_reconnect(server, true);
5167 return -ECONNABORTED;
5168 }
5169
5170 /* TODO: add support for compounds containing READ. */
5171 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
5172 return receive_encrypted_read(server, &mids[0], num_mids);
5173 }
5174
5175 return receive_encrypted_standard(server, mids, bufs, num_mids);
5176 }
5177
5178 int
smb3_handle_read_data(struct TCP_Server_Info * server,struct mid_q_entry * mid)5179 smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
5180 {
5181 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
5182
5183 return handle_read_data(server, mid, buf, server->pdu_size,
5184 NULL, 0, false);
5185 }
5186
smb2_next_header(struct TCP_Server_Info * server,char * buf,unsigned int * noff)5187 static int smb2_next_header(struct TCP_Server_Info *server, char *buf,
5188 unsigned int *noff)
5189 {
5190 struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
5191 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
5192
5193 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
5194 *noff = le32_to_cpu(t_hdr->OriginalMessageSize);
5195 if (unlikely(check_add_overflow(*noff, sizeof(*t_hdr), noff)))
5196 return -EINVAL;
5197 } else {
5198 *noff = le32_to_cpu(hdr->NextCommand);
5199 }
5200 if (unlikely(*noff && *noff < MID_HEADER_SIZE(server)))
5201 return -EINVAL;
5202 return 0;
5203 }
5204
/*
 * Create a special file (char/block device, fifo, socket or symlink) in
 * SFU (Services for Unix) format: a regular file with the ATTR_SYSTEM
 * attribute whose contents are an 8-byte type tag followed by
 * type-specific data (device major/minor pair, or UTF-16 symlink target).
 *
 * Returns 0 on success or a negative errno; on any failure after the
 * CREATE succeeded the intermediate file is unlinked again.
 */
int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
			 struct dentry *dentry, struct cifs_tcon *tcon,
			 const char *full_path, umode_t mode, dev_t dev,
			 const char *symname)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	struct cifs_open_info_data idata;
	struct cifs_io_parms io_parms = {};
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_fid fid;
	unsigned int bytes_written;
	u8 type[8];		/* 8-byte SFU type tag written at file offset 0 */
	int type_len = 0;
	struct {
		__le64 major;
		__le64 minor;
	} __packed pdev = {};	/* device numbers; zeroed for fifo/socket */
	__le16 *symname_utf16 = NULL;
	u8 *data = NULL;	/* payload written after the type tag */
	int data_len = 0;
	struct kvec iov[3];
	__u32 oplock = server->oplocks ? REQ_OPLOCK : 0;
	int rc;

	switch (mode & S_IFMT) {
	case S_IFCHR:
		type_len = 8;
		memcpy(type, "IntxCHR\0", type_len);
		pdev.major = cpu_to_le64(MAJOR(dev));
		pdev.minor = cpu_to_le64(MINOR(dev));
		data = (u8 *)&pdev;
		data_len = sizeof(pdev);
		break;
	case S_IFBLK:
		type_len = 8;
		memcpy(type, "IntxBLK\0", type_len);
		pdev.major = cpu_to_le64(MAJOR(dev));
		pdev.minor = cpu_to_le64(MINOR(dev));
		data = (u8 *)&pdev;
		data_len = sizeof(pdev);
		break;
	case S_IFLNK:
		type_len = 8;
		memcpy(type, "IntxLNK\1", type_len);
		symname_utf16 = cifs_strndup_to_utf16(symname, strlen(symname),
						      &data_len, cifs_sb->local_nls,
						      NO_MAP_UNI_RSVD);
		if (!symname_utf16) {
			rc = -ENOMEM;
			goto out;
		}
		data_len -= 2; /* symlink is without trailing wide-nul */
		data = (u8 *)symname_utf16;
		break;
	case S_IFSOCK:
		type_len = 8;
		strscpy(type, "LnxSOCK");
		data = (u8 *)&pdev;
		data_len = sizeof(pdev);
		break;
	case S_IFIFO:
		type_len = 8;
		strscpy(type, "LnxFIFO");
		data = (u8 *)&pdev;
		data_len = sizeof(pdev);
		break;
	default:
		rc = -EPERM;
		goto out;
	}

	oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, GENERIC_WRITE,
			     FILE_CREATE, CREATE_NOT_DIR |
			     CREATE_OPTION_SPECIAL, ACL_NO_MODE);
	oparms.fid = &fid;
	idata.contains_posix_file_info = false;
	rc = server->ops->open(xid, &oparms, &oplock, &idata);
	if (rc)
		goto out;

	/*
	 * Check if the server honored ATTR_SYSTEM flag by CREATE_OPTION_SPECIAL
	 * option. If not then server does not support ATTR_SYSTEM and newly
	 * created file is not SFU compatible, which means that the call failed.
	 */
	if (!(le32_to_cpu(idata.fi.Attributes) & ATTR_SYSTEM)) {
		rc = -EOPNOTSUPP;
		goto out_close;
	}

	if (type_len + data_len > 0) {
		io_parms.pid = current->tgid;
		io_parms.tcon = tcon;
		io_parms.length = type_len + data_len;
		/*
		 * NOTE(review): iov[0] appears reserved for the protocol
		 * header by ->sync_write (count passed is ARRAY_SIZE(iov) - 1);
		 * only iov[1..2] are filled here.
		 */
		iov[1].iov_base = type;
		iov[1].iov_len = type_len;
		iov[2].iov_base = data;
		iov[2].iov_len = data_len;

		rc = server->ops->sync_write(xid, &fid, &io_parms,
					     &bytes_written,
					     iov, ARRAY_SIZE(iov)-1);
	}

out_close:
	server->ops->close(xid, tcon, &fid);

	/*
	 * If CREATE was successful but either setting ATTR_SYSTEM failed or
	 * writing type/data information failed then remove the intermediate
	 * object created by CREATE. Otherwise intermediate empty object stay
	 * on the server.
	 */
	if (rc)
		server->ops->unlink(xid, tcon, full_path, cifs_sb, NULL);

out:
	kfree(symname_utf16);
	return rc;
}
5326
/*
 * Create an SFU-style special file and instantiate a dentry for it.
 * After the on-the-wire create succeeds, the inode metadata is fetched
 * via whichever query path matches the mount (POSIX extensions, unix
 * extensions, or plain SMB2/3).
 */
int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
		       struct dentry *dentry, struct cifs_tcon *tcon,
		       const char *full_path, umode_t mode, dev_t dev)
{
	struct inode *newinode = NULL;
	int rc = __cifs_sfu_make_node(xid, inode, dentry, tcon,
				      full_path, mode, dev, NULL);

	if (rc)
		return rc;

	/* refresh inode info for the object just created */
	if (tcon->posix_extensions)
		rc = smb311_posix_get_inode_info(&newinode, full_path, NULL,
						 inode->i_sb, xid);
	else if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&newinode, full_path,
					      inode->i_sb, xid);
	else
		rc = cifs_get_inode_info(&newinode, full_path, NULL,
					 inode->i_sb, xid, NULL);

	if (rc == 0)
		d_instantiate(dentry, newinode);
	return rc;
}
5353
/*
 * mknod entry point for SMB2+: pick the special-file emulation scheme.
 * With the 'sfu' mount option, use SFU-format system files; otherwise
 * fall back to reparse points when the tcon supports them.
 */
static int smb2_make_node(unsigned int xid, struct inode *inode,
			  struct dentry *dentry, struct cifs_tcon *tcon,
			  const char *full_path, umode_t mode, dev_t dev)
{
	unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode));

	/*
	 * Check if mounted with mount parm 'sfu' mount parm.
	 * SFU emulation should work with all servers, but only
	 * supports block and char device, socket & fifo,
	 * and was used by default in earlier versions of Windows
	 */
	if (sbflags & CIFS_MOUNT_UNX_EMUL)
		return cifs_sfu_make_node(xid, inode, dentry, tcon,
					  full_path, mode, dev);
	if (CIFS_REPARSE_SUPPORT(tcon))
		return mknod_reparse(xid, inode, dentry, tcon,
				     full_path, mode, dev);
	return -EOPNOTSUPP;
}
5376
5377 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Dispatch table for the original SMB 2.0 dialect.  Only built when
 * CONFIG_CIFS_ALLOW_INSECURE_LEGACY is enabled (see surrounding #ifdef).
 */
struct smb_version_operations smb20_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = cifs_wait_mtu_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb2_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
	.negotiate_rsize = smb2_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb2_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.query_reparse_point = smb2_query_reparse_point,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.get_reparse_point_buffer = smb2_get_reparse_point_buffer,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.create_reparse_inode = smb2_create_reparse_inode,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.is_read_op = smb2_is_read_op,
	.set_oplock_level = smb2_set_oplock_level,
	.create_lease_buf = smb2_create_lease_buf,
	.parse_lease_buf = smb2_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
	.is_network_name_deleted = smb2_is_network_name_deleted,
	.rename_pending_delete = smb2_rename_pending_delete,
};
5478 #endif /* CIFS_ALLOW_INSECURE_LEGACY */
5479
/*
 * Dispatch table for the SMB 2.1 dialect.  Differs from smb20_operations
 * mainly in MTU credit handling (smb2_wait_mtu_credits/adjust_credits),
 * 2.1 oplock levels, and snapshot/notify support.
 */
struct smb_version_operations smb21_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb2_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
	.negotiate_rsize = smb2_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb2_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.query_reparse_point = smb2_query_reparse_point,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.get_reparse_point_buffer = smb2_get_reparse_point_buffer,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.create_reparse_inode = smb2_create_reparse_inode,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb21_set_oplock_level,
	.create_lease_buf = smb2_create_lease_buf,
	.parse_lease_buf = smb2_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
	.is_network_name_deleted = smb2_is_network_name_deleted,
	.rename_pending_delete = smb2_rename_pending_delete,
};
5583
/*
 * Dispatch table for the SMB 3.0 dialect.  Adds (relative to 2.1):
 * SMB3 signing key generation, encryption/transform handling, v2 lease
 * buffers, duplicate extents, fallocate, negotiate validation, and
 * server-interface queries.
 */
struct smb_version_operations smb30_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb3_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.query_server_interfaces = SMB3_request_interfaces,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	/* WSL tags introduced long after smb2.1, enable for SMB3, 3.11 only */
	.query_reparse_point = smb2_query_reparse_point,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.get_reparse_point_buffer = smb2_get_reparse_point_buffer,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.create_reparse_inode = smb2_create_reparse_inode,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.close_getattr = smb2_close_getattr,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb30signingkey,
	.set_integrity = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
	.validate_negotiate = smb3_validate_negotiate,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
	.is_network_name_deleted = smb2_is_network_name_deleted,
	.rename_pending_delete = smb2_rename_pending_delete,
};
5699
5700 struct smb_version_operations smb311_operations = {
5701 .compare_fids = smb2_compare_fids,
5702 .setup_request = smb2_setup_request,
5703 .setup_async_request = smb2_setup_async_request,
5704 .check_receive = smb2_check_receive,
5705 .add_credits = smb2_add_credits,
5706 .set_credits = smb2_set_credits,
5707 .get_credits_field = smb2_get_credits_field,
5708 .get_credits = smb2_get_credits,
5709 .wait_mtu_credits = smb2_wait_mtu_credits,
5710 .adjust_credits = smb2_adjust_credits,
5711 .get_next_mid = smb2_get_next_mid,
5712 .revert_current_mid = smb2_revert_current_mid,
5713 .read_data_offset = smb2_read_data_offset,
5714 .read_data_length = smb2_read_data_length,
5715 .map_error = map_smb2_to_linux_error,
5716 .find_mid = smb2_find_mid,
5717 .check_message = smb2_check_message,
5718 .dump_detail = smb2_dump_detail,
5719 .clear_stats = smb2_clear_stats,
5720 .print_stats = smb2_print_stats,
5721 .dump_share_caps = smb2_dump_share_caps,
5722 .is_oplock_break = smb2_is_valid_oplock_break,
5723 .handle_cancelled_mid = smb2_handle_cancelled_mid,
5724 .downgrade_oplock = smb3_downgrade_oplock,
5725 .need_neg = smb2_need_neg,
5726 .negotiate = smb2_negotiate,
5727 .negotiate_wsize = smb3_negotiate_wsize,
5728 .negotiate_rsize = smb3_negotiate_rsize,
5729 .sess_setup = SMB2_sess_setup,
5730 .logoff = SMB2_logoff,
5731 .tree_connect = SMB2_tcon,
5732 .tree_disconnect = SMB2_tdis,
5733 .qfs_tcon = smb3_qfs_tcon,
5734 .query_server_interfaces = SMB3_request_interfaces,
5735 .is_path_accessible = smb2_is_path_accessible,
5736 .can_echo = smb2_can_echo,
5737 .echo = SMB2_echo,
5738 .query_path_info = smb2_query_path_info,
5739 .query_reparse_point = smb2_query_reparse_point,
5740 .get_srv_inum = smb2_get_srv_inum,
5741 .query_file_info = smb2_query_file_info,
5742 .set_path_size = smb2_set_path_size,
5743 .set_file_size = smb2_set_file_size,
5744 .set_file_info = smb2_set_file_info,
5745 .set_compression = smb2_set_compression,
5746 .mkdir = smb2_mkdir,
5747 .mkdir_setinfo = smb2_mkdir_setinfo,
5748 .posix_mkdir = smb311_posix_mkdir,
5749 .rmdir = smb2_rmdir,
5750 .unlink = smb2_unlink,
5751 .rename = smb2_rename_path,
5752 .create_hardlink = smb2_create_hardlink,
5753 .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
5754 .query_mf_symlink = smb3_query_mf_symlink,
5755 .create_mf_symlink = smb3_create_mf_symlink,
5756 .create_reparse_inode = smb2_create_reparse_inode,
5757 .open = smb2_open_file,
5758 .set_fid = smb2_set_fid,
5759 .close = smb2_close_file,
5760 .close_getattr = smb2_close_getattr,
5761 .flush = smb2_flush_file,
5762 .async_readv = smb2_async_readv,
5763 .async_writev = smb2_async_writev,
5764 .sync_read = smb2_sync_read,
5765 .sync_write = smb2_sync_write,
5766 .query_dir_first = smb2_query_dir_first,
5767 .query_dir_next = smb2_query_dir_next,
5768 .close_dir = smb2_close_dir,
5769 .calc_smb_size = smb2_calc_size,
5770 .is_status_pending = smb2_is_status_pending,
5771 .is_session_expired = smb2_is_session_expired,
5772 .oplock_response = smb2_oplock_response,
5773 .queryfs = smb311_queryfs,
5774 .mand_lock = smb2_mand_lock,
5775 .mand_unlock_range = smb2_unlock_range,
5776 .push_mand_locks = smb2_push_mandatory_locks,
5777 .get_lease_key = smb2_get_lease_key,
5778 .set_lease_key = smb2_set_lease_key,
5779 .new_lease_key = smb2_new_lease_key,
5780 .generate_signingkey = generate_smb311signingkey,
5781 .set_integrity = smb3_set_integrity,
5782 .is_read_op = smb21_is_read_op,
5783 .set_oplock_level = smb3_set_oplock_level,
5784 .create_lease_buf = smb3_create_lease_buf,
5785 .parse_lease_buf = smb3_parse_lease_buf,
5786 .copychunk_range = smb2_copychunk_range,
5787 .duplicate_extents = smb2_duplicate_extents,
5788 /* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
5789 .wp_retry_size = smb2_wp_retry_size,
5790 .dir_needs_close = smb2_dir_needs_close,
5791 .fallocate = smb3_fallocate,
5792 .enum_snapshots = smb3_enum_snapshots,
5793 .notify = smb3_notify,
5794 .init_transform_rq = smb3_init_transform_rq,
5795 .is_transform_hdr = smb3_is_transform_hdr,
5796 .receive_transform = smb3_receive_transform,
5797 .get_dfs_refer = smb2_get_dfs_refer,
5798 .select_sectype = smb2_select_sectype,
5799 #ifdef CONFIG_CIFS_XATTR
5800 .query_all_EAs = smb2_query_eas,
5801 .set_EA = smb2_set_ea,
5802 #endif /* CIFS_XATTR */
5803 .get_acl = get_smb2_acl,
5804 .get_acl_by_fid = get_smb2_acl_by_fid,
5805 .set_acl = set_smb2_acl,
5806 .next_header = smb2_next_header,
5807 .ioctl_query_info = smb2_ioctl_query_info,
5808 .make_node = smb2_make_node,
5809 .fiemap = smb3_fiemap,
5810 .llseek = smb3_llseek,
5811 .is_status_io_timeout = smb2_is_status_io_timeout,
5812 .is_network_name_deleted = smb2_is_network_name_deleted,
5813 .rename_pending_delete = smb2_rename_pending_delete,
5814 };
5815
5816 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Dialect-specific protocol constants for SMB 2.0.2, only built when
 * insecure legacy dialects are enabled.  Unlike the 3.x tables below,
 * this dialect advertises no capabilities on negotiate (field is MBZ)
 * and uses the original (v1) create lease context.
 */
5817 struct smb_version_values smb20_values = {
5818 	.version_string = SMB20_VERSION_STRING,
5819 	.protocol_id = SMB20_PROT_ID,
5820 	.req_capabilities = 0, /* MBZ: capabilities must be zero for 2.0.2 */
5821 	.large_lock_type = 0,
5822 	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5823 	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
5824 	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5825 	.header_size = sizeof(struct smb2_hdr),
5826 	.max_header_size = MAX_SMB2_HDR_SIZE,
5827 	.read_rsp_size = sizeof(struct smb2_read_rsp),
5828 	.lock_cmd = SMB2_LOCK,
5829 	.cap_unix = 0,
5830 	.cap_nt_find = SMB2_NT_FIND,
5831 	.cap_large_files = SMB2_LARGE_FILES,
5832 	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5833 	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5834 	.create_lease_size = sizeof(struct create_lease), /* v1 lease context */
5835 };
5836 #endif /* ALLOW_INSECURE_LEGACY */
5837
/*
 * Dialect-specific protocol constants for SMB 2.1.  Like 2.0.2 it sends
 * no capabilities on the negotiate request and uses the v1 create lease
 * context; only SMB3 dialects fill in req_capabilities.
 */
5838 struct smb_version_values smb21_values = {
5839 	.version_string = SMB21_VERSION_STRING,
5840 	.protocol_id = SMB21_PROT_ID,
5841 	.req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
5842 	.large_lock_type = 0,
5843 	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5844 	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
5845 	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5846 	.header_size = sizeof(struct smb2_hdr),
5847 	.max_header_size = MAX_SMB2_HDR_SIZE,
5848 	.read_rsp_size = sizeof(struct smb2_read_rsp),
5849 	.lock_cmd = SMB2_LOCK,
5850 	.cap_unix = 0,
5851 	.cap_nt_find = SMB2_NT_FIND,
5852 	.cap_large_files = SMB2_LARGE_FILES,
5853 	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5854 	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5855 	.create_lease_size = sizeof(struct create_lease), /* v1 lease context */
5856 };
5857
/*
 * Protocol constants used for "vers=3" style mounts where any SMB3
 * dialect is acceptable: the negotiate request carries an array of 3.x
 * dialects, so the protocol_id stored here is only a placeholder.
 * Full SMB3 capability mask and the v2 create lease context apply.
 */
5858 struct smb_version_values smb3any_values = {
5859 	.version_string = SMB3ANY_VERSION_STRING,
5860 	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
5861 	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5862 	.large_lock_type = 0,
5863 	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5864 	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
5865 	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5866 	.header_size = sizeof(struct smb2_hdr),
5867 	.max_header_size = MAX_SMB2_HDR_SIZE,
5868 	.read_rsp_size = sizeof(struct smb2_read_rsp),
5869 	.lock_cmd = SMB2_LOCK,
5870 	.cap_unix = 0,
5871 	.cap_nt_find = SMB2_NT_FIND,
5872 	.cap_large_files = SMB2_LARGE_FILES,
5873 	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5874 	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5875 	.create_lease_size = sizeof(struct create_lease_v2), /* v2 lease context */
5876 };
5877
/*
 * Protocol constants used when no explicit vers= dialect was requested:
 * the negotiate request carries an array of supported dialects, so the
 * protocol_id stored here is only a placeholder.  Otherwise identical
 * to the SMB3 dialect tables (full capability mask, v2 lease context).
 */
5878 struct smb_version_values smbdefault_values = {
5879 	.version_string = SMBDEFAULT_VERSION_STRING,
5880 	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
5881 	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5882 	.large_lock_type = 0,
5883 	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5884 	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
5885 	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5886 	.header_size = sizeof(struct smb2_hdr),
5887 	.max_header_size = MAX_SMB2_HDR_SIZE,
5888 	.read_rsp_size = sizeof(struct smb2_read_rsp),
5889 	.lock_cmd = SMB2_LOCK,
5890 	.cap_unix = 0,
5891 	.cap_nt_find = SMB2_NT_FIND,
5892 	.cap_large_files = SMB2_LARGE_FILES,
5893 	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5894 	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5895 	.create_lease_size = sizeof(struct create_lease_v2), /* v2 lease context */
5896 };
5897
/*
 * Dialect-specific protocol constants for SMB 3.0 (vers=3.0).  SMB3
 * dialects advertise the full capability mask on negotiate and use the
 * v2 create lease context.
 */
5898 struct smb_version_values smb30_values = {
5899 	.version_string = SMB30_VERSION_STRING,
5900 	.protocol_id = SMB30_PROT_ID,
5901 	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5902 	.large_lock_type = 0,
5903 	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5904 	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
5905 	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5906 	.header_size = sizeof(struct smb2_hdr),
5907 	.max_header_size = MAX_SMB2_HDR_SIZE,
5908 	.read_rsp_size = sizeof(struct smb2_read_rsp),
5909 	.lock_cmd = SMB2_LOCK,
5910 	.cap_unix = 0,
5911 	.cap_nt_find = SMB2_NT_FIND,
5912 	.cap_large_files = SMB2_LARGE_FILES,
5913 	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5914 	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5915 	.create_lease_size = sizeof(struct create_lease_v2), /* v2 lease context */
5916 };
5917
/*
 * Dialect-specific protocol constants for SMB 3.0.2 (vers=3.02).
 * Same capability mask and v2 create lease context as the other SMB3
 * dialect tables; only the version string and protocol id differ.
 */
5918 struct smb_version_values smb302_values = {
5919 	.version_string = SMB302_VERSION_STRING,
5920 	.protocol_id = SMB302_PROT_ID,
5921 	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5922 	.large_lock_type = 0,
5923 	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5924 	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
5925 	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5926 	.header_size = sizeof(struct smb2_hdr),
5927 	.max_header_size = MAX_SMB2_HDR_SIZE,
5928 	.read_rsp_size = sizeof(struct smb2_read_rsp),
5929 	.lock_cmd = SMB2_LOCK,
5930 	.cap_unix = 0,
5931 	.cap_nt_find = SMB2_NT_FIND,
5932 	.cap_large_files = SMB2_LARGE_FILES,
5933 	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5934 	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5935 	.create_lease_size = sizeof(struct create_lease_v2), /* v2 lease context */
5936 };
5937
/*
 * Dialect-specific protocol constants for SMB 3.1.1 (vers=3.1.1), the
 * most recent dialect table in this file.  Shares the full SMB3
 * capability mask and v2 create lease context with the other 3.x tables.
 */
5938 struct smb_version_values smb311_values = {
5939 	.version_string = SMB311_VERSION_STRING,
5940 	.protocol_id = SMB311_PROT_ID,
5941 	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5942 	.large_lock_type = 0,
5943 	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
5944 	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
5945 	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5946 	.header_size = sizeof(struct smb2_hdr),
5947 	.max_header_size = MAX_SMB2_HDR_SIZE,
5948 	.read_rsp_size = sizeof(struct smb2_read_rsp),
5949 	.lock_cmd = SMB2_LOCK,
5950 	.cap_unix = 0,
5951 	.cap_nt_find = SMB2_NT_FIND,
5952 	.cap_large_files = SMB2_LARGE_FILES,
5953 	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5954 	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5955 	.create_lease_size = sizeof(struct create_lease_v2), /* v2 lease context */
5956 };
5957