1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2009, 2013
5 * Etersoft, 2012
6 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Pavel Shilovsky (pshilovsky@samba.org) 2012
8 *
9 * Contains the routines for constructing the SMB2 PDUs themselves
10 *
11 */
12
13 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
14 /* Note that there are handle based routines which must be */
15 /* treated slightly differently for reconnection purposes since we never */
16 /* want to reuse a stale file handle and only the caller knows the file info */
17
18 #include <linux/fs.h>
19 #include <linux/kernel.h>
20 #include <linux/vfs.h>
21 #include <linux/task_io_accounting_ops.h>
22 #include <linux/uaccess.h>
23 #include <linux/uuid.h>
24 #include <linux/pagemap.h>
25 #include <linux/xattr.h>
26 #include <linux/netfs.h>
27 #include <trace/events/netfs.h>
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "cifsacl.h"
31 #include "smb2proto.h"
32 #include "cifs_unicode.h"
33 #include "cifs_debug.h"
34 #include "ntlmssp.h"
35 #include "../common/smb2status.h"
36 #include "smb2glob.h"
37 #include "cifspdu.h"
38 #include "cifs_spnego.h"
39 #include "../common/smbdirect/smbdirect.h"
40 #include "smbdirect.h"
41 #include "trace.h"
42 #ifdef CONFIG_CIFS_DFS_UPCALL
43 #include "dfs_cache.h"
44 #endif
45 #include "cached_dir.h"
46 #include "compress.h"
47 #include "fs_context.h"
48
49 /*
50 * The following table defines the expected "StructureSize" of SMB2 requests
51 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
52 *
53 * Note that commands are defined in smb2pdu.h in le16 but the array below is
54 * indexed by command in host byte order.
55 */
56 static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
57 /* SMB2_NEGOTIATE */ 36,
58 /* SMB2_SESSION_SETUP */ 25,
59 /* SMB2_LOGOFF */ 4,
60 /* SMB2_TREE_CONNECT */ 9,
61 /* SMB2_TREE_DISCONNECT */ 4,
62 /* SMB2_CREATE */ 57,
63 /* SMB2_CLOSE */ 24,
64 /* SMB2_FLUSH */ 24,
65 /* SMB2_READ */ 49,
66 /* SMB2_WRITE */ 49,
67 /* SMB2_LOCK */ 48,
68 /* SMB2_IOCTL */ 57,
69 /* SMB2_CANCEL */ 4,
70 /* SMB2_ECHO */ 4,
71 /* SMB2_QUERY_DIRECTORY */ 33,
72 /* SMB2_CHANGE_NOTIFY */ 32,
73 /* SMB2_QUERY_INFO */ 41,
74 /* SMB2_SET_INFO */ 33,
75 /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
76 };
77
smb3_encryption_required(const struct cifs_tcon * tcon)78 int smb3_encryption_required(const struct cifs_tcon *tcon)
79 {
80 if (!tcon || !tcon->ses)
81 return 0;
82 if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
83 (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
84 return 1;
85 if (tcon->seal &&
86 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
87 return 1;
88 if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) &&
89 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
90 return 1;
91 return 0;
92 }
93
94 static void
smb2_hdr_assemble(struct smb2_hdr * shdr,__le16 smb2_cmd,const struct cifs_tcon * tcon,struct TCP_Server_Info * server)95 smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
96 const struct cifs_tcon *tcon,
97 struct TCP_Server_Info *server)
98 {
99 struct smb3_hdr_req *smb3_hdr;
100
101 shdr->ProtocolId = SMB2_PROTO_NUMBER;
102 shdr->StructureSize = cpu_to_le16(64);
103 shdr->Command = smb2_cmd;
104
105 if (server) {
106 /* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
107 if (server->dialect >= SMB30_PROT_ID) {
108 smb3_hdr = (struct smb3_hdr_req *)shdr;
109 /*
110 * if primary channel is not set yet, use default
111 * channel for chan sequence num
112 */
113 if (SERVER_IS_CHAN(server))
114 smb3_hdr->ChannelSequence =
115 cpu_to_le16(server->primary_server->channel_sequence_num);
116 else
117 smb3_hdr->ChannelSequence =
118 cpu_to_le16(server->channel_sequence_num);
119 }
120 spin_lock(&server->req_lock);
121 /* Request up to 10 credits but don't go over the limit. */
122 if (server->credits >= server->max_credits)
123 shdr->CreditRequest = cpu_to_le16(0);
124 else
125 shdr->CreditRequest = cpu_to_le16(
126 min_t(int, server->max_credits -
127 server->credits, 10));
128 spin_unlock(&server->req_lock);
129 } else {
130 shdr->CreditRequest = cpu_to_le16(2);
131 }
132 shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid);
133
134 if (!tcon)
135 goto out;
136
137 /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
138 /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
139 if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
140 shdr->CreditCharge = cpu_to_le16(1);
141 /* else CreditCharge MBZ */
142
143 shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid);
144 /* Uid is not converted */
145 if (tcon->ses)
146 shdr->SessionId = cpu_to_le64(tcon->ses->Suid);
147
148 /*
149 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
150 * to pass the path on the Open SMB prefixed by \\server\share.
151 * Not sure when we would need to do the augmented path (if ever) and
152 * setting this flag breaks the SMB2 open operation since it is
153 * illegal to send an empty path name (without \\server\share prefix)
154 * when the DFS flag is set in the SMB open header. We could
155 * consider setting the flag on all operations other than open
156 * but it is safer to net set it for now.
157 */
158 /* if (tcon->share_flags & SHI1005_FLAGS_DFS)
159 shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
160
161 if (server && server->sign && !smb3_encryption_required(tcon))
162 shdr->Flags |= SMB2_FLAGS_SIGNED;
163 out:
164 return;
165 }
166
167 /* helper function for code reuse */
168 static int
cifs_chan_skip_or_disable(struct cifs_ses * ses,struct TCP_Server_Info * server,bool from_reconnect,bool disable_mchan)169 cifs_chan_skip_or_disable(struct cifs_ses *ses,
170 struct TCP_Server_Info *server,
171 bool from_reconnect, bool disable_mchan)
172 {
173 struct TCP_Server_Info *pserver;
174 unsigned int chan_index;
175
176 if (SERVER_IS_CHAN(server)) {
177 cifs_dbg(VFS,
178 "server %s does not support multichannel anymore. Skip secondary channel\n",
179 ses->server->hostname);
180
181 spin_lock(&ses->chan_lock);
182 chan_index = cifs_ses_get_chan_index(ses, server);
183 if (chan_index == CIFS_INVAL_CHAN_INDEX) {
184 spin_unlock(&ses->chan_lock);
185 goto skip_terminate;
186 }
187
188 ses->chans[chan_index].server = NULL;
189 server->terminate = true;
190 spin_unlock(&ses->chan_lock);
191
192 /*
193 * the above reference of server by channel
194 * needs to be dropped without holding chan_lock
195 * as cifs_put_tcp_session takes a higher lock
196 * i.e. cifs_tcp_ses_lock
197 */
198 cifs_put_tcp_session(server, from_reconnect);
199
200 cifs_signal_cifsd_for_reconnect(server, false);
201
202 /* mark primary server as needing reconnect */
203 pserver = server->primary_server;
204 cifs_signal_cifsd_for_reconnect(pserver, false);
205 skip_terminate:
206 return -EHOSTDOWN;
207 }
208
209 cifs_decrease_secondary_channels(ses, disable_mchan);
210
211 return 0;
212 }
213
214 /*
215 * smb3_update_ses_channels - Synchronize session channels with new configuration
216 * @ses: pointer to the CIFS session structure
217 * @server: pointer to the TCP server info structure
218 * @from_reconnect: indicates if called from reconnect context
219 * @disable_mchan: indicates if called from reconnect to disable multichannel
220 *
221 * Returns 0 on success or error code on failure.
222 *
223 * Outside of reconfigure, this function is called from cifs_mount() during mount
224 * and from reconnect scenarios to adjust channel count when the
225 * server's multichannel support changes.
226 */
smb3_update_ses_channels(struct cifs_ses * ses,struct TCP_Server_Info * server,bool from_reconnect,bool disable_mchan)227 int smb3_update_ses_channels(struct cifs_ses *ses, struct TCP_Server_Info *server,
228 bool from_reconnect, bool disable_mchan)
229 {
230 int rc = 0;
231 /*
232 * Manage session channels based on current count vs max:
233 * - If disable requested, skip or disable the channel
234 * - If below max channels, attempt to add more
235 * - If above max channels, skip or disable excess channels
236 */
237 if (disable_mchan)
238 rc = cifs_chan_skip_or_disable(ses, server, from_reconnect, disable_mchan);
239 else {
240 if (ses->chan_count < ses->chan_max)
241 rc = cifs_try_adding_channels(ses);
242 else if (ses->chan_count > ses->chan_max)
243 rc = cifs_chan_skip_or_disable(ses, server, from_reconnect, disable_mchan);
244 }
245
246 return rc;
247 }
248
249 static int
smb2_reconnect(__le16 smb2_command,struct cifs_tcon * tcon,struct TCP_Server_Info * server,bool from_reconnect)250 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
251 struct TCP_Server_Info *server, bool from_reconnect)
252 {
253 struct cifs_ses *ses;
254 int xid;
255 int rc = 0;
256
257 /*
258 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
259 * check for tcp and smb session status done differently
260 * for those three - in the calling routine.
261 */
262 if (tcon == NULL)
263 return 0;
264
265 if (smb2_command == SMB2_TREE_CONNECT)
266 return 0;
267
268 spin_lock(&tcon->tc_lock);
269 if (tcon->status == TID_EXITING) {
270 /*
271 * only tree disconnect allowed when disconnecting ...
272 */
273 if (smb2_command != SMB2_TREE_DISCONNECT) {
274 spin_unlock(&tcon->tc_lock);
275 cifs_tcon_dbg(FYI, "can not send cmd %d while umounting\n",
276 smb2_command);
277 return -ENODEV;
278 }
279 }
280 spin_unlock(&tcon->tc_lock);
281
282 ses = tcon->ses;
283 if (!ses)
284 return smb_EIO(smb_eio_trace_null_pointers);
285 spin_lock(&ses->ses_lock);
286 if (ses->ses_status == SES_EXITING) {
287 spin_unlock(&ses->ses_lock);
288 return smb_EIO(smb_eio_trace_sess_exiting);
289 }
290 spin_unlock(&ses->ses_lock);
291 if (!ses->server || !server)
292 return smb_EIO(smb_eio_trace_null_pointers);
293
294 spin_lock(&server->srv_lock);
295 if (server->tcpStatus == CifsNeedReconnect) {
296 /*
297 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
298 * here since they are implicitly done when session drops.
299 */
300 switch (smb2_command) {
301 /*
302 * BB Should we keep oplock break and add flush to exceptions?
303 */
304 case SMB2_TREE_DISCONNECT:
305 case SMB2_CANCEL:
306 case SMB2_CLOSE:
307 case SMB2_OPLOCK_BREAK:
308 spin_unlock(&server->srv_lock);
309 return -EAGAIN;
310 }
311 }
312
313 /* if server is marked for termination, cifsd will cleanup */
314 if (server->terminate) {
315 spin_unlock(&server->srv_lock);
316 return -EHOSTDOWN;
317 }
318 spin_unlock(&server->srv_lock);
319
320 again:
321 rc = cifs_wait_for_server_reconnect(server, tcon->retry);
322 if (rc)
323 return rc;
324
325 spin_lock(&ses->chan_lock);
326 if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
327 spin_unlock(&ses->chan_lock);
328 return 0;
329 }
330 spin_unlock(&ses->chan_lock);
331 cifs_tcon_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d\n",
332 tcon->ses->chans_need_reconnect,
333 tcon->need_reconnect);
334
335 mutex_lock(&ses->session_mutex);
336 /*
337 * Handle the case where a concurrent thread failed to negotiate or
338 * killed a channel.
339 */
340 spin_lock(&server->srv_lock);
341 switch (server->tcpStatus) {
342 case CifsExiting:
343 spin_unlock(&server->srv_lock);
344 mutex_unlock(&ses->session_mutex);
345 return -EHOSTDOWN;
346 case CifsNeedReconnect:
347 spin_unlock(&server->srv_lock);
348 mutex_unlock(&ses->session_mutex);
349 if (!tcon->retry)
350 return -EHOSTDOWN;
351 goto again;
352 default:
353 break;
354 }
355 spin_unlock(&server->srv_lock);
356
357 /*
358 * need to prevent multiple threads trying to simultaneously
359 * reconnect the same SMB session
360 */
361 spin_lock(&ses->ses_lock);
362 spin_lock(&ses->chan_lock);
363 if (!cifs_chan_needs_reconnect(ses, server) &&
364 ses->ses_status == SES_GOOD) {
365 spin_unlock(&ses->chan_lock);
366 spin_unlock(&ses->ses_lock);
367 /* this means that we only need to tree connect */
368 if (tcon->need_reconnect)
369 goto skip_sess_setup;
370
371 mutex_unlock(&ses->session_mutex);
372 goto out;
373 }
374 spin_unlock(&ses->chan_lock);
375 spin_unlock(&ses->ses_lock);
376
377 rc = cifs_negotiate_protocol(0, ses, server);
378 if (rc) {
379 mutex_unlock(&ses->session_mutex);
380 if (!tcon->retry)
381 return -EHOSTDOWN;
382 goto again;
383 }
384 /*
385 * if server stopped supporting multichannel
386 * and the first channel reconnected, disable all the others.
387 */
388 if (ses->chan_count > 1 &&
389 !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
390 rc = smb3_update_ses_channels(ses, server,
391 from_reconnect, true /* disable_mchan */);
392 if (rc) {
393 mutex_unlock(&ses->session_mutex);
394 goto out;
395 }
396 }
397
398 rc = cifs_setup_session(0, ses, server, ses->local_nls);
399 if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
400 /*
401 * Try alternate password for next reconnect (key rotation
402 * could be enabled on the server e.g.) if an alternate
403 * password is available and the current password is expired,
404 * but do not swap on non pwd related errors like host down
405 */
406 if (ses->password2)
407 swap(ses->password2, ses->password);
408 }
409 if (rc) {
410 mutex_unlock(&ses->session_mutex);
411 if (rc == -EACCES && !tcon->retry)
412 return -EHOSTDOWN;
413 goto out;
414 }
415
416 skip_sess_setup:
417 if (!tcon->need_reconnect) {
418 mutex_unlock(&ses->session_mutex);
419 goto out;
420 }
421 cifs_mark_open_files_invalid(tcon);
422 if (tcon->use_persistent)
423 tcon->need_reopen_files = true;
424
425 rc = cifs_tree_connect(0, tcon);
426
427 cifs_tcon_dbg(FYI, "reconnect tcon rc = %d\n", rc);
428 if (rc) {
429 /* If sess reconnected but tcon didn't, something strange ... */
430 mutex_unlock(&ses->session_mutex);
431 cifs_tcon_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
432 goto out;
433 }
434
435 spin_lock(&ses->ses_lock);
436 if (ses->flags & CIFS_SES_FLAG_SCALE_CHANNELS) {
437 spin_unlock(&ses->ses_lock);
438 mutex_unlock(&ses->session_mutex);
439 goto skip_add_channels;
440 }
441 ses->flags |= CIFS_SES_FLAG_SCALE_CHANNELS;
442 spin_unlock(&ses->ses_lock);
443
444 if (!rc &&
445 (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
446 server->ops->query_server_interfaces) {
447 /*
448 * query server network interfaces, in case they change.
449 * Also mark the session as pending this update while the query
450 * is in progress. This will be used to avoid calling
451 * smb2_reconnect recursively.
452 */
453 ses->flags |= CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
454 xid = get_xid();
455 rc = server->ops->query_server_interfaces(xid, tcon, false);
456 free_xid(xid);
457 ses->flags &= ~CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
458
459 if (!tcon->ipc && !tcon->dummy)
460 queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
461 (SMB_INTERFACE_POLL_INTERVAL * HZ));
462
463 mutex_unlock(&ses->session_mutex);
464
465 if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
466 /*
467 * some servers like Azure SMB server do not advertise
468 * that multichannel has been disabled with server
469 * capabilities, rather return STATUS_NOT_IMPLEMENTED.
470 * treat this as server not supporting multichannel
471 */
472
473 rc = smb3_update_ses_channels(ses, server,
474 from_reconnect,
475 true /* disable_mchan */);
476 goto skip_add_channels;
477 } else if (rc)
478 cifs_tcon_dbg(FYI, "%s: failed to query server interfaces: %d\n",
479 __func__, rc);
480
481 if (ses->chan_max > ses->chan_count &&
482 ses->iface_count &&
483 !SERVER_IS_CHAN(server)) {
484 if (ses->chan_count == 1)
485 cifs_server_dbg(VFS, "supports multichannel now\n");
486
487 smb3_update_ses_channels(ses, server, from_reconnect,
488 false /* disable_mchan */);
489 }
490 } else {
491 mutex_unlock(&ses->session_mutex);
492 }
493
494 skip_add_channels:
495 spin_lock(&ses->ses_lock);
496 ses->flags &= ~CIFS_SES_FLAG_SCALE_CHANNELS;
497 spin_unlock(&ses->ses_lock);
498
499 if (smb2_command != SMB2_INTERNAL_CMD)
500 cifs_queue_server_reconn(server);
501
502 atomic_inc(&tconInfoReconnectCount);
503 out:
504 /*
505 * Check if handle based operation so we know whether we can continue
506 * or not without returning to caller to reset file handle.
507 */
508 /*
509 * BB Is flush done by server on drop of tcp session? Should we special
510 * case it and skip above?
511 */
512 switch (smb2_command) {
513 case SMB2_FLUSH:
514 case SMB2_READ:
515 case SMB2_WRITE:
516 case SMB2_LOCK:
517 case SMB2_QUERY_DIRECTORY:
518 case SMB2_CHANGE_NOTIFY:
519 case SMB2_QUERY_INFO:
520 case SMB2_SET_INFO:
521 case SMB2_IOCTL:
522 rc = -EAGAIN;
523 }
524 return rc;
525 }
526
527 static void
fill_small_buf(__le16 smb2_command,struct cifs_tcon * tcon,struct TCP_Server_Info * server,void * buf,unsigned int * total_len)528 fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
529 struct TCP_Server_Info *server,
530 void *buf,
531 unsigned int *total_len)
532 {
533 struct smb2_pdu *spdu = buf;
534 /* lookup word count ie StructureSize from table */
535 __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];
536
537 /*
538 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
539 * largest operations (Create)
540 */
541 memset(buf, 0, 256);
542
543 smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server);
544 spdu->StructureSize2 = cpu_to_le16(parmsize);
545
546 *total_len = parmsize + sizeof(struct smb2_hdr);
547 }
548
549 /*
550 * Allocate and return pointer to an SMB request hdr, and set basic
551 * SMB information in the SMB header. If the return code is zero, this
552 * function must have filled in request_buf pointer.
553 */
__smb2_plain_req_init(__le16 smb2_command,struct cifs_tcon * tcon,struct TCP_Server_Info * server,void ** request_buf,unsigned int * total_len)554 static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
555 struct TCP_Server_Info *server,
556 void **request_buf, unsigned int *total_len)
557 {
558 /* BB eventually switch this to SMB2 specific small buf size */
559 switch (smb2_command) {
560 case SMB2_SET_INFO:
561 case SMB2_QUERY_INFO:
562 *request_buf = cifs_buf_get();
563 break;
564 default:
565 *request_buf = cifs_small_buf_get();
566 break;
567 }
568 if (*request_buf == NULL) {
569 /* BB should we add a retry in here if not a writepage? */
570 return -ENOMEM;
571 }
572
573 fill_small_buf(smb2_command, tcon, server,
574 (struct smb2_hdr *)(*request_buf),
575 total_len);
576
577 if (tcon != NULL) {
578 uint16_t com_code = le16_to_cpu(smb2_command);
579 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
580 cifs_stats_inc(&tcon->num_smbs_sent);
581 }
582
583 return 0;
584 }
585
smb2_plain_req_init(__le16 smb2_command,struct cifs_tcon * tcon,struct TCP_Server_Info * server,void ** request_buf,unsigned int * total_len)586 static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
587 struct TCP_Server_Info *server,
588 void **request_buf, unsigned int *total_len)
589 {
590 int rc;
591
592 rc = smb2_reconnect(smb2_command, tcon, server, false);
593 if (rc)
594 return rc;
595
596 return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
597 total_len);
598 }
599
smb2_ioctl_req_init(u32 opcode,struct cifs_tcon * tcon,struct TCP_Server_Info * server,void ** request_buf,unsigned int * total_len)600 static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
601 struct TCP_Server_Info *server,
602 void **request_buf, unsigned int *total_len)
603 {
604 /*
605 * Skip reconnect in one of the following cases:
606 * 1. For FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs
607 * 2. For FSCTL_QUERY_NETWORK_INTERFACE_INFO IOCTL when called from
608 * smb2_reconnect (indicated by CIFS_SES_FLAG_SCALE_CHANNELS ses flag)
609 */
610 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO ||
611 (opcode == FSCTL_QUERY_NETWORK_INTERFACE_INFO &&
612 (tcon->ses->flags & CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES)))
613 return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
614 request_buf, total_len);
615
616 return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
617 request_buf, total_len);
618 }
619
620 /* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
621
622 static void
build_preauth_ctxt(struct smb2_preauth_neg_context * pneg_ctxt)623 build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
624 {
625 pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
626 pneg_ctxt->DataLength = cpu_to_le16(38);
627 pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
628 pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
629 get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
630 pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
631 }
632
633 static void
build_compression_ctxt(struct smb2_compression_capabilities_context * pneg_ctxt)634 build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
635 {
636 pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
637 pneg_ctxt->DataLength =
638 cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
639 - sizeof(struct smb2_neg_context));
640 pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
641 pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
642 pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
643 pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
644 }
645
646 static unsigned int
build_signing_ctxt(struct smb2_signing_capabilities * pneg_ctxt)647 build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
648 {
649 unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
650 unsigned short num_algs = 1; /* number of signing algorithms sent */
651
652 pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
653 /*
654 * Context Data length must be rounded to multiple of 8 for some servers
655 */
656 pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) -
657 sizeof(struct smb2_neg_context) +
658 (num_algs * sizeof(u16)), 8));
659 pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
660 pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);
661
662 ctxt_len += sizeof(__le16) * num_algs;
663 ctxt_len = ALIGN(ctxt_len, 8);
664 return ctxt_len;
665 /* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
666 }
667
668 static void
build_encrypt_ctxt(struct smb2_encryption_neg_context * pneg_ctxt)669 build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
670 {
671 pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
672 if (require_gcm_256) {
673 pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
674 pneg_ctxt->CipherCount = cpu_to_le16(1);
675 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
676 } else if (enable_gcm_256) {
677 pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
678 pneg_ctxt->CipherCount = cpu_to_le16(3);
679 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
680 pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
681 pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
682 } else {
683 pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
684 pneg_ctxt->CipherCount = cpu_to_le16(2);
685 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
686 pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
687 }
688 }
689
690 static unsigned int
build_netname_ctxt(struct smb2_netname_neg_context * pneg_ctxt,char * hostname)691 build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
692 {
693 struct nls_table *cp = load_nls_default();
694
695 pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;
696
697 /* copy up to max of first 100 bytes of server name to NetName field */
698 pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
699 /* context size is DataLength + minimal smb2_neg_context */
700 return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8);
701 }
702
703 static void
build_posix_ctxt(struct smb2_posix_neg_context * pneg_ctxt)704 build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
705 {
706 pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
707 pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
708 /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
709 pneg_ctxt->Name[0] = 0x93;
710 pneg_ctxt->Name[1] = 0xAD;
711 pneg_ctxt->Name[2] = 0x25;
712 pneg_ctxt->Name[3] = 0x50;
713 pneg_ctxt->Name[4] = 0x9C;
714 pneg_ctxt->Name[5] = 0xB4;
715 pneg_ctxt->Name[6] = 0x11;
716 pneg_ctxt->Name[7] = 0xE7;
717 pneg_ctxt->Name[8] = 0xB4;
718 pneg_ctxt->Name[9] = 0x23;
719 pneg_ctxt->Name[10] = 0x83;
720 pneg_ctxt->Name[11] = 0xDE;
721 pneg_ctxt->Name[12] = 0x96;
722 pneg_ctxt->Name[13] = 0x8B;
723 pneg_ctxt->Name[14] = 0xCD;
724 pneg_ctxt->Name[15] = 0x7C;
725 }
726
727 static void
assemble_neg_contexts(struct smb2_negotiate_req * req,struct TCP_Server_Info * server,unsigned int * total_len)728 assemble_neg_contexts(struct smb2_negotiate_req *req,
729 struct TCP_Server_Info *server, unsigned int *total_len)
730 {
731 unsigned int ctxt_len, neg_context_count;
732 struct TCP_Server_Info *pserver;
733 char *pneg_ctxt;
734 char *hostname;
735
736 if (*total_len > 200) {
737 /* In case length corrupted don't want to overrun smb buffer */
738 cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
739 return;
740 }
741
742 /*
743 * round up total_len of fixed part of SMB3 negotiate request to 8
744 * byte boundary before adding negotiate contexts
745 */
746 *total_len = ALIGN(*total_len, 8);
747
748 pneg_ctxt = (*total_len) + (char *)req;
749 req->NegotiateContextOffset = cpu_to_le32(*total_len);
750
751 build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
752 ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8);
753 *total_len += ctxt_len;
754 pneg_ctxt += ctxt_len;
755
756 build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
757 ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8);
758 *total_len += ctxt_len;
759 pneg_ctxt += ctxt_len;
760
761 /*
762 * secondary channels don't have the hostname field populated
763 * use the hostname field in the primary channel instead
764 */
765 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
766 cifs_server_lock(pserver);
767 hostname = pserver->hostname;
768 if (hostname && (hostname[0] != 0)) {
769 ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
770 hostname);
771 *total_len += ctxt_len;
772 pneg_ctxt += ctxt_len;
773 neg_context_count = 3;
774 } else
775 neg_context_count = 2;
776 cifs_server_unlock(pserver);
777
778 build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
779 *total_len += sizeof(struct smb2_posix_neg_context);
780 pneg_ctxt += sizeof(struct smb2_posix_neg_context);
781 neg_context_count++;
782
783 if (server->compression.requested) {
784 build_compression_ctxt((struct smb2_compression_capabilities_context *)
785 pneg_ctxt);
786 ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8);
787 *total_len += ctxt_len;
788 pneg_ctxt += ctxt_len;
789 neg_context_count++;
790 }
791
792 if (enable_negotiate_signing) {
793 ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
794 pneg_ctxt);
795 *total_len += ctxt_len;
796 pneg_ctxt += ctxt_len;
797 neg_context_count++;
798 }
799
800 /* check for and add transport_capabilities and signing capabilities */
801 req->NegotiateContextCount = cpu_to_le16(neg_context_count);
802
803 }
804
805 /* If invalid preauth context warn but use what we requested, SHA-512 */
decode_preauth_context(struct smb2_preauth_neg_context * ctxt)806 static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
807 {
808 unsigned int len = le16_to_cpu(ctxt->DataLength);
809
810 /*
811 * Caller checked that DataLength remains within SMB boundary. We still
812 * need to confirm that one HashAlgorithms member is accounted for.
813 */
814 if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
815 pr_warn_once("server sent bad preauth context\n");
816 return;
817 } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
818 pr_warn_once("server sent invalid SaltLength\n");
819 return;
820 }
821 if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
822 pr_warn_once("Invalid SMB3 hash algorithm count\n");
823 if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
824 pr_warn_once("unknown SMB3 hash algorithm\n");
825 }
826
decode_compress_ctx(struct TCP_Server_Info * server,struct smb2_compression_capabilities_context * ctxt)827 static void decode_compress_ctx(struct TCP_Server_Info *server,
828 struct smb2_compression_capabilities_context *ctxt)
829 {
830 unsigned int len = le16_to_cpu(ctxt->DataLength);
831 __le16 alg;
832
833 server->compression.enabled = false;
834
835 /*
836 * Caller checked that DataLength remains within SMB boundary. We still
837 * need to confirm that one CompressionAlgorithms member is accounted
838 * for.
839 */
840 if (len < 10) {
841 pr_warn_once("server sent bad compression cntxt\n");
842 return;
843 }
844
845 if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
846 pr_warn_once("invalid SMB3 compress algorithm count\n");
847 return;
848 }
849
850 alg = ctxt->CompressionAlgorithms[0];
851
852 /* 'NONE' (0) compressor type is never negotiated */
853 if (alg == 0 || le16_to_cpu(alg) > 3) {
854 pr_warn_once("invalid compression algorithm '%u'\n", alg);
855 return;
856 }
857
858 server->compression.alg = alg;
859 server->compression.enabled = true;
860 }
861
decode_encrypt_ctx(struct TCP_Server_Info * server,struct smb2_encryption_neg_context * ctxt)862 static int decode_encrypt_ctx(struct TCP_Server_Info *server,
863 struct smb2_encryption_neg_context *ctxt)
864 {
865 unsigned int len = le16_to_cpu(ctxt->DataLength);
866
867 cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
868 /*
869 * Caller checked that DataLength remains within SMB boundary. We still
870 * need to confirm that one Cipher flexible array member is accounted
871 * for.
872 */
873 if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
874 pr_warn_once("server sent bad crypto ctxt len\n");
875 return -EINVAL;
876 }
877
878 if (le16_to_cpu(ctxt->CipherCount) != 1) {
879 pr_warn_once("Invalid SMB3.11 cipher count\n");
880 return -EINVAL;
881 }
882 cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
883 if (require_gcm_256) {
884 if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
885 cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
886 return -EOPNOTSUPP;
887 }
888 } else if (ctxt->Ciphers[0] == 0) {
889 /*
890 * e.g. if server only supported AES256_CCM (very unlikely)
891 * or server supported no encryption types or had all disabled.
892 * Since GLOBAL_CAP_ENCRYPTION will be not set, in the case
893 * in which mount requested encryption ("seal") checks later
894 * on during tree connection will return proper rc, but if
895 * seal not requested by client, since server is allowed to
896 * return 0 to indicate no supported cipher, we can't fail here
897 */
898 server->cipher_type = 0;
899 server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
900 pr_warn_once("Server does not support requested encryption types\n");
901 return 0;
902 } else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
903 (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
904 (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
905 /* server returned a cipher we didn't ask for */
906 pr_warn_once("Invalid SMB3.11 cipher returned\n");
907 return -EINVAL;
908 }
909 server->cipher_type = ctxt->Ciphers[0];
910 server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
911 return 0;
912 }
913
decode_signing_ctx(struct TCP_Server_Info * server,struct smb2_signing_capabilities * pctxt)914 static void decode_signing_ctx(struct TCP_Server_Info *server,
915 struct smb2_signing_capabilities *pctxt)
916 {
917 unsigned int len = le16_to_cpu(pctxt->DataLength);
918
919 /*
920 * Caller checked that DataLength remains within SMB boundary. We still
921 * need to confirm that one SigningAlgorithms flexible array member is
922 * accounted for.
923 */
924 if ((len < 4) || (len > 16)) {
925 pr_warn_once("server sent bad signing negcontext\n");
926 return;
927 }
928 if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
929 pr_warn_once("Invalid signing algorithm count\n");
930 return;
931 }
932 if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
933 pr_warn_once("unknown signing algorithm\n");
934 return;
935 }
936
937 server->signing_negotiated = true;
938 server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
939 cifs_dbg(FYI, "signing algorithm %d chosen\n",
940 server->signing_algorithm);
941 }
942
943
smb311_decode_neg_context(struct smb2_negotiate_rsp * rsp,struct TCP_Server_Info * server,unsigned int len_of_smb)944 static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
945 struct TCP_Server_Info *server,
946 unsigned int len_of_smb)
947 {
948 struct smb2_neg_context *pctx;
949 unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
950 unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
951 unsigned int len_of_ctxts, i;
952 int rc = 0;
953
954 cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
955 if (len_of_smb <= offset) {
956 cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
957 return -EINVAL;
958 }
959
960 len_of_ctxts = len_of_smb - offset;
961
962 for (i = 0; i < ctxt_cnt; i++) {
963 int clen;
964 /* check that offset is not beyond end of SMB */
965 if (len_of_ctxts < sizeof(struct smb2_neg_context))
966 break;
967
968 pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
969 clen = sizeof(struct smb2_neg_context)
970 + le16_to_cpu(pctx->DataLength);
971 /*
972 * 2.2.4 SMB2 NEGOTIATE Response
973 * Subsequent negotiate contexts MUST appear at the first 8-byte
974 * aligned offset following the previous negotiate context.
975 */
976 if (i + 1 != ctxt_cnt)
977 clen = ALIGN(clen, 8);
978 if (clen > len_of_ctxts)
979 break;
980
981 if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
982 decode_preauth_context(
983 (struct smb2_preauth_neg_context *)pctx);
984 else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
985 rc = decode_encrypt_ctx(server,
986 (struct smb2_encryption_neg_context *)pctx);
987 else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
988 decode_compress_ctx(server,
989 (struct smb2_compression_capabilities_context *)pctx);
990 else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
991 server->posix_ext_supported = true;
992 else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
993 decode_signing_ctx(server,
994 (struct smb2_signing_capabilities *)pctx);
995 else
996 cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
997 le16_to_cpu(pctx->ContextType));
998 if (rc)
999 break;
1000
1001 offset += clen;
1002 len_of_ctxts -= clen;
1003 }
1004 return rc;
1005 }
1006
1007 static struct create_posix *
create_posix_buf(umode_t mode)1008 create_posix_buf(umode_t mode)
1009 {
1010 struct create_posix *buf;
1011
1012 buf = kzalloc(sizeof(struct create_posix),
1013 GFP_KERNEL);
1014 if (!buf)
1015 return NULL;
1016
1017 buf->ccontext.DataOffset =
1018 cpu_to_le16(offsetof(struct create_posix, Mode));
1019 buf->ccontext.DataLength = cpu_to_le32(4);
1020 buf->ccontext.NameOffset =
1021 cpu_to_le16(offsetof(struct create_posix, Name));
1022 buf->ccontext.NameLength = cpu_to_le16(16);
1023
1024 /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
1025 buf->Name[0] = 0x93;
1026 buf->Name[1] = 0xAD;
1027 buf->Name[2] = 0x25;
1028 buf->Name[3] = 0x50;
1029 buf->Name[4] = 0x9C;
1030 buf->Name[5] = 0xB4;
1031 buf->Name[6] = 0x11;
1032 buf->Name[7] = 0xE7;
1033 buf->Name[8] = 0xB4;
1034 buf->Name[9] = 0x23;
1035 buf->Name[10] = 0x83;
1036 buf->Name[11] = 0xDE;
1037 buf->Name[12] = 0x96;
1038 buf->Name[13] = 0x8B;
1039 buf->Name[14] = 0xCD;
1040 buf->Name[15] = 0x7C;
1041 buf->Mode = cpu_to_le32(mode);
1042 cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
1043 return buf;
1044 }
1045
1046 static int
add_posix_context(struct kvec * iov,unsigned int * num_iovec,umode_t mode)1047 add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
1048 {
1049 unsigned int num = *num_iovec;
1050
1051 iov[num].iov_base = create_posix_buf(mode);
1052 if (mode == ACL_NO_MODE)
1053 cifs_dbg(FYI, "%s: no mode\n", __func__);
1054 if (iov[num].iov_base == NULL)
1055 return -ENOMEM;
1056 iov[num].iov_len = sizeof(struct create_posix);
1057 *num_iovec = num + 1;
1058 return 0;
1059 }
1060
1061
1062 /*
1063 *
1064 * SMB2 Worker functions follow:
1065 *
1066 * The general structure of the worker functions is:
1067 * 1) Call smb2_init (assembles SMB2 header)
1068 * 2) Initialize SMB2 command specific fields in fixed length area of SMB
1069 * 3) Call smb_sendrcv2 (sends request on socket and waits for response)
1070 * 4) Decode SMB2 command specific fields in the fixed length area
1071 * 5) Decode variable length data area (if any for this SMB2 command type)
1072 * 6) Call free smb buffer
1073 * 7) return
1074 *
1075 */
1076
1077 int
SMB2_negotiate(const unsigned int xid,struct cifs_ses * ses,struct TCP_Server_Info * server)1078 SMB2_negotiate(const unsigned int xid,
1079 struct cifs_ses *ses,
1080 struct TCP_Server_Info *server)
1081 {
1082 struct smb_rqst rqst;
1083 struct smb2_negotiate_req *req;
1084 struct smb2_negotiate_rsp *rsp;
1085 struct kvec iov[1];
1086 struct kvec rsp_iov;
1087 int rc;
1088 int resp_buftype;
1089 int blob_offset, blob_length;
1090 char *security_blob;
1091 int flags = CIFS_NEG_OP;
1092 unsigned int total_len;
1093
1094 cifs_dbg(FYI, "Negotiate protocol\n");
1095
1096 if (!server) {
1097 WARN(1, "%s: server is NULL!\n", __func__);
1098 return smb_EIO(smb_eio_trace_null_pointers);
1099 }
1100
1101 rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
1102 (void **) &req, &total_len);
1103 if (rc)
1104 return rc;
1105
1106 req->hdr.SessionId = 0;
1107
1108 memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
1109 memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
1110
1111 if (strcmp(server->vals->version_string,
1112 SMB3ANY_VERSION_STRING) == 0) {
1113 req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
1114 req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
1115 req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
1116 req->DialectCount = cpu_to_le16(3);
1117 total_len += 6;
1118 } else if (strcmp(server->vals->version_string,
1119 SMBDEFAULT_VERSION_STRING) == 0) {
1120 req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
1121 req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
1122 req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
1123 req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
1124 req->DialectCount = cpu_to_le16(4);
1125 total_len += 8;
1126 } else {
1127 /* otherwise send specific dialect */
1128 req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
1129 req->DialectCount = cpu_to_le16(1);
1130 total_len += 2;
1131 }
1132
1133 /* only one of SMB2 signing flags may be set in SMB2 request */
1134 if (ses->sign)
1135 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
1136 else if (global_secflags & CIFSSEC_MAY_SIGN)
1137 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
1138 else
1139 req->SecurityMode = 0;
1140
1141 req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
1142 req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
1143
1144 /* ClientGUID must be zero for SMB2.02 dialect */
1145 if (server->vals->protocol_id == SMB20_PROT_ID)
1146 memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
1147 else {
1148 memcpy(req->ClientGUID, server->client_guid,
1149 SMB2_CLIENT_GUID_SIZE);
1150 if ((server->vals->protocol_id == SMB311_PROT_ID) ||
1151 (strcmp(server->vals->version_string,
1152 SMB3ANY_VERSION_STRING) == 0) ||
1153 (strcmp(server->vals->version_string,
1154 SMBDEFAULT_VERSION_STRING) == 0))
1155 assemble_neg_contexts(req, server, &total_len);
1156 }
1157 iov[0].iov_base = (char *)req;
1158 iov[0].iov_len = total_len;
1159
1160 memset(&rqst, 0, sizeof(struct smb_rqst));
1161 rqst.rq_iov = iov;
1162 rqst.rq_nvec = 1;
1163
1164 rc = cifs_send_recv(xid, ses, server,
1165 &rqst, &resp_buftype, flags, &rsp_iov);
1166 cifs_small_buf_release(req);
1167 rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
1168 /*
1169 * No tcon so can't do
1170 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
1171 */
1172 if (rc == -EOPNOTSUPP) {
1173 cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
1174 goto neg_exit;
1175 } else if (rc != 0)
1176 goto neg_exit;
1177
1178 u16 dialect = le16_to_cpu(rsp->DialectRevision);
1179 if (strcmp(server->vals->version_string,
1180 SMB3ANY_VERSION_STRING) == 0) {
1181 switch (dialect) {
1182 case SMB20_PROT_ID:
1183 cifs_server_dbg(VFS,
1184 "SMB2 dialect returned but not requested\n");
1185 rc = smb_EIO2(smb_eio_trace_neg_unreq_dialect, dialect, 3);
1186 goto neg_exit;
1187 case SMB21_PROT_ID:
1188 cifs_server_dbg(VFS,
1189 "SMB2.1 dialect returned but not requested\n");
1190 rc = smb_EIO2(smb_eio_trace_neg_unreq_dialect, dialect, 3);
1191 goto neg_exit;
1192 case SMB311_PROT_ID:
1193 /* ops set to 3.0 by default for default so update */
1194 server->ops = &smb311_operations;
1195 server->vals = &smb311_values;
1196 break;
1197 default:
1198 break;
1199 }
1200 } else if (strcmp(server->vals->version_string,
1201 SMBDEFAULT_VERSION_STRING) == 0) {
1202 switch (dialect) {
1203 case SMB20_PROT_ID:
1204 cifs_server_dbg(VFS,
1205 "SMB2 dialect returned but not requested\n");
1206 rc = smb_EIO2(smb_eio_trace_neg_unreq_dialect, dialect, 0);
1207 goto neg_exit;
1208 case SMB21_PROT_ID:
1209 /* ops set to 3.0 by default for default so update */
1210 server->ops = &smb21_operations;
1211 server->vals = &smb21_values;
1212 break;
1213 case SMB311_PROT_ID:
1214 server->ops = &smb311_operations;
1215 server->vals = &smb311_values;
1216 break;
1217 default:
1218 break;
1219 }
1220 } else if (dialect != server->vals->protocol_id) {
1221 /* if requested single dialect ensure returned dialect matched */
1222 cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
1223 dialect);
1224 rc = smb_EIO2(smb_eio_trace_neg_unreq_dialect,
1225 dialect, server->vals->protocol_id);
1226 goto neg_exit;
1227 }
1228
1229 cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
1230
1231 switch (dialect) {
1232 case SMB20_PROT_ID:
1233 cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
1234 break;
1235 case SMB21_PROT_ID:
1236 cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
1237 break;
1238 case SMB30_PROT_ID:
1239 cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
1240 break;
1241 case SMB302_PROT_ID:
1242 cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
1243 break;
1244 case SMB311_PROT_ID:
1245 cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
1246 break;
1247 default:
1248 cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
1249 dialect);
1250 rc = smb_EIO1(smb_eio_trace_neg_inval_dialect, dialect);
1251 goto neg_exit;
1252 }
1253
1254 rc = 0;
1255 server->dialect = dialect;
1256
1257 /*
1258 * Keep a copy of the hash after negprot. This hash will be
1259 * the starting hash value for all sessions made from this
1260 * server.
1261 */
1262 memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
1263 SMB2_PREAUTH_HASH_SIZE);
1264
1265 /* SMB2 only has an extended negflavor */
1266 server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
1267 /* set it to the maximum buffer size value we can send with 1 credit */
1268 server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
1269 SMB2_MAX_BUFFER_SIZE);
1270 server->max_read = le32_to_cpu(rsp->MaxReadSize);
1271 server->max_write = le32_to_cpu(rsp->MaxWriteSize);
1272 server->sec_mode = le16_to_cpu(rsp->SecurityMode);
1273 if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
1274 cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
1275 server->sec_mode);
1276 server->capabilities = le32_to_cpu(rsp->Capabilities);
1277 /* Internal types */
1278 server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
1279
1280 /*
1281 * SMB3.0 supports only 1 cipher and doesn't have a encryption neg context
1282 * Set the cipher type manually.
1283 */
1284 if ((server->dialect == SMB30_PROT_ID ||
1285 server->dialect == SMB302_PROT_ID) &&
1286 (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
1287 server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
1288
1289 security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
1290 (struct smb2_hdr *)rsp);
1291 /*
1292 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
1293 * for us will be
1294 * ses->sectype = RawNTLMSSP;
1295 * but for time being this is our only auth choice so doesn't matter.
1296 * We just found a server which sets blob length to zero expecting raw.
1297 */
1298 if (blob_length == 0) {
1299 cifs_dbg(FYI, "missing security blob on negprot\n");
1300 server->sec_ntlmssp = true;
1301 }
1302
1303 rc = cifs_enable_signing(server, ses->sign);
1304 if (rc)
1305 goto neg_exit;
1306 if (blob_length) {
1307 rc = decode_negTokenInit(security_blob, blob_length, server);
1308 if (rc == 1)
1309 rc = 0;
1310 else if (rc == 0)
1311 rc = smb_EIO1(smb_eio_trace_neg_decode_token, rc);
1312 }
1313
1314 if (server->dialect == SMB311_PROT_ID) {
1315 if (rsp->NegotiateContextCount)
1316 rc = smb311_decode_neg_context(rsp, server,
1317 rsp_iov.iov_len);
1318 else
1319 cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
1320 }
1321
1322 if (server->cipher_type && !rc)
1323 rc = smb3_crypto_aead_allocate(server);
1324 neg_exit:
1325 free_rsp_buf(resp_buftype, rsp);
1326 return rc;
1327 }
1328
smb3_validate_negotiate(const unsigned int xid,struct cifs_tcon * tcon)1329 int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
1330 {
1331 int rc;
1332 struct validate_negotiate_info_req *pneg_inbuf;
1333 struct validate_negotiate_info_rsp *pneg_rsp = NULL;
1334 u32 rsplen;
1335 u32 inbuflen; /* max of 4 dialects */
1336 struct TCP_Server_Info *server = tcon->ses->server;
1337
1338 cifs_dbg(FYI, "validate negotiate\n");
1339
1340 /* In SMB3.11 preauth integrity supersedes validate negotiate */
1341 if (server->dialect == SMB311_PROT_ID)
1342 return 0;
1343
1344 /*
1345 * validation ioctl must be signed, so no point sending this if we
1346 * can not sign it (ie are not known user). Even if signing is not
1347 * required (enabled but not negotiated), in those cases we selectively
1348 * sign just this, the first and only signed request on a connection.
1349 * Having validation of negotiate info helps reduce attack vectors.
1350 */
1351 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
1352 return 0; /* validation requires signing */
1353
1354 if (tcon->ses->user_name == NULL) {
1355 cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
1356 return 0; /* validation requires signing */
1357 }
1358
1359 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
1360 cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
1361
1362 pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
1363 if (!pneg_inbuf)
1364 return -ENOMEM;
1365
1366 pneg_inbuf->Capabilities =
1367 cpu_to_le32(server->vals->req_capabilities);
1368 pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
1369
1370 memcpy(pneg_inbuf->Guid, server->client_guid,
1371 SMB2_CLIENT_GUID_SIZE);
1372
1373 if (tcon->ses->sign)
1374 pneg_inbuf->SecurityMode =
1375 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
1376 else if (global_secflags & CIFSSEC_MAY_SIGN)
1377 pneg_inbuf->SecurityMode =
1378 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
1379 else
1380 pneg_inbuf->SecurityMode = 0;
1381
1382
1383 if (strcmp(server->vals->version_string,
1384 SMB3ANY_VERSION_STRING) == 0) {
1385 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
1386 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
1387 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
1388 pneg_inbuf->DialectCount = cpu_to_le16(3);
1389 /* SMB 2.1 not included so subtract one dialect from len */
1390 inbuflen = sizeof(*pneg_inbuf) -
1391 (sizeof(pneg_inbuf->Dialects[0]));
1392 } else if (strcmp(server->vals->version_string,
1393 SMBDEFAULT_VERSION_STRING) == 0) {
1394 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
1395 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
1396 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
1397 pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
1398 pneg_inbuf->DialectCount = cpu_to_le16(4);
1399 /* structure is big enough for 4 dialects */
1400 inbuflen = sizeof(*pneg_inbuf);
1401 } else {
1402 /* otherwise specific dialect was requested */
1403 pneg_inbuf->Dialects[0] =
1404 cpu_to_le16(server->vals->protocol_id);
1405 pneg_inbuf->DialectCount = cpu_to_le16(1);
1406 /* structure is big enough for 4 dialects, sending only 1 */
1407 inbuflen = sizeof(*pneg_inbuf) -
1408 sizeof(pneg_inbuf->Dialects[0]) * 3;
1409 }
1410
1411 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
1412 FSCTL_VALIDATE_NEGOTIATE_INFO,
1413 (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
1414 (char **)&pneg_rsp, &rsplen);
1415 if (rc == -EOPNOTSUPP) {
1416 /*
1417 * Old Windows versions or Netapp SMB server can return
1418 * not supported error. Client should accept it.
1419 */
1420 cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
1421 rc = 0;
1422 goto out_free_inbuf;
1423 } else if (rc != 0) {
1424 cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
1425 rc);
1426 rc = smb_EIO1(smb_eio_trace_neg_info_fail, rc);
1427 goto out_free_inbuf;
1428 }
1429
1430 if (rsplen != sizeof(*pneg_rsp)) {
1431 cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
1432 rsplen);
1433
1434 /* relax check since Mac returns max bufsize allowed on ioctl */
1435 if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp)) {
1436 rc = smb_EIO1(smb_eio_trace_neg_bad_rsplen, rsplen);
1437 goto out_free_rsp;
1438 }
1439 }
1440
1441 /* check validate negotiate info response matches what we got earlier */
1442 u16 dialect = le16_to_cpu(pneg_rsp->Dialect);
1443
1444 if (dialect != server->dialect) {
1445 rc = smb_EIO2(smb_eio_trace_neg_info_dialect,
1446 dialect, server->dialect);
1447 goto vneg_out;
1448 }
1449
1450 u16 sec_mode = le16_to_cpu(pneg_rsp->SecurityMode);
1451
1452 if (sec_mode != server->sec_mode) {
1453 rc = smb_EIO2(smb_eio_trace_neg_info_sec_mode,
1454 sec_mode, server->sec_mode);
1455 goto vneg_out;
1456 }
1457
1458 /* do not validate server guid because not saved at negprot time yet */
1459 u32 caps = le32_to_cpu(pneg_rsp->Capabilities);
1460
1461 if ((caps | SMB2_NT_FIND |
1462 SMB2_LARGE_FILES) != server->capabilities) {
1463 rc = smb_EIO2(smb_eio_trace_neg_info_caps,
1464 caps, server->capabilities);
1465 goto vneg_out;
1466 }
1467
1468 /* validate negotiate successful */
1469 rc = 0;
1470 cifs_dbg(FYI, "validate negotiate info successful\n");
1471 goto out_free_rsp;
1472
1473 vneg_out:
1474 cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
1475 out_free_rsp:
1476 kfree(pneg_rsp);
1477 out_free_inbuf:
1478 kfree(pneg_inbuf);
1479 return rc;
1480 }
1481
1482 enum securityEnum
smb2_select_sectype(struct TCP_Server_Info * server,enum securityEnum requested)1483 smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
1484 {
1485 switch (requested) {
1486 case Kerberos:
1487 case RawNTLMSSP:
1488 return requested;
1489 case NTLMv2:
1490 return RawNTLMSSP;
1491 case Unspecified:
1492 if (server->sec_ntlmssp &&
1493 (global_secflags & CIFSSEC_MAY_NTLMSSP))
1494 return RawNTLMSSP;
1495 if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) &&
1496 (global_secflags & CIFSSEC_MAY_KRB5))
1497 return Kerberos;
1498 fallthrough;
1499 default:
1500 return Unspecified;
1501 }
1502 }
1503
1504 struct SMB2_sess_data {
1505 unsigned int xid;
1506 struct cifs_ses *ses;
1507 struct TCP_Server_Info *server;
1508 struct nls_table *nls_cp;
1509 void (*func)(struct SMB2_sess_data *);
1510 int result;
1511 u64 previous_session;
1512
1513 /* we will send the SMB in three pieces:
1514 * a fixed length beginning part, an optional
1515 * SPNEGO blob (which can be zero length), and a
1516 * last part which will include the strings
1517 * and rest of bcc area. This allows us to avoid
1518 * a large buffer 17K allocation
1519 */
1520 int buf0_type;
1521 struct kvec iov[2];
1522 };
1523
1524 static int
SMB2_sess_alloc_buffer(struct SMB2_sess_data * sess_data)1525 SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
1526 {
1527 int rc;
1528 struct cifs_ses *ses = sess_data->ses;
1529 struct TCP_Server_Info *server = sess_data->server;
1530 struct smb2_sess_setup_req *req;
1531 unsigned int total_len;
1532 bool is_binding = false;
1533
1534 rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
1535 (void **) &req,
1536 &total_len);
1537 if (rc)
1538 return rc;
1539
1540 spin_lock(&ses->ses_lock);
1541 is_binding = (ses->ses_status == SES_GOOD);
1542 spin_unlock(&ses->ses_lock);
1543
1544 if (is_binding) {
1545 req->hdr.SessionId = cpu_to_le64(ses->Suid);
1546 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
1547 req->PreviousSessionId = 0;
1548 req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
1549 cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid);
1550 } else {
1551 /* First session, not a reauthenticate */
1552 req->hdr.SessionId = 0;
1553 /*
1554 * if reconnect, we need to send previous sess id
1555 * otherwise it is 0
1556 */
1557 req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
1558 req->Flags = 0; /* MBZ */
1559 cifs_dbg(FYI, "Fresh session. Previous: %llx\n",
1560 sess_data->previous_session);
1561 }
1562
1563 /* enough to enable echos and oplocks and one max size write */
1564 if (server->credits >= server->max_credits)
1565 req->hdr.CreditRequest = cpu_to_le16(0);
1566 else
1567 req->hdr.CreditRequest = cpu_to_le16(
1568 min_t(int, server->max_credits -
1569 server->credits, 130));
1570
1571 /* only one of SMB2 signing flags may be set in SMB2 request */
1572 if (server->sign)
1573 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
1574 else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
1575 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
1576 else
1577 req->SecurityMode = 0;
1578
1579 #ifdef CONFIG_CIFS_DFS_UPCALL
1580 req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
1581 #else
1582 req->Capabilities = 0;
1583 #endif /* DFS_UPCALL */
1584
1585 req->Channel = 0; /* MBZ */
1586
1587 sess_data->iov[0].iov_base = (char *)req;
1588 /* 1 for pad */
1589 sess_data->iov[0].iov_len = total_len - 1;
1590 /*
1591 * This variable will be used to clear the buffer
1592 * allocated above in case of any error in the calling function.
1593 */
1594 sess_data->buf0_type = CIFS_SMALL_BUFFER;
1595
1596 return 0;
1597 }
1598
1599 static void
SMB2_sess_free_buffer(struct SMB2_sess_data * sess_data)1600 SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
1601 {
1602 struct kvec *iov = sess_data->iov;
1603
1604 /* iov[1] is already freed by caller */
1605 if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
1606 memzero_explicit(iov[0].iov_base, iov[0].iov_len);
1607
1608 free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
1609 sess_data->buf0_type = CIFS_NO_BUFFER;
1610 }
1611
1612 static int
SMB2_sess_sendreceive(struct SMB2_sess_data * sess_data)1613 SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
1614 {
1615 int rc;
1616 struct smb_rqst rqst;
1617 struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
1618 struct kvec rsp_iov = { NULL, 0 };
1619
1620 /* Testing shows that buffer offset must be at location of Buffer[0] */
1621 req->SecurityBufferOffset =
1622 cpu_to_le16(sizeof(struct smb2_sess_setup_req));
1623 req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
1624
1625 memset(&rqst, 0, sizeof(struct smb_rqst));
1626 rqst.rq_iov = sess_data->iov;
1627 rqst.rq_nvec = 2;
1628
1629 /* BB add code to build os and lm fields */
1630 rc = cifs_send_recv(sess_data->xid, sess_data->ses,
1631 sess_data->server,
1632 &rqst,
1633 &sess_data->buf0_type,
1634 CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
1635 cifs_small_buf_release(sess_data->iov[0].iov_base);
1636 if (rc == 0)
1637 sess_data->ses->expired_pwd = false;
1638 else if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
1639 if (sess_data->ses->expired_pwd == false)
1640 trace_smb3_key_expired(sess_data->server->hostname,
1641 sess_data->ses->user_name,
1642 sess_data->server->conn_id,
1643 &sess_data->server->dstaddr, rc);
1644 sess_data->ses->expired_pwd = true;
1645 }
1646
1647 memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
1648
1649 return rc;
1650 }
1651
1652 static int
SMB2_sess_establish_session(struct SMB2_sess_data * sess_data)1653 SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
1654 {
1655 int rc = 0;
1656 struct cifs_ses *ses = sess_data->ses;
1657 struct TCP_Server_Info *server = sess_data->server;
1658
1659 cifs_server_lock(server);
1660 if (server->ops->generate_signingkey) {
1661 rc = server->ops->generate_signingkey(ses, server);
1662 if (rc) {
1663 cifs_dbg(FYI,
1664 "SMB3 session key generation failed\n");
1665 cifs_server_unlock(server);
1666 return rc;
1667 }
1668 }
1669 if (!server->session_estab) {
1670 server->sequence_number = 0x2;
1671 server->session_estab = true;
1672 }
1673 cifs_server_unlock(server);
1674
1675 cifs_dbg(FYI, "SMB2/3 session established successfully\n");
1676 return rc;
1677 }
1678
1679 #ifdef CONFIG_CIFS_UPCALL
1680 static void
SMB2_auth_kerberos(struct SMB2_sess_data * sess_data)1681 SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1682 {
1683 int rc;
1684 struct cifs_ses *ses = sess_data->ses;
1685 struct TCP_Server_Info *server = sess_data->server;
1686 struct cifs_spnego_msg *msg;
1687 struct key *spnego_key = NULL;
1688 struct smb2_sess_setup_rsp *rsp = NULL;
1689 bool is_binding = false;
1690
1691 rc = SMB2_sess_alloc_buffer(sess_data);
1692 if (rc)
1693 goto out;
1694
1695 spnego_key = cifs_get_spnego_key(ses, server);
1696 if (IS_ERR(spnego_key)) {
1697 rc = PTR_ERR(spnego_key);
1698 spnego_key = NULL;
1699 goto out;
1700 }
1701
1702 msg = spnego_key->payload.data[0];
1703 /*
1704 * check version field to make sure that cifs.upcall is
1705 * sending us a response in an expected form
1706 */
1707 if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
1708 cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
1709 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
1710 rc = -EKEYREJECTED;
1711 goto out_put_spnego_key;
1712 }
1713
1714 spin_lock(&ses->ses_lock);
1715 is_binding = (ses->ses_status == SES_GOOD);
1716 spin_unlock(&ses->ses_lock);
1717
1718 /* keep session key if binding */
1719 if (!is_binding) {
1720 kfree_sensitive(ses->auth_key.response);
1721 ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
1722 GFP_KERNEL);
1723 if (!ses->auth_key.response) {
1724 cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
1725 msg->sesskey_len);
1726 rc = -ENOMEM;
1727 goto out_put_spnego_key;
1728 }
1729 ses->auth_key.len = msg->sesskey_len;
1730 }
1731
1732 sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
1733 sess_data->iov[1].iov_len = msg->secblob_len;
1734
1735 rc = SMB2_sess_sendreceive(sess_data);
1736 if (rc)
1737 goto out_put_spnego_key;
1738
1739 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1740 /* keep session id and flags if binding */
1741 if (!is_binding) {
1742 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1743 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1744 }
1745
1746 rc = SMB2_sess_establish_session(sess_data);
1747 out_put_spnego_key:
1748 key_invalidate(spnego_key);
1749 key_put(spnego_key);
1750 if (rc) {
1751 kfree_sensitive(ses->auth_key.response);
1752 ses->auth_key.response = NULL;
1753 ses->auth_key.len = 0;
1754 }
1755 out:
1756 sess_data->result = rc;
1757 sess_data->func = NULL;
1758 SMB2_sess_free_buffer(sess_data);
1759 }
1760 #else
1761 static void
SMB2_auth_kerberos(struct SMB2_sess_data * sess_data)1762 SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1763 {
1764 cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
1765 sess_data->result = -EOPNOTSUPP;
1766 sess_data->func = NULL;
1767 }
1768 #endif
1769
1770 static void
1771 SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
1772
1773 static void
SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data * sess_data)1774 SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
1775 {
1776 int rc;
1777 struct cifs_ses *ses = sess_data->ses;
1778 struct TCP_Server_Info *server = sess_data->server;
1779 struct smb2_sess_setup_rsp *rsp = NULL;
1780 unsigned char *ntlmssp_blob = NULL;
1781 bool use_spnego = false; /* else use raw ntlmssp */
1782 u16 blob_length = 0;
1783 bool is_binding = false;
1784
1785 /*
1786 * If memory allocation is successful, caller of this function
1787 * frees it.
1788 */
1789 ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
1790 if (!ses->ntlmssp) {
1791 rc = -ENOMEM;
1792 goto out_err;
1793 }
1794 ses->ntlmssp->sesskey_per_smbsess = true;
1795
1796 rc = SMB2_sess_alloc_buffer(sess_data);
1797 if (rc)
1798 goto out_err;
1799
1800 rc = build_ntlmssp_smb3_negotiate_blob(&ntlmssp_blob,
1801 &blob_length, ses, server,
1802 sess_data->nls_cp);
1803 if (rc)
1804 goto out;
1805
1806 if (use_spnego) {
1807 /* BB eventually need to add this */
1808 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1809 rc = -EOPNOTSUPP;
1810 goto out;
1811 }
1812 sess_data->iov[1].iov_base = ntlmssp_blob;
1813 sess_data->iov[1].iov_len = blob_length;
1814
1815 rc = SMB2_sess_sendreceive(sess_data);
1816 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1817
1818 /* If true, rc here is expected and not an error */
1819 if (sess_data->buf0_type != CIFS_NO_BUFFER &&
1820 rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
1821 rc = 0;
1822
1823 if (rc)
1824 goto out;
1825
1826 u16 boff = le16_to_cpu(rsp->SecurityBufferOffset);
1827
1828 if (offsetof(struct smb2_sess_setup_rsp, Buffer) != boff) {
1829 cifs_dbg(VFS, "Invalid security buffer offset %d\n", boff);
1830 rc = smb_EIO1(smb_eio_trace_sess_buf_off, boff);
1831 goto out;
1832 }
1833 rc = decode_ntlmssp_challenge(rsp->Buffer,
1834 le16_to_cpu(rsp->SecurityBufferLength), ses);
1835 if (rc)
1836 goto out;
1837
1838 cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
1839
1840 spin_lock(&ses->ses_lock);
1841 is_binding = (ses->ses_status == SES_GOOD);
1842 spin_unlock(&ses->ses_lock);
1843
1844 /* keep existing ses id and flags if binding */
1845 if (!is_binding) {
1846 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1847 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1848 }
1849
1850 out:
1851 kfree_sensitive(ntlmssp_blob);
1852 SMB2_sess_free_buffer(sess_data);
1853 if (!rc) {
1854 sess_data->result = 0;
1855 sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
1856 return;
1857 }
1858 out_err:
1859 kfree_sensitive(ses->ntlmssp);
1860 ses->ntlmssp = NULL;
1861 sess_data->result = rc;
1862 sess_data->func = NULL;
1863 }
1864
1865 static void
SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data * sess_data)1866 SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
1867 {
1868 int rc;
1869 struct cifs_ses *ses = sess_data->ses;
1870 struct TCP_Server_Info *server = sess_data->server;
1871 struct smb2_sess_setup_req *req;
1872 struct smb2_sess_setup_rsp *rsp = NULL;
1873 unsigned char *ntlmssp_blob = NULL;
1874 bool use_spnego = false; /* else use raw ntlmssp */
1875 u16 blob_length = 0;
1876 bool is_binding = false;
1877
1878 rc = SMB2_sess_alloc_buffer(sess_data);
1879 if (rc)
1880 goto out;
1881
1882 req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
1883 req->hdr.SessionId = cpu_to_le64(ses->Suid);
1884
1885 rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length,
1886 ses, server,
1887 sess_data->nls_cp);
1888 if (rc) {
1889 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
1890 goto out;
1891 }
1892
1893 if (use_spnego) {
1894 /* BB eventually need to add this */
1895 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1896 rc = -EOPNOTSUPP;
1897 goto out;
1898 }
1899 sess_data->iov[1].iov_base = ntlmssp_blob;
1900 sess_data->iov[1].iov_len = blob_length;
1901
1902 rc = SMB2_sess_sendreceive(sess_data);
1903 if (rc)
1904 goto out;
1905
1906 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1907
1908 spin_lock(&ses->ses_lock);
1909 is_binding = (ses->ses_status == SES_GOOD);
1910 spin_unlock(&ses->ses_lock);
1911
1912 /* keep existing ses id and flags if binding */
1913 if (!is_binding) {
1914 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1915 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1916 }
1917
1918 rc = SMB2_sess_establish_session(sess_data);
1919 #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
1920 if (ses->server->dialect < SMB30_PROT_ID) {
1921 cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__);
1922 /*
1923 * The session id is opaque in terms of endianness, so we can't
1924 * print it as a long long. we dump it as we got it on the wire
1925 */
1926 cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid),
1927 &ses->Suid);
1928 cifs_dbg(VFS, "Session Key %*ph\n",
1929 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
1930 cifs_dbg(VFS, "Signing Key %*ph\n",
1931 SMB3_SIGN_KEY_SIZE, ses->auth_key.response);
1932 }
1933 #endif
1934 out:
1935 kfree_sensitive(ntlmssp_blob);
1936 SMB2_sess_free_buffer(sess_data);
1937 kfree_sensitive(ses->ntlmssp);
1938 ses->ntlmssp = NULL;
1939 sess_data->result = rc;
1940 sess_data->func = NULL;
1941 }
1942
1943 static int
SMB2_select_sec(struct SMB2_sess_data * sess_data)1944 SMB2_select_sec(struct SMB2_sess_data *sess_data)
1945 {
1946 int type;
1947 struct cifs_ses *ses = sess_data->ses;
1948 struct TCP_Server_Info *server = sess_data->server;
1949
1950 type = smb2_select_sectype(server, ses->sectype);
1951 cifs_dbg(FYI, "sess setup type %d\n", type);
1952 if (type == Unspecified) {
1953 cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
1954 return -EINVAL;
1955 }
1956
1957 switch (type) {
1958 case Kerberos:
1959 sess_data->func = SMB2_auth_kerberos;
1960 break;
1961 case RawNTLMSSP:
1962 sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
1963 break;
1964 default:
1965 cifs_dbg(VFS, "secType %d not supported!\n", type);
1966 return -EOPNOTSUPP;
1967 }
1968
1969 return 0;
1970 }
1971
1972 int
SMB2_sess_setup(const unsigned int xid,struct cifs_ses * ses,struct TCP_Server_Info * server,const struct nls_table * nls_cp)1973 SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
1974 struct TCP_Server_Info *server,
1975 const struct nls_table *nls_cp)
1976 {
1977 int rc = 0;
1978 struct SMB2_sess_data *sess_data;
1979
1980 cifs_dbg(FYI, "Session Setup\n");
1981
1982 if (!server) {
1983 WARN(1, "%s: server is NULL!\n", __func__);
1984 return smb_EIO(smb_eio_trace_null_pointers);
1985 }
1986
1987 sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
1988 if (!sess_data)
1989 return -ENOMEM;
1990
1991 sess_data->xid = xid;
1992 sess_data->ses = ses;
1993 sess_data->server = server;
1994 sess_data->buf0_type = CIFS_NO_BUFFER;
1995 sess_data->nls_cp = (struct nls_table *) nls_cp;
1996 sess_data->previous_session = ses->Suid;
1997
1998 rc = SMB2_select_sec(sess_data);
1999 if (rc)
2000 goto out;
2001
2002 /*
2003 * Initialize the session hash with the server one.
2004 */
2005 memcpy(ses->preauth_sha_hash, server->preauth_sha_hash,
2006 SMB2_PREAUTH_HASH_SIZE);
2007
2008 while (sess_data->func)
2009 sess_data->func(sess_data);
2010
2011 if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
2012 cifs_server_dbg(VFS, "signing requested but authenticated as guest\n");
2013 rc = sess_data->result;
2014 out:
2015 kfree_sensitive(sess_data);
2016 return rc;
2017 }
2018
2019 int
SMB2_logoff(const unsigned int xid,struct cifs_ses * ses)2020 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
2021 {
2022 struct smb_rqst rqst;
2023 struct smb2_logoff_req *req; /* response is also trivial struct */
2024 int rc = 0;
2025 struct TCP_Server_Info *server;
2026 int flags = 0;
2027 unsigned int total_len;
2028 struct kvec iov[1];
2029 struct kvec rsp_iov;
2030 int resp_buf_type;
2031
2032 cifs_dbg(FYI, "disconnect session %p\n", ses);
2033
2034 if (!ses || !ses->server)
2035 return smb_EIO(smb_eio_trace_null_pointers);
2036 server = ses->server;
2037
2038 /* no need to send SMB logoff if uid already closed due to reconnect */
2039 spin_lock(&ses->chan_lock);
2040 if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
2041 spin_unlock(&ses->chan_lock);
2042 goto smb2_session_already_dead;
2043 }
2044 spin_unlock(&ses->chan_lock);
2045
2046 rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
2047 (void **) &req, &total_len);
2048 if (rc)
2049 return rc;
2050
2051 /* since no tcon, smb2_init can not do this, so do here */
2052 req->hdr.SessionId = cpu_to_le64(ses->Suid);
2053
2054 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
2055 flags |= CIFS_TRANSFORM_REQ;
2056 else if (server->sign)
2057 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
2058
2059 flags |= CIFS_NO_RSP_BUF;
2060
2061 iov[0].iov_base = (char *)req;
2062 iov[0].iov_len = total_len;
2063
2064 memset(&rqst, 0, sizeof(struct smb_rqst));
2065 rqst.rq_iov = iov;
2066 rqst.rq_nvec = 1;
2067
2068 rc = cifs_send_recv(xid, ses, ses->server,
2069 &rqst, &resp_buf_type, flags, &rsp_iov);
2070 cifs_small_buf_release(req);
2071 /*
2072 * No tcon so can't do
2073 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
2074 */
2075
2076 smb2_session_already_dead:
2077 return rc;
2078 }
2079
cifs_stats_fail_inc(struct cifs_tcon * tcon,uint16_t code)2080 static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
2081 {
2082 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
2083 }
2084
2085 #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
2086
2087 /* These are similar values to what Windows uses */
init_copy_chunk_defaults(struct cifs_tcon * tcon)2088 static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
2089 {
2090 tcon->max_chunks = 256;
2091 tcon->max_bytes_chunk = 1048576;
2092 tcon->max_bytes_copy = 16777216;
2093 }
2094
2095 int
SMB2_tcon(const unsigned int xid,struct cifs_ses * ses,const char * tree,struct cifs_tcon * tcon,const struct nls_table * cp)2096 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
2097 struct cifs_tcon *tcon, const struct nls_table *cp)
2098 {
2099 struct smb_rqst rqst;
2100 struct smb2_tree_connect_req *req;
2101 struct smb2_tree_connect_rsp *rsp = NULL;
2102 struct kvec iov[2];
2103 struct kvec rsp_iov = { NULL, 0 };
2104 int rc = 0;
2105 int resp_buftype;
2106 int unc_path_len;
2107 __le16 *unc_path = NULL;
2108 int flags = 0;
2109 unsigned int total_len;
2110 struct TCP_Server_Info *server = cifs_pick_channel(ses);
2111
2112 cifs_dbg(FYI, "TCON\n");
2113
2114 if (!server || !tree)
2115 return smb_EIO(smb_eio_trace_null_pointers);
2116
2117 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
2118 if (unc_path == NULL)
2119 return -ENOMEM;
2120
2121 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp);
2122 if (unc_path_len <= 0) {
2123 kfree(unc_path);
2124 return -EINVAL;
2125 }
2126 unc_path_len *= 2;
2127
2128 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
2129 tcon->tid = 0;
2130 atomic_set(&tcon->num_remote_opens, 0);
2131 rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server,
2132 (void **) &req, &total_len);
2133 if (rc) {
2134 kfree(unc_path);
2135 return rc;
2136 }
2137
2138 if (smb3_encryption_required(tcon))
2139 flags |= CIFS_TRANSFORM_REQ;
2140
2141 iov[0].iov_base = (char *)req;
2142 /* 1 for pad */
2143 iov[0].iov_len = total_len - 1;
2144
2145 /* Testing shows that buffer offset must be at location of Buffer[0] */
2146 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req));
2147 req->PathLength = cpu_to_le16(unc_path_len);
2148 iov[1].iov_base = unc_path;
2149 iov[1].iov_len = unc_path_len;
2150
2151 /*
2152 * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1
2153 * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1
2154 * (Samba servers don't always set the flag so also check if null user)
2155 */
2156 if ((server->dialect == SMB311_PROT_ID) &&
2157 !smb3_encryption_required(tcon) &&
2158 !(ses->session_flags &
2159 (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
2160 ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
2161 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
2162
2163 memset(&rqst, 0, sizeof(struct smb_rqst));
2164 rqst.rq_iov = iov;
2165 rqst.rq_nvec = 2;
2166
2167 /* Need 64 for max size write so ask for more in case not there yet */
2168 if (server->credits >= server->max_credits)
2169 req->hdr.CreditRequest = cpu_to_le16(0);
2170 else
2171 req->hdr.CreditRequest = cpu_to_le16(
2172 min_t(int, server->max_credits -
2173 server->credits, 64));
2174
2175 rc = cifs_send_recv(xid, ses, server,
2176 &rqst, &resp_buftype, flags, &rsp_iov);
2177 cifs_small_buf_release(req);
2178 rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
2179 trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
2180 if ((rc != 0) || (rsp == NULL)) {
2181 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
2182 tcon->need_reconnect = true;
2183 goto tcon_error_exit;
2184 }
2185
2186 switch (rsp->ShareType) {
2187 case SMB2_SHARE_TYPE_DISK:
2188 cifs_dbg(FYI, "connection to disk share\n");
2189 break;
2190 case SMB2_SHARE_TYPE_PIPE:
2191 tcon->pipe = true;
2192 cifs_dbg(FYI, "connection to pipe share\n");
2193 break;
2194 case SMB2_SHARE_TYPE_PRINT:
2195 tcon->print = true;
2196 cifs_dbg(FYI, "connection to printer\n");
2197 break;
2198 default:
2199 cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
2200 rc = -EOPNOTSUPP;
2201 goto tcon_error_exit;
2202 }
2203
2204 tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
2205 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
2206 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
2207 tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
2208 strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));
2209
2210 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
2211 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
2212 cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n");
2213
2214 if (tcon->seal &&
2215 !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
2216 cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n");
2217
2218 init_copy_chunk_defaults(tcon);
2219 if (server->ops->validate_negotiate)
2220 rc = server->ops->validate_negotiate(xid, tcon);
2221 if (rc == 0) /* See MS-SMB2 2.2.10 and 3.2.5.5 */
2222 if (tcon->share_flags & SMB2_SHAREFLAG_ISOLATED_TRANSPORT)
2223 server->nosharesock = true;
2224 tcon_exit:
2225
2226 free_rsp_buf(resp_buftype, rsp);
2227 kfree(unc_path);
2228 return rc;
2229
2230 tcon_error_exit:
2231 if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME)
2232 cifs_dbg(VFS | ONCE, "BAD_NETWORK_NAME: %s\n", tree);
2233 goto tcon_exit;
2234 }
2235
2236 int
SMB2_tdis(const unsigned int xid,struct cifs_tcon * tcon)2237 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
2238 {
2239 struct smb_rqst rqst;
2240 struct smb2_tree_disconnect_req *req; /* response is trivial */
2241 int rc = 0;
2242 struct cifs_ses *ses = tcon->ses;
2243 struct TCP_Server_Info *server = cifs_pick_channel(ses);
2244 int flags = 0;
2245 unsigned int total_len;
2246 struct kvec iov[1];
2247 struct kvec rsp_iov;
2248 int resp_buf_type;
2249
2250 cifs_dbg(FYI, "Tree Disconnect\n");
2251
2252 if (!ses || !(ses->server))
2253 return smb_EIO(smb_eio_trace_null_pointers);
2254
2255 trace_smb3_tdis_enter(xid, tcon->tid, ses->Suid, tcon->tree_name);
2256 spin_lock(&ses->chan_lock);
2257 if ((tcon->need_reconnect) ||
2258 (CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) {
2259 spin_unlock(&ses->chan_lock);
2260 return 0;
2261 }
2262 spin_unlock(&ses->chan_lock);
2263
2264 invalidate_all_cached_dirs(tcon);
2265
2266 rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, server,
2267 (void **) &req,
2268 &total_len);
2269 if (rc)
2270 return rc;
2271
2272 if (smb3_encryption_required(tcon))
2273 flags |= CIFS_TRANSFORM_REQ;
2274
2275 flags |= CIFS_NO_RSP_BUF;
2276
2277 iov[0].iov_base = (char *)req;
2278 iov[0].iov_len = total_len;
2279
2280 memset(&rqst, 0, sizeof(struct smb_rqst));
2281 rqst.rq_iov = iov;
2282 rqst.rq_nvec = 1;
2283
2284 rc = cifs_send_recv(xid, ses, server,
2285 &rqst, &resp_buf_type, flags, &rsp_iov);
2286 cifs_small_buf_release(req);
2287 if (rc) {
2288 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
2289 trace_smb3_tdis_err(xid, tcon->tid, ses->Suid, rc);
2290 }
2291 trace_smb3_tdis_done(xid, tcon->tid, ses->Suid);
2292
2293 return rc;
2294 }
2295
2296 static create_durable_req_t *
create_durable_buf(void)2297 create_durable_buf(void)
2298 {
2299 create_durable_req_t *buf;
2300
2301 buf = kzalloc(sizeof(create_durable_req_t), GFP_KERNEL);
2302 if (!buf)
2303 return NULL;
2304
2305 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2306 (create_durable_req_t, Data));
2307 buf->ccontext.DataLength = cpu_to_le32(16);
2308 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2309 (create_durable_req_t, Name));
2310 buf->ccontext.NameLength = cpu_to_le16(4);
2311 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
2312 buf->Name[0] = 'D';
2313 buf->Name[1] = 'H';
2314 buf->Name[2] = 'n';
2315 buf->Name[3] = 'Q';
2316 return buf;
2317 }
2318
2319 static create_durable_req_t *
create_reconnect_durable_buf(struct cifs_fid * fid)2320 create_reconnect_durable_buf(struct cifs_fid *fid)
2321 {
2322 create_durable_req_t *buf;
2323
2324 buf = kzalloc(sizeof(create_durable_req_t), GFP_KERNEL);
2325 if (!buf)
2326 return NULL;
2327
2328 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2329 (create_durable_req_t, Data));
2330 buf->ccontext.DataLength = cpu_to_le32(16);
2331 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2332 (create_durable_req_t, Name));
2333 buf->ccontext.NameLength = cpu_to_le16(4);
2334 buf->Data.Fid.PersistentFileId = fid->persistent_fid;
2335 buf->Data.Fid.VolatileFileId = fid->volatile_fid;
2336 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
2337 buf->Name[0] = 'D';
2338 buf->Name[1] = 'H';
2339 buf->Name[2] = 'n';
2340 buf->Name[3] = 'C';
2341 return buf;
2342 }
2343
2344 static void
parse_query_id_ctxt(struct create_context * cc,struct smb2_file_all_info * buf)2345 parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf)
2346 {
2347 struct create_disk_id_rsp *pdisk_id = (struct create_disk_id_rsp *)cc;
2348
2349 cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n",
2350 pdisk_id->DiskFileId, pdisk_id->VolumeId);
2351 buf->IndexNumber = pdisk_id->DiskFileId;
2352 }
2353
2354 static void
parse_posix_ctxt(struct create_context * cc,struct smb2_file_all_info * info,struct create_posix_rsp * posix)2355 parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
2356 struct create_posix_rsp *posix)
2357 {
2358 int sid_len;
2359 u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset);
2360 u8 *end = beg + le32_to_cpu(cc->DataLength);
2361 u8 *sid;
2362
2363 memset(posix, 0, sizeof(*posix));
2364
2365 posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0));
2366 posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4));
2367 posix->mode = le32_to_cpu(*(__le32 *)(beg + 8));
2368
2369 sid = beg + 12;
2370 sid_len = posix_info_sid_size(sid, end);
2371 if (sid_len < 0) {
2372 cifs_dbg(VFS, "bad owner sid in posix create response\n");
2373 return;
2374 }
2375 memcpy(&posix->owner, sid, sid_len);
2376
2377 sid = sid + sid_len;
2378 sid_len = posix_info_sid_size(sid, end);
2379 if (sid_len < 0) {
2380 cifs_dbg(VFS, "bad group sid in posix create response\n");
2381 return;
2382 }
2383 memcpy(&posix->group, sid, sid_len);
2384
2385 cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n",
2386 posix->nlink, posix->mode, posix->reparse_tag);
2387 }
2388
smb2_parse_contexts(struct TCP_Server_Info * server,struct kvec * rsp_iov,__u16 * epoch,char * lease_key,__u8 * oplock,struct smb2_file_all_info * buf,struct create_posix_rsp * posix)2389 int smb2_parse_contexts(struct TCP_Server_Info *server,
2390 struct kvec *rsp_iov,
2391 __u16 *epoch,
2392 char *lease_key, __u8 *oplock,
2393 struct smb2_file_all_info *buf,
2394 struct create_posix_rsp *posix)
2395 {
2396 struct smb2_create_rsp *rsp = rsp_iov->iov_base;
2397 struct create_context *cc;
2398 size_t rem, off, len;
2399 size_t doff, dlen;
2400 size_t noff, nlen;
2401 char *name;
2402 static const char smb3_create_tag_posix[] = {
2403 0x93, 0xAD, 0x25, 0x50, 0x9C,
2404 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83,
2405 0xDE, 0x96, 0x8B, 0xCD, 0x7C
2406 };
2407
2408 *oplock = 0;
2409
2410 off = le32_to_cpu(rsp->CreateContextsOffset);
2411 rem = le32_to_cpu(rsp->CreateContextsLength);
2412 if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len)
2413 return -EINVAL;
2414 cc = (struct create_context *)((u8 *)rsp + off);
2415
2416 /* Initialize inode number to 0 in case no valid data in qfid context */
2417 if (buf)
2418 buf->IndexNumber = 0;
2419
2420 while (rem >= sizeof(*cc)) {
2421 doff = le16_to_cpu(cc->DataOffset);
2422 dlen = le32_to_cpu(cc->DataLength);
2423 if (check_add_overflow(doff, dlen, &len) || len > rem)
2424 return -EINVAL;
2425
2426 noff = le16_to_cpu(cc->NameOffset);
2427 nlen = le16_to_cpu(cc->NameLength);
2428 if (noff + nlen > doff)
2429 return -EINVAL;
2430
2431 name = (char *)cc + noff;
2432 switch (nlen) {
2433 case 4:
2434 if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) {
2435 *oplock = server->ops->parse_lease_buf(cc, epoch,
2436 lease_key);
2437 } else if (buf &&
2438 !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) {
2439 parse_query_id_ctxt(cc, buf);
2440 }
2441 break;
2442 case 16:
2443 if (posix && !memcmp(name, smb3_create_tag_posix, 16))
2444 parse_posix_ctxt(cc, buf, posix);
2445 break;
2446 default:
2447 cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n",
2448 __func__, nlen, dlen);
2449 if (IS_ENABLED(CONFIG_CIFS_DEBUG2))
2450 cifs_dump_mem("context data: ", cc, dlen);
2451 break;
2452 }
2453
2454 off = le32_to_cpu(cc->Next);
2455 if (!off)
2456 break;
2457 if (check_sub_overflow(rem, off, &rem))
2458 return -EINVAL;
2459 cc = (struct create_context *)((u8 *)cc + off);
2460 }
2461
2462 if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
2463 *oplock = rsp->OplockLevel;
2464
2465 return 0;
2466 }
2467
2468 static int
add_lease_context(struct TCP_Server_Info * server,struct smb2_create_req * req,struct kvec * iov,unsigned int * num_iovec,u8 * lease_key,__u8 * oplock,u8 * parent_lease_key,__le32 flags)2469 add_lease_context(struct TCP_Server_Info *server,
2470 struct smb2_create_req *req,
2471 struct kvec *iov,
2472 unsigned int *num_iovec,
2473 u8 *lease_key,
2474 __u8 *oplock,
2475 u8 *parent_lease_key,
2476 __le32 flags)
2477 {
2478 unsigned int num = *num_iovec;
2479
2480 iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock,
2481 parent_lease_key, flags);
2482 if (iov[num].iov_base == NULL)
2483 return -ENOMEM;
2484 iov[num].iov_len = server->vals->create_lease_size;
2485 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
2486 *num_iovec = num + 1;
2487 return 0;
2488 }
2489
2490 static struct create_durable_req_v2 *
create_durable_v2_buf(struct cifs_open_parms * oparms)2491 create_durable_v2_buf(struct cifs_open_parms *oparms)
2492 {
2493 struct cifs_fid *pfid = oparms->fid;
2494 struct create_durable_req_v2 *buf;
2495
2496 buf = kzalloc(sizeof(struct create_durable_req_v2), GFP_KERNEL);
2497 if (!buf)
2498 return NULL;
2499
2500 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2501 (struct create_durable_req_v2, dcontext));
2502 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2_req));
2503 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2504 (struct create_durable_req_v2, Name));
2505 buf->ccontext.NameLength = cpu_to_le16(4);
2506
2507 /*
2508 * NB: Handle timeout defaults to 0, which allows server to choose
2509 * (most servers default to 120 seconds) and most clients default to 0.
2510 * This can be overridden at mount ("handletimeout=") if the user wants
2511 * a different persistent (or resilient) handle timeout for all opens
2512 * on a particular SMB3 mount.
2513 */
2514 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
2515 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2516
2517 /* for replay, we should not overwrite the existing create guid */
2518 if (!oparms->replay) {
2519 generate_random_uuid(buf->dcontext.CreateGuid);
2520 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
2521 } else
2522 memcpy(buf->dcontext.CreateGuid, pfid->create_guid, 16);
2523
2524 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
2525 buf->Name[0] = 'D';
2526 buf->Name[1] = 'H';
2527 buf->Name[2] = '2';
2528 buf->Name[3] = 'Q';
2529 return buf;
2530 }
2531
2532 static struct create_durable_handle_reconnect_v2 *
create_reconnect_durable_v2_buf(struct cifs_fid * fid)2533 create_reconnect_durable_v2_buf(struct cifs_fid *fid)
2534 {
2535 struct create_durable_handle_reconnect_v2 *buf;
2536
2537 buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
2538 GFP_KERNEL);
2539 if (!buf)
2540 return NULL;
2541
2542 buf->ccontext.DataOffset =
2543 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2544 dcontext));
2545 buf->ccontext.DataLength =
2546 cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
2547 buf->ccontext.NameOffset =
2548 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2549 Name));
2550 buf->ccontext.NameLength = cpu_to_le16(4);
2551
2552 buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
2553 buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
2554 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2555 memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
2556
2557 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
2558 buf->Name[0] = 'D';
2559 buf->Name[1] = 'H';
2560 buf->Name[2] = '2';
2561 buf->Name[3] = 'C';
2562 return buf;
2563 }
2564
2565 static int
add_durable_v2_context(struct kvec * iov,unsigned int * num_iovec,struct cifs_open_parms * oparms)2566 add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
2567 struct cifs_open_parms *oparms)
2568 {
2569 unsigned int num = *num_iovec;
2570
2571 iov[num].iov_base = create_durable_v2_buf(oparms);
2572 if (iov[num].iov_base == NULL)
2573 return -ENOMEM;
2574 iov[num].iov_len = sizeof(struct create_durable_req_v2);
2575 *num_iovec = num + 1;
2576 return 0;
2577 }
2578
2579 static int
add_durable_reconnect_v2_context(struct kvec * iov,unsigned int * num_iovec,struct cifs_open_parms * oparms)2580 add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
2581 struct cifs_open_parms *oparms)
2582 {
2583 unsigned int num = *num_iovec;
2584
2585 /* indicate that we don't need to relock the file */
2586 oparms->reconnect = false;
2587
2588 iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
2589 if (iov[num].iov_base == NULL)
2590 return -ENOMEM;
2591 iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
2592 *num_iovec = num + 1;
2593 return 0;
2594 }
2595
2596 static int
add_durable_context(struct kvec * iov,unsigned int * num_iovec,struct cifs_open_parms * oparms,bool use_persistent)2597 add_durable_context(struct kvec *iov, unsigned int *num_iovec,
2598 struct cifs_open_parms *oparms, bool use_persistent)
2599 {
2600 unsigned int num = *num_iovec;
2601
2602 if (use_persistent) {
2603 if (oparms->reconnect)
2604 return add_durable_reconnect_v2_context(iov, num_iovec,
2605 oparms);
2606 else
2607 return add_durable_v2_context(iov, num_iovec, oparms);
2608 }
2609
2610 if (oparms->reconnect) {
2611 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
2612 /* indicate that we don't need to relock the file */
2613 oparms->reconnect = false;
2614 } else
2615 iov[num].iov_base = create_durable_buf();
2616 if (iov[num].iov_base == NULL)
2617 return -ENOMEM;
2618 iov[num].iov_len = sizeof(create_durable_req_t);
2619 *num_iovec = num + 1;
2620 return 0;
2621 }
2622
2623 /* See MS-SMB2 2.2.13.2.7 */
2624 static struct crt_twarp_ctxt *
create_twarp_buf(__u64 timewarp)2625 create_twarp_buf(__u64 timewarp)
2626 {
2627 struct crt_twarp_ctxt *buf;
2628
2629 buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
2630 if (!buf)
2631 return NULL;
2632
2633 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2634 (struct crt_twarp_ctxt, Timestamp));
2635 buf->ccontext.DataLength = cpu_to_le32(8);
2636 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2637 (struct crt_twarp_ctxt, Name));
2638 buf->ccontext.NameLength = cpu_to_le16(4);
2639 /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
2640 buf->Name[0] = 'T';
2641 buf->Name[1] = 'W';
2642 buf->Name[2] = 'r';
2643 buf->Name[3] = 'p';
2644 buf->Timestamp = cpu_to_le64(timewarp);
2645 return buf;
2646 }
2647
2648 /* See MS-SMB2 2.2.13.2.7 */
2649 static int
add_twarp_context(struct kvec * iov,unsigned int * num_iovec,__u64 timewarp)2650 add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
2651 {
2652 unsigned int num = *num_iovec;
2653
2654 iov[num].iov_base = create_twarp_buf(timewarp);
2655 if (iov[num].iov_base == NULL)
2656 return -ENOMEM;
2657 iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
2658 *num_iovec = num + 1;
2659 return 0;
2660 }
2661
2662 /* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
setup_owner_group_sids(char * buf)2663 static void setup_owner_group_sids(char *buf)
2664 {
2665 struct owner_group_sids *sids = (struct owner_group_sids *)buf;
2666
2667 /* Populate the user ownership fields S-1-5-88-1 */
2668 sids->owner.Revision = 1;
2669 sids->owner.NumAuth = 3;
2670 sids->owner.Authority[5] = 5;
2671 sids->owner.SubAuthorities[0] = cpu_to_le32(88);
2672 sids->owner.SubAuthorities[1] = cpu_to_le32(1);
2673 sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val);
2674
2675 /* Populate the group ownership fields S-1-5-88-2 */
2676 sids->group.Revision = 1;
2677 sids->group.NumAuth = 3;
2678 sids->group.Authority[5] = 5;
2679 sids->group.SubAuthorities[0] = cpu_to_le32(88);
2680 sids->group.SubAuthorities[1] = cpu_to_le32(2);
2681 sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val);
2682
2683 cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val);
2684 }
2685
2686 /* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */
2687 static struct crt_sd_ctxt *
create_sd_buf(umode_t mode,bool set_owner,unsigned int * len)2688 create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
2689 {
2690 struct crt_sd_ctxt *buf;
2691 __u8 *ptr, *aclptr;
2692 unsigned int acelen, acl_size, ace_count;
2693 unsigned int owner_offset = 0;
2694 unsigned int group_offset = 0;
2695 struct smb3_acl acl = {};
2696
2697 *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct smb_ace) * 4), 8);
2698
2699 if (set_owner) {
2700 /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */
2701 *len += sizeof(struct owner_group_sids);
2702 }
2703
2704 buf = kzalloc(*len, GFP_KERNEL);
2705 if (buf == NULL)
2706 return buf;
2707
2708 ptr = (__u8 *)&buf[1];
2709 if (set_owner) {
2710 /* offset fields are from beginning of security descriptor not of create context */
2711 owner_offset = ptr - (__u8 *)&buf->sd;
2712 buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
2713 group_offset = owner_offset + offsetof(struct owner_group_sids, group);
2714 buf->sd.OffsetGroup = cpu_to_le32(group_offset);
2715
2716 setup_owner_group_sids(ptr);
2717 ptr += sizeof(struct owner_group_sids);
2718 } else {
2719 buf->sd.OffsetOwner = 0;
2720 buf->sd.OffsetGroup = 0;
2721 }
2722
2723 buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
2724 buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
2725 buf->ccontext.NameLength = cpu_to_le16(4);
2726 /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
2727 buf->Name[0] = 'S';
2728 buf->Name[1] = 'e';
2729 buf->Name[2] = 'c';
2730 buf->Name[3] = 'D';
2731 buf->sd.Revision = 1; /* Must be one see MS-DTYP 2.4.6 */
2732
2733 /*
2734 * ACL is "self relative" ie ACL is stored in contiguous block of memory
2735 * and "DP" ie the DACL is present
2736 */
2737 buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);
2738
2739 /* offset owner, group and Sbz1 and SACL are all zero */
2740 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2741 /* Ship the ACL for now. we will copy it into buf later. */
2742 aclptr = ptr;
2743 ptr += sizeof(struct smb3_acl);
2744
2745 /* create one ACE to hold the mode embedded in reserved special SID */
2746 acelen = setup_special_mode_ACE((struct smb_ace *)ptr, false, (__u64)mode);
2747 ptr += acelen;
2748 acl_size = acelen + sizeof(struct smb3_acl);
2749 ace_count = 1;
2750
2751 if (set_owner) {
2752 /* we do not need to reallocate buffer to add the two more ACEs. plenty of space */
2753 acelen = setup_special_user_owner_ACE((struct smb_ace *)ptr);
2754 ptr += acelen;
2755 acl_size += acelen;
2756 ace_count += 1;
2757 }
2758
2759 /* and one more ACE to allow access for authenticated users */
2760 acelen = setup_authusers_ACE((struct smb_ace *)ptr);
2761 ptr += acelen;
2762 acl_size += acelen;
2763 ace_count += 1;
2764
2765 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
2766 acl.AclSize = cpu_to_le16(acl_size);
2767 acl.AceCount = cpu_to_le16(ace_count);
2768 /* acl.Sbz1 and Sbz2 MBZ so are not set here, but initialized above */
2769 memcpy(aclptr, &acl, sizeof(struct smb3_acl));
2770
2771 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2772 *len = round_up((unsigned int)(ptr - (__u8 *)buf), 8);
2773
2774 return buf;
2775 }
2776
2777 static int
add_sd_context(struct kvec * iov,unsigned int * num_iovec,umode_t mode,bool set_owner)2778 add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner)
2779 {
2780 unsigned int num = *num_iovec;
2781 unsigned int len = 0;
2782
2783 iov[num].iov_base = create_sd_buf(mode, set_owner, &len);
2784 if (iov[num].iov_base == NULL)
2785 return -ENOMEM;
2786 iov[num].iov_len = len;
2787 *num_iovec = num + 1;
2788 return 0;
2789 }
2790
2791 static struct crt_query_id_ctxt *
create_query_id_buf(void)2792 create_query_id_buf(void)
2793 {
2794 struct crt_query_id_ctxt *buf;
2795
2796 buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL);
2797 if (!buf)
2798 return NULL;
2799
2800 buf->ccontext.DataOffset = cpu_to_le16(0);
2801 buf->ccontext.DataLength = cpu_to_le32(0);
2802 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2803 (struct crt_query_id_ctxt, Name));
2804 buf->ccontext.NameLength = cpu_to_le16(4);
2805 /* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */
2806 buf->Name[0] = 'Q';
2807 buf->Name[1] = 'F';
2808 buf->Name[2] = 'i';
2809 buf->Name[3] = 'd';
2810 return buf;
2811 }
2812
2813 /* See MS-SMB2 2.2.13.2.9 */
2814 static int
add_query_id_context(struct kvec * iov,unsigned int * num_iovec)2815 add_query_id_context(struct kvec *iov, unsigned int *num_iovec)
2816 {
2817 unsigned int num = *num_iovec;
2818
2819 iov[num].iov_base = create_query_id_buf();
2820 if (iov[num].iov_base == NULL)
2821 return -ENOMEM;
2822 iov[num].iov_len = sizeof(struct crt_query_id_ctxt);
2823 *num_iovec = num + 1;
2824 return 0;
2825 }
2826
add_ea_context(struct cifs_open_parms * oparms,struct kvec * rq_iov,unsigned int * num_iovs)2827 static void add_ea_context(struct cifs_open_parms *oparms,
2828 struct kvec *rq_iov, unsigned int *num_iovs)
2829 {
2830 struct kvec *iov = oparms->ea_cctx;
2831
2832 if (iov && iov->iov_base && iov->iov_len) {
2833 rq_iov[(*num_iovs)++] = *iov;
2834 memset(iov, 0, sizeof(*iov));
2835 }
2836 }
2837
2838 static int
alloc_path_with_tree_prefix(__le16 ** out_path,int * out_size,int * out_len,const char * treename,const __le16 * path)2839 alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
2840 const char *treename, const __le16 *path)
2841 {
2842 int treename_len, path_len;
2843 struct nls_table *cp;
2844 const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};
2845
2846 /*
2847 * skip leading "\\"
2848 */
2849 treename_len = strlen(treename);
2850 if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
2851 return -EINVAL;
2852
2853 treename += 2;
2854 treename_len -= 2;
2855
2856 path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
2857
2858 /* make room for one path separator only if @path isn't empty */
2859 *out_len = treename_len + (path[0] ? 1 : 0) + path_len;
2860
2861 /*
2862 * final path needs to be 8-byte aligned as specified in
2863 * MS-SMB2 2.2.13 SMB2 CREATE Request.
2864 */
2865 *out_size = round_up(*out_len * sizeof(__le16), 8);
2866 *out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL);
2867 if (!*out_path)
2868 return -ENOMEM;
2869
2870 cp = load_nls_default();
2871 cifs_strtoUTF16(*out_path, treename, treename_len, cp);
2872
2873 /* Do not append the separator if the path is empty */
2874 if (path[0] != cpu_to_le16(0x0000)) {
2875 UniStrcat((wchar_t *)*out_path, (wchar_t *)sep);
2876 UniStrcat((wchar_t *)*out_path, (wchar_t *)path);
2877 }
2878
2879 unload_nls(cp);
2880
2881 return 0;
2882 }
2883
smb311_posix_mkdir(const unsigned int xid,struct inode * inode,umode_t mode,struct cifs_tcon * tcon,const char * full_path,struct cifs_sb_info * cifs_sb)2884 int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
2885 umode_t mode, struct cifs_tcon *tcon,
2886 const char *full_path,
2887 struct cifs_sb_info *cifs_sb)
2888 {
2889 struct smb_rqst rqst;
2890 struct smb2_create_req *req;
2891 struct smb2_create_rsp *rsp = NULL;
2892 struct cifs_ses *ses = tcon->ses;
2893 struct kvec iov[3]; /* make sure at least one for each open context */
2894 struct kvec rsp_iov = {NULL, 0};
2895 int resp_buftype;
2896 int uni_path_len;
2897 __le16 *copy_path = NULL;
2898 int copy_size;
2899 int rc = 0;
2900 unsigned int n_iov = 2;
2901 __u32 file_attributes = 0;
2902 char *pc_buf = NULL;
2903 int flags = 0;
2904 unsigned int total_len;
2905 __le16 *utf16_path = NULL;
2906 struct TCP_Server_Info *server;
2907 int retries = 0, cur_sleep = 1;
2908
2909 replay_again:
2910 /* reinitialize for possible replay */
2911 flags = 0;
2912 n_iov = 2;
2913 server = cifs_pick_channel(ses);
2914
2915 cifs_dbg(FYI, "mkdir\n");
2916
2917 /* resource #1: path allocation */
2918 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2919 if (!utf16_path)
2920 return -ENOMEM;
2921
2922 if (!ses || !server) {
2923 rc = smb_EIO(smb_eio_trace_null_pointers);
2924 goto err_free_path;
2925 }
2926
2927 /* resource #2: request */
2928 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
2929 (void **) &req, &total_len);
2930 if (rc)
2931 goto err_free_path;
2932
2933
2934 if (smb3_encryption_required(tcon))
2935 flags |= CIFS_TRANSFORM_REQ;
2936
2937 req->ImpersonationLevel = IL_IMPERSONATION;
2938 req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
2939 /* File attributes ignored on open (used in create though) */
2940 req->FileAttributes = cpu_to_le32(file_attributes);
2941 req->ShareAccess = FILE_SHARE_ALL_LE;
2942 req->CreateDisposition = cpu_to_le32(FILE_CREATE);
2943 req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
2944
2945 iov[0].iov_base = (char *)req;
2946 /* -1 since last byte is buf[0] which is sent below (path) */
2947 iov[0].iov_len = total_len - 1;
2948
2949 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
2950
2951 /* [MS-SMB2] 2.2.13 NameOffset:
2952 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
2953 * the SMB2 header, the file name includes a prefix that will
2954 * be processed during DFS name normalization as specified in
2955 * section 3.3.5.9. Otherwise, the file name is relative to
2956 * the share that is identified by the TreeId in the SMB2
2957 * header.
2958 */
2959 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2960 int name_len;
2961
2962 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
2963 rc = alloc_path_with_tree_prefix(©_path, ©_size,
2964 &name_len,
2965 tcon->tree_name, utf16_path);
2966 if (rc)
2967 goto err_free_req;
2968
2969 req->NameLength = cpu_to_le16(name_len * 2);
2970 uni_path_len = copy_size;
2971 /* free before overwriting resource */
2972 kfree(utf16_path);
2973 utf16_path = copy_path;
2974 } else {
2975 uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
2976 /* MUST set path len (NameLength) to 0 opening root of share */
2977 req->NameLength = cpu_to_le16(uni_path_len - 2);
2978 if (uni_path_len % 8 != 0) {
2979 copy_size = roundup(uni_path_len, 8);
2980 copy_path = kzalloc(copy_size, GFP_KERNEL);
2981 if (!copy_path) {
2982 rc = -ENOMEM;
2983 goto err_free_req;
2984 }
2985 memcpy((char *)copy_path, (const char *)utf16_path,
2986 uni_path_len);
2987 uni_path_len = copy_size;
2988 /* free before overwriting resource */
2989 kfree(utf16_path);
2990 utf16_path = copy_path;
2991 }
2992 }
2993
2994 iov[1].iov_len = uni_path_len;
2995 iov[1].iov_base = utf16_path;
2996 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
2997
2998 if (tcon->posix_extensions) {
2999 /* resource #3: posix buf */
3000 rc = add_posix_context(iov, &n_iov, mode);
3001 if (rc)
3002 goto err_free_req;
3003 req->CreateContextsOffset = cpu_to_le32(
3004 sizeof(struct smb2_create_req) +
3005 iov[1].iov_len);
3006 le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len);
3007 pc_buf = iov[n_iov-1].iov_base;
3008 }
3009
3010
3011 memset(&rqst, 0, sizeof(struct smb_rqst));
3012 rqst.rq_iov = iov;
3013 rqst.rq_nvec = n_iov;
3014
3015 /* no need to inc num_remote_opens because we close it just below */
3016 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
3017 FILE_WRITE_ATTRIBUTES);
3018
3019 if (retries)
3020 smb2_set_replay(server, &rqst);
3021
3022 /* resource #4: response buffer */
3023 rc = cifs_send_recv(xid, ses, server,
3024 &rqst, &resp_buftype, flags, &rsp_iov);
3025 if (rc) {
3026 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
3027 trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
3028 CREATE_NOT_FILE,
3029 FILE_WRITE_ATTRIBUTES, rc);
3030 goto err_free_rsp_buf;
3031 }
3032
3033 /*
3034 * Although unlikely to be possible for rsp to be null and rc not set,
3035 * adding check below is slightly safer long term (and quiets Coverity
3036 * warning)
3037 */
3038 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
3039 if (rsp == NULL) {
3040 rc = smb_EIO(smb_eio_trace_mkdir_no_rsp);
3041 kfree(pc_buf);
3042 goto err_free_req;
3043 }
3044
3045 trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
3046 CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);
3047
3048 SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
3049
3050 /* Eventually save off posix specific response info and timestamps */
3051
3052 err_free_rsp_buf:
3053 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
3054 kfree(pc_buf);
3055 err_free_req:
3056 cifs_small_buf_release(req);
3057 err_free_path:
3058 kfree(utf16_path);
3059
3060 if (is_replayable_error(rc) &&
3061 smb2_should_replay(tcon, &retries, &cur_sleep))
3062 goto replay_again;
3063
3064 return rc;
3065 }
3066
3067 int
SMB2_open_init(struct cifs_tcon * tcon,struct TCP_Server_Info * server,struct smb_rqst * rqst,__u8 * oplock,struct cifs_open_parms * oparms,__le16 * path)3068 SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3069 struct smb_rqst *rqst, __u8 *oplock,
3070 struct cifs_open_parms *oparms, __le16 *path)
3071 {
3072 struct smb2_create_req *req;
3073 unsigned int n_iov = 2;
3074 __u32 file_attributes = 0;
3075 int copy_size;
3076 int uni_path_len;
3077 unsigned int total_len;
3078 struct kvec *iov = rqst->rq_iov;
3079 __le16 *copy_path;
3080 int rc;
3081
3082 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
3083 (void **) &req, &total_len);
3084 if (rc)
3085 return rc;
3086
3087 iov[0].iov_base = (char *)req;
3088 /* -1 since last byte is buf[0] which is sent below (path) */
3089 iov[0].iov_len = total_len - 1;
3090
3091 if (oparms->create_options & CREATE_OPTION_READONLY)
3092 file_attributes |= ATTR_READONLY;
3093 if (oparms->create_options & CREATE_OPTION_SPECIAL)
3094 file_attributes |= ATTR_SYSTEM;
3095
3096 req->ImpersonationLevel = IL_IMPERSONATION;
3097 req->DesiredAccess = cpu_to_le32(oparms->desired_access);
3098 /* File attributes ignored on open (used in create though) */
3099 req->FileAttributes = cpu_to_le32(file_attributes);
3100 req->ShareAccess = FILE_SHARE_ALL_LE;
3101
3102 req->CreateDisposition = cpu_to_le32(oparms->disposition);
3103 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
3104 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
3105
3106 /* [MS-SMB2] 2.2.13 NameOffset:
3107 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
3108 * the SMB2 header, the file name includes a prefix that will
3109 * be processed during DFS name normalization as specified in
3110 * section 3.3.5.9. Otherwise, the file name is relative to
3111 * the share that is identified by the TreeId in the SMB2
3112 * header.
3113 */
3114 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
3115 int name_len;
3116
3117 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
3118 rc = alloc_path_with_tree_prefix(©_path, ©_size,
3119 &name_len,
3120 tcon->tree_name, path);
3121 if (rc)
3122 return rc;
3123 req->NameLength = cpu_to_le16(name_len * 2);
3124 uni_path_len = copy_size;
3125 path = copy_path;
3126 } else {
3127 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
3128 /* MUST set path len (NameLength) to 0 opening root of share */
3129 req->NameLength = cpu_to_le16(uni_path_len - 2);
3130 copy_size = round_up(uni_path_len, 8);
3131 copy_path = kzalloc(copy_size, GFP_KERNEL);
3132 if (!copy_path)
3133 return -ENOMEM;
3134 memcpy((char *)copy_path, (const char *)path,
3135 uni_path_len);
3136 uni_path_len = copy_size;
3137 path = copy_path;
3138 }
3139
3140 iov[1].iov_len = uni_path_len;
3141 iov[1].iov_base = path;
3142
3143 if ((!server->oplocks) || (tcon->no_lease))
3144 *oplock = SMB2_OPLOCK_LEVEL_NONE;
3145
3146 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
3147 *oplock == SMB2_OPLOCK_LEVEL_NONE)
3148 req->RequestedOplockLevel = *oplock;
3149 else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
3150 (oparms->create_options & CREATE_NOT_FILE))
3151 req->RequestedOplockLevel = *oplock; /* no srv lease support */
3152 else {
3153 rc = add_lease_context(server, req, iov, &n_iov,
3154 oparms->fid->lease_key, oplock,
3155 oparms->fid->parent_lease_key,
3156 oparms->lease_flags);
3157 if (rc)
3158 return rc;
3159 }
3160
3161 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
3162 rc = add_durable_context(iov, &n_iov, oparms,
3163 tcon->use_persistent);
3164 if (rc)
3165 return rc;
3166 }
3167
3168 if (tcon->posix_extensions) {
3169 rc = add_posix_context(iov, &n_iov, oparms->mode);
3170 if (rc)
3171 return rc;
3172 }
3173
3174 if (tcon->snapshot_time) {
3175 cifs_dbg(FYI, "adding snapshot context\n");
3176 rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
3177 if (rc)
3178 return rc;
3179 }
3180
3181 if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) {
3182 bool set_mode;
3183 bool set_owner;
3184
3185 if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) &&
3186 (oparms->mode != ACL_NO_MODE))
3187 set_mode = true;
3188 else {
3189 set_mode = false;
3190 oparms->mode = ACL_NO_MODE;
3191 }
3192
3193 if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
3194 set_owner = true;
3195 else
3196 set_owner = false;
3197
3198 if (set_owner | set_mode) {
3199 cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode);
3200 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
3201 if (rc)
3202 return rc;
3203 }
3204 }
3205
3206 add_query_id_context(iov, &n_iov);
3207 add_ea_context(oparms, iov, &n_iov);
3208
3209 if (n_iov > 2) {
3210 /*
3211 * We have create contexts behind iov[1] (the file
3212 * name), point at them from the main create request
3213 */
3214 req->CreateContextsOffset = cpu_to_le32(
3215 sizeof(struct smb2_create_req) +
3216 iov[1].iov_len);
3217 req->CreateContextsLength = 0;
3218
3219 for (unsigned int i = 2; i < (n_iov-1); i++) {
3220 struct kvec *v = &iov[i];
3221 size_t len = v->iov_len;
3222 struct create_context *cctx =
3223 (struct create_context *)v->iov_base;
3224
3225 cctx->Next = cpu_to_le32(len);
3226 le32_add_cpu(&req->CreateContextsLength, len);
3227 }
3228 le32_add_cpu(&req->CreateContextsLength,
3229 iov[n_iov-1].iov_len);
3230 }
3231
3232 rqst->rq_nvec = n_iov;
3233 return 0;
3234 }
3235
3236 /* rq_iov[0] is the request and is released by cifs_small_buf_release().
3237 * All other vectors are freed by kfree().
3238 */
3239 void
SMB2_open_free(struct smb_rqst * rqst)3240 SMB2_open_free(struct smb_rqst *rqst)
3241 {
3242 int i;
3243
3244 if (rqst && rqst->rq_iov) {
3245 cifs_small_buf_release(rqst->rq_iov[0].iov_base);
3246 for (i = 1; i < rqst->rq_nvec; i++)
3247 if (rqst->rq_iov[i].iov_base != smb2_padding)
3248 kfree(rqst->rq_iov[i].iov_base);
3249 }
3250 }
3251
3252 int
SMB2_open(const unsigned int xid,struct cifs_open_parms * oparms,__le16 * path,__u8 * oplock,struct smb2_file_all_info * buf,struct create_posix_rsp * posix,struct kvec * err_iov,int * buftype)3253 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
3254 __u8 *oplock, struct smb2_file_all_info *buf,
3255 struct create_posix_rsp *posix,
3256 struct kvec *err_iov, int *buftype)
3257 {
3258 struct smb_rqst rqst;
3259 struct smb2_create_rsp *rsp = NULL;
3260 struct cifs_tcon *tcon = oparms->tcon;
3261 struct cifs_ses *ses = tcon->ses;
3262 struct TCP_Server_Info *server;
3263 struct kvec iov[SMB2_CREATE_IOV_SIZE];
3264 struct kvec rsp_iov = {NULL, 0};
3265 int resp_buftype = CIFS_NO_BUFFER;
3266 int rc = 0;
3267 int flags = 0;
3268 int retries = 0, cur_sleep = 1;
3269
3270 replay_again:
3271 /* reinitialize for possible replay */
3272 flags = 0;
3273 server = cifs_pick_channel(ses);
3274 oparms->replay = !!(retries);
3275
3276 cifs_dbg(FYI, "create/open\n");
3277 if (!ses || !server)
3278 return smb_EIO(smb_eio_trace_null_pointers);
3279
3280 if (smb3_encryption_required(tcon))
3281 flags |= CIFS_TRANSFORM_REQ;
3282
3283 memset(&rqst, 0, sizeof(struct smb_rqst));
3284 memset(&iov, 0, sizeof(iov));
3285 rqst.rq_iov = iov;
3286 rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
3287
3288 rc = SMB2_open_init(tcon, server,
3289 &rqst, oplock, oparms, path);
3290 if (rc)
3291 goto creat_exit;
3292
3293 trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
3294 oparms->create_options, oparms->desired_access);
3295
3296 if (retries)
3297 smb2_set_replay(server, &rqst);
3298
3299 rc = cifs_send_recv(xid, ses, server,
3300 &rqst, &resp_buftype, flags,
3301 &rsp_iov);
3302 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
3303
3304 if (rc != 0) {
3305 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
3306 if (err_iov && rsp) {
3307 *err_iov = rsp_iov;
3308 *buftype = resp_buftype;
3309 resp_buftype = CIFS_NO_BUFFER;
3310 rsp = NULL;
3311 }
3312 trace_smb3_open_err(xid, tcon->tid, ses->Suid,
3313 oparms->create_options, oparms->desired_access, rc);
3314 if (rc == -EREMCHG) {
3315 pr_warn_once("server share %s deleted\n",
3316 tcon->tree_name);
3317 tcon->need_reconnect = true;
3318 }
3319 goto creat_exit;
3320 } else if (rsp == NULL) /* unlikely to happen, but safer to check */
3321 goto creat_exit;
3322 else
3323 trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
3324 oparms->create_options, oparms->desired_access);
3325
3326 atomic_inc(&tcon->num_remote_opens);
3327 oparms->fid->persistent_fid = rsp->PersistentFileId;
3328 oparms->fid->volatile_fid = rsp->VolatileFileId;
3329 oparms->fid->access = oparms->desired_access;
3330 #ifdef CONFIG_CIFS_DEBUG2
3331 oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId);
3332 #endif /* CIFS_DEBUG2 */
3333
3334 if (buf) {
3335 buf->CreationTime = rsp->CreationTime;
3336 buf->LastAccessTime = rsp->LastAccessTime;
3337 buf->LastWriteTime = rsp->LastWriteTime;
3338 buf->ChangeTime = rsp->ChangeTime;
3339 buf->AllocationSize = rsp->AllocationSize;
3340 buf->EndOfFile = rsp->EndofFile;
3341 buf->Attributes = rsp->FileAttributes;
3342 buf->NumberOfLinks = cpu_to_le32(1);
3343 buf->DeletePending = 0; /* successful open = not delete pending */
3344 }
3345
3346
3347 rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch,
3348 oparms->fid->lease_key, oplock, buf, posix);
3349 creat_exit:
3350 SMB2_open_free(&rqst);
3351 free_rsp_buf(resp_buftype, rsp);
3352
3353 if (is_replayable_error(rc) &&
3354 smb2_should_replay(tcon, &retries, &cur_sleep))
3355 goto replay_again;
3356
3357 return rc;
3358 }
3359
3360 int
SMB2_ioctl_init(struct cifs_tcon * tcon,struct TCP_Server_Info * server,struct smb_rqst * rqst,u64 persistent_fid,u64 volatile_fid,u32 opcode,char * in_data,u32 indatalen,__u32 max_response_size)3361 SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3362 struct smb_rqst *rqst,
3363 u64 persistent_fid, u64 volatile_fid, u32 opcode,
3364 char *in_data, u32 indatalen,
3365 __u32 max_response_size)
3366 {
3367 struct smb2_ioctl_req *req;
3368 struct kvec *iov = rqst->rq_iov;
3369 unsigned int total_len;
3370 int rc;
3371 char *in_data_buf;
3372
3373 rc = smb2_ioctl_req_init(opcode, tcon, server,
3374 (void **) &req, &total_len);
3375 if (rc)
3376 return rc;
3377
3378 if (indatalen) {
3379 /*
3380 * indatalen is usually small at a couple of bytes max, so
3381 * just allocate through generic pool
3382 */
3383 in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS);
3384 if (!in_data_buf) {
3385 cifs_small_buf_release(req);
3386 return -ENOMEM;
3387 }
3388 }
3389
3390 req->CtlCode = cpu_to_le32(opcode);
3391 req->PersistentFileId = persistent_fid;
3392 req->VolatileFileId = volatile_fid;
3393
3394 iov[0].iov_base = (char *)req;
3395 /*
3396 * If no input data, the size of ioctl struct in
3397 * protocol spec still includes a 1 byte data buffer,
3398 * but if input data passed to ioctl, we do not
3399 * want to double count this, so we do not send
3400 * the dummy one byte of data in iovec[0] if sending
3401 * input data (in iovec[1]).
3402 */
3403 if (indatalen) {
3404 req->InputCount = cpu_to_le32(indatalen);
3405 /* do not set InputOffset if no input data */
3406 req->InputOffset =
3407 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
3408 rqst->rq_nvec = 2;
3409 iov[0].iov_len = total_len - 1;
3410 iov[1].iov_base = in_data_buf;
3411 iov[1].iov_len = indatalen;
3412 } else {
3413 rqst->rq_nvec = 1;
3414 iov[0].iov_len = total_len;
3415 }
3416
3417 req->OutputOffset = 0;
3418 req->OutputCount = 0; /* MBZ */
3419
3420 /*
3421 * In most cases max_response_size is set to 16K (CIFSMaxBufSize)
3422 * We Could increase default MaxOutputResponse, but that could require
3423 * more credits. Windows typically sets this smaller, but for some
3424 * ioctls it may be useful to allow server to send more. No point
3425 * limiting what the server can send as long as fits in one credit
3426 * We can not handle more than CIFS_MAX_BUF_SIZE yet but may want
3427 * to increase this limit up in the future.
3428 * Note that for snapshot queries that servers like Azure expect that
3429 * the first query be minimal size (and just used to get the number/size
3430 * of previous versions) so response size must be specified as EXACTLY
3431 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
3432 * of eight bytes. Currently that is the only case where we set max
3433 * response size smaller.
3434 */
3435 req->MaxOutputResponse = cpu_to_le32(max_response_size);
3436 req->hdr.CreditCharge =
3437 cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
3438 SMB2_MAX_BUFFER_SIZE));
3439 /* always an FSCTL (for now) */
3440 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
3441
3442 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
3443 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
3444 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
3445
3446 return 0;
3447 }
3448
3449 void
SMB2_ioctl_free(struct smb_rqst * rqst)3450 SMB2_ioctl_free(struct smb_rqst *rqst)
3451 {
3452 int i;
3453
3454 if (rqst && rqst->rq_iov) {
3455 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3456 for (i = 1; i < rqst->rq_nvec; i++)
3457 if (rqst->rq_iov[i].iov_base != smb2_padding)
3458 kfree(rqst->rq_iov[i].iov_base);
3459 }
3460 }
3461
3462
3463 /*
3464 * SMB2 IOCTL is used for both IOCTLs and FSCTLs
3465 */
3466 int
SMB2_ioctl(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,u32 opcode,char * in_data,u32 indatalen,u32 max_out_data_len,char ** out_data,u32 * plen)3467 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
3468 u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen,
3469 u32 max_out_data_len, char **out_data,
3470 u32 *plen /* returned data len */)
3471 {
3472 struct smb_rqst rqst;
3473 struct smb2_ioctl_rsp *rsp = NULL;
3474 struct cifs_ses *ses;
3475 struct TCP_Server_Info *server;
3476 struct kvec iov[SMB2_IOCTL_IOV_SIZE];
3477 struct kvec rsp_iov = {NULL, 0};
3478 int resp_buftype = CIFS_NO_BUFFER;
3479 int rc = 0;
3480 int flags = 0;
3481 int retries = 0, cur_sleep = 1;
3482
3483 if (!tcon)
3484 return smb_EIO(smb_eio_trace_null_pointers);
3485
3486 ses = tcon->ses;
3487 if (!ses)
3488 return smb_EIO(smb_eio_trace_null_pointers);
3489
3490 replay_again:
3491 /* reinitialize for possible replay */
3492 flags = 0;
3493 server = cifs_pick_channel(ses);
3494
3495 if (!server)
3496 return smb_EIO(smb_eio_trace_null_pointers);
3497
3498 cifs_dbg(FYI, "SMB2 IOCTL\n");
3499
3500 if (out_data != NULL)
3501 *out_data = NULL;
3502
3503 /* zero out returned data len, in case of error */
3504 if (plen)
3505 *plen = 0;
3506
3507 if (smb3_encryption_required(tcon))
3508 flags |= CIFS_TRANSFORM_REQ;
3509
3510 memset(&rqst, 0, sizeof(struct smb_rqst));
3511 memset(&iov, 0, sizeof(iov));
3512 rqst.rq_iov = iov;
3513 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
3514
3515 rc = SMB2_ioctl_init(tcon, server,
3516 &rqst, persistent_fid, volatile_fid, opcode,
3517 in_data, indatalen, max_out_data_len);
3518 if (rc)
3519 goto ioctl_exit;
3520
3521 if (retries)
3522 smb2_set_replay(server, &rqst);
3523
3524 rc = cifs_send_recv(xid, ses, server,
3525 &rqst, &resp_buftype, flags,
3526 &rsp_iov);
3527 rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
3528
3529 if (rc != 0)
3530 trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
3531 ses->Suid, 0, opcode, rc);
3532
3533 if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) {
3534 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3535 goto ioctl_exit;
3536 } else if (rc == -EINVAL) {
3537 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
3538 (opcode != FSCTL_SRV_COPYCHUNK)) {
3539 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3540 goto ioctl_exit;
3541 }
3542 } else if (rc == -E2BIG) {
3543 if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) {
3544 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3545 goto ioctl_exit;
3546 }
3547 }
3548
3549 /* check if caller wants to look at return data or just return rc */
3550 if ((plen == NULL) || (out_data == NULL))
3551 goto ioctl_exit;
3552
3553 /*
3554 * Although unlikely to be possible for rsp to be null and rc not set,
3555 * adding check below is slightly safer long term (and quiets Coverity
3556 * warning)
3557 */
3558 if (rsp == NULL) {
3559 rc = smb_EIO(smb_eio_trace_ioctl_no_rsp);
3560 goto ioctl_exit;
3561 }
3562
3563 *plen = le32_to_cpu(rsp->OutputCount);
3564
3565 /* We check for obvious errors in the output buffer length and offset */
3566 if (*plen == 0)
3567 goto ioctl_exit; /* server returned no data */
3568 else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
3569 cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
3570 rc = smb_EIO2(smb_eio_trace_ioctl_data_len, *plen, rsp_iov.iov_len);
3571 *plen = 0;
3572 goto ioctl_exit;
3573 }
3574
3575 u32 outoff = le32_to_cpu(rsp->OutputOffset);
3576
3577 if (rsp_iov.iov_len - *plen < outoff) {
3578 cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n",
3579 *plen, outoff);
3580 rc = smb_EIO2(smb_eio_trace_ioctl_out_off, rsp_iov.iov_len - *plen, outoff);
3581 *plen = 0;
3582 goto ioctl_exit;
3583 }
3584
3585 *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
3586 *plen, GFP_KERNEL);
3587 if (*out_data == NULL) {
3588 rc = -ENOMEM;
3589 goto ioctl_exit;
3590 }
3591
3592 ioctl_exit:
3593 SMB2_ioctl_free(&rqst);
3594 free_rsp_buf(resp_buftype, rsp);
3595
3596 if (is_replayable_error(rc) &&
3597 smb2_should_replay(tcon, &retries, &cur_sleep))
3598 goto replay_again;
3599
3600 return rc;
3601 }
3602
3603 /*
3604 * Individual callers to ioctl worker function follow
3605 */
3606
3607 int
SMB2_set_compression(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid)3608 SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
3609 u64 persistent_fid, u64 volatile_fid)
3610 {
3611 int rc;
3612 struct compress_ioctl fsctl_input;
3613 char *ret_data = NULL;
3614
3615 fsctl_input.CompressionState =
3616 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
3617
3618 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
3619 FSCTL_SET_COMPRESSION,
3620 (char *)&fsctl_input /* data input */,
3621 2 /* in data len */, CIFSMaxBufSize /* max out data */,
3622 &ret_data /* out data */, NULL);
3623
3624 cifs_dbg(FYI, "set compression rc %d\n", rc);
3625
3626 return rc;
3627 }
3628
3629 int
SMB2_close_init(struct cifs_tcon * tcon,struct TCP_Server_Info * server,struct smb_rqst * rqst,u64 persistent_fid,u64 volatile_fid,bool query_attrs)3630 SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3631 struct smb_rqst *rqst,
3632 u64 persistent_fid, u64 volatile_fid, bool query_attrs)
3633 {
3634 struct smb2_close_req *req;
3635 struct kvec *iov = rqst->rq_iov;
3636 unsigned int total_len;
3637 int rc;
3638
3639 rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server,
3640 (void **) &req, &total_len);
3641 if (rc)
3642 return rc;
3643
3644 req->PersistentFileId = persistent_fid;
3645 req->VolatileFileId = volatile_fid;
3646 if (query_attrs)
3647 req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
3648 else
3649 req->Flags = 0;
3650 iov[0].iov_base = (char *)req;
3651 iov[0].iov_len = total_len;
3652
3653 return 0;
3654 }
3655
3656 void
SMB2_close_free(struct smb_rqst * rqst)3657 SMB2_close_free(struct smb_rqst *rqst)
3658 {
3659 if (rqst && rqst->rq_iov)
3660 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3661 }
3662
3663 int
__SMB2_close(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,struct smb2_file_network_open_info * pbuf)3664 __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3665 u64 persistent_fid, u64 volatile_fid,
3666 struct smb2_file_network_open_info *pbuf)
3667 {
3668 struct smb_rqst rqst;
3669 struct smb2_close_rsp *rsp = NULL;
3670 struct cifs_ses *ses = tcon->ses;
3671 struct TCP_Server_Info *server;
3672 struct kvec iov[1];
3673 struct kvec rsp_iov;
3674 int resp_buftype = CIFS_NO_BUFFER;
3675 int rc = 0;
3676 int flags = 0;
3677 bool query_attrs = false;
3678 int retries = 0, cur_sleep = 1;
3679
3680 replay_again:
3681 /* reinitialize for possible replay */
3682 flags = 0;
3683 query_attrs = false;
3684 server = cifs_pick_channel(ses);
3685
3686 cifs_dbg(FYI, "Close\n");
3687
3688 if (!ses || !server)
3689 return smb_EIO(smb_eio_trace_null_pointers);
3690
3691 if (smb3_encryption_required(tcon))
3692 flags |= CIFS_TRANSFORM_REQ;
3693
3694 memset(&rqst, 0, sizeof(struct smb_rqst));
3695 memset(&iov, 0, sizeof(iov));
3696 rqst.rq_iov = iov;
3697 rqst.rq_nvec = 1;
3698
3699 /* check if need to ask server to return timestamps in close response */
3700 if (pbuf)
3701 query_attrs = true;
3702
3703 trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
3704 rc = SMB2_close_init(tcon, server,
3705 &rqst, persistent_fid, volatile_fid,
3706 query_attrs);
3707 if (rc)
3708 goto close_exit;
3709
3710 if (retries)
3711 smb2_set_replay(server, &rqst);
3712
3713 rc = cifs_send_recv(xid, ses, server,
3714 &rqst, &resp_buftype, flags, &rsp_iov);
3715 rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
3716
3717 if (rc != 0) {
3718 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
3719 trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
3720 rc);
3721 goto close_exit;
3722 } else {
3723 trace_smb3_close_done(xid, persistent_fid, tcon->tid,
3724 ses->Suid);
3725 if (pbuf)
3726 memcpy(&pbuf->network_open_info,
3727 &rsp->network_open_info,
3728 sizeof(pbuf->network_open_info));
3729 atomic_dec(&tcon->num_remote_opens);
3730 }
3731
3732 close_exit:
3733 SMB2_close_free(&rqst);
3734 free_rsp_buf(resp_buftype, rsp);
3735
3736 /* retry close in a worker thread if this one is interrupted */
3737 if (is_interrupt_error(rc)) {
3738 int tmp_rc;
3739
3740 tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
3741 volatile_fid);
3742 if (tmp_rc)
3743 cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
3744 persistent_fid, tmp_rc);
3745 }
3746
3747 if (is_replayable_error(rc) &&
3748 smb2_should_replay(tcon, &retries, &cur_sleep))
3749 goto replay_again;
3750
3751 return rc;
3752 }
3753
3754 int
SMB2_close(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid)3755 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3756 u64 persistent_fid, u64 volatile_fid)
3757 {
3758 return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL);
3759 }
3760
3761 int
smb2_validate_iov(unsigned int offset,unsigned int buffer_length,struct kvec * iov,unsigned int min_buf_size)3762 smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
3763 struct kvec *iov, unsigned int min_buf_size)
3764 {
3765 unsigned int smb_len = iov->iov_len;
3766 char *end_of_smb = smb_len + (char *)iov->iov_base;
3767 char *begin_of_buf = offset + (char *)iov->iov_base;
3768 char *end_of_buf = begin_of_buf + buffer_length;
3769
3770
3771 if (buffer_length < min_buf_size) {
3772 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
3773 buffer_length, min_buf_size);
3774 return -EINVAL;
3775 }
3776
3777 /* check if beyond RFC1001 maximum length */
3778 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
3779 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
3780 buffer_length, smb_len);
3781 return -EINVAL;
3782 }
3783
3784 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
3785 cifs_dbg(VFS, "Invalid server response, bad offset to data\n");
3786 return -EINVAL;
3787 }
3788
3789 return 0;
3790 }
3791
3792 /*
3793 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
3794 * Caller must free buffer.
3795 */
3796 int
smb2_validate_and_copy_iov(unsigned int offset,unsigned int buffer_length,struct kvec * iov,unsigned int minbufsize,char * data)3797 smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
3798 struct kvec *iov, unsigned int minbufsize,
3799 char *data)
3800 {
3801 char *begin_of_buf = offset + (char *)iov->iov_base;
3802 int rc;
3803
3804 if (!data)
3805 return -EINVAL;
3806
3807 rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
3808 if (rc)
3809 return rc;
3810
3811 memcpy(data, begin_of_buf, minbufsize);
3812
3813 return 0;
3814 }
3815
3816 int
SMB2_query_info_init(struct cifs_tcon * tcon,struct TCP_Server_Info * server,struct smb_rqst * rqst,u64 persistent_fid,u64 volatile_fid,u8 info_class,u8 info_type,u32 additional_info,size_t output_len,size_t input_len,void * input)3817 SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3818 struct smb_rqst *rqst,
3819 u64 persistent_fid, u64 volatile_fid,
3820 u8 info_class, u8 info_type, u32 additional_info,
3821 size_t output_len, size_t input_len, void *input)
3822 {
3823 struct smb2_query_info_req *req;
3824 struct kvec *iov = rqst->rq_iov;
3825 unsigned int total_len;
3826 size_t len;
3827 int rc;
3828
3829 if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) ||
3830 len > CIFSMaxBufSize))
3831 return -EINVAL;
3832
3833 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
3834 (void **) &req, &total_len);
3835 if (rc)
3836 return rc;
3837
3838 req->InfoType = info_type;
3839 req->FileInfoClass = info_class;
3840 req->PersistentFileId = persistent_fid;
3841 req->VolatileFileId = volatile_fid;
3842 req->AdditionalInformation = cpu_to_le32(additional_info);
3843
3844 req->OutputBufferLength = cpu_to_le32(output_len);
3845 if (input_len) {
3846 req->InputBufferLength = cpu_to_le32(input_len);
3847 /* total_len for smb query request never close to le16 max */
3848 req->InputBufferOffset = cpu_to_le16(total_len - 1);
3849 memcpy(req->Buffer, input, input_len);
3850 }
3851
3852 iov[0].iov_base = (char *)req;
3853 /* 1 for Buffer */
3854 iov[0].iov_len = len;
3855 return 0;
3856 }
3857
3858 void
SMB2_query_info_free(struct smb_rqst * rqst)3859 SMB2_query_info_free(struct smb_rqst *rqst)
3860 {
3861 if (rqst && rqst->rq_iov)
3862 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
3863 }
3864
3865 static int
query_info(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,u8 info_class,u8 info_type,u32 additional_info,size_t output_len,size_t min_len,void ** data,u32 * dlen)3866 query_info(const unsigned int xid, struct cifs_tcon *tcon,
3867 u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
3868 u32 additional_info, size_t output_len, size_t min_len, void **data,
3869 u32 *dlen)
3870 {
3871 struct smb_rqst rqst;
3872 struct smb2_query_info_rsp *rsp = NULL;
3873 struct kvec iov[1];
3874 struct kvec rsp_iov;
3875 int rc = 0;
3876 int resp_buftype = CIFS_NO_BUFFER;
3877 struct cifs_ses *ses = tcon->ses;
3878 struct TCP_Server_Info *server;
3879 int flags = 0;
3880 bool allocated = false;
3881 int retries = 0, cur_sleep = 1;
3882
3883 cifs_dbg(FYI, "Query Info\n");
3884
3885 if (!ses)
3886 return smb_EIO(smb_eio_trace_null_pointers);
3887
3888 replay_again:
3889 /* reinitialize for possible replay */
3890 flags = 0;
3891 allocated = false;
3892 server = cifs_pick_channel(ses);
3893
3894 if (!server)
3895 return smb_EIO(smb_eio_trace_null_pointers);
3896
3897 if (smb3_encryption_required(tcon))
3898 flags |= CIFS_TRANSFORM_REQ;
3899
3900 memset(&rqst, 0, sizeof(struct smb_rqst));
3901 memset(&iov, 0, sizeof(iov));
3902 rqst.rq_iov = iov;
3903 rqst.rq_nvec = 1;
3904
3905 rc = SMB2_query_info_init(tcon, server,
3906 &rqst, persistent_fid, volatile_fid,
3907 info_class, info_type, additional_info,
3908 output_len, 0, NULL);
3909 if (rc)
3910 goto qinf_exit;
3911
3912 trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
3913 ses->Suid, info_class, (__u32)info_type);
3914
3915 if (retries)
3916 smb2_set_replay(server, &rqst);
3917
3918 rc = cifs_send_recv(xid, ses, server,
3919 &rqst, &resp_buftype, flags, &rsp_iov);
3920 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
3921
3922 if (rc) {
3923 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
3924 trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
3925 ses->Suid, info_class, (__u32)info_type, rc);
3926 goto qinf_exit;
3927 }
3928
3929 trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
3930 ses->Suid, info_class, (__u32)info_type);
3931
3932 if (dlen) {
3933 *dlen = le32_to_cpu(rsp->OutputBufferLength);
3934 if (!*data) {
3935 *data = kmalloc(*dlen, GFP_KERNEL);
3936 if (!*data) {
3937 cifs_tcon_dbg(VFS,
3938 "Error %d allocating memory for acl\n",
3939 rc);
3940 *dlen = 0;
3941 rc = -ENOMEM;
3942 goto qinf_exit;
3943 }
3944 allocated = true;
3945 }
3946 }
3947
3948 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
3949 le32_to_cpu(rsp->OutputBufferLength),
3950 &rsp_iov, dlen ? *dlen : min_len, *data);
3951 if (rc && allocated) {
3952 kfree(*data);
3953 *data = NULL;
3954 *dlen = 0;
3955 }
3956
3957 qinf_exit:
3958 SMB2_query_info_free(&rqst);
3959 free_rsp_buf(resp_buftype, rsp);
3960
3961 if (is_replayable_error(rc) &&
3962 smb2_should_replay(tcon, &retries, &cur_sleep))
3963 goto replay_again;
3964
3965 return rc;
3966 }
3967
SMB2_query_info(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,struct smb2_file_all_info * data)3968 int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3969 u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
3970 {
3971 return query_info(xid, tcon, persistent_fid, volatile_fid,
3972 FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
3973 sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
3974 sizeof(struct smb2_file_all_info), (void **)&data,
3975 NULL);
3976 }
3977
3978 #if 0
3979 /* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */
3980 int
3981 SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3982 u64 persistent_fid, u64 volatile_fid,
3983 struct smb311_posix_qinfo *data, u32 *plen)
3984 {
3985 size_t output_len = sizeof(struct smb311_posix_qinfo *) +
3986 (sizeof(struct smb_sid) * 2) + (PATH_MAX * 2);
3987 *plen = 0;
3988
3989 return query_info(xid, tcon, persistent_fid, volatile_fid,
3990 SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0,
3991 output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen);
3992 /* Note caller must free "data" (passed in above). It may be allocated in query_info call */
3993 }
3994 #endif
3995
3996 int
SMB2_query_acl(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,void ** data,u32 * plen,u32 extra_info)3997 SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
3998 u64 persistent_fid, u64 volatile_fid,
3999 void **data, u32 *plen, u32 extra_info)
4000 {
4001 *plen = 0;
4002
4003 return query_info(xid, tcon, persistent_fid, volatile_fid,
4004 0, SMB2_O_INFO_SECURITY, extra_info,
4005 SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
4006 }
4007
4008 int
SMB2_get_srv_num(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,__le64 * uniqueid)4009 SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
4010 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
4011 {
4012 return query_info(xid, tcon, persistent_fid, volatile_fid,
4013 FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
4014 sizeof(struct smb2_file_internal_info),
4015 sizeof(struct smb2_file_internal_info),
4016 (void **)&uniqueid, NULL);
4017 }
4018
4019 /*
4020 * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory
4021 * See MS-SMB2 2.2.35 and 2.2.36
4022 */
4023
4024 static int
SMB2_notify_init(const unsigned int xid,struct smb_rqst * rqst,struct cifs_tcon * tcon,struct TCP_Server_Info * server,u64 persistent_fid,u64 volatile_fid,u32 completion_filter,bool watch_tree)4025 SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
4026 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
4027 u64 persistent_fid, u64 volatile_fid,
4028 u32 completion_filter, bool watch_tree)
4029 {
4030 struct smb2_change_notify_req *req;
4031 struct kvec *iov = rqst->rq_iov;
4032 unsigned int total_len;
4033 int rc;
4034
4035 rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server,
4036 (void **) &req, &total_len);
4037 if (rc)
4038 return rc;
4039
4040 req->PersistentFileId = persistent_fid;
4041 req->VolatileFileId = volatile_fid;
4042 /* See note 354 of MS-SMB2, 64K max */
4043 req->OutputBufferLength =
4044 cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
4045 req->CompletionFilter = cpu_to_le32(completion_filter);
4046 if (watch_tree)
4047 req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
4048 else
4049 req->Flags = 0;
4050
4051 iov[0].iov_base = (char *)req;
4052 iov[0].iov_len = total_len;
4053
4054 return 0;
4055 }
4056
4057 int
SMB2_change_notify(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,bool watch_tree,u32 completion_filter,u32 max_out_data_len,char ** out_data,u32 * plen)4058 SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
4059 u64 persistent_fid, u64 volatile_fid, bool watch_tree,
4060 u32 completion_filter, u32 max_out_data_len, char **out_data,
4061 u32 *plen /* returned data len */)
4062 {
4063 struct cifs_ses *ses = tcon->ses;
4064 struct TCP_Server_Info *server;
4065 struct smb_rqst rqst;
4066 struct smb2_change_notify_rsp *smb_rsp;
4067 struct kvec iov[1];
4068 struct kvec rsp_iov = {NULL, 0};
4069 int resp_buftype = CIFS_NO_BUFFER;
4070 int flags = 0;
4071 int rc = 0;
4072 int retries = 0, cur_sleep = 1;
4073
4074 replay_again:
4075 /* reinitialize for possible replay */
4076 flags = 0;
4077 server = cifs_pick_channel(ses);
4078
4079 cifs_dbg(FYI, "change notify\n");
4080 if (!ses || !server)
4081 return smb_EIO(smb_eio_trace_null_pointers);
4082
4083 if (smb3_encryption_required(tcon))
4084 flags |= CIFS_TRANSFORM_REQ;
4085
4086 memset(&rqst, 0, sizeof(struct smb_rqst));
4087 memset(&iov, 0, sizeof(iov));
4088 if (plen)
4089 *plen = 0;
4090
4091 rqst.rq_iov = iov;
4092 rqst.rq_nvec = 1;
4093
4094 rc = SMB2_notify_init(xid, &rqst, tcon, server,
4095 persistent_fid, volatile_fid,
4096 completion_filter, watch_tree);
4097 if (rc)
4098 goto cnotify_exit;
4099
4100 trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
4101 (u8)watch_tree, completion_filter);
4102
4103 if (retries)
4104 smb2_set_replay(server, &rqst);
4105
4106 rc = cifs_send_recv(xid, ses, server,
4107 &rqst, &resp_buftype, flags, &rsp_iov);
4108
4109 if (rc != 0) {
4110 cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
4111 trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid,
4112 (u8)watch_tree, completion_filter, rc);
4113 } else {
4114 trace_smb3_notify_done(xid, persistent_fid, tcon->tid,
4115 ses->Suid, (u8)watch_tree, completion_filter);
4116 /* validate that notify information is plausible */
4117 if ((rsp_iov.iov_base == NULL) ||
4118 (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp) + 1))
4119 goto cnotify_exit;
4120
4121 smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base;
4122
4123 rc = smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset),
4124 le32_to_cpu(smb_rsp->OutputBufferLength),
4125 &rsp_iov,
4126 sizeof(struct file_notify_information));
4127 if (rc)
4128 goto cnotify_exit;
4129
4130 *out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset),
4131 le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL);
4132 if (*out_data == NULL) {
4133 rc = -ENOMEM;
4134 goto cnotify_exit;
4135 } else if (plen)
4136 *plen = le32_to_cpu(smb_rsp->OutputBufferLength);
4137 }
4138
4139 cnotify_exit:
4140 if (rqst.rq_iov)
4141 cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
4142 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4143
4144 if (is_replayable_error(rc) &&
4145 smb2_should_replay(tcon, &retries, &cur_sleep))
4146 goto replay_again;
4147
4148 return rc;
4149 }
4150
4151
4152
4153 /*
4154 * This is a no-op for now. We're not really interested in the reply, but
4155 * rather in the fact that the server sent one and that server->lstrp
4156 * gets updated.
4157 *
4158 * FIXME: maybe we should consider checking that the reply matches request?
4159 */
4160 static void
smb2_echo_callback(struct TCP_Server_Info * server,struct mid_q_entry * mid)4161 smb2_echo_callback(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4162 {
4163 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
4164 struct cifs_credits credits = { .value = 0, .instance = 0 };
4165
4166 if (mid->mid_state == MID_RESPONSE_RECEIVED
4167 || mid->mid_state == MID_RESPONSE_MALFORMED) {
4168 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4169 credits.instance = server->reconnect_instance;
4170 }
4171
4172 release_mid(server, mid);
4173 add_credits(server, &credits, CIFS_ECHO_OP);
4174 }
4175
cifs_renegotiate_iosize(struct TCP_Server_Info * server,struct cifs_tcon * tcon)4176 static void cifs_renegotiate_iosize(struct TCP_Server_Info *server,
4177 struct cifs_tcon *tcon)
4178 {
4179 struct cifs_sb_info *cifs_sb;
4180
4181 if (server == NULL || tcon == NULL)
4182 return;
4183
4184 spin_lock(&tcon->sb_list_lock);
4185 list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link)
4186 cifs_negotiate_iosize(server, cifs_sb->ctx, tcon);
4187 spin_unlock(&tcon->sb_list_lock);
4188 }
4189
smb2_reconnect_server(struct work_struct * work)4190 void smb2_reconnect_server(struct work_struct *work)
4191 {
4192 struct TCP_Server_Info *server = container_of(work,
4193 struct TCP_Server_Info, reconnect.work);
4194 struct TCP_Server_Info *pserver;
4195 struct cifs_ses *ses, *ses2;
4196 struct cifs_tcon *tcon, *tcon2;
4197 struct list_head tmp_list, tmp_ses_list;
4198 bool ses_exist = false;
4199 bool tcon_selected = false;
4200 int rc;
4201 bool resched = false;
4202
4203 /* first check if ref count has reached 0, if not inc ref count */
4204 spin_lock(&cifs_tcp_ses_lock);
4205 if (!server->srv_count) {
4206 spin_unlock(&cifs_tcp_ses_lock);
4207 return;
4208 }
4209 server->srv_count++;
4210 spin_unlock(&cifs_tcp_ses_lock);
4211
4212 /* If server is a channel, select the primary channel */
4213 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
4214
4215 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
4216 mutex_lock(&pserver->reconnect_mutex);
4217
4218 /* if the server is marked for termination, drop the ref count here */
4219 if (server->terminate) {
4220 cifs_put_tcp_session(server, true);
4221 mutex_unlock(&pserver->reconnect_mutex);
4222 return;
4223 }
4224
4225 INIT_LIST_HEAD(&tmp_list);
4226 INIT_LIST_HEAD(&tmp_ses_list);
4227 cifs_dbg(FYI, "Reconnecting tcons and channels\n");
4228
4229 spin_lock(&cifs_tcp_ses_lock);
4230 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
4231 spin_lock(&ses->ses_lock);
4232 if (ses->ses_status == SES_EXITING) {
4233 spin_unlock(&ses->ses_lock);
4234 continue;
4235 }
4236 spin_unlock(&ses->ses_lock);
4237
4238 tcon_selected = false;
4239
4240 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
4241 if (tcon->need_reconnect || tcon->need_reopen_files) {
4242 tcon->tc_count++;
4243 trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
4244 netfs_trace_tcon_ref_get_reconnect_server);
4245 list_add_tail(&tcon->rlist, &tmp_list);
4246 tcon_selected = true;
4247 }
4248 }
4249 /*
4250 * IPC has the same lifetime as its session and uses its
4251 * refcount.
4252 */
4253 if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
4254 list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
4255 tcon_selected = true;
4256 cifs_smb_ses_inc_refcount(ses);
4257 }
4258 /*
4259 * handle the case where channel needs to reconnect
4260 * binding session, but tcon is healthy (some other channel
4261 * is active)
4262 */
4263 spin_lock(&ses->chan_lock);
4264 if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) {
4265 list_add_tail(&ses->rlist, &tmp_ses_list);
4266 ses_exist = true;
4267 cifs_smb_ses_inc_refcount(ses);
4268 }
4269 spin_unlock(&ses->chan_lock);
4270 }
4271 spin_unlock(&cifs_tcp_ses_lock);
4272
4273 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
4274 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
4275 if (!rc) {
4276 cifs_renegotiate_iosize(server, tcon);
4277 cifs_reopen_persistent_handles(tcon);
4278 } else
4279 resched = true;
4280 list_del_init(&tcon->rlist);
4281 if (tcon->ipc)
4282 cifs_put_smb_ses(tcon->ses);
4283 else
4284 cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_reconnect_server);
4285 }
4286
4287 if (!ses_exist)
4288 goto done;
4289
4290 /* allocate a dummy tcon struct used for reconnect */
4291 tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_reconnect_server);
4292 if (!tcon) {
4293 resched = true;
4294 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
4295 list_del_init(&ses->rlist);
4296 cifs_put_smb_ses(ses);
4297 }
4298 goto done;
4299 }
4300 tcon->status = TID_GOOD;
4301 tcon->dummy = true;
4302
4303 /* now reconnect sessions for necessary channels */
4304 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
4305 tcon->ses = ses;
4306 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
4307 if (rc)
4308 resched = true;
4309 list_del_init(&ses->rlist);
4310 cifs_put_smb_ses(ses);
4311 }
4312 tconInfoFree(tcon, netfs_trace_tcon_ref_free_reconnect_server);
4313
4314 done:
4315 cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
4316 if (resched)
4317 cifs_requeue_server_reconn(server);
4318 mutex_unlock(&pserver->reconnect_mutex);
4319
4320 /* now we can safely release srv struct */
4321 cifs_put_tcp_session(server, true);
4322 }
4323
4324 int
SMB2_echo(struct TCP_Server_Info * server)4325 SMB2_echo(struct TCP_Server_Info *server)
4326 {
4327 struct smb2_echo_req *req;
4328 int rc = 0;
4329 struct kvec iov[1];
4330 struct smb_rqst rqst = { .rq_iov = iov,
4331 .rq_nvec = 1 };
4332 unsigned int total_len;
4333
4334 cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id);
4335
4336 spin_lock(&server->srv_lock);
4337 if (server->ops->need_neg &&
4338 server->ops->need_neg(server)) {
4339 spin_unlock(&server->srv_lock);
4340 /* No need to send echo on newly established connections */
4341 cifs_queue_server_reconn(server);
4342 return rc;
4343 }
4344 spin_unlock(&server->srv_lock);
4345
4346 rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
4347 (void **)&req, &total_len);
4348 if (rc)
4349 return rc;
4350
4351 req->hdr.CreditRequest = cpu_to_le16(1);
4352
4353 iov[0].iov_len = total_len;
4354 iov[0].iov_base = (char *)req;
4355
4356 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
4357 server, CIFS_ECHO_OP, NULL);
4358 if (rc)
4359 cifs_dbg(FYI, "Echo request failed: %d\n", rc);
4360
4361 cifs_small_buf_release(req);
4362 return rc;
4363 }
4364
4365 void
SMB2_flush_free(struct smb_rqst * rqst)4366 SMB2_flush_free(struct smb_rqst *rqst)
4367 {
4368 if (rqst && rqst->rq_iov)
4369 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4370 }
4371
4372 int
SMB2_flush_init(const unsigned int xid,struct smb_rqst * rqst,struct cifs_tcon * tcon,struct TCP_Server_Info * server,u64 persistent_fid,u64 volatile_fid)4373 SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
4374 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
4375 u64 persistent_fid, u64 volatile_fid)
4376 {
4377 struct smb2_flush_req *req;
4378 struct kvec *iov = rqst->rq_iov;
4379 unsigned int total_len;
4380 int rc;
4381
4382 rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server,
4383 (void **) &req, &total_len);
4384 if (rc)
4385 return rc;
4386
4387 req->PersistentFileId = persistent_fid;
4388 req->VolatileFileId = volatile_fid;
4389
4390 iov[0].iov_base = (char *)req;
4391 iov[0].iov_len = total_len;
4392
4393 return 0;
4394 }
4395
4396 int
SMB2_flush(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid)4397 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
4398 u64 volatile_fid)
4399 {
4400 struct cifs_ses *ses = tcon->ses;
4401 struct smb_rqst rqst;
4402 struct kvec iov[1];
4403 struct kvec rsp_iov = {NULL, 0};
4404 struct TCP_Server_Info *server;
4405 int resp_buftype = CIFS_NO_BUFFER;
4406 int flags = 0;
4407 int rc = 0;
4408 int retries = 0, cur_sleep = 1;
4409
4410 replay_again:
4411 /* reinitialize for possible replay */
4412 flags = 0;
4413 server = cifs_pick_channel(ses);
4414
4415 cifs_dbg(FYI, "flush\n");
4416 if (!ses || !(ses->server))
4417 return smb_EIO(smb_eio_trace_null_pointers);
4418
4419 if (smb3_encryption_required(tcon))
4420 flags |= CIFS_TRANSFORM_REQ;
4421
4422 memset(&rqst, 0, sizeof(struct smb_rqst));
4423 memset(&iov, 0, sizeof(iov));
4424 rqst.rq_iov = iov;
4425 rqst.rq_nvec = 1;
4426
4427 rc = SMB2_flush_init(xid, &rqst, tcon, server,
4428 persistent_fid, volatile_fid);
4429 if (rc)
4430 goto flush_exit;
4431
4432 trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
4433
4434 if (retries)
4435 smb2_set_replay(server, &rqst);
4436
4437 rc = cifs_send_recv(xid, ses, server,
4438 &rqst, &resp_buftype, flags, &rsp_iov);
4439
4440 if (rc != 0) {
4441 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
4442 trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
4443 rc);
4444 } else
4445 trace_smb3_flush_done(xid, persistent_fid, tcon->tid,
4446 ses->Suid);
4447
4448 flush_exit:
4449 SMB2_flush_free(&rqst);
4450 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4451
4452 if (is_replayable_error(rc) &&
4453 smb2_should_replay(tcon, &retries, &cur_sleep))
4454 goto replay_again;
4455
4456 return rc;
4457 }
4458
4459 #ifdef CONFIG_CIFS_SMB_DIRECT
smb3_use_rdma_offload(struct cifs_io_parms * io_parms)4460 static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms)
4461 {
4462 struct TCP_Server_Info *server = io_parms->server;
4463 struct cifs_tcon *tcon = io_parms->tcon;
4464
4465 /* we can only offload if we're connected */
4466 if (!server || !tcon)
4467 return false;
4468
4469 /* we can only offload on an rdma connection */
4470 if (!server->rdma || !server->smbd_conn)
4471 return false;
4472
4473 /* we don't support signed offload yet */
4474 if (server->sign)
4475 return false;
4476
4477 /* we don't support encrypted offload yet */
4478 if (smb3_encryption_required(tcon))
4479 return false;
4480
4481 /* offload also has its overhead, so only do it if desired */
4482 if (io_parms->length < server->rdma_readwrite_threshold)
4483 return false;
4484
4485 return true;
4486 }
4487 #endif /* CONFIG_CIFS_SMB_DIRECT */
4488
4489 /*
4490 * To form a chain of read requests, any read requests after the first should
4491 * have the end_of_chain boolean set to true.
4492 */
4493 static int
smb2_new_read_req(void ** buf,unsigned int * total_len,struct cifs_io_parms * io_parms,struct cifs_io_subrequest * rdata,unsigned int remaining_bytes,int request_type)4494 smb2_new_read_req(void **buf, unsigned int *total_len,
4495 struct cifs_io_parms *io_parms, struct cifs_io_subrequest *rdata,
4496 unsigned int remaining_bytes, int request_type)
4497 {
4498 int rc = -EACCES;
4499 struct smb2_read_req *req = NULL;
4500 struct smb2_hdr *shdr;
4501 struct TCP_Server_Info *server = io_parms->server;
4502
4503 rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server,
4504 (void **) &req, total_len);
4505 if (rc)
4506 return rc;
4507
4508 if (server == NULL)
4509 return -ECONNABORTED;
4510
4511 shdr = &req->hdr;
4512 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
4513
4514 req->PersistentFileId = io_parms->persistent_fid;
4515 req->VolatileFileId = io_parms->volatile_fid;
4516 req->ReadChannelInfoOffset = 0; /* reserved */
4517 req->ReadChannelInfoLength = 0; /* reserved */
4518 req->Channel = 0; /* reserved */
4519 req->MinimumCount = 0;
4520 req->Length = cpu_to_le32(io_parms->length);
4521 req->Offset = cpu_to_le64(io_parms->offset);
4522
4523 trace_smb3_read_enter(rdata ? rdata->rreq->debug_id : 0,
4524 rdata ? rdata->subreq.debug_index : 0,
4525 rdata ? rdata->xid : 0,
4526 io_parms->persistent_fid,
4527 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
4528 io_parms->offset, io_parms->length);
4529 #ifdef CONFIG_CIFS_SMB_DIRECT
4530 /*
4531 * If we want to do a RDMA write, fill in and append
4532 * smbdirect_buffer_descriptor_v1 to the end of read request
4533 */
4534 if (rdata && smb3_use_rdma_offload(io_parms)) {
4535 struct smbdirect_buffer_descriptor_v1 *v1;
4536 bool need_invalidate = server->dialect == SMB30_PROT_ID;
4537
4538 rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->subreq.io_iter,
4539 true, need_invalidate);
4540 if (!rdata->mr)
4541 return -EAGAIN;
4542
4543 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
4544 if (need_invalidate)
4545 req->Channel = SMB2_CHANNEL_RDMA_V1;
4546 req->ReadChannelInfoOffset =
4547 cpu_to_le16(offsetof(struct smb2_read_req, Buffer));
4548 req->ReadChannelInfoLength =
4549 cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1));
4550 v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0];
4551 v1->offset = cpu_to_le64(rdata->mr->mr->iova);
4552 v1->token = cpu_to_le32(rdata->mr->mr->rkey);
4553 v1->length = cpu_to_le32(rdata->mr->mr->length);
4554
4555 *total_len += sizeof(*v1) - 1;
4556 }
4557 #endif
4558 if (request_type & CHAINED_REQUEST) {
4559 if (!(request_type & END_OF_CHAIN)) {
4560 /* next 8-byte aligned request */
4561 *total_len = ALIGN(*total_len, 8);
4562 shdr->NextCommand = cpu_to_le32(*total_len);
4563 } else /* END_OF_CHAIN */
4564 shdr->NextCommand = 0;
4565 if (request_type & RELATED_REQUEST) {
4566 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
4567 /*
4568 * Related requests use info from previous read request
4569 * in chain.
4570 */
4571 shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
4572 shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF);
4573 req->PersistentFileId = (u64)-1;
4574 req->VolatileFileId = (u64)-1;
4575 }
4576 }
4577 if (remaining_bytes > io_parms->length)
4578 req->RemainingBytes = cpu_to_le32(remaining_bytes);
4579 else
4580 req->RemainingBytes = 0;
4581
4582 *buf = req;
4583 return rc;
4584 }
4585
4586 static void
smb2_readv_callback(struct TCP_Server_Info * server,struct mid_q_entry * mid)4587 smb2_readv_callback(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4588 {
4589 struct cifs_io_subrequest *rdata = mid->callback_data;
4590 struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode);
4591 struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
4592 struct smb2_hdr *shdr = (struct smb2_hdr *)rdata->iov[0].iov_base;
4593 struct cifs_credits credits = {
4594 .value = 0,
4595 .instance = 0,
4596 .rreq_debug_id = rdata->rreq->debug_id,
4597 .rreq_debug_index = rdata->subreq.debug_index,
4598 };
4599 struct smb_rqst rqst = { .rq_iov = &rdata->iov[0], .rq_nvec = 1 };
4600 unsigned int rreq_debug_id = rdata->rreq->debug_id;
4601 unsigned int subreq_debug_index = rdata->subreq.debug_index;
4602
4603 if (rdata->got_bytes) {
4604 rqst.rq_iter = rdata->subreq.io_iter;
4605 }
4606
4607 WARN_ONCE(rdata->server != server,
4608 "rdata server %p != mid server %p",
4609 rdata->server, server);
4610
4611 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu/%zu\n",
4612 __func__, mid->mid, mid->mid_state, rdata->result,
4613 rdata->got_bytes, rdata->subreq.len - rdata->subreq.transferred);
4614
4615 switch (mid->mid_state) {
4616 case MID_RESPONSE_RECEIVED:
4617 credits.value = le16_to_cpu(shdr->CreditRequest);
4618 credits.instance = server->reconnect_instance;
4619 /* result already set, check signature */
4620 if (server->sign && !mid->decrypted) {
4621 int rc;
4622
4623 iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes);
4624 rc = smb2_verify_signature(&rqst, server);
4625 if (rc)
4626 cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n",
4627 rc);
4628 }
4629 /* FIXME: should this be counted toward the initiating task? */
4630 task_io_account_read(rdata->got_bytes);
4631 cifs_stats_bytes_read(tcon, rdata->got_bytes);
4632 break;
4633 case MID_REQUEST_SUBMITTED:
4634 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_req_submitted);
4635 goto do_retry;
4636 case MID_RETRY_NEEDED:
4637 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_retry_needed);
4638 do_retry:
4639 __set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
4640 rdata->result = -EAGAIN;
4641 if (server->sign && rdata->got_bytes)
4642 /* reset bytes number since we can not check a sign */
4643 rdata->got_bytes = 0;
4644 /* FIXME: should this be counted toward the initiating task? */
4645 task_io_account_read(rdata->got_bytes);
4646 cifs_stats_bytes_read(tcon, rdata->got_bytes);
4647 break;
4648 case MID_RESPONSE_MALFORMED:
4649 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_malformed);
4650 credits.value = le16_to_cpu(shdr->CreditRequest);
4651 credits.instance = server->reconnect_instance;
4652 rdata->result = smb_EIO(smb_eio_trace_read_rsp_malformed);
4653 break;
4654 default:
4655 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_unknown);
4656 rdata->result = smb_EIO1(smb_eio_trace_read_mid_state_unknown,
4657 mid->mid_state);
4658 break;
4659 }
4660 #ifdef CONFIG_CIFS_SMB_DIRECT
4661 /*
4662 * If this rdata has a memory registered, the MR can be freed
4663 * MR needs to be freed as soon as I/O finishes to prevent deadlock
4664 * because they have limited number and are used for future I/Os
4665 */
4666 if (rdata->mr) {
4667 smbd_deregister_mr(rdata->mr);
4668 rdata->mr = NULL;
4669 }
4670 #endif
4671 if (rdata->result && rdata->result != -ENODATA) {
4672 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
4673 trace_smb3_read_err(rdata->rreq->debug_id,
4674 rdata->subreq.debug_index,
4675 rdata->xid,
4676 rdata->req->cfile->fid.persistent_fid,
4677 tcon->tid, tcon->ses->Suid,
4678 rdata->subreq.start + rdata->subreq.transferred,
4679 rdata->subreq.len - rdata->subreq.transferred,
4680 rdata->result);
4681 } else
4682 trace_smb3_read_done(rdata->rreq->debug_id,
4683 rdata->subreq.debug_index,
4684 rdata->xid,
4685 rdata->req->cfile->fid.persistent_fid,
4686 tcon->tid, tcon->ses->Suid,
4687 rdata->subreq.start + rdata->subreq.transferred,
4688 rdata->got_bytes);
4689
4690 if (rdata->result == -ENODATA) {
4691 __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
4692 rdata->result = 0;
4693 } else {
4694 size_t trans = rdata->subreq.transferred + rdata->got_bytes;
4695 if (trans < rdata->subreq.len &&
4696 rdata->subreq.start + trans >= ictx->remote_i_size) {
4697 __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
4698 rdata->result = 0;
4699 }
4700 if (rdata->got_bytes)
4701 __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
4702 }
4703 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
4704 server->credits, server->in_flight,
4705 0, cifs_trace_rw_credits_read_response_clear);
4706 rdata->credits.value = 0;
4707 rdata->subreq.error = rdata->result;
4708 rdata->subreq.transferred += rdata->got_bytes;
4709 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
4710 netfs_read_subreq_terminated(&rdata->subreq);
4711 release_mid(server, mid);
4712 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
4713 server->credits, server->in_flight,
4714 credits.value, cifs_trace_rw_credits_read_response_add);
4715 add_credits(server, &credits, 0);
4716 }
4717
4718 /* smb2_async_readv - send an async read, and set up mid to handle result */
4719 int
smb2_async_readv(struct cifs_io_subrequest * rdata)4720 smb2_async_readv(struct cifs_io_subrequest *rdata)
4721 {
4722 int rc, flags = 0;
4723 char *buf;
4724 struct netfs_io_subrequest *subreq = &rdata->subreq;
4725 struct smb2_hdr *shdr;
4726 struct cifs_io_parms io_parms;
4727 struct smb_rqst rqst = { .rq_iov = rdata->iov,
4728 .rq_nvec = 1 };
4729 struct TCP_Server_Info *server;
4730 struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
4731 unsigned int total_len;
4732 int credit_request;
4733
4734 cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n",
4735 __func__, subreq->start, subreq->len);
4736
4737 if (!rdata->server)
4738 rdata->server = cifs_pick_channel(tcon->ses);
4739
4740 io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink);
4741 io_parms.server = server = rdata->server;
4742 io_parms.offset = subreq->start + subreq->transferred;
4743 io_parms.length = subreq->len - subreq->transferred;
4744 io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid;
4745 io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid;
4746 io_parms.pid = rdata->req->pid;
4747
4748 rc = smb2_new_read_req(
4749 (void **) &buf, &total_len, &io_parms, rdata, 0, 0);
4750 if (rc)
4751 return rc;
4752
4753 if (smb3_encryption_required(io_parms.tcon))
4754 flags |= CIFS_TRANSFORM_REQ;
4755
4756 rdata->iov[0].iov_base = buf;
4757 rdata->iov[0].iov_len = total_len;
4758 rdata->got_bytes = 0;
4759 rdata->result = 0;
4760
4761 shdr = (struct smb2_hdr *)buf;
4762
4763 if (rdata->credits.value > 0) {
4764 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(io_parms.length,
4765 SMB2_MAX_BUFFER_SIZE));
4766 credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
4767 if (server->credits >= server->max_credits)
4768 shdr->CreditRequest = cpu_to_le16(0);
4769 else
4770 shdr->CreditRequest = cpu_to_le16(
4771 min_t(int, server->max_credits -
4772 server->credits, credit_request));
4773
4774 rc = adjust_credits(server, rdata, cifs_trace_rw_credits_call_readv_adjust);
4775 if (rc)
4776 goto async_readv_out;
4777
4778 flags |= CIFS_HAS_CREDITS;
4779 }
4780
4781 rc = cifs_call_async(server, &rqst,
4782 cifs_readv_receive, smb2_readv_callback,
4783 smb3_handle_read_data, rdata, flags,
4784 &rdata->credits);
4785 if (rc) {
4786 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
4787 trace_smb3_read_err(rdata->rreq->debug_id,
4788 subreq->debug_index,
4789 rdata->xid, io_parms.persistent_fid,
4790 io_parms.tcon->tid,
4791 io_parms.tcon->ses->Suid,
4792 io_parms.offset,
4793 subreq->len - subreq->transferred, rc);
4794 }
4795
4796 async_readv_out:
4797 cifs_small_buf_release(buf);
4798 return rc;
4799 }
4800
4801 int
SMB2_read(const unsigned int xid,struct cifs_io_parms * io_parms,unsigned int * nbytes,char ** buf,int * buf_type)4802 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
4803 unsigned int *nbytes, char **buf, int *buf_type)
4804 {
4805 struct smb_rqst rqst;
4806 int resp_buftype, rc;
4807 struct smb2_read_req *req = NULL;
4808 struct smb2_read_rsp *rsp = NULL;
4809 struct kvec iov[1];
4810 struct kvec rsp_iov;
4811 unsigned int total_len;
4812 int flags = CIFS_LOG_ERROR;
4813 struct cifs_ses *ses = io_parms->tcon->ses;
4814
4815 if (!io_parms->server)
4816 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
4817
4818 *nbytes = 0;
4819 rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
4820 if (rc)
4821 return rc;
4822
4823 if (smb3_encryption_required(io_parms->tcon))
4824 flags |= CIFS_TRANSFORM_REQ;
4825
4826 iov[0].iov_base = (char *)req;
4827 iov[0].iov_len = total_len;
4828
4829 memset(&rqst, 0, sizeof(struct smb_rqst));
4830 rqst.rq_iov = iov;
4831 rqst.rq_nvec = 1;
4832
4833 rc = cifs_send_recv(xid, ses, io_parms->server,
4834 &rqst, &resp_buftype, flags, &rsp_iov);
4835 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
4836
4837 if (rc) {
4838 if (rc != -ENODATA) {
4839 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
4840 cifs_dbg(VFS, "Send error in read = %d\n", rc);
4841 trace_smb3_read_err(0, 0, xid,
4842 req->PersistentFileId,
4843 io_parms->tcon->tid, ses->Suid,
4844 io_parms->offset, io_parms->length,
4845 rc);
4846 } else
4847 trace_smb3_read_done(0, 0, xid,
4848 req->PersistentFileId, io_parms->tcon->tid,
4849 ses->Suid, io_parms->offset, 0);
4850 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4851 cifs_small_buf_release(req);
4852 return rc == -ENODATA ? 0 : rc;
4853 } else
4854 trace_smb3_read_done(0, 0, xid,
4855 req->PersistentFileId,
4856 io_parms->tcon->tid, ses->Suid,
4857 io_parms->offset, io_parms->length);
4858
4859 cifs_small_buf_release(req);
4860
4861 *nbytes = le32_to_cpu(rsp->DataLength);
4862 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
4863 (*nbytes > io_parms->length)) {
4864 cifs_dbg(FYI, "bad length %d for count %d\n",
4865 *nbytes, io_parms->length);
4866 rc = smb_EIO2(smb_eio_trace_read_overlarge,
4867 *nbytes, io_parms->length);
4868 *nbytes = 0;
4869 }
4870
4871 if (*buf) {
4872 memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes);
4873 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4874 } else if (resp_buftype != CIFS_NO_BUFFER) {
4875 *buf = rsp_iov.iov_base;
4876 if (resp_buftype == CIFS_SMALL_BUFFER)
4877 *buf_type = CIFS_SMALL_BUFFER;
4878 else if (resp_buftype == CIFS_LARGE_BUFFER)
4879 *buf_type = CIFS_LARGE_BUFFER;
4880 }
4881 return rc;
4882 }
4883
4884 /*
4885 * Check the mid_state and signature on received buffer (if any), and queue the
4886 * workqueue completion task.
4887 */
4888 static void
smb2_writev_callback(struct TCP_Server_Info * server,struct mid_q_entry * mid)4889 smb2_writev_callback(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4890 {
4891 struct cifs_io_subrequest *wdata = mid->callback_data;
4892 struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
4893 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
4894 struct cifs_credits credits = {
4895 .value = 0,
4896 .instance = 0,
4897 .rreq_debug_id = wdata->rreq->debug_id,
4898 .rreq_debug_index = wdata->subreq.debug_index,
4899 };
4900 unsigned int rreq_debug_id = wdata->rreq->debug_id;
4901 unsigned int subreq_debug_index = wdata->subreq.debug_index;
4902 ssize_t result = 0;
4903 size_t written;
4904
4905 WARN_ONCE(wdata->server != server,
4906 "wdata server %p != mid server %p",
4907 wdata->server, server);
4908
4909 switch (mid->mid_state) {
4910 case MID_RESPONSE_RECEIVED:
4911 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
4912 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4913 credits.instance = server->reconnect_instance;
4914 result = smb2_check_receive(mid, server, 0);
4915 if (result != 0) {
4916 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_bad);
4917 break;
4918 }
4919
4920 written = le32_to_cpu(rsp->DataLength);
4921 /*
4922 * Mask off high 16 bits when bytes written as returned
4923 * by the server is greater than bytes requested by the
4924 * client. OS/2 servers are known to set incorrect
4925 * CountHigh values.
4926 */
4927 if (written > wdata->subreq.len)
4928 written &= 0xFFFF;
4929
4930 cifs_stats_bytes_written(tcon, written);
4931
4932 if (written < wdata->subreq.len) {
4933 wdata->result = -ENOSPC;
4934 } else if (written > 0) {
4935 wdata->subreq.len = written;
4936 __set_bit(NETFS_SREQ_MADE_PROGRESS, &wdata->subreq.flags);
4937 }
4938 break;
4939 case MID_REQUEST_SUBMITTED:
4940 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_req_submitted);
4941 __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
4942 result = -EAGAIN;
4943 break;
4944 case MID_RETRY_NEEDED:
4945 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_retry_needed);
4946 __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
4947 result = -EAGAIN;
4948 break;
4949 case MID_RESPONSE_MALFORMED:
4950 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_malformed);
4951 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4952 credits.instance = server->reconnect_instance;
4953 result = smb_EIO(smb_eio_trace_write_rsp_malformed);
4954 break;
4955 default:
4956 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_unknown);
4957 result = smb_EIO1(smb_eio_trace_write_mid_state_unknown,
4958 mid->mid_state);
4959 break;
4960 }
4961 #ifdef CONFIG_CIFS_SMB_DIRECT
4962 /*
4963 * If this wdata has a memory registered, the MR can be freed
4964 * The number of MRs available is limited, it's important to recover
4965 * used MR as soon as I/O is finished. Hold MR longer in the later
4966 * I/O process can possibly result in I/O deadlock due to lack of MR
4967 * to send request on I/O retry
4968 */
4969 if (wdata->mr) {
4970 smbd_deregister_mr(wdata->mr);
4971 wdata->mr = NULL;
4972 }
4973 #endif
4974 if (result) {
4975 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
4976 trace_smb3_write_err(wdata->rreq->debug_id,
4977 wdata->subreq.debug_index,
4978 wdata->xid,
4979 wdata->req->cfile->fid.persistent_fid,
4980 tcon->tid, tcon->ses->Suid, wdata->subreq.start,
4981 wdata->subreq.len, wdata->result);
4982 if (wdata->result == -ENOSPC)
4983 pr_warn_once("Out of space writing to %s\n",
4984 tcon->tree_name);
4985 } else
4986 trace_smb3_write_done(wdata->rreq->debug_id,
4987 wdata->subreq.debug_index,
4988 wdata->xid,
4989 wdata->req->cfile->fid.persistent_fid,
4990 tcon->tid, tcon->ses->Suid,
4991 wdata->subreq.start, wdata->subreq.len);
4992
4993 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, wdata->credits.value,
4994 server->credits, server->in_flight,
4995 0, cifs_trace_rw_credits_write_response_clear);
4996 wdata->credits.value = 0;
4997 cifs_write_subrequest_terminated(wdata, result ?: written);
4998 release_mid(server, mid);
4999 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
5000 server->credits, server->in_flight,
5001 credits.value, cifs_trace_rw_credits_write_response_add);
5002 add_credits(server, &credits, 0);
5003 }
5004
5005 /* smb2_async_writev - send an async write, and set up mid to handle result */
5006 void
smb2_async_writev(struct cifs_io_subrequest * wdata)5007 smb2_async_writev(struct cifs_io_subrequest *wdata)
5008 {
5009 int rc = -EACCES, flags = 0;
5010 struct smb2_write_req *req = NULL;
5011 struct smb2_hdr *shdr;
5012 struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
5013 struct TCP_Server_Info *server = wdata->server;
5014 struct kvec iov[1];
5015 struct smb_rqst rqst = { };
5016 unsigned int total_len, xid = wdata->xid;
5017 struct cifs_io_parms _io_parms;
5018 struct cifs_io_parms *io_parms = NULL;
5019 int credit_request;
5020
5021 /*
5022 * in future we may get cifs_io_parms passed in from the caller,
5023 * but for now we construct it here...
5024 */
5025 _io_parms = (struct cifs_io_parms) {
5026 .tcon = tcon,
5027 .server = server,
5028 .offset = wdata->subreq.start,
5029 .length = wdata->subreq.len,
5030 .persistent_fid = wdata->req->cfile->fid.persistent_fid,
5031 .volatile_fid = wdata->req->cfile->fid.volatile_fid,
5032 .pid = wdata->req->pid,
5033 };
5034 io_parms = &_io_parms;
5035
5036 rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
5037 (void **) &req, &total_len);
5038 if (rc)
5039 goto out;
5040
5041 rqst.rq_iov = iov;
5042 rqst.rq_iter = wdata->subreq.io_iter;
5043
5044 rqst.rq_iov[0].iov_len = total_len - 1;
5045 rqst.rq_iov[0].iov_base = (char *)req;
5046 rqst.rq_nvec += 1;
5047
5048 if (smb3_encryption_required(tcon))
5049 flags |= CIFS_TRANSFORM_REQ;
5050
5051 shdr = (struct smb2_hdr *)req;
5052 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
5053
5054 req->PersistentFileId = io_parms->persistent_fid;
5055 req->VolatileFileId = io_parms->volatile_fid;
5056 req->WriteChannelInfoOffset = 0;
5057 req->WriteChannelInfoLength = 0;
5058 req->Channel = SMB2_CHANNEL_NONE;
5059 req->Length = cpu_to_le32(io_parms->length);
5060 req->Offset = cpu_to_le64(io_parms->offset);
5061 req->DataOffset = cpu_to_le16(
5062 offsetof(struct smb2_write_req, Buffer));
5063 req->RemainingBytes = 0;
5064
5065 trace_smb3_write_enter(wdata->rreq->debug_id,
5066 wdata->subreq.debug_index,
5067 wdata->xid,
5068 io_parms->persistent_fid,
5069 io_parms->tcon->tid,
5070 io_parms->tcon->ses->Suid,
5071 io_parms->offset,
5072 io_parms->length);
5073
5074 #ifdef CONFIG_CIFS_SMB_DIRECT
5075 /*
5076 * If we want to do a server RDMA read, fill in and append
5077 * smbdirect_buffer_descriptor_v1 to the end of write request
5078 */
5079 if (smb3_use_rdma_offload(io_parms)) {
5080 struct smbdirect_buffer_descriptor_v1 *v1;
5081 bool need_invalidate = server->dialect == SMB30_PROT_ID;
5082
5083 wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter,
5084 false, need_invalidate);
5085 if (!wdata->mr) {
5086 rc = -EAGAIN;
5087 goto async_writev_out;
5088 }
5089 /* For RDMA read, I/O size is in RemainingBytes not in Length */
5090 req->RemainingBytes = req->Length;
5091 req->Length = 0;
5092 req->DataOffset = 0;
5093 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
5094 if (need_invalidate)
5095 req->Channel = SMB2_CHANNEL_RDMA_V1;
5096 req->WriteChannelInfoOffset =
5097 cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
5098 req->WriteChannelInfoLength =
5099 cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1));
5100 v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0];
5101 v1->offset = cpu_to_le64(wdata->mr->mr->iova);
5102 v1->token = cpu_to_le32(wdata->mr->mr->rkey);
5103 v1->length = cpu_to_le32(wdata->mr->mr->length);
5104
5105 rqst.rq_iov[0].iov_len += sizeof(*v1);
5106
5107 /*
5108 * We keep wdata->subreq.io_iter,
5109 * but we have to truncate rqst.rq_iter
5110 */
5111 iov_iter_truncate(&rqst.rq_iter, 0);
5112 }
5113 #endif
5114
5115 if (wdata->subreq.retry_count > 0)
5116 smb2_set_replay(server, &rqst);
5117
5118 cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n",
5119 io_parms->offset, io_parms->length, iov_iter_count(&wdata->subreq.io_iter));
5120
5121 if (wdata->credits.value > 0) {
5122 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->subreq.len,
5123 SMB2_MAX_BUFFER_SIZE));
5124 credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
5125 if (server->credits >= server->max_credits)
5126 shdr->CreditRequest = cpu_to_le16(0);
5127 else
5128 shdr->CreditRequest = cpu_to_le16(
5129 min_t(int, server->max_credits -
5130 server->credits, credit_request));
5131
5132 rc = adjust_credits(server, wdata, cifs_trace_rw_credits_call_writev_adjust);
5133 if (rc)
5134 goto async_writev_out;
5135
5136 flags |= CIFS_HAS_CREDITS;
5137 }
5138
5139 /* XXX: compression + encryption is unsupported for now */
5140 if (((flags & CIFS_TRANSFORM_REQ) != CIFS_TRANSFORM_REQ) && should_compress(tcon, &rqst))
5141 flags |= CIFS_COMPRESS_REQ;
5142
5143 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
5144 wdata, flags, &wdata->credits);
5145 /* Can't touch wdata if rc == 0 */
5146 if (rc) {
5147 trace_smb3_write_err(wdata->rreq->debug_id,
5148 wdata->subreq.debug_index,
5149 xid,
5150 io_parms->persistent_fid,
5151 io_parms->tcon->tid,
5152 io_parms->tcon->ses->Suid,
5153 io_parms->offset,
5154 io_parms->length,
5155 rc);
5156 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
5157 }
5158
5159 async_writev_out:
5160 cifs_small_buf_release(req);
5161 out:
5162 if (rc) {
5163 trace_smb3_rw_credits(wdata->rreq->debug_id,
5164 wdata->subreq.debug_index,
5165 wdata->credits.value,
5166 server->credits, server->in_flight,
5167 -(int)wdata->credits.value,
5168 cifs_trace_rw_credits_write_response_clear);
5169 add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
5170 cifs_write_subrequest_terminated(wdata, rc);
5171 }
5172 }
5173
5174 /*
5175 * SMB2_write function gets iov pointer to kvec array with n_vec as a length.
5176 * The length field from io_parms must be at least 1 and indicates a number of
5177 * elements with data to write that begins with position 1 in iov array. All
5178 * data length is specified by count.
5179 */
5180 int
SMB2_write(const unsigned int xid,struct cifs_io_parms * io_parms,unsigned int * nbytes,struct kvec * iov,int n_vec)5181 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
5182 unsigned int *nbytes, struct kvec *iov, int n_vec)
5183 {
5184 struct smb_rqst rqst;
5185 int rc = 0;
5186 struct smb2_write_req *req = NULL;
5187 struct smb2_write_rsp *rsp = NULL;
5188 int resp_buftype;
5189 struct kvec rsp_iov;
5190 int flags = 0;
5191 unsigned int total_len;
5192 struct TCP_Server_Info *server;
5193 int retries = 0, cur_sleep = 1;
5194
5195 replay_again:
5196 /* reinitialize for possible replay */
5197 flags = 0;
5198 *nbytes = 0;
5199 if (!io_parms->server)
5200 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
5201 server = io_parms->server;
5202 if (server == NULL)
5203 return -ECONNABORTED;
5204
5205 if (n_vec < 1)
5206 return rc;
5207
5208 rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
5209 (void **) &req, &total_len);
5210 if (rc)
5211 return rc;
5212
5213 if (smb3_encryption_required(io_parms->tcon))
5214 flags |= CIFS_TRANSFORM_REQ;
5215
5216 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
5217
5218 req->PersistentFileId = io_parms->persistent_fid;
5219 req->VolatileFileId = io_parms->volatile_fid;
5220 req->WriteChannelInfoOffset = 0;
5221 req->WriteChannelInfoLength = 0;
5222 req->Channel = 0;
5223 req->Length = cpu_to_le32(io_parms->length);
5224 req->Offset = cpu_to_le64(io_parms->offset);
5225 req->DataOffset = cpu_to_le16(
5226 offsetof(struct smb2_write_req, Buffer));
5227 req->RemainingBytes = 0;
5228
5229 trace_smb3_write_enter(0, 0, xid, io_parms->persistent_fid,
5230 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
5231 io_parms->offset, io_parms->length);
5232
5233 iov[0].iov_base = (char *)req;
5234 /* 1 for Buffer */
5235 iov[0].iov_len = total_len - 1;
5236
5237 memset(&rqst, 0, sizeof(struct smb_rqst));
5238 rqst.rq_iov = iov;
5239 rqst.rq_nvec = n_vec + 1;
5240
5241 if (retries)
5242 smb2_set_replay(server, &rqst);
5243
5244 rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
5245 &rqst,
5246 &resp_buftype, flags, &rsp_iov);
5247 rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
5248
5249 if (rc) {
5250 trace_smb3_write_err(0, 0, xid,
5251 req->PersistentFileId,
5252 io_parms->tcon->tid,
5253 io_parms->tcon->ses->Suid,
5254 io_parms->offset, io_parms->length, rc);
5255 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
5256 cifs_dbg(VFS, "Send error in write = %d\n", rc);
5257 } else {
5258 *nbytes = le32_to_cpu(rsp->DataLength);
5259 cifs_stats_bytes_written(io_parms->tcon, *nbytes);
5260 trace_smb3_write_done(0, 0, xid,
5261 req->PersistentFileId,
5262 io_parms->tcon->tid,
5263 io_parms->tcon->ses->Suid,
5264 io_parms->offset, *nbytes);
5265 }
5266
5267 cifs_small_buf_release(req);
5268 free_rsp_buf(resp_buftype, rsp);
5269
5270 if (is_replayable_error(rc) &&
5271 smb2_should_replay(io_parms->tcon, &retries, &cur_sleep))
5272 goto replay_again;
5273
5274 return rc;
5275 }
5276
posix_info_sid_size(const void * beg,const void * end)5277 int posix_info_sid_size(const void *beg, const void *end)
5278 {
5279 size_t subauth;
5280 int total;
5281
5282 if (beg + 1 > end)
5283 return -1;
5284
5285 subauth = *(u8 *)(beg+1);
5286 if (subauth < 1 || subauth > 15)
5287 return -1;
5288
5289 total = 1 + 1 + 6 + 4*subauth;
5290 if (beg + total > end)
5291 return -1;
5292
5293 return total;
5294 }
5295
posix_info_parse(const void * beg,const void * end,struct smb2_posix_info_parsed * out)5296 int posix_info_parse(const void *beg, const void *end,
5297 struct smb2_posix_info_parsed *out)
5298
5299 {
5300 int total_len = 0;
5301 int owner_len, group_len;
5302 int name_len;
5303 const void *owner_sid;
5304 const void *group_sid;
5305 const void *name;
5306
5307 /* if no end bound given, assume payload to be correct */
5308 if (!end) {
5309 const struct smb2_posix_info *p = beg;
5310
5311 end = beg + le32_to_cpu(p->NextEntryOffset);
5312 /* last element will have a 0 offset, pick a sensible bound */
5313 if (end == beg)
5314 end += 0xFFFF;
5315 }
5316
5317 /* check base buf */
5318 if (beg + sizeof(struct smb2_posix_info) > end)
5319 return -1;
5320 total_len = sizeof(struct smb2_posix_info);
5321
5322 /* check owner sid */
5323 owner_sid = beg + total_len;
5324 owner_len = posix_info_sid_size(owner_sid, end);
5325 if (owner_len < 0)
5326 return -1;
5327 total_len += owner_len;
5328
5329 /* check group sid */
5330 group_sid = beg + total_len;
5331 group_len = posix_info_sid_size(group_sid, end);
5332 if (group_len < 0)
5333 return -1;
5334 total_len += group_len;
5335
5336 /* check name len */
5337 if (beg + total_len + 4 > end)
5338 return -1;
5339 name_len = le32_to_cpu(*(__le32 *)(beg + total_len));
5340 if (name_len < 1 || name_len > 0xFFFF)
5341 return -1;
5342 total_len += 4;
5343
5344 /* check name */
5345 name = beg + total_len;
5346 if (name + name_len > end)
5347 return -1;
5348 total_len += name_len;
5349
5350 if (out) {
5351 out->base = beg;
5352 out->size = total_len;
5353 out->name_len = name_len;
5354 out->name = name;
5355 memcpy(&out->owner, owner_sid, owner_len);
5356 memcpy(&out->group, group_sid, group_len);
5357 }
5358 return total_len;
5359 }
5360
posix_info_extra_size(const void * beg,const void * end)5361 static int posix_info_extra_size(const void *beg, const void *end)
5362 {
5363 int len = posix_info_parse(beg, end, NULL);
5364
5365 if (len < 0)
5366 return -1;
5367 return len - sizeof(struct smb2_posix_info);
5368 }
5369
5370 static unsigned int
num_entries(int infotype,char * bufstart,char * end_of_buf,char ** lastentry,size_t size)5371 num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry,
5372 size_t size)
5373 {
5374 int len;
5375 unsigned int entrycount = 0;
5376 unsigned int next_offset = 0;
5377 char *entryptr;
5378 FILE_DIRECTORY_INFO *dir_info;
5379
5380 if (bufstart == NULL)
5381 return 0;
5382
5383 entryptr = bufstart;
5384
5385 while (1) {
5386 if (entryptr + next_offset < entryptr ||
5387 entryptr + next_offset > end_of_buf ||
5388 entryptr + next_offset + size > end_of_buf) {
5389 cifs_dbg(VFS, "malformed search entry would overflow\n");
5390 break;
5391 }
5392
5393 entryptr = entryptr + next_offset;
5394 dir_info = (FILE_DIRECTORY_INFO *)entryptr;
5395
5396 if (infotype == SMB_FIND_FILE_POSIX_INFO)
5397 len = posix_info_extra_size(entryptr, end_of_buf);
5398 else
5399 len = le32_to_cpu(dir_info->FileNameLength);
5400
5401 if (len < 0 ||
5402 entryptr + len < entryptr ||
5403 entryptr + len > end_of_buf ||
5404 entryptr + len + size > end_of_buf) {
5405 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
5406 end_of_buf);
5407 break;
5408 }
5409
5410 *lastentry = entryptr;
5411 entrycount++;
5412
5413 next_offset = le32_to_cpu(dir_info->NextEntryOffset);
5414 if (!next_offset)
5415 break;
5416 }
5417
5418 return entrycount;
5419 }
5420
5421 /*
5422 * Readdir/FindFirst
5423 */
SMB2_query_directory_init(const unsigned int xid,struct cifs_tcon * tcon,struct TCP_Server_Info * server,struct smb_rqst * rqst,u64 persistent_fid,u64 volatile_fid,int index,int info_level)5424 int SMB2_query_directory_init(const unsigned int xid,
5425 struct cifs_tcon *tcon,
5426 struct TCP_Server_Info *server,
5427 struct smb_rqst *rqst,
5428 u64 persistent_fid, u64 volatile_fid,
5429 int index, int info_level)
5430 {
5431 struct smb2_query_directory_req *req;
5432 unsigned char *bufptr;
5433 __le16 asteriks = cpu_to_le16('*');
5434 unsigned int output_size = CIFSMaxBufSize -
5435 MAX_SMB2_CREATE_RESPONSE_SIZE -
5436 MAX_SMB2_CLOSE_RESPONSE_SIZE;
5437 unsigned int total_len;
5438 struct kvec *iov = rqst->rq_iov;
5439 int len, rc;
5440
5441 rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server,
5442 (void **) &req, &total_len);
5443 if (rc)
5444 return rc;
5445
5446 switch (info_level) {
5447 case SMB_FIND_FILE_DIRECTORY_INFO:
5448 req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
5449 break;
5450 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
5451 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
5452 break;
5453 case SMB_FIND_FILE_POSIX_INFO:
5454 req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
5455 break;
5456 case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
5457 req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION;
5458 break;
5459 default:
5460 cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
5461 info_level);
5462 return -EINVAL;
5463 }
5464
5465 req->FileIndex = cpu_to_le32(index);
5466 req->PersistentFileId = persistent_fid;
5467 req->VolatileFileId = volatile_fid;
5468
5469 len = 0x2;
5470 bufptr = req->Buffer;
5471 memcpy(bufptr, &asteriks, len);
5472
5473 req->FileNameOffset =
5474 cpu_to_le16(sizeof(struct smb2_query_directory_req));
5475 req->FileNameLength = cpu_to_le16(len);
5476 /*
5477 * BB could be 30 bytes or so longer if we used SMB2 specific
5478 * buffer lengths, but this is safe and close enough.
5479 */
5480 output_size = min_t(unsigned int, output_size, server->maxBuf);
5481 output_size = min_t(unsigned int, output_size, 2 << 15);
5482 req->OutputBufferLength = cpu_to_le32(output_size);
5483
5484 iov[0].iov_base = (char *)req;
5485 /* 1 for Buffer */
5486 iov[0].iov_len = total_len - 1;
5487
5488 iov[1].iov_base = (char *)(req->Buffer);
5489 iov[1].iov_len = len;
5490
5491 trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
5492 tcon->ses->Suid, index, output_size);
5493
5494 return 0;
5495 }
5496
SMB2_query_directory_free(struct smb_rqst * rqst)5497 void SMB2_query_directory_free(struct smb_rqst *rqst)
5498 {
5499 if (rqst && rqst->rq_iov) {
5500 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
5501 }
5502 }
5503
5504 int
smb2_parse_query_directory(struct cifs_tcon * tcon,struct kvec * rsp_iov,int resp_buftype,struct cifs_search_info * srch_inf)5505 smb2_parse_query_directory(struct cifs_tcon *tcon,
5506 struct kvec *rsp_iov,
5507 int resp_buftype,
5508 struct cifs_search_info *srch_inf)
5509 {
5510 struct smb2_query_directory_rsp *rsp;
5511 size_t info_buf_size;
5512 char *end_of_smb;
5513 int rc;
5514
5515 rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base;
5516
5517 switch (srch_inf->info_level) {
5518 case SMB_FIND_FILE_DIRECTORY_INFO:
5519 info_buf_size = sizeof(FILE_DIRECTORY_INFO);
5520 break;
5521 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
5522 info_buf_size = sizeof(FILE_ID_FULL_DIR_INFO);
5523 break;
5524 case SMB_FIND_FILE_POSIX_INFO:
5525 /* note that posix payload are variable size */
5526 info_buf_size = sizeof(struct smb2_posix_info);
5527 break;
5528 case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
5529 info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO);
5530 break;
5531 default:
5532 cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
5533 srch_inf->info_level);
5534 return -EINVAL;
5535 }
5536
5537 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
5538 le32_to_cpu(rsp->OutputBufferLength), rsp_iov,
5539 info_buf_size);
5540 if (rc) {
5541 cifs_tcon_dbg(VFS, "bad info payload");
5542 return rc;
5543 }
5544
5545 srch_inf->unicode = true;
5546
5547 if (srch_inf->ntwrk_buf_start) {
5548 if (srch_inf->smallBuf)
5549 cifs_small_buf_release(srch_inf->ntwrk_buf_start);
5550 else
5551 cifs_buf_release(srch_inf->ntwrk_buf_start);
5552 }
5553 srch_inf->ntwrk_buf_start = (char *)rsp;
5554 srch_inf->srch_entries_start = srch_inf->last_entry =
5555 (char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
5556 end_of_smb = rsp_iov->iov_len + (char *)rsp;
5557
5558 srch_inf->entries_in_buffer = num_entries(
5559 srch_inf->info_level,
5560 srch_inf->srch_entries_start,
5561 end_of_smb,
5562 &srch_inf->last_entry,
5563 info_buf_size);
5564
5565 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
5566 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
5567 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
5568 srch_inf->srch_entries_start, srch_inf->last_entry);
5569 if (resp_buftype == CIFS_LARGE_BUFFER)
5570 srch_inf->smallBuf = false;
5571 else if (resp_buftype == CIFS_SMALL_BUFFER)
5572 srch_inf->smallBuf = true;
5573 else
5574 cifs_tcon_dbg(VFS, "Invalid search buffer type\n");
5575
5576 return 0;
5577 }
5578
5579 int
SMB2_query_directory(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,int index,struct cifs_search_info * srch_inf)5580 SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
5581 u64 persistent_fid, u64 volatile_fid, int index,
5582 struct cifs_search_info *srch_inf)
5583 {
5584 struct smb_rqst rqst;
5585 struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
5586 struct smb2_query_directory_rsp *rsp = NULL;
5587 int resp_buftype = CIFS_NO_BUFFER;
5588 struct kvec rsp_iov;
5589 int rc = 0;
5590 struct cifs_ses *ses = tcon->ses;
5591 struct TCP_Server_Info *server;
5592 int flags = 0;
5593 int retries = 0, cur_sleep = 1;
5594
5595 replay_again:
5596 /* reinitialize for possible replay */
5597 flags = 0;
5598 server = cifs_pick_channel(ses);
5599
5600 if (!ses || !(ses->server))
5601 return smb_EIO(smb_eio_trace_null_pointers);
5602
5603 if (smb3_encryption_required(tcon))
5604 flags |= CIFS_TRANSFORM_REQ;
5605
5606 memset(&rqst, 0, sizeof(struct smb_rqst));
5607 memset(&iov, 0, sizeof(iov));
5608 rqst.rq_iov = iov;
5609 rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
5610
5611 rc = SMB2_query_directory_init(xid, tcon, server,
5612 &rqst, persistent_fid,
5613 volatile_fid, index,
5614 srch_inf->info_level);
5615 if (rc)
5616 goto qdir_exit;
5617
5618 if (retries)
5619 smb2_set_replay(server, &rqst);
5620
5621 rc = cifs_send_recv(xid, ses, server,
5622 &rqst, &resp_buftype, flags, &rsp_iov);
5623 rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
5624
5625 if (rc) {
5626 if (rc == -ENODATA &&
5627 rsp->hdr.Status == STATUS_NO_MORE_FILES) {
5628 trace_smb3_query_dir_done(xid, persistent_fid,
5629 tcon->tid, tcon->ses->Suid, index, 0);
5630 srch_inf->endOfSearch = true;
5631 rc = 0;
5632 } else {
5633 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
5634 tcon->ses->Suid, index, 0, rc);
5635 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
5636 }
5637 goto qdir_exit;
5638 }
5639
5640 rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype,
5641 srch_inf);
5642 if (rc) {
5643 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
5644 tcon->ses->Suid, index, 0, rc);
5645 goto qdir_exit;
5646 }
5647 resp_buftype = CIFS_NO_BUFFER;
5648
5649 trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
5650 tcon->ses->Suid, index, srch_inf->entries_in_buffer);
5651
5652 qdir_exit:
5653 SMB2_query_directory_free(&rqst);
5654 free_rsp_buf(resp_buftype, rsp);
5655
5656 if (is_replayable_error(rc) &&
5657 smb2_should_replay(tcon, &retries, &cur_sleep))
5658 goto replay_again;
5659
5660 return rc;
5661 }
5662
5663 int
SMB2_set_info_init(struct cifs_tcon * tcon,struct TCP_Server_Info * server,struct smb_rqst * rqst,u64 persistent_fid,u64 volatile_fid,u32 pid,u8 info_class,u8 info_type,u32 additional_info,void ** data,unsigned int * size)5664 SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
5665 struct smb_rqst *rqst,
5666 u64 persistent_fid, u64 volatile_fid, u32 pid,
5667 u8 info_class, u8 info_type, u32 additional_info,
5668 void **data, unsigned int *size)
5669 {
5670 struct smb2_set_info_req *req;
5671 struct kvec *iov = rqst->rq_iov;
5672 unsigned int i, total_len;
5673 int rc;
5674
5675 rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
5676 (void **) &req, &total_len);
5677 if (rc)
5678 return rc;
5679
5680 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
5681 req->InfoType = info_type;
5682 req->FileInfoClass = info_class;
5683 req->PersistentFileId = persistent_fid;
5684 req->VolatileFileId = volatile_fid;
5685 req->AdditionalInformation = cpu_to_le32(additional_info);
5686
5687 req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req));
5688 req->BufferLength = cpu_to_le32(*size);
5689
5690 memcpy(req->Buffer, *data, *size);
5691 total_len += *size;
5692
5693 iov[0].iov_base = (char *)req;
5694 /* 1 for Buffer */
5695 iov[0].iov_len = total_len - 1;
5696
5697 for (i = 1; i < rqst->rq_nvec; i++) {
5698 le32_add_cpu(&req->BufferLength, size[i]);
5699 iov[i].iov_base = (char *)data[i];
5700 iov[i].iov_len = size[i];
5701 }
5702
5703 return 0;
5704 }
5705
5706 void
SMB2_set_info_free(struct smb_rqst * rqst)5707 SMB2_set_info_free(struct smb_rqst *rqst)
5708 {
5709 if (rqst && rqst->rq_iov)
5710 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
5711 }
5712
5713 static int
send_set_info(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,u32 pid,u8 info_class,u8 info_type,u32 additional_info,unsigned int num,void ** data,unsigned int * size)5714 send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
5715 u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
5716 u8 info_type, u32 additional_info, unsigned int num,
5717 void **data, unsigned int *size)
5718 {
5719 struct smb_rqst rqst;
5720 struct smb2_set_info_rsp *rsp = NULL;
5721 struct kvec *iov;
5722 struct kvec rsp_iov;
5723 int rc = 0;
5724 int resp_buftype;
5725 struct cifs_ses *ses = tcon->ses;
5726 struct TCP_Server_Info *server;
5727 int flags = 0;
5728 int retries = 0, cur_sleep = 1;
5729
5730 replay_again:
5731 /* reinitialize for possible replay */
5732 flags = 0;
5733 server = cifs_pick_channel(ses);
5734
5735 if (!ses || !server)
5736 return smb_EIO(smb_eio_trace_null_pointers);
5737
5738 if (!num)
5739 return -EINVAL;
5740
5741 if (smb3_encryption_required(tcon))
5742 flags |= CIFS_TRANSFORM_REQ;
5743
5744 iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
5745 if (!iov)
5746 return -ENOMEM;
5747
5748 memset(&rqst, 0, sizeof(struct smb_rqst));
5749 rqst.rq_iov = iov;
5750 rqst.rq_nvec = num;
5751
5752 rc = SMB2_set_info_init(tcon, server,
5753 &rqst, persistent_fid, volatile_fid, pid,
5754 info_class, info_type, additional_info,
5755 data, size);
5756 if (rc) {
5757 kfree(iov);
5758 return rc;
5759 }
5760
5761 if (retries)
5762 smb2_set_replay(server, &rqst);
5763
5764 rc = cifs_send_recv(xid, ses, server,
5765 &rqst, &resp_buftype, flags,
5766 &rsp_iov);
5767 SMB2_set_info_free(&rqst);
5768 rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
5769
5770 if (rc != 0) {
5771 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
5772 trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
5773 ses->Suid, info_class, (__u32)info_type, rc);
5774 }
5775
5776 free_rsp_buf(resp_buftype, rsp);
5777 kfree(iov);
5778
5779 if (is_replayable_error(rc) &&
5780 smb2_should_replay(tcon, &retries, &cur_sleep))
5781 goto replay_again;
5782
5783 return rc;
5784 }
5785
5786 int
SMB2_set_eof(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,u32 pid,loff_t new_eof)5787 SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
5788 u64 volatile_fid, u32 pid, loff_t new_eof)
5789 {
5790 struct smb2_file_eof_info info;
5791 void *data;
5792 unsigned int size;
5793
5794 info.EndOfFile = cpu_to_le64(new_eof);
5795
5796 data = &info;
5797 size = sizeof(struct smb2_file_eof_info);
5798
5799 trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, new_eof);
5800
5801 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5802 pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
5803 0, 1, &data, &size);
5804 }
5805
5806 int
SMB2_set_acl(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,struct smb_ntsd * pnntsd,int pacllen,int aclflag)5807 SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
5808 u64 persistent_fid, u64 volatile_fid,
5809 struct smb_ntsd *pnntsd, int pacllen, int aclflag)
5810 {
5811 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5812 current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
5813 1, (void **)&pnntsd, &pacllen);
5814 }
5815
5816 int
SMB2_set_ea(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,struct smb2_file_full_ea_info * buf,int len)5817 SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
5818 u64 persistent_fid, u64 volatile_fid,
5819 struct smb2_file_full_ea_info *buf, int len)
5820 {
5821 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5822 current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
5823 0, 1, (void **)&buf, &len);
5824 }
5825
5826 int
SMB2_oplock_break(const unsigned int xid,struct cifs_tcon * tcon,const u64 persistent_fid,const u64 volatile_fid,__u8 oplock_level)5827 SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
5828 const u64 persistent_fid, const u64 volatile_fid,
5829 __u8 oplock_level)
5830 {
5831 struct smb_rqst rqst;
5832 int rc;
5833 struct smb2_oplock_break *req = NULL;
5834 struct cifs_ses *ses = tcon->ses;
5835 struct TCP_Server_Info *server;
5836 int flags = CIFS_OBREAK_OP;
5837 unsigned int total_len;
5838 struct kvec iov[1];
5839 struct kvec rsp_iov;
5840 int resp_buf_type;
5841 int retries = 0, cur_sleep = 1;
5842
5843 replay_again:
5844 /* reinitialize for possible replay */
5845 flags = CIFS_OBREAK_OP;
5846 server = cifs_pick_channel(ses);
5847
5848 cifs_dbg(FYI, "SMB2_oplock_break\n");
5849 rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
5850 (void **) &req, &total_len);
5851 if (rc)
5852 return rc;
5853
5854 if (smb3_encryption_required(tcon))
5855 flags |= CIFS_TRANSFORM_REQ;
5856
5857 req->VolatileFid = volatile_fid;
5858 req->PersistentFid = persistent_fid;
5859 req->OplockLevel = oplock_level;
5860 req->hdr.CreditRequest = cpu_to_le16(1);
5861
5862 flags |= CIFS_NO_RSP_BUF;
5863
5864 iov[0].iov_base = (char *)req;
5865 iov[0].iov_len = total_len;
5866
5867 memset(&rqst, 0, sizeof(struct smb_rqst));
5868 rqst.rq_iov = iov;
5869 rqst.rq_nvec = 1;
5870
5871 if (retries)
5872 smb2_set_replay(server, &rqst);
5873
5874 rc = cifs_send_recv(xid, ses, server,
5875 &rqst, &resp_buf_type, flags, &rsp_iov);
5876 cifs_small_buf_release(req);
5877 if (rc) {
5878 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
5879 cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
5880 }
5881
5882 if (is_replayable_error(rc) &&
5883 smb2_should_replay(tcon, &retries, &cur_sleep))
5884 goto replay_again;
5885
5886 return rc;
5887 }
5888
5889 void
smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info * pfs_inf,struct kstatfs * kst)5890 smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
5891 struct kstatfs *kst)
5892 {
5893 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
5894 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
5895 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
5896 kst->f_bfree = kst->f_bavail =
5897 le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
5898 return;
5899 }
5900
5901 static void
copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO * response_data,struct kstatfs * kst)5902 copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
5903 struct kstatfs *kst)
5904 {
5905 kst->f_bsize = le32_to_cpu(response_data->BlockSize);
5906 kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
5907 kst->f_bfree = le64_to_cpu(response_data->BlocksAvail);
5908 if (response_data->UserBlocksAvail == cpu_to_le64(-1))
5909 kst->f_bavail = kst->f_bfree;
5910 else
5911 kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
5912 if (response_data->TotalFileNodes != cpu_to_le64(-1))
5913 kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
5914 if (response_data->FreeFileNodes != cpu_to_le64(-1))
5915 kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);
5916
5917 return;
5918 }
5919
5920 static int
build_qfs_info_req(struct kvec * iov,struct cifs_tcon * tcon,struct TCP_Server_Info * server,int level,int outbuf_len,u64 persistent_fid,u64 volatile_fid)5921 build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
5922 struct TCP_Server_Info *server,
5923 int level, int outbuf_len, u64 persistent_fid,
5924 u64 volatile_fid)
5925 {
5926 int rc;
5927 struct smb2_query_info_req *req;
5928 unsigned int total_len;
5929
5930 cifs_dbg(FYI, "Query FSInfo level %d\n", level);
5931
5932 if ((tcon->ses == NULL) || server == NULL)
5933 return smb_EIO(smb_eio_trace_null_pointers);
5934
5935 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
5936 (void **) &req, &total_len);
5937 if (rc)
5938 return rc;
5939
5940 req->InfoType = SMB2_O_INFO_FILESYSTEM;
5941 req->FileInfoClass = level;
5942 req->PersistentFileId = persistent_fid;
5943 req->VolatileFileId = volatile_fid;
5944 /* 1 for pad */
5945 req->InputBufferOffset =
5946 cpu_to_le16(sizeof(struct smb2_query_info_req));
5947 req->OutputBufferLength = cpu_to_le32(
5948 outbuf_len + sizeof(struct smb2_query_info_rsp));
5949
5950 iov->iov_base = (char *)req;
5951 iov->iov_len = total_len;
5952 return 0;
5953 }
5954
free_qfs_info_req(struct kvec * iov)5955 static inline void free_qfs_info_req(struct kvec *iov)
5956 {
5957 cifs_buf_release(iov->iov_base);
5958 }
5959
5960 int
SMB311_posix_qfs_info(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,struct kstatfs * fsdata)5961 SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
5962 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
5963 {
5964 struct smb_rqst rqst;
5965 struct smb2_query_info_rsp *rsp = NULL;
5966 struct kvec iov;
5967 struct kvec rsp_iov;
5968 int rc = 0;
5969 int resp_buftype;
5970 struct cifs_ses *ses = tcon->ses;
5971 struct TCP_Server_Info *server;
5972 FILE_SYSTEM_POSIX_INFO *info = NULL;
5973 int flags = 0;
5974 int retries = 0, cur_sleep = 1;
5975
5976 replay_again:
5977 /* reinitialize for possible replay */
5978 flags = 0;
5979 server = cifs_pick_channel(ses);
5980
5981 rc = build_qfs_info_req(&iov, tcon, server,
5982 FS_POSIX_INFORMATION,
5983 sizeof(FILE_SYSTEM_POSIX_INFO),
5984 persistent_fid, volatile_fid);
5985 if (rc)
5986 return rc;
5987
5988 if (smb3_encryption_required(tcon))
5989 flags |= CIFS_TRANSFORM_REQ;
5990
5991 memset(&rqst, 0, sizeof(struct smb_rqst));
5992 rqst.rq_iov = &iov;
5993 rqst.rq_nvec = 1;
5994
5995 if (retries)
5996 smb2_set_replay(server, &rqst);
5997
5998 rc = cifs_send_recv(xid, ses, server,
5999 &rqst, &resp_buftype, flags, &rsp_iov);
6000 free_qfs_info_req(&iov);
6001 if (rc) {
6002 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
6003 goto posix_qfsinf_exit;
6004 }
6005 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
6006
6007 info = (FILE_SYSTEM_POSIX_INFO *)(
6008 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
6009 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
6010 le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
6011 sizeof(FILE_SYSTEM_POSIX_INFO));
6012 if (!rc)
6013 copy_posix_fs_info_to_kstatfs(info, fsdata);
6014
6015 posix_qfsinf_exit:
6016 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
6017
6018 if (is_replayable_error(rc) &&
6019 smb2_should_replay(tcon, &retries, &cur_sleep))
6020 goto replay_again;
6021
6022 return rc;
6023 }
6024
6025 int
SMB2_QFS_attr(const unsigned int xid,struct cifs_tcon * tcon,u64 persistent_fid,u64 volatile_fid,int level)6026 SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
6027 u64 persistent_fid, u64 volatile_fid, int level)
6028 {
6029 struct smb_rqst rqst;
6030 struct smb2_query_info_rsp *rsp = NULL;
6031 struct kvec iov;
6032 struct kvec rsp_iov;
6033 int rc = 0;
6034 int resp_buftype, max_len, min_len;
6035 struct cifs_ses *ses = tcon->ses;
6036 struct TCP_Server_Info *server;
6037 unsigned int rsp_len, offset;
6038 int flags = 0;
6039 int retries = 0, cur_sleep = 1;
6040
6041 replay_again:
6042 /* reinitialize for possible replay */
6043 flags = 0;
6044 server = cifs_pick_channel(ses);
6045
6046 if (level == FS_DEVICE_INFORMATION) {
6047 max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
6048 min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
6049 } else if (level == FS_ATTRIBUTE_INFORMATION) {
6050 max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO) + MAX_FS_NAME_LEN;
6051 min_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
6052 } else if (level == FS_SECTOR_SIZE_INFORMATION) {
6053 max_len = sizeof(struct smb3_fs_ss_info);
6054 min_len = sizeof(struct smb3_fs_ss_info);
6055 } else if (level == FS_VOLUME_INFORMATION) {
6056 max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
6057 min_len = sizeof(struct smb3_fs_vol_info);
6058 } else {
6059 cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
6060 return -EINVAL;
6061 }
6062
6063 rc = build_qfs_info_req(&iov, tcon, server,
6064 level, max_len,
6065 persistent_fid, volatile_fid);
6066 if (rc)
6067 return rc;
6068
6069 if (smb3_encryption_required(tcon))
6070 flags |= CIFS_TRANSFORM_REQ;
6071
6072 memset(&rqst, 0, sizeof(struct smb_rqst));
6073 rqst.rq_iov = &iov;
6074 rqst.rq_nvec = 1;
6075
6076 if (retries)
6077 smb2_set_replay(server, &rqst);
6078
6079 rc = cifs_send_recv(xid, ses, server,
6080 &rqst, &resp_buftype, flags, &rsp_iov);
6081 free_qfs_info_req(&iov);
6082 if (rc) {
6083 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
6084 goto qfsattr_exit;
6085 }
6086 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
6087
6088 rsp_len = le32_to_cpu(rsp->OutputBufferLength);
6089 offset = le16_to_cpu(rsp->OutputBufferOffset);
6090 rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
6091 if (rc)
6092 goto qfsattr_exit;
6093
6094 if (level == FS_ATTRIBUTE_INFORMATION)
6095 memcpy(&tcon->fsAttrInfo, offset
6096 + (char *)rsp, min_t(unsigned int,
6097 rsp_len, min_len));
6098 else if (level == FS_DEVICE_INFORMATION)
6099 memcpy(&tcon->fsDevInfo, offset
6100 + (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
6101 else if (level == FS_SECTOR_SIZE_INFORMATION) {
6102 struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
6103 (offset + (char *)rsp);
6104 tcon->ss_flags = le32_to_cpu(ss_info->Flags);
6105 tcon->perf_sector_size =
6106 le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
6107 } else if (level == FS_VOLUME_INFORMATION) {
6108 struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
6109 (offset + (char *)rsp);
6110 tcon->vol_serial_number = vol_info->VolumeSerialNumber;
6111 tcon->vol_create_time = vol_info->VolumeCreationTime;
6112 }
6113
6114 qfsattr_exit:
6115 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
6116
6117 if (is_replayable_error(rc) &&
6118 smb2_should_replay(tcon, &retries, &cur_sleep))
6119 goto replay_again;
6120
6121 return rc;
6122 }
6123
6124 int
smb2_lockv(const unsigned int xid,struct cifs_tcon * tcon,const __u64 persist_fid,const __u64 volatile_fid,const __u32 pid,const __u32 num_lock,struct smb2_lock_element * buf)6125 smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
6126 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
6127 const __u32 num_lock, struct smb2_lock_element *buf)
6128 {
6129 struct smb_rqst rqst;
6130 int rc = 0;
6131 struct smb2_lock_req *req = NULL;
6132 struct kvec iov[2];
6133 struct kvec rsp_iov;
6134 int resp_buf_type;
6135 unsigned int count;
6136 int flags = CIFS_NO_RSP_BUF;
6137 unsigned int total_len;
6138 struct TCP_Server_Info *server;
6139 int retries = 0, cur_sleep = 1;
6140
6141 replay_again:
6142 /* reinitialize for possible replay */
6143 flags = CIFS_NO_RSP_BUF;
6144 server = cifs_pick_channel(tcon->ses);
6145
6146 cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
6147
6148 rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
6149 (void **) &req, &total_len);
6150 if (rc)
6151 return rc;
6152
6153 if (smb3_encryption_required(tcon))
6154 flags |= CIFS_TRANSFORM_REQ;
6155
6156 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
6157 req->LockCount = cpu_to_le16(num_lock);
6158
6159 req->PersistentFileId = persist_fid;
6160 req->VolatileFileId = volatile_fid;
6161
6162 count = num_lock * sizeof(struct smb2_lock_element);
6163
6164 iov[0].iov_base = (char *)req;
6165 iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
6166 iov[1].iov_base = (char *)buf;
6167 iov[1].iov_len = count;
6168
6169 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
6170
6171 memset(&rqst, 0, sizeof(struct smb_rqst));
6172 rqst.rq_iov = iov;
6173 rqst.rq_nvec = 2;
6174
6175 if (retries)
6176 smb2_set_replay(server, &rqst);
6177
6178 rc = cifs_send_recv(xid, tcon->ses, server,
6179 &rqst, &resp_buf_type, flags,
6180 &rsp_iov);
6181 cifs_small_buf_release(req);
6182 if (rc) {
6183 cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
6184 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
6185 trace_smb3_lock_err(xid, persist_fid, tcon->tid,
6186 tcon->ses->Suid, rc);
6187 }
6188
6189 if (is_replayable_error(rc) &&
6190 smb2_should_replay(tcon, &retries, &cur_sleep))
6191 goto replay_again;
6192
6193 return rc;
6194 }
6195
6196 int
SMB2_lock(const unsigned int xid,struct cifs_tcon * tcon,const __u64 persist_fid,const __u64 volatile_fid,const __u32 pid,const __u64 length,const __u64 offset,const __u32 lock_flags,const bool wait)6197 SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
6198 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
6199 const __u64 length, const __u64 offset, const __u32 lock_flags,
6200 const bool wait)
6201 {
6202 struct smb2_lock_element lock;
6203
6204 lock.Offset = cpu_to_le64(offset);
6205 lock.Length = cpu_to_le64(length);
6206 lock.Flags = cpu_to_le32(lock_flags);
6207 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
6208 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
6209
6210 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
6211 }
6212
6213 int
SMB2_lease_break(const unsigned int xid,struct cifs_tcon * tcon,__u8 * lease_key,const __le32 lease_state)6214 SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
6215 __u8 *lease_key, const __le32 lease_state)
6216 {
6217 struct smb_rqst rqst;
6218 int rc;
6219 struct smb2_lease_ack *req = NULL;
6220 struct cifs_ses *ses = tcon->ses;
6221 int flags = CIFS_OBREAK_OP;
6222 unsigned int total_len;
6223 struct kvec iov[1];
6224 struct kvec rsp_iov;
6225 int resp_buf_type;
6226 __u64 *please_key_high;
6227 __u64 *please_key_low;
6228 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
6229
6230 cifs_dbg(FYI, "SMB2_lease_break\n");
6231 rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
6232 (void **) &req, &total_len);
6233 if (rc)
6234 return rc;
6235
6236 if (smb3_encryption_required(tcon))
6237 flags |= CIFS_TRANSFORM_REQ;
6238
6239 req->hdr.CreditRequest = cpu_to_le16(1);
6240 req->StructureSize = cpu_to_le16(36);
6241 total_len += 12;
6242
6243 memcpy(req->LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
6244 req->LeaseState = lease_state;
6245
6246 flags |= CIFS_NO_RSP_BUF;
6247
6248 iov[0].iov_base = (char *)req;
6249 iov[0].iov_len = total_len;
6250
6251 memset(&rqst, 0, sizeof(struct smb_rqst));
6252 rqst.rq_iov = iov;
6253 rqst.rq_nvec = 1;
6254
6255 rc = cifs_send_recv(xid, ses, server,
6256 &rqst, &resp_buf_type, flags, &rsp_iov);
6257 cifs_small_buf_release(req);
6258
6259 please_key_low = (__u64 *)lease_key;
6260 please_key_high = (__u64 *)(lease_key+8);
6261 if (rc) {
6262 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
6263 trace_smb3_lease_ack_err(le32_to_cpu(lease_state), tcon->tid,
6264 ses->Suid, *please_key_low, *please_key_high, rc);
6265 cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
6266 } else
6267 trace_smb3_lease_ack_done(le32_to_cpu(lease_state), tcon->tid,
6268 ses->Suid, *please_key_low, *please_key_high);
6269
6270 return rc;
6271 }
6272