1 // SPDX-License-Identifier: LGPL-2.1 2 /* 3 * 4 * Copyright (C) International Business Machines Corp., 2009, 2013 5 * Etersoft, 2012 6 * Author(s): Steve French (sfrench@us.ibm.com) 7 * Pavel Shilovsky (pshilovsky@samba.org) 2012 8 * 9 * Contains the routines for constructing the SMB2 PDUs themselves 10 * 11 */ 12 13 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */ 14 /* Note that there are handle based routines which must be */ 15 /* treated slightly differently for reconnection purposes since we never */ 16 /* want to reuse a stale file handle and only the caller knows the file info */ 17 18 #include <linux/fs.h> 19 #include <linux/kernel.h> 20 #include <linux/vfs.h> 21 #include <linux/task_io_accounting_ops.h> 22 #include <linux/uaccess.h> 23 #include <linux/uuid.h> 24 #include <linux/pagemap.h> 25 #include <linux/xattr.h> 26 #include <linux/netfs.h> 27 #include <trace/events/netfs.h> 28 #include "cifsglob.h" 29 #include "cifsproto.h" 30 #include "cifsacl.h" 31 #include "smb2proto.h" 32 #include "cifs_unicode.h" 33 #include "cifs_debug.h" 34 #include "ntlmssp.h" 35 #include "../common/smbfsctl.h" 36 #include "../common/smb2status.h" 37 #include "smb2glob.h" 38 #include "cifs_spnego.h" 39 #include "smbdirect.h" 40 #include "trace.h" 41 #ifdef CONFIG_CIFS_DFS_UPCALL 42 #include "dfs_cache.h" 43 #endif 44 #include "cached_dir.h" 45 #include "compress.h" 46 #include "fs_context.h" 47 48 /* 49 * The following table defines the expected "StructureSize" of SMB2 requests 50 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests. 51 * 52 * Note that commands are defined in smb2pdu.h in le16 but the array below is 53 * indexed by command in host byte order. 54 */ 55 static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = { 56 /* SMB2_NEGOTIATE */ 36, 57 /* SMB2_SESSION_SETUP */ 25, 58 /* SMB2_LOGOFF */ 4, 59 /* SMB2_TREE_CONNECT */ 9, 60 /* SMB2_TREE_DISCONNECT */ 4, 61 /* SMB2_CREATE */ 57, 62 /* SMB2_CLOSE */ 24, 63 /* SMB2_FLUSH */ 24, 64 /* SMB2_READ */ 49, 65 /* SMB2_WRITE */ 49, 66 /* SMB2_LOCK */ 48, 67 /* SMB2_IOCTL */ 57, 68 /* SMB2_CANCEL */ 4, 69 /* SMB2_ECHO */ 4, 70 /* SMB2_QUERY_DIRECTORY */ 33, 71 /* SMB2_CHANGE_NOTIFY */ 32, 72 /* SMB2_QUERY_INFO */ 41, 73 /* SMB2_SET_INFO */ 33, 74 /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */ 75 }; 76 77 int smb3_encryption_required(const struct cifs_tcon *tcon) 78 { 79 if (!tcon || !tcon->ses) 80 return 0; 81 if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) || 82 (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA)) 83 return 1; 84 if (tcon->seal && 85 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) 86 return 1; 87 if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) && 88 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) 89 return 1; 90 return 0; 91 } 92 93 static void 94 smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd, 95 const struct cifs_tcon *tcon, 96 struct TCP_Server_Info *server) 97 { 98 struct smb3_hdr_req *smb3_hdr; 99 100 shdr->ProtocolId = SMB2_PROTO_NUMBER; 101 shdr->StructureSize = cpu_to_le16(64); 102 shdr->Command = smb2_cmd; 103 104 if (server) { 105 /* After reconnect SMB3 must set ChannelSequence on subsequent reqs */ 106 if (server->dialect >= SMB30_PROT_ID) { 107 smb3_hdr = (struct smb3_hdr_req *)shdr; 108 /* 109 * if primary channel is not set yet, use default 110 * channel for chan sequence num 111 */ 112 if (SERVER_IS_CHAN(server)) 113 smb3_hdr->ChannelSequence = 114 cpu_to_le16(server->primary_server->channel_sequence_num); 115 else 116 smb3_hdr->ChannelSequence = 117 cpu_to_le16(server->channel_sequence_num); 118 } 119 spin_lock(&server->req_lock); 120 /* Request up to 10 credits but don't go over the limit. */ 121 if (server->credits >= server->max_credits) 122 shdr->CreditRequest = cpu_to_le16(0); 123 else 124 shdr->CreditRequest = cpu_to_le16( 125 min_t(int, server->max_credits - 126 server->credits, 10)); 127 spin_unlock(&server->req_lock); 128 } else { 129 shdr->CreditRequest = cpu_to_le16(2); 130 } 131 shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid); 132 133 if (!tcon) 134 goto out; 135 136 /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */ 137 /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ 138 if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) 139 shdr->CreditCharge = cpu_to_le16(1); 140 /* else CreditCharge MBZ */ 141 142 shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid); 143 /* Uid is not converted */ 144 if (tcon->ses) 145 shdr->SessionId = cpu_to_le64(tcon->ses->Suid); 146 147 /* 148 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have 149 * to pass the path on the Open SMB prefixed by \\server\share. 150 * Not sure when we would need to do the augmented path (if ever) and 151 * setting this flag breaks the SMB2 open operation since it is 152 * illegal to send an empty path name (without \\server\share prefix) 153 * when the DFS flag is set in the SMB open header. We could 154 * consider setting the flag on all operations other than open 155 * but it is safer to net set it for now. 156 */ 157 /* if (tcon->share_flags & SHI1005_FLAGS_DFS) 158 shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */ 159 160 if (server && server->sign && !smb3_encryption_required(tcon)) 161 shdr->Flags |= SMB2_FLAGS_SIGNED; 162 out: 163 return; 164 } 165 166 /* helper function for code reuse */ 167 static int 168 cifs_chan_skip_or_disable(struct cifs_ses *ses, 169 struct TCP_Server_Info *server, 170 bool from_reconnect, bool disable_mchan) 171 { 172 struct TCP_Server_Info *pserver; 173 unsigned int chan_index; 174 175 if (SERVER_IS_CHAN(server)) { 176 cifs_dbg(VFS, 177 "server %s does not support multichannel anymore. Skip secondary channel\n", 178 ses->server->hostname); 179 180 spin_lock(&ses->chan_lock); 181 chan_index = cifs_ses_get_chan_index(ses, server); 182 if (chan_index == CIFS_INVAL_CHAN_INDEX) { 183 spin_unlock(&ses->chan_lock); 184 goto skip_terminate; 185 } 186 187 ses->chans[chan_index].server = NULL; 188 server->terminate = true; 189 spin_unlock(&ses->chan_lock); 190 191 /* 192 * the above reference of server by channel 193 * needs to be dropped without holding chan_lock 194 * as cifs_put_tcp_session takes a higher lock 195 * i.e. cifs_tcp_ses_lock 196 */ 197 cifs_put_tcp_session(server, from_reconnect); 198 199 cifs_signal_cifsd_for_reconnect(server, false); 200 201 /* mark primary server as needing reconnect */ 202 pserver = server->primary_server; 203 cifs_signal_cifsd_for_reconnect(pserver, false); 204 skip_terminate: 205 return -EHOSTDOWN; 206 } 207 208 cifs_decrease_secondary_channels(ses, disable_mchan); 209 210 return 0; 211 } 212 213 /* 214 * smb3_update_ses_channels - Synchronize session channels with new configuration 215 * @ses: pointer to the CIFS session structure 216 * @server: pointer to the TCP server info structure 217 * @from_reconnect: indicates if called from reconnect context 218 * @disable_mchan: indicates if called from reconnect to disable multichannel 219 * 220 * Returns 0 on success or error code on failure. 221 * 222 * Outside of reconfigure, this function is called from cifs_mount() during mount 223 * and from reconnect scenarios to adjust channel count when the 224 * server's multichannel support changes. 225 */ 226 int smb3_update_ses_channels(struct cifs_ses *ses, struct TCP_Server_Info *server, 227 bool from_reconnect, bool disable_mchan) 228 { 229 int rc = 0; 230 /* 231 * Manage session channels based on current count vs max: 232 * - If disable requested, skip or disable the channel 233 * - If below max channels, attempt to add more 234 * - If above max channels, skip or disable excess channels 235 */ 236 if (disable_mchan) 237 rc = cifs_chan_skip_or_disable(ses, server, from_reconnect, disable_mchan); 238 else { 239 if (ses->chan_count < ses->chan_max) 240 rc = cifs_try_adding_channels(ses); 241 else if (ses->chan_count > ses->chan_max) 242 rc = cifs_chan_skip_or_disable(ses, server, from_reconnect, disable_mchan); 243 } 244 245 return rc; 246 } 247 248 static int 249 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, 250 struct TCP_Server_Info *server, bool from_reconnect) 251 { 252 struct cifs_ses *ses; 253 int xid; 254 int rc = 0; 255 256 /* 257 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so 258 * check for tcp and smb session status done differently 259 * for those three - in the calling routine. 260 */ 261 if (tcon == NULL) 262 return 0; 263 264 if (smb2_command == SMB2_TREE_CONNECT) 265 return 0; 266 267 spin_lock(&tcon->tc_lock); 268 if (tcon->status == TID_EXITING) { 269 /* 270 * only tree disconnect allowed when disconnecting ... 271 */ 272 if (smb2_command != SMB2_TREE_DISCONNECT) { 273 spin_unlock(&tcon->tc_lock); 274 cifs_tcon_dbg(FYI, "can not send cmd %d while umounting\n", 275 smb2_command); 276 return -ENODEV; 277 } 278 } 279 spin_unlock(&tcon->tc_lock); 280 281 ses = tcon->ses; 282 if (!ses) 283 return smb_EIO(smb_eio_trace_null_pointers); 284 spin_lock(&ses->ses_lock); 285 if (ses->ses_status == SES_EXITING) { 286 spin_unlock(&ses->ses_lock); 287 return smb_EIO(smb_eio_trace_sess_exiting); 288 } 289 spin_unlock(&ses->ses_lock); 290 if (!ses->server || !server) 291 return smb_EIO(smb_eio_trace_null_pointers); 292 293 spin_lock(&server->srv_lock); 294 if (server->tcpStatus == CifsNeedReconnect) { 295 /* 296 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE 297 * here since they are implicitly done when session drops. 298 */ 299 switch (smb2_command) { 300 /* 301 * BB Should we keep oplock break and add flush to exceptions? 302 */ 303 case SMB2_TREE_DISCONNECT: 304 case SMB2_CANCEL: 305 case SMB2_CLOSE: 306 case SMB2_OPLOCK_BREAK: 307 spin_unlock(&server->srv_lock); 308 return -EAGAIN; 309 } 310 } 311 312 /* if server is marked for termination, cifsd will cleanup */ 313 if (server->terminate) { 314 spin_unlock(&server->srv_lock); 315 return -EHOSTDOWN; 316 } 317 spin_unlock(&server->srv_lock); 318 319 again: 320 rc = cifs_wait_for_server_reconnect(server, tcon->retry); 321 if (rc) 322 return rc; 323 324 spin_lock(&ses->chan_lock); 325 if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) { 326 spin_unlock(&ses->chan_lock); 327 return 0; 328 } 329 spin_unlock(&ses->chan_lock); 330 cifs_tcon_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d\n", 331 tcon->ses->chans_need_reconnect, 332 tcon->need_reconnect); 333 334 mutex_lock(&ses->session_mutex); 335 /* 336 * Handle the case where a concurrent thread failed to negotiate or 337 * killed a channel. 338 */ 339 spin_lock(&server->srv_lock); 340 switch (server->tcpStatus) { 341 case CifsExiting: 342 spin_unlock(&server->srv_lock); 343 mutex_unlock(&ses->session_mutex); 344 return -EHOSTDOWN; 345 case CifsNeedReconnect: 346 spin_unlock(&server->srv_lock); 347 mutex_unlock(&ses->session_mutex); 348 if (!tcon->retry) 349 return -EHOSTDOWN; 350 goto again; 351 default: 352 break; 353 } 354 spin_unlock(&server->srv_lock); 355 356 /* 357 * need to prevent multiple threads trying to simultaneously 358 * reconnect the same SMB session 359 */ 360 spin_lock(&ses->ses_lock); 361 spin_lock(&ses->chan_lock); 362 if (!cifs_chan_needs_reconnect(ses, server) && 363 ses->ses_status == SES_GOOD) { 364 spin_unlock(&ses->chan_lock); 365 spin_unlock(&ses->ses_lock); 366 /* this means that we only need to tree connect */ 367 if (tcon->need_reconnect) 368 goto skip_sess_setup; 369 370 mutex_unlock(&ses->session_mutex); 371 goto out; 372 } 373 spin_unlock(&ses->chan_lock); 374 spin_unlock(&ses->ses_lock); 375 376 rc = cifs_negotiate_protocol(0, ses, server); 377 if (rc) { 378 mutex_unlock(&ses->session_mutex); 379 if (!tcon->retry) 380 return -EHOSTDOWN; 381 goto again; 382 } 383 /* 384 * if server stopped supporting multichannel 385 * and the first channel reconnected, disable all the others. 386 */ 387 if (ses->chan_count > 1 && 388 !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) { 389 rc = smb3_update_ses_channels(ses, server, 390 from_reconnect, true /* disable_mchan */); 391 if (rc) { 392 mutex_unlock(&ses->session_mutex); 393 goto out; 394 } 395 } 396 397 rc = cifs_setup_session(0, ses, server, ses->local_nls); 398 if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) { 399 /* 400 * Try alternate password for next reconnect (key rotation 401 * could be enabled on the server e.g.) if an alternate 402 * password is available and the current password is expired, 403 * but do not swap on non pwd related errors like host down 404 */ 405 if (ses->password2) 406 swap(ses->password2, ses->password); 407 } 408 if (rc) { 409 mutex_unlock(&ses->session_mutex); 410 if (rc == -EACCES && !tcon->retry) 411 return -EHOSTDOWN; 412 goto out; 413 } 414 415 skip_sess_setup: 416 if (!tcon->need_reconnect) { 417 mutex_unlock(&ses->session_mutex); 418 goto out; 419 } 420 cifs_mark_open_files_invalid(tcon); 421 if (tcon->use_persistent) 422 tcon->need_reopen_files = true; 423 424 rc = cifs_tree_connect(0, tcon); 425 426 cifs_tcon_dbg(FYI, "reconnect tcon rc = %d\n", rc); 427 if (rc) { 428 /* If sess reconnected but tcon didn't, something strange ... */ 429 mutex_unlock(&ses->session_mutex); 430 cifs_tcon_dbg(VFS, "reconnect tcon failed rc = %d\n", rc); 431 goto out; 432 } 433 434 spin_lock(&ses->ses_lock); 435 if (ses->flags & CIFS_SES_FLAG_SCALE_CHANNELS) { 436 spin_unlock(&ses->ses_lock); 437 mutex_unlock(&ses->session_mutex); 438 goto skip_add_channels; 439 } 440 ses->flags |= CIFS_SES_FLAG_SCALE_CHANNELS; 441 spin_unlock(&ses->ses_lock); 442 443 if (!rc && 444 (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) && 445 server->ops->query_server_interfaces) { 446 /* 447 * query server network interfaces, in case they change. 448 * Also mark the session as pending this update while the query 449 * is in progress. This will be used to avoid calling 450 * smb2_reconnect recursively. 451 */ 452 ses->flags |= CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES; 453 xid = get_xid(); 454 rc = server->ops->query_server_interfaces(xid, tcon, false); 455 free_xid(xid); 456 ses->flags &= ~CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES; 457 458 if (!tcon->ipc && !tcon->dummy) 459 queue_delayed_work(cifsiod_wq, &tcon->query_interfaces, 460 (SMB_INTERFACE_POLL_INTERVAL * HZ)); 461 462 mutex_unlock(&ses->session_mutex); 463 464 if (rc == -EOPNOTSUPP && ses->chan_count > 1) { 465 /* 466 * some servers like Azure SMB server do not advertise 467 * that multichannel has been disabled with server 468 * capabilities, rather return STATUS_NOT_IMPLEMENTED. 469 * treat this as server not supporting multichannel 470 */ 471 472 rc = smb3_update_ses_channels(ses, server, 473 from_reconnect, 474 true /* disable_mchan */); 475 goto skip_add_channels; 476 } else if (rc) 477 cifs_tcon_dbg(FYI, "%s: failed to query server interfaces: %d\n", 478 __func__, rc); 479 480 if (ses->chan_max > ses->chan_count && 481 ses->iface_count && 482 !SERVER_IS_CHAN(server)) { 483 if (ses->chan_count == 1) 484 cifs_server_dbg(VFS, "supports multichannel now\n"); 485 486 smb3_update_ses_channels(ses, server, from_reconnect, 487 false /* disable_mchan */); 488 } 489 } else { 490 mutex_unlock(&ses->session_mutex); 491 } 492 493 skip_add_channels: 494 spin_lock(&ses->ses_lock); 495 ses->flags &= ~CIFS_SES_FLAG_SCALE_CHANNELS; 496 spin_unlock(&ses->ses_lock); 497 498 if (smb2_command != SMB2_INTERNAL_CMD) 499 cifs_queue_server_reconn(server); 500 501 atomic_inc(&tconInfoReconnectCount); 502 out: 503 /* 504 * Check if handle based operation so we know whether we can continue 505 * or not without returning to caller to reset file handle. 506 */ 507 /* 508 * BB Is flush done by server on drop of tcp session? Should we special 509 * case it and skip above? 510 */ 511 switch (smb2_command) { 512 case SMB2_FLUSH: 513 case SMB2_READ: 514 case SMB2_WRITE: 515 case SMB2_LOCK: 516 case SMB2_QUERY_DIRECTORY: 517 case SMB2_CHANGE_NOTIFY: 518 case SMB2_QUERY_INFO: 519 case SMB2_SET_INFO: 520 case SMB2_IOCTL: 521 rc = -EAGAIN; 522 } 523 return rc; 524 } 525 526 static void 527 fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, 528 struct TCP_Server_Info *server, 529 void *buf, 530 unsigned int *total_len) 531 { 532 struct smb2_pdu *spdu = buf; 533 /* lookup word count ie StructureSize from table */ 534 __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)]; 535 536 /* 537 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of 538 * largest operations (Create) 539 */ 540 memset(buf, 0, 256); 541 542 smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server); 543 spdu->StructureSize2 = cpu_to_le16(parmsize); 544 545 *total_len = parmsize + sizeof(struct smb2_hdr); 546 } 547 548 /* 549 * Allocate and return pointer to an SMB request hdr, and set basic 550 * SMB information in the SMB header. If the return code is zero, this 551 * function must have filled in request_buf pointer. 552 */ 553 static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, 554 struct TCP_Server_Info *server, 555 void **request_buf, unsigned int *total_len) 556 { 557 /* BB eventually switch this to SMB2 specific small buf size */ 558 switch (smb2_command) { 559 case SMB2_SET_INFO: 560 case SMB2_QUERY_INFO: 561 *request_buf = cifs_buf_get(); 562 break; 563 default: 564 *request_buf = cifs_small_buf_get(); 565 break; 566 } 567 if (*request_buf == NULL) { 568 /* BB should we add a retry in here if not a writepage? */ 569 return -ENOMEM; 570 } 571 572 fill_small_buf(smb2_command, tcon, server, 573 (struct smb2_hdr *)(*request_buf), 574 total_len); 575 576 if (tcon != NULL) { 577 uint16_t com_code = le16_to_cpu(smb2_command); 578 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]); 579 cifs_stats_inc(&tcon->num_smbs_sent); 580 } 581 582 return 0; 583 } 584 585 static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, 586 struct TCP_Server_Info *server, 587 void **request_buf, unsigned int *total_len) 588 { 589 int rc; 590 591 rc = smb2_reconnect(smb2_command, tcon, server, false); 592 if (rc) 593 return rc; 594 595 return __smb2_plain_req_init(smb2_command, tcon, server, request_buf, 596 total_len); 597 } 598 599 static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon, 600 struct TCP_Server_Info *server, 601 void **request_buf, unsigned int *total_len) 602 { 603 /* 604 * Skip reconnect in one of the following cases: 605 * 1. For FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs 606 * 2. For FSCTL_QUERY_NETWORK_INTERFACE_INFO IOCTL when called from 607 * smb2_reconnect (indicated by CIFS_SES_FLAG_SCALE_CHANNELS ses flag) 608 */ 609 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO || 610 (opcode == FSCTL_QUERY_NETWORK_INTERFACE_INFO && 611 (tcon->ses->flags & CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES))) 612 return __smb2_plain_req_init(SMB2_IOCTL, tcon, server, 613 request_buf, total_len); 614 615 return smb2_plain_req_init(SMB2_IOCTL, tcon, server, 616 request_buf, total_len); 617 } 618 619 /* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */ 620 621 static void 622 build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt) 623 { 624 pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES; 625 pneg_ctxt->DataLength = cpu_to_le16(38); 626 pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1); 627 pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE); 628 get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE); 629 pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512; 630 } 631 632 static void 633 build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt) 634 { 635 pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES; 636 pneg_ctxt->DataLength = 637 cpu_to_le16(sizeof(struct smb2_compression_capabilities_context) 638 - sizeof(struct smb2_neg_context)); 639 pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3); 640 pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77; 641 pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF; 642 pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1; 643 } 644 645 static unsigned int 646 build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt) 647 { 648 unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities); 649 unsigned short num_algs = 1; /* number of signing algorithms sent */ 650 651 pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES; 652 /* 653 * Context Data length must be rounded to multiple of 8 for some servers 654 */ 655 pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) - 656 sizeof(struct smb2_neg_context) + 657 (num_algs * sizeof(u16)), 8)); 658 pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs); 659 pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC); 660 661 ctxt_len += sizeof(__le16) * num_algs; 662 ctxt_len = ALIGN(ctxt_len, 8); 663 return ctxt_len; 664 /* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */ 665 } 666 667 static void 668 build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt) 669 { 670 pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES; 671 if (require_gcm_256) { 672 pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */ 673 pneg_ctxt->CipherCount = cpu_to_le16(1); 674 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM; 675 } else if (enable_gcm_256) { 676 pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */ 677 pneg_ctxt->CipherCount = cpu_to_le16(3); 678 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM; 679 pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM; 680 pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM; 681 } else { 682 pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */ 683 pneg_ctxt->CipherCount = cpu_to_le16(2); 684 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM; 685 pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM; 686 } 687 } 688 689 static unsigned int 690 build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname) 691 { 692 struct nls_table *cp = load_nls_default(); 693 694 pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID; 695 696 /* copy up to max of first 100 bytes of server name to NetName field */ 697 pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp)); 698 /* context size is DataLength + minimal smb2_neg_context */ 699 return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8); 700 } 701 702 static void 703 build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt) 704 { 705 pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE; 706 pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN); 707 /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */ 708 pneg_ctxt->Name[0] = 0x93; 709 pneg_ctxt->Name[1] = 0xAD; 710 pneg_ctxt->Name[2] = 0x25; 711 pneg_ctxt->Name[3] = 0x50; 712 pneg_ctxt->Name[4] = 0x9C; 713 pneg_ctxt->Name[5] = 0xB4; 714 pneg_ctxt->Name[6] = 0x11; 715 pneg_ctxt->Name[7] = 0xE7; 716 pneg_ctxt->Name[8] = 0xB4; 717 pneg_ctxt->Name[9] = 0x23; 718 pneg_ctxt->Name[10] = 0x83; 719 pneg_ctxt->Name[11] = 0xDE; 720 pneg_ctxt->Name[12] = 0x96; 721 pneg_ctxt->Name[13] = 0x8B; 722 pneg_ctxt->Name[14] = 0xCD; 723 pneg_ctxt->Name[15] = 0x7C; 724 } 725 726 static void 727 assemble_neg_contexts(struct smb2_negotiate_req *req, 728 struct TCP_Server_Info *server, unsigned int *total_len) 729 { 730 unsigned int ctxt_len, neg_context_count; 731 struct TCP_Server_Info *pserver; 732 char *pneg_ctxt; 733 char *hostname; 734 735 if (*total_len > 200) { 736 /* In case length corrupted don't want to overrun smb buffer */ 737 cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n"); 738 return; 739 } 740 741 /* 742 * round up total_len of fixed part of SMB3 negotiate request to 8 743 * byte boundary before adding negotiate contexts 744 */ 745 *total_len = ALIGN(*total_len, 8); 746 747 pneg_ctxt = (*total_len) + (char *)req; 748 req->NegotiateContextOffset = cpu_to_le32(*total_len); 749 750 build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt); 751 ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8); 752 *total_len += ctxt_len; 753 pneg_ctxt += ctxt_len; 754 755 build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt); 756 ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8); 757 *total_len += ctxt_len; 758 pneg_ctxt += ctxt_len; 759 760 /* 761 * secondary channels don't have the hostname field populated 762 * use the hostname field in the primary channel instead 763 */ 764 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; 765 cifs_server_lock(pserver); 766 hostname = pserver->hostname; 767 if (hostname && (hostname[0] != 0)) { 768 ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt, 769 hostname); 770 *total_len += ctxt_len; 771 pneg_ctxt += ctxt_len; 772 neg_context_count = 3; 773 } else 774 neg_context_count = 2; 775 cifs_server_unlock(pserver); 776 777 build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt); 778 *total_len += sizeof(struct smb2_posix_neg_context); 779 pneg_ctxt += sizeof(struct smb2_posix_neg_context); 780 neg_context_count++; 781 782 if (server->compression.requested) { 783 build_compression_ctxt((struct smb2_compression_capabilities_context *) 784 pneg_ctxt); 785 ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8); 786 *total_len += ctxt_len; 787 pneg_ctxt += ctxt_len; 788 neg_context_count++; 789 } 790 791 if (enable_negotiate_signing) { 792 ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *) 793 pneg_ctxt); 794 *total_len += ctxt_len; 795 pneg_ctxt += ctxt_len; 796 neg_context_count++; 797 } 798 799 /* check for and add transport_capabilities and signing capabilities */ 800 req->NegotiateContextCount = cpu_to_le16(neg_context_count); 801 802 } 803 804 /* If invalid preauth context warn but use what we requested, SHA-512 */ 805 static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt) 806 { 807 unsigned int len = le16_to_cpu(ctxt->DataLength); 808 809 /* 810 * Caller checked that DataLength remains within SMB boundary. We still 811 * need to confirm that one HashAlgorithms member is accounted for. 812 */ 813 if (len < MIN_PREAUTH_CTXT_DATA_LEN) { 814 pr_warn_once("server sent bad preauth context\n"); 815 return; 816 } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) { 817 pr_warn_once("server sent invalid SaltLength\n"); 818 return; 819 } 820 if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1) 821 pr_warn_once("Invalid SMB3 hash algorithm count\n"); 822 if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512) 823 pr_warn_once("unknown SMB3 hash algorithm\n"); 824 } 825 826 static void decode_compress_ctx(struct TCP_Server_Info *server, 827 struct smb2_compression_capabilities_context *ctxt) 828 { 829 unsigned int len = le16_to_cpu(ctxt->DataLength); 830 __le16 alg; 831 832 server->compression.enabled = false; 833 834 /* 835 * Caller checked that DataLength remains within SMB boundary. We still 836 * need to confirm that one CompressionAlgorithms member is accounted 837 * for. 838 */ 839 if (len < 10) { 840 pr_warn_once("server sent bad compression cntxt\n"); 841 return; 842 } 843 844 if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) { 845 pr_warn_once("invalid SMB3 compress algorithm count\n"); 846 return; 847 } 848 849 alg = ctxt->CompressionAlgorithms[0]; 850 851 /* 'NONE' (0) compressor type is never negotiated */ 852 if (alg == 0 || le16_to_cpu(alg) > 3) { 853 pr_warn_once("invalid compression algorithm '%u'\n", alg); 854 return; 855 } 856 857 server->compression.alg = alg; 858 server->compression.enabled = true; 859 } 860 861 static int decode_encrypt_ctx(struct TCP_Server_Info *server, 862 struct smb2_encryption_neg_context *ctxt) 863 { 864 unsigned int len = le16_to_cpu(ctxt->DataLength); 865 866 cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len); 867 /* 868 * Caller checked that DataLength remains within SMB boundary. We still 869 * need to confirm that one Cipher flexible array member is accounted 870 * for. 871 */ 872 if (len < MIN_ENCRYPT_CTXT_DATA_LEN) { 873 pr_warn_once("server sent bad crypto ctxt len\n"); 874 return -EINVAL; 875 } 876 877 if (le16_to_cpu(ctxt->CipherCount) != 1) { 878 pr_warn_once("Invalid SMB3.11 cipher count\n"); 879 return -EINVAL; 880 } 881 cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0])); 882 if (require_gcm_256) { 883 if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) { 884 cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n"); 885 return -EOPNOTSUPP; 886 } 887 } else if (ctxt->Ciphers[0] == 0) { 888 /* 889 * e.g. if server only supported AES256_CCM (very unlikely) 890 * or server supported no encryption types or had all disabled. 891 * Since GLOBAL_CAP_ENCRYPTION will be not set, in the case 892 * in which mount requested encryption ("seal") checks later 893 * on during tree connection will return proper rc, but if 894 * seal not requested by client, since server is allowed to 895 * return 0 to indicate no supported cipher, we can't fail here 896 */ 897 server->cipher_type = 0; 898 server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION; 899 pr_warn_once("Server does not support requested encryption types\n"); 900 return 0; 901 } else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) && 902 (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) && 903 (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) { 904 /* server returned a cipher we didn't ask for */ 905 pr_warn_once("Invalid SMB3.11 cipher returned\n"); 906 return -EINVAL; 907 } 908 server->cipher_type = ctxt->Ciphers[0]; 909 server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION; 910 return 0; 911 } 912 913 static void decode_signing_ctx(struct TCP_Server_Info *server, 914 struct smb2_signing_capabilities *pctxt) 915 { 916 unsigned int len = le16_to_cpu(pctxt->DataLength); 917 918 /* 919 * Caller checked that DataLength remains within SMB boundary. We still 920 * need to confirm that one SigningAlgorithms flexible array member is 921 * accounted for. 922 */ 923 if ((len < 4) || (len > 16)) { 924 pr_warn_once("server sent bad signing negcontext\n"); 925 return; 926 } 927 if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) { 928 pr_warn_once("Invalid signing algorithm count\n"); 929 return; 930 } 931 if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) { 932 pr_warn_once("unknown signing algorithm\n"); 933 return; 934 } 935 936 server->signing_negotiated = true; 937 server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]); 938 cifs_dbg(FYI, "signing algorithm %d chosen\n", 939 server->signing_algorithm); 940 } 941 942 943 static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp, 944 struct TCP_Server_Info *server, 945 unsigned int len_of_smb) 946 { 947 struct smb2_neg_context *pctx; 948 unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset); 949 unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount); 950 unsigned int len_of_ctxts, i; 951 int rc = 0; 952 953 cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt); 954 if (len_of_smb <= offset) { 955 cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n"); 956 return -EINVAL; 957 } 958 959 len_of_ctxts = len_of_smb - offset; 960 961 for (i = 0; i < ctxt_cnt; i++) { 962 int clen; 963 /* check that offset is not beyond end of SMB */ 964 if (len_of_ctxts < sizeof(struct smb2_neg_context)) 965 break; 966 967 pctx = (struct smb2_neg_context *)(offset + (char *)rsp); 968 clen = sizeof(struct smb2_neg_context) 969 + le16_to_cpu(pctx->DataLength); 970 /* 971 * 2.2.4 SMB2 NEGOTIATE Response 972 * Subsequent negotiate contexts MUST appear at the first 8-byte 973 * aligned offset following the previous negotiate context. 974 */ 975 if (i + 1 != ctxt_cnt) 976 clen = ALIGN(clen, 8); 977 if (clen > len_of_ctxts) 978 break; 979 980 if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES) 981 decode_preauth_context( 982 (struct smb2_preauth_neg_context *)pctx); 983 else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) 984 rc = decode_encrypt_ctx(server, 985 (struct smb2_encryption_neg_context *)pctx); 986 else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES) 987 decode_compress_ctx(server, 988 (struct smb2_compression_capabilities_context *)pctx); 989 else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE) 990 server->posix_ext_supported = true; 991 else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES) 992 decode_signing_ctx(server, 993 (struct smb2_signing_capabilities *)pctx); 994 else 995 cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n", 996 le16_to_cpu(pctx->ContextType)); 997 if (rc) 998 break; 999 1000 offset += clen; 1001 len_of_ctxts -= clen; 1002 } 1003 return rc; 1004 } 1005 1006 static struct create_posix * 1007 create_posix_buf(umode_t mode) 1008 { 1009 struct create_posix *buf; 1010 1011 buf = kzalloc_obj(struct create_posix); 1012 if (!buf) 1013 return NULL; 1014 1015 buf->ccontext.DataOffset = 1016 cpu_to_le16(offsetof(struct create_posix, Mode)); 1017 buf->ccontext.DataLength = cpu_to_le32(4); 1018 buf->ccontext.NameOffset = 1019 cpu_to_le16(offsetof(struct create_posix, Name)); 1020 buf->ccontext.NameLength = cpu_to_le16(16); 1021 1022 /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */ 1023 buf->Name[0] = 0x93; 1024 buf->Name[1] = 0xAD; 1025 buf->Name[2] = 0x25; 1026 buf->Name[3] = 0x50; 1027 buf->Name[4] = 0x9C; 1028 buf->Name[5] = 0xB4; 1029 buf->Name[6] = 0x11; 1030 buf->Name[7] = 0xE7; 1031 buf->Name[8] = 0xB4; 1032 buf->Name[9] = 0x23; 1033 buf->Name[10] = 0x83; 1034 buf->Name[11] = 0xDE; 1035 buf->Name[12] = 0x96; 1036 buf->Name[13] = 0x8B; 1037 buf->Name[14] = 0xCD; 1038 buf->Name[15] = 0x7C; 1039 buf->Mode = cpu_to_le32(mode); 1040 cifs_dbg(FYI, "mode on posix create 0%o\n", mode); 1041 return buf; 1042 } 1043 1044 static int 1045 add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode) 1046 { 1047 unsigned int num = *num_iovec; 1048 1049 iov[num].iov_base = create_posix_buf(mode); 1050 if (mode == ACL_NO_MODE) 1051 cifs_dbg(FYI, "%s: no mode\n", __func__); 1052 if (iov[num].iov_base == NULL) 1053 return -ENOMEM; 1054 iov[num].iov_len = sizeof(struct create_posix); 1055 *num_iovec = num + 1; 1056 return 0; 1057 } 1058 1059 1060 /* 1061 * 1062 * SMB2 Worker functions follow: 1063 * 1064 * The general structure of the worker functions is: 1065 * 1) Call smb2_init (assembles SMB2 header) 1066 * 2) Initialize SMB2 command specific fields in fixed length area of SMB 1067 * 3) Call smb_sendrcv2 (sends request on socket and waits for response) 1068 * 4) Decode SMB2 command specific fields in the fixed length area 1069 * 5) Decode variable length data area (if any for this SMB2 command type) 1070 * 6) Call free smb buffer 1071 * 7) return 1072 * 1073 */ 1074 1075 int 1076 SMB2_negotiate(const unsigned int xid, 1077 struct cifs_ses *ses, 1078 struct TCP_Server_Info *server) 1079 { 1080 struct smb_rqst rqst; 1081 struct smb2_negotiate_req *req; 1082 struct smb2_negotiate_rsp *rsp; 1083 struct kvec iov[1]; 1084 struct kvec rsp_iov; 1085 int rc; 1086 int resp_buftype; 1087 int blob_offset, blob_length; 1088 char *security_blob; 1089 int flags = CIFS_NEG_OP; 1090 unsigned int total_len; 1091 1092 cifs_dbg(FYI, "Negotiate protocol\n"); 1093 1094 if (!server) { 1095 WARN(1, "%s: server is NULL!\n", __func__); 1096 return smb_EIO(smb_eio_trace_null_pointers); 1097 } 1098 1099 rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server, 1100 (void **) &req, &total_len); 1101 if (rc) 1102 return rc; 1103 1104 req->hdr.SessionId = 0; 1105 1106 memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE); 1107 memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE); 1108 1109 if (strcmp(server->vals->version_string, 1110 SMB3ANY_VERSION_STRING) == 0) { 1111 req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID); 1112 req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID); 1113 req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID); 1114 req->DialectCount = cpu_to_le16(3); 1115 total_len += 6; 1116 } else if (strcmp(server->vals->version_string, 1117 SMBDEFAULT_VERSION_STRING) == 0) { 1118 req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); 1119 req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID); 1120 req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); 1121 req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID); 1122 req->DialectCount = cpu_to_le16(4); 1123 total_len += 8; 1124 } else { 1125 /* otherwise send specific dialect */ 1126 req->Dialects[0] = cpu_to_le16(server->vals->protocol_id); 1127 req->DialectCount = cpu_to_le16(1); 1128 total_len += 2; 1129 } 1130 1131 /* only one of SMB2 signing flags may be set in SMB2 request */ 1132 if (ses->sign) 1133 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); 1134 else if (global_secflags & CIFSSEC_MAY_SIGN) 1135 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); 1136 else 1137 req->SecurityMode = 0; 1138 1139 req->Capabilities = cpu_to_le32(server->vals->req_capabilities); 1140 req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL); 1141 1142 /* ClientGUID must be zero for SMB2.02 dialect */ 1143 if (server->vals->protocol_id == SMB20_PROT_ID) 1144 memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE); 1145 else { 1146 memcpy(req->ClientGUID, server->client_guid, 1147 SMB2_CLIENT_GUID_SIZE); 1148 if ((server->vals->protocol_id == SMB311_PROT_ID) || 1149 (strcmp(server->vals->version_string, 1150 SMB3ANY_VERSION_STRING) == 0) || 1151 (strcmp(server->vals->version_string, 1152 SMBDEFAULT_VERSION_STRING) == 0)) 1153 assemble_neg_contexts(req, server, &total_len); 1154 } 1155 iov[0].iov_base = (char *)req; 1156 iov[0].iov_len = total_len; 1157 1158 memset(&rqst, 0, sizeof(struct smb_rqst)); 1159 rqst.rq_iov = iov; 1160 rqst.rq_nvec = 1; 1161 1162 rc = cifs_send_recv(xid, ses, server, 1163 &rqst, &resp_buftype, flags, &rsp_iov); 1164 cifs_small_buf_release(req); 1165 rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base; 1166 /* 1167 * No tcon so can't do 1168 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); 1169 */ 1170 if (rc == -EOPNOTSUPP) { 1171 cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n"); 1172 goto neg_exit; 1173 } else if (rc != 0) 1174 goto neg_exit; 1175 1176 u16 dialect = le16_to_cpu(rsp->DialectRevision); 1177 if (strcmp(server->vals->version_string, 1178 SMB3ANY_VERSION_STRING) == 0) { 1179 switch (dialect) { 1180 case SMB20_PROT_ID: 1181 cifs_server_dbg(VFS, 1182 "SMB2 dialect returned but not requested\n"); 1183 rc = smb_EIO2(smb_eio_trace_neg_unreq_dialect, dialect, 3); 1184 goto neg_exit; 1185 case SMB21_PROT_ID: 1186 cifs_server_dbg(VFS, 1187 "SMB2.1 dialect returned but not requested\n"); 1188 rc = smb_EIO2(smb_eio_trace_neg_unreq_dialect, dialect, 3); 1189 goto neg_exit; 1190 case SMB311_PROT_ID: 1191 /* ops set to 3.0 by default for default so update */ 1192 server->ops = &smb311_operations; 1193 server->vals = &smb311_values; 1194 break; 1195 default: 1196 break; 1197 } 1198 } else if (strcmp(server->vals->version_string, 1199 SMBDEFAULT_VERSION_STRING) == 0) { 1200 switch (dialect) { 1201 case SMB20_PROT_ID: 1202 cifs_server_dbg(VFS, 1203 "SMB2 dialect returned but not requested\n"); 1204 rc = smb_EIO2(smb_eio_trace_neg_unreq_dialect, dialect, 0); 1205 goto neg_exit; 1206 case SMB21_PROT_ID: 1207 /* ops set to 3.0 by default for default so update */ 1208 server->ops = &smb21_operations; 1209 server->vals = &smb21_values; 1210 break; 1211 case SMB311_PROT_ID: 1212 server->ops = &smb311_operations; 1213 server->vals = &smb311_values; 1214 break; 1215 default: 1216 break; 1217 } 1218 } else if (dialect != server->vals->protocol_id) { 1219 /* if requested single dialect ensure returned dialect matched */ 1220 cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n", 1221 dialect); 1222 rc = smb_EIO2(smb_eio_trace_neg_unreq_dialect, 1223 dialect, server->vals->protocol_id); 1224 goto neg_exit; 1225 } 1226 1227 cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode); 1228 1229 switch (dialect) { 1230 case SMB20_PROT_ID: 1231 cifs_dbg(FYI, "negotiated smb2.0 dialect\n"); 1232 break; 1233 case SMB21_PROT_ID: 1234 cifs_dbg(FYI, "negotiated smb2.1 dialect\n"); 1235 break; 1236 case SMB30_PROT_ID: 1237 cifs_dbg(FYI, "negotiated smb3.0 dialect\n"); 1238 break; 1239 case SMB302_PROT_ID: 1240 cifs_dbg(FYI, "negotiated smb3.02 dialect\n"); 1241 break; 1242 case SMB311_PROT_ID: 1243 cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n"); 1244 break; 1245 default: 1246 cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n", 1247 dialect); 1248 rc = smb_EIO1(smb_eio_trace_neg_inval_dialect, dialect); 1249 goto neg_exit; 1250 } 1251 1252 rc = 0; 1253 server->dialect = dialect; 1254 1255 /* 1256 * Keep a copy of the hash after negprot. This hash will be 1257 * the starting hash value for all sessions made from this 1258 * server. 1259 */ 1260 memcpy(server->preauth_sha_hash, ses->preauth_sha_hash, 1261 SMB2_PREAUTH_HASH_SIZE); 1262 1263 /* SMB2 only has an extended negflavor */ 1264 server->negflavor = CIFS_NEGFLAVOR_EXTENDED; 1265 /* set it to the maximum buffer size value we can send with 1 credit */ 1266 server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize), 1267 SMB2_MAX_BUFFER_SIZE); 1268 server->max_read = le32_to_cpu(rsp->MaxReadSize); 1269 server->max_write = le32_to_cpu(rsp->MaxWriteSize); 1270 server->sec_mode = le16_to_cpu(rsp->SecurityMode); 1271 if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode) 1272 cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n", 1273 server->sec_mode); 1274 server->capabilities = le32_to_cpu(rsp->Capabilities); 1275 /* Internal types */ 1276 server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES; 1277 1278 /* 1279 * SMB3.0 supports only 1 cipher and doesn't have a encryption neg context 1280 * Set the cipher type manually. 1281 */ 1282 if ((server->dialect == SMB30_PROT_ID || 1283 server->dialect == SMB302_PROT_ID) && 1284 (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) 1285 server->cipher_type = SMB2_ENCRYPTION_AES128_CCM; 1286 1287 security_blob = smb2_get_data_area_len(&blob_offset, &blob_length, 1288 (struct smb2_hdr *)rsp); 1289 /* 1290 * See MS-SMB2 section 2.2.4: if no blob, client picks default which 1291 * for us will be 1292 * ses->sectype = RawNTLMSSP; 1293 * but for time being this is our only auth choice so doesn't matter. 1294 * We just found a server which sets blob length to zero expecting raw. 1295 */ 1296 if (blob_length == 0) { 1297 cifs_dbg(FYI, "missing security blob on negprot\n"); 1298 server->sec_ntlmssp = true; 1299 } 1300 1301 rc = cifs_enable_signing(server, ses->sign); 1302 if (rc) 1303 goto neg_exit; 1304 if (blob_length) { 1305 rc = decode_negTokenInit(security_blob, blob_length, server); 1306 if (rc == 1) 1307 rc = 0; 1308 else if (rc == 0) 1309 rc = smb_EIO1(smb_eio_trace_neg_decode_token, rc); 1310 } 1311 1312 if (server->dialect == SMB311_PROT_ID) { 1313 if (rsp->NegotiateContextCount) 1314 rc = smb311_decode_neg_context(rsp, server, 1315 rsp_iov.iov_len); 1316 else 1317 cifs_server_dbg(VFS, "Missing expected negotiate contexts\n"); 1318 } 1319 1320 if (server->cipher_type && !rc) 1321 rc = smb3_crypto_aead_allocate(server); 1322 neg_exit: 1323 free_rsp_buf(resp_buftype, rsp); 1324 return rc; 1325 } 1326 1327 int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) 1328 { 1329 int rc; 1330 struct validate_negotiate_info_req *pneg_inbuf; 1331 struct validate_negotiate_info_rsp *pneg_rsp = NULL; 1332 u32 rsplen; 1333 u32 inbuflen; /* max of 4 dialects */ 1334 struct TCP_Server_Info *server = tcon->ses->server; 1335 1336 cifs_dbg(FYI, "validate negotiate\n"); 1337 1338 /* In SMB3.11 preauth integrity supersedes validate negotiate */ 1339 if (server->dialect == SMB311_PROT_ID) 1340 return 0; 1341 1342 /* 1343 * validation ioctl must be signed, so no point sending this if we 1344 * can not sign it (ie are not known user). Even if signing is not 1345 * required (enabled but not negotiated), in those cases we selectively 1346 * sign just this, the first and only signed request on a connection. 1347 * Having validation of negotiate info helps reduce attack vectors. 1348 */ 1349 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) 1350 return 0; /* validation requires signing */ 1351 1352 if (tcon->ses->user_name == NULL) { 1353 cifs_dbg(FYI, "Can't validate negotiate: null user mount\n"); 1354 return 0; /* validation requires signing */ 1355 } 1356 1357 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL) 1358 cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n"); 1359 1360 pneg_inbuf = kmalloc_obj(*pneg_inbuf, GFP_NOFS); 1361 if (!pneg_inbuf) 1362 return -ENOMEM; 1363 1364 pneg_inbuf->Capabilities = 1365 cpu_to_le32(server->vals->req_capabilities); 1366 pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL); 1367 1368 memcpy(pneg_inbuf->Guid, server->client_guid, 1369 SMB2_CLIENT_GUID_SIZE); 1370 1371 if (tcon->ses->sign) 1372 pneg_inbuf->SecurityMode = 1373 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); 1374 else if (global_secflags & CIFSSEC_MAY_SIGN) 1375 pneg_inbuf->SecurityMode = 1376 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); 1377 else 1378 pneg_inbuf->SecurityMode = 0; 1379 1380 1381 if (strcmp(server->vals->version_string, 1382 SMB3ANY_VERSION_STRING) == 0) { 1383 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID); 1384 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID); 1385 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID); 1386 pneg_inbuf->DialectCount = cpu_to_le16(3); 1387 /* SMB 2.1 not included so subtract one dialect from len */ 1388 inbuflen = sizeof(*pneg_inbuf) - 1389 (sizeof(pneg_inbuf->Dialects[0])); 1390 } else if (strcmp(server->vals->version_string, 1391 SMBDEFAULT_VERSION_STRING) == 0) { 1392 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); 1393 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID); 1394 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); 1395 pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID); 1396 pneg_inbuf->DialectCount = cpu_to_le16(4); 1397 /* structure is big enough for 4 dialects */ 1398 inbuflen = sizeof(*pneg_inbuf); 1399 } else { 1400 /* otherwise specific dialect was requested */ 1401 pneg_inbuf->Dialects[0] = 1402 cpu_to_le16(server->vals->protocol_id); 1403 pneg_inbuf->DialectCount = cpu_to_le16(1); 1404 /* structure is big enough for 4 dialects, sending only 1 */ 1405 inbuflen = sizeof(*pneg_inbuf) - 1406 sizeof(pneg_inbuf->Dialects[0]) * 3; 1407 } 1408 1409 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, 1410 FSCTL_VALIDATE_NEGOTIATE_INFO, 1411 (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize, 1412 (char **)&pneg_rsp, &rsplen); 1413 if (rc == -EOPNOTSUPP) { 1414 /* 1415 * Old Windows versions or Netapp SMB server can return 1416 * not supported error. Client should accept it. 1417 */ 1418 cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n"); 1419 rc = 0; 1420 goto out_free_inbuf; 1421 } else if (rc != 0) { 1422 cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n", 1423 rc); 1424 rc = smb_EIO1(smb_eio_trace_neg_info_fail, rc); 1425 goto out_free_inbuf; 1426 } 1427 1428 if (rsplen != sizeof(*pneg_rsp)) { 1429 cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n", 1430 rsplen); 1431 1432 /* relax check since Mac returns max bufsize allowed on ioctl */ 1433 if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp)) { 1434 rc = smb_EIO1(smb_eio_trace_neg_bad_rsplen, rsplen); 1435 goto out_free_rsp; 1436 } 1437 } 1438 1439 /* check validate negotiate info response matches what we got earlier */ 1440 u16 dialect = le16_to_cpu(pneg_rsp->Dialect); 1441 1442 if (dialect != server->dialect) { 1443 rc = smb_EIO2(smb_eio_trace_neg_info_dialect, 1444 dialect, server->dialect); 1445 goto vneg_out; 1446 } 1447 1448 u16 sec_mode = le16_to_cpu(pneg_rsp->SecurityMode); 1449 1450 if (sec_mode != server->sec_mode) { 1451 rc = smb_EIO2(smb_eio_trace_neg_info_sec_mode, 1452 sec_mode, server->sec_mode); 1453 goto vneg_out; 1454 } 1455 1456 /* do not validate server guid because not saved at negprot time yet */ 1457 u32 caps = le32_to_cpu(pneg_rsp->Capabilities); 1458 1459 if ((caps | SMB2_NT_FIND | 1460 SMB2_LARGE_FILES) != server->capabilities) { 1461 rc = smb_EIO2(smb_eio_trace_neg_info_caps, 1462 caps, server->capabilities); 1463 goto vneg_out; 1464 } 1465 1466 /* validate negotiate successful */ 1467 rc = 0; 1468 cifs_dbg(FYI, "validate negotiate info successful\n"); 1469 goto out_free_rsp; 1470 1471 vneg_out: 1472 cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n"); 1473 out_free_rsp: 1474 kfree(pneg_rsp); 1475 out_free_inbuf: 1476 kfree(pneg_inbuf); 1477 return rc; 1478 } 1479 1480 enum securityEnum 1481 smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) 1482 { 1483 switch (requested) { 1484 case Kerberos: 1485 case RawNTLMSSP: 1486 return requested; 1487 case NTLMv2: 1488 return RawNTLMSSP; 1489 case Unspecified: 1490 if (server->sec_ntlmssp && 1491 (global_secflags & CIFSSEC_MAY_NTLMSSP)) 1492 return RawNTLMSSP; 1493 if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) && 1494 (global_secflags & CIFSSEC_MAY_KRB5)) 1495 return Kerberos; 1496 fallthrough; 1497 default: 1498 return Unspecified; 1499 } 1500 } 1501 1502 struct SMB2_sess_data { 1503 unsigned int xid; 1504 struct cifs_ses *ses; 1505 struct TCP_Server_Info *server; 1506 struct nls_table *nls_cp; 1507 void (*func)(struct SMB2_sess_data *); 1508 int result; 1509 u64 previous_session; 1510 1511 /* we will send the SMB in three pieces: 1512 * a fixed length beginning part, an optional 1513 * SPNEGO blob (which can be zero length), and a 1514 * last part which will include the strings 1515 * and rest of bcc area. This allows us to avoid 1516 * a large buffer 17K allocation 1517 */ 1518 int buf0_type; 1519 struct kvec iov[2]; 1520 }; 1521 1522 static int 1523 SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data) 1524 { 1525 int rc; 1526 struct cifs_ses *ses = sess_data->ses; 1527 struct TCP_Server_Info *server = sess_data->server; 1528 struct smb2_sess_setup_req *req; 1529 unsigned int total_len; 1530 bool is_binding = false; 1531 1532 rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server, 1533 (void **) &req, 1534 &total_len); 1535 if (rc) 1536 return rc; 1537 1538 spin_lock(&ses->ses_lock); 1539 is_binding = (ses->ses_status == SES_GOOD); 1540 spin_unlock(&ses->ses_lock); 1541 1542 if (is_binding) { 1543 req->hdr.SessionId = cpu_to_le64(ses->Suid); 1544 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 1545 req->PreviousSessionId = 0; 1546 req->Flags = SMB2_SESSION_REQ_FLAG_BINDING; 1547 cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid); 1548 } else { 1549 /* First session, not a reauthenticate */ 1550 req->hdr.SessionId = 0; 1551 /* 1552 * if reconnect, we need to send previous sess id 1553 * otherwise it is 0 1554 */ 1555 req->PreviousSessionId = cpu_to_le64(sess_data->previous_session); 1556 req->Flags = 0; /* MBZ */ 1557 cifs_dbg(FYI, "Fresh session. Previous: %llx\n", 1558 sess_data->previous_session); 1559 } 1560 1561 /* enough to enable echos and oplocks and one max size write */ 1562 if (server->credits >= server->max_credits) 1563 req->hdr.CreditRequest = cpu_to_le16(0); 1564 else 1565 req->hdr.CreditRequest = cpu_to_le16( 1566 min_t(int, server->max_credits - 1567 server->credits, 130)); 1568 1569 /* only one of SMB2 signing flags may be set in SMB2 request */ 1570 if (server->sign) 1571 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED; 1572 else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */ 1573 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED; 1574 else 1575 req->SecurityMode = 0; 1576 1577 #ifdef CONFIG_CIFS_DFS_UPCALL 1578 req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS); 1579 #else 1580 req->Capabilities = 0; 1581 #endif /* DFS_UPCALL */ 1582 1583 req->Channel = 0; /* MBZ */ 1584 1585 sess_data->iov[0].iov_base = (char *)req; 1586 /* 1 for pad */ 1587 sess_data->iov[0].iov_len = total_len - 1; 1588 /* 1589 * This variable will be used to clear the buffer 1590 * allocated above in case of any error in the calling function. 1591 */ 1592 sess_data->buf0_type = CIFS_SMALL_BUFFER; 1593 1594 return 0; 1595 } 1596 1597 static void 1598 SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data) 1599 { 1600 struct kvec *iov = sess_data->iov; 1601 1602 /* iov[1] is already freed by caller */ 1603 if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base) 1604 memzero_explicit(iov[0].iov_base, iov[0].iov_len); 1605 1606 free_rsp_buf(sess_data->buf0_type, iov[0].iov_base); 1607 sess_data->buf0_type = CIFS_NO_BUFFER; 1608 } 1609 1610 static int 1611 SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data) 1612 { 1613 int rc; 1614 struct smb_rqst rqst; 1615 struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base; 1616 struct kvec rsp_iov = { NULL, 0 }; 1617 1618 /* Testing shows that buffer offset must be at location of Buffer[0] */ 1619 req->SecurityBufferOffset = 1620 cpu_to_le16(sizeof(struct smb2_sess_setup_req)); 1621 req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len); 1622 1623 memset(&rqst, 0, sizeof(struct smb_rqst)); 1624 rqst.rq_iov = sess_data->iov; 1625 rqst.rq_nvec = 2; 1626 1627 /* BB add code to build os and lm fields */ 1628 rc = cifs_send_recv(sess_data->xid, sess_data->ses, 1629 sess_data->server, 1630 &rqst, 1631 &sess_data->buf0_type, 1632 CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov); 1633 cifs_small_buf_release(sess_data->iov[0].iov_base); 1634 if (rc == 0) 1635 sess_data->ses->expired_pwd = false; 1636 else if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) { 1637 if (sess_data->ses->expired_pwd == false) 1638 trace_smb3_key_expired(sess_data->server->hostname, 1639 sess_data->ses->user_name, 1640 sess_data->server->conn_id, 1641 &sess_data->server->dstaddr, rc); 1642 sess_data->ses->expired_pwd = true; 1643 } 1644 1645 memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec)); 1646 1647 return rc; 1648 } 1649 1650 static int 1651 SMB2_sess_establish_session(struct SMB2_sess_data *sess_data) 1652 { 1653 int rc = 0; 1654 struct cifs_ses *ses = sess_data->ses; 1655 struct TCP_Server_Info *server = sess_data->server; 1656 1657 cifs_server_lock(server); 1658 if (server->ops->generate_signingkey) { 1659 rc = server->ops->generate_signingkey(ses, server); 1660 if (rc) { 1661 cifs_dbg(FYI, 1662 "SMB3 session key generation failed\n"); 1663 cifs_server_unlock(server); 1664 return rc; 1665 } 1666 } 1667 if (!server->session_estab) { 1668 server->sequence_number = 0x2; 1669 server->session_estab = true; 1670 } 1671 cifs_server_unlock(server); 1672 1673 cifs_dbg(FYI, "SMB2/3 session established successfully\n"); 1674 return rc; 1675 } 1676 1677 #ifdef CONFIG_CIFS_UPCALL 1678 static void 1679 SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) 1680 { 1681 int rc; 1682 struct cifs_ses *ses = sess_data->ses; 1683 struct TCP_Server_Info *server = sess_data->server; 1684 struct cifs_spnego_msg *msg; 1685 struct key *spnego_key = NULL; 1686 struct smb2_sess_setup_rsp *rsp = NULL; 1687 bool is_binding = false; 1688 1689 rc = SMB2_sess_alloc_buffer(sess_data); 1690 if (rc) 1691 goto out; 1692 1693 spnego_key = cifs_get_spnego_key(ses, server); 1694 if (IS_ERR(spnego_key)) { 1695 rc = PTR_ERR(spnego_key); 1696 spnego_key = NULL; 1697 goto out; 1698 } 1699 1700 msg = spnego_key->payload.data[0]; 1701 /* 1702 * check version field to make sure that cifs.upcall is 1703 * sending us a response in an expected form 1704 */ 1705 if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { 1706 cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n", 1707 CIFS_SPNEGO_UPCALL_VERSION, msg->version); 1708 rc = -EKEYREJECTED; 1709 goto out_put_spnego_key; 1710 } 1711 1712 spin_lock(&ses->ses_lock); 1713 is_binding = (ses->ses_status == SES_GOOD); 1714 spin_unlock(&ses->ses_lock); 1715 1716 /* 1717 * Per MS-SMB2 3.2.5.3, Session.SessionKey is the first 16 bytes of the 1718 * GSS cryptographic key, right-padded with zero bytes if shorter. 1719 * Allocate at least SMB2_NTLMV2_SESSKEY_SIZE bytes (zeroed) so the KDF 1720 * input buffer is always valid for HMAC-SHA256 even with deprecated 1721 * Kerberos enctypes that return a short session key. 1722 */ 1723 if (unlikely(msg->sesskey_len < SMB2_NTLMV2_SESSKEY_SIZE)) 1724 cifs_dbg(VFS, 1725 "short GSS session key (%u bytes); zero-padding per MS-SMB2 3.2.5.3\n", 1726 msg->sesskey_len); 1727 1728 kfree_sensitive(ses->auth_key.response); 1729 ses->auth_key.len = max_t(unsigned int, msg->sesskey_len, 1730 SMB2_NTLMV2_SESSKEY_SIZE); 1731 ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL); 1732 if (!ses->auth_key.response) { 1733 cifs_dbg(VFS, "%s: can't allocate (%u bytes) memory\n", 1734 __func__, ses->auth_key.len); 1735 ses->auth_key.len = 0; 1736 rc = -ENOMEM; 1737 goto out_put_spnego_key; 1738 } 1739 memcpy(ses->auth_key.response, msg->data, msg->sesskey_len); 1740 1741 sess_data->iov[1].iov_base = msg->data + msg->sesskey_len; 1742 sess_data->iov[1].iov_len = msg->secblob_len; 1743 1744 rc = SMB2_sess_sendreceive(sess_data); 1745 if (rc) 1746 goto out_put_spnego_key; 1747 1748 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; 1749 /* keep session id and flags if binding */ 1750 if (!is_binding) { 1751 ses->Suid = le64_to_cpu(rsp->hdr.SessionId); 1752 ses->session_flags = le16_to_cpu(rsp->SessionFlags); 1753 } 1754 1755 rc = SMB2_sess_establish_session(sess_data); 1756 out_put_spnego_key: 1757 key_invalidate(spnego_key); 1758 key_put(spnego_key); 1759 if (rc) { 1760 kfree_sensitive(ses->auth_key.response); 1761 ses->auth_key.response = NULL; 1762 ses->auth_key.len = 0; 1763 } 1764 out: 1765 sess_data->result = rc; 1766 sess_data->func = NULL; 1767 SMB2_sess_free_buffer(sess_data); 1768 } 1769 #else 1770 static void 1771 SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) 1772 { 1773 cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n"); 1774 sess_data->result = -EOPNOTSUPP; 1775 sess_data->func = NULL; 1776 } 1777 #endif 1778 1779 static void 1780 SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data); 1781 1782 static void 1783 SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data) 1784 { 1785 int rc; 1786 struct cifs_ses *ses = sess_data->ses; 1787 struct TCP_Server_Info *server = sess_data->server; 1788 struct smb2_sess_setup_rsp *rsp = NULL; 1789 unsigned char *ntlmssp_blob = NULL; 1790 bool use_spnego = false; /* else use raw ntlmssp */ 1791 u16 blob_length = 0; 1792 bool is_binding = false; 1793 1794 /* 1795 * If memory allocation is successful, caller of this function 1796 * frees it. 1797 */ 1798 ses->ntlmssp = kmalloc_obj(struct ntlmssp_auth); 1799 if (!ses->ntlmssp) { 1800 rc = -ENOMEM; 1801 goto out_err; 1802 } 1803 ses->ntlmssp->sesskey_per_smbsess = true; 1804 1805 rc = SMB2_sess_alloc_buffer(sess_data); 1806 if (rc) 1807 goto out_err; 1808 1809 rc = build_ntlmssp_smb3_negotiate_blob(&ntlmssp_blob, 1810 &blob_length, ses, server, 1811 sess_data->nls_cp); 1812 if (rc) 1813 goto out; 1814 1815 if (use_spnego) { 1816 /* BB eventually need to add this */ 1817 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n"); 1818 rc = -EOPNOTSUPP; 1819 goto out; 1820 } 1821 sess_data->iov[1].iov_base = ntlmssp_blob; 1822 sess_data->iov[1].iov_len = blob_length; 1823 1824 rc = SMB2_sess_sendreceive(sess_data); 1825 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; 1826 1827 /* If true, rc here is expected and not an error */ 1828 if (sess_data->buf0_type != CIFS_NO_BUFFER && 1829 rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) 1830 rc = 0; 1831 1832 if (rc) 1833 goto out; 1834 1835 u16 boff = le16_to_cpu(rsp->SecurityBufferOffset); 1836 1837 if (offsetof(struct smb2_sess_setup_rsp, Buffer) != boff) { 1838 cifs_dbg(VFS, "Invalid security buffer offset %d\n", boff); 1839 rc = smb_EIO1(smb_eio_trace_sess_buf_off, boff); 1840 goto out; 1841 } 1842 rc = decode_ntlmssp_challenge(rsp->Buffer, 1843 le16_to_cpu(rsp->SecurityBufferLength), ses); 1844 if (rc) 1845 goto out; 1846 1847 cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n"); 1848 1849 spin_lock(&ses->ses_lock); 1850 is_binding = (ses->ses_status == SES_GOOD); 1851 spin_unlock(&ses->ses_lock); 1852 1853 /* keep existing ses id and flags if binding */ 1854 if (!is_binding) { 1855 ses->Suid = le64_to_cpu(rsp->hdr.SessionId); 1856 ses->session_flags = le16_to_cpu(rsp->SessionFlags); 1857 } 1858 1859 out: 1860 kfree_sensitive(ntlmssp_blob); 1861 SMB2_sess_free_buffer(sess_data); 1862 if (!rc) { 1863 sess_data->result = 0; 1864 sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate; 1865 return; 1866 } 1867 out_err: 1868 kfree_sensitive(ses->ntlmssp); 1869 ses->ntlmssp = NULL; 1870 sess_data->result = rc; 1871 sess_data->func = NULL; 1872 } 1873 1874 static void 1875 SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data) 1876 { 1877 int rc; 1878 struct cifs_ses *ses = sess_data->ses; 1879 struct TCP_Server_Info *server = sess_data->server; 1880 struct smb2_sess_setup_req *req; 1881 struct smb2_sess_setup_rsp *rsp = NULL; 1882 unsigned char *ntlmssp_blob = NULL; 1883 bool use_spnego = false; /* else use raw ntlmssp */ 1884 u16 blob_length = 0; 1885 bool is_binding = false; 1886 1887 rc = SMB2_sess_alloc_buffer(sess_data); 1888 if (rc) 1889 goto out; 1890 1891 req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base; 1892 req->hdr.SessionId = cpu_to_le64(ses->Suid); 1893 1894 rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, 1895 ses, server, 1896 sess_data->nls_cp); 1897 if (rc) { 1898 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc); 1899 goto out; 1900 } 1901 1902 if (use_spnego) { 1903 /* BB eventually need to add this */ 1904 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n"); 1905 rc = -EOPNOTSUPP; 1906 goto out; 1907 } 1908 sess_data->iov[1].iov_base = ntlmssp_blob; 1909 sess_data->iov[1].iov_len = blob_length; 1910 1911 rc = SMB2_sess_sendreceive(sess_data); 1912 if (rc) 1913 goto out; 1914 1915 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; 1916 1917 spin_lock(&ses->ses_lock); 1918 is_binding = (ses->ses_status == SES_GOOD); 1919 spin_unlock(&ses->ses_lock); 1920 1921 /* keep existing ses id and flags if binding */ 1922 if (!is_binding) { 1923 ses->Suid = le64_to_cpu(rsp->hdr.SessionId); 1924 ses->session_flags = le16_to_cpu(rsp->SessionFlags); 1925 } 1926 1927 rc = SMB2_sess_establish_session(sess_data); 1928 #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS 1929 if (ses->server->dialect < SMB30_PROT_ID) { 1930 cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__); 1931 /* 1932 * The session id is opaque in terms of endianness, so we can't 1933 * print it as a long long. we dump it as we got it on the wire 1934 */ 1935 cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid), 1936 &ses->Suid); 1937 cifs_dbg(VFS, "Session Key %*ph\n", 1938 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response); 1939 cifs_dbg(VFS, "Signing Key %*ph\n", 1940 SMB3_SIGN_KEY_SIZE, ses->auth_key.response); 1941 } 1942 #endif 1943 out: 1944 kfree_sensitive(ntlmssp_blob); 1945 SMB2_sess_free_buffer(sess_data); 1946 kfree_sensitive(ses->ntlmssp); 1947 ses->ntlmssp = NULL; 1948 sess_data->result = rc; 1949 sess_data->func = NULL; 1950 } 1951 1952 static int 1953 SMB2_select_sec(struct SMB2_sess_data *sess_data) 1954 { 1955 int type; 1956 struct cifs_ses *ses = sess_data->ses; 1957 struct TCP_Server_Info *server = sess_data->server; 1958 1959 type = smb2_select_sectype(server, ses->sectype); 1960 cifs_dbg(FYI, "sess setup type %d\n", type); 1961 if (type == Unspecified) { 1962 cifs_dbg(VFS, "Unable to select appropriate authentication method!\n"); 1963 return -EINVAL; 1964 } 1965 1966 switch (type) { 1967 case Kerberos: 1968 sess_data->func = SMB2_auth_kerberos; 1969 break; 1970 case RawNTLMSSP: 1971 sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate; 1972 break; 1973 default: 1974 cifs_dbg(VFS, "secType %d not supported!\n", type); 1975 return -EOPNOTSUPP; 1976 } 1977 1978 return 0; 1979 } 1980 1981 int 1982 SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, 1983 struct TCP_Server_Info *server, 1984 const struct nls_table *nls_cp) 1985 { 1986 int rc = 0; 1987 struct SMB2_sess_data *sess_data; 1988 1989 cifs_dbg(FYI, "Session Setup\n"); 1990 1991 if (!server) { 1992 WARN(1, "%s: server is NULL!\n", __func__); 1993 return smb_EIO(smb_eio_trace_null_pointers); 1994 } 1995 1996 sess_data = kzalloc_obj(struct SMB2_sess_data); 1997 if (!sess_data) 1998 return -ENOMEM; 1999 2000 sess_data->xid = xid; 2001 sess_data->ses = ses; 2002 sess_data->server = server; 2003 sess_data->buf0_type = CIFS_NO_BUFFER; 2004 sess_data->nls_cp = (struct nls_table *) nls_cp; 2005 sess_data->previous_session = ses->Suid; 2006 2007 rc = SMB2_select_sec(sess_data); 2008 if (rc) 2009 goto out; 2010 2011 /* 2012 * Initialize the session hash with the server one. 2013 */ 2014 memcpy(ses->preauth_sha_hash, server->preauth_sha_hash, 2015 SMB2_PREAUTH_HASH_SIZE); 2016 2017 while (sess_data->func) 2018 sess_data->func(sess_data); 2019 2020 if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign)) 2021 cifs_server_dbg(VFS, "signing requested but authenticated as guest\n"); 2022 rc = sess_data->result; 2023 out: 2024 kfree_sensitive(sess_data); 2025 return rc; 2026 } 2027 2028 int 2029 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) 2030 { 2031 struct smb_rqst rqst; 2032 struct smb2_logoff_req *req; /* response is also trivial struct */ 2033 int rc = 0; 2034 struct TCP_Server_Info *server; 2035 int flags = 0; 2036 unsigned int total_len; 2037 struct kvec iov[1]; 2038 struct kvec rsp_iov; 2039 int resp_buf_type; 2040 2041 cifs_dbg(FYI, "disconnect session %p\n", ses); 2042 2043 if (!ses || !ses->server) 2044 return smb_EIO(smb_eio_trace_null_pointers); 2045 server = ses->server; 2046 2047 /* no need to send SMB logoff if uid already closed due to reconnect */ 2048 spin_lock(&ses->chan_lock); 2049 if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) { 2050 spin_unlock(&ses->chan_lock); 2051 goto smb2_session_already_dead; 2052 } 2053 spin_unlock(&ses->chan_lock); 2054 2055 rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server, 2056 (void **) &req, &total_len); 2057 if (rc) 2058 return rc; 2059 2060 /* since no tcon, smb2_init can not do this, so do here */ 2061 req->hdr.SessionId = cpu_to_le64(ses->Suid); 2062 2063 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) 2064 flags |= CIFS_TRANSFORM_REQ; 2065 else if (server->sign) 2066 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 2067 2068 flags |= CIFS_NO_RSP_BUF; 2069 2070 iov[0].iov_base = (char *)req; 2071 iov[0].iov_len = total_len; 2072 2073 memset(&rqst, 0, sizeof(struct smb_rqst)); 2074 rqst.rq_iov = iov; 2075 rqst.rq_nvec = 1; 2076 2077 rc = cifs_send_recv(xid, ses, ses->server, 2078 &rqst, &resp_buf_type, flags, &rsp_iov); 2079 cifs_small_buf_release(req); 2080 /* 2081 * No tcon so can't do 2082 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); 2083 */ 2084 2085 smb2_session_already_dead: 2086 return rc; 2087 } 2088 2089 static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code) 2090 { 2091 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]); 2092 } 2093 2094 #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */) 2095 2096 /* These are similar values to what Windows uses */ 2097 static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon) 2098 { 2099 tcon->max_chunks = 256; 2100 tcon->max_bytes_chunk = 1048576; 2101 tcon->max_bytes_copy = 16777216; 2102 } 2103 2104 int 2105 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, 2106 struct cifs_tcon *tcon, const struct nls_table *cp) 2107 { 2108 struct smb_rqst rqst; 2109 struct smb2_tree_connect_req *req; 2110 struct smb2_tree_connect_rsp *rsp = NULL; 2111 struct kvec iov[2]; 2112 struct kvec rsp_iov = { NULL, 0 }; 2113 int rc = 0; 2114 int resp_buftype; 2115 int unc_path_len; 2116 __le16 *unc_path = NULL; 2117 int flags = 0; 2118 unsigned int total_len; 2119 struct TCP_Server_Info *server = cifs_pick_channel(ses); 2120 2121 cifs_dbg(FYI, "TCON\n"); 2122 2123 if (!server || !tree) 2124 return smb_EIO(smb_eio_trace_null_pointers); 2125 2126 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); 2127 if (unc_path == NULL) 2128 return -ENOMEM; 2129 2130 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp); 2131 if (unc_path_len <= 0) { 2132 kfree(unc_path); 2133 return -EINVAL; 2134 } 2135 unc_path_len *= 2; 2136 2137 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ 2138 tcon->tid = 0; 2139 atomic_set(&tcon->num_remote_opens, 0); 2140 rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server, 2141 (void **) &req, &total_len); 2142 if (rc) { 2143 kfree(unc_path); 2144 return rc; 2145 } 2146 2147 if (smb3_encryption_required(tcon)) 2148 flags |= CIFS_TRANSFORM_REQ; 2149 2150 iov[0].iov_base = (char *)req; 2151 /* 1 for pad */ 2152 iov[0].iov_len = total_len - 1; 2153 2154 /* Testing shows that buffer offset must be at location of Buffer[0] */ 2155 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)); 2156 req->PathLength = cpu_to_le16(unc_path_len); 2157 iov[1].iov_base = unc_path; 2158 iov[1].iov_len = unc_path_len; 2159 2160 /* 2161 * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 2162 * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1 2163 * (Samba servers don't always set the flag so also check if null user) 2164 */ 2165 if ((server->dialect == SMB311_PROT_ID) && 2166 !smb3_encryption_required(tcon) && 2167 !(ses->session_flags & 2168 (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) && 2169 ((ses->user_name != NULL) || (ses->sectype == Kerberos))) 2170 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 2171 2172 memset(&rqst, 0, sizeof(struct smb_rqst)); 2173 rqst.rq_iov = iov; 2174 rqst.rq_nvec = 2; 2175 2176 /* Need 64 for max size write so ask for more in case not there yet */ 2177 if (server->credits >= server->max_credits) 2178 req->hdr.CreditRequest = cpu_to_le16(0); 2179 else 2180 req->hdr.CreditRequest = cpu_to_le16( 2181 min_t(int, server->max_credits - 2182 server->credits, 64)); 2183 2184 rc = cifs_send_recv(xid, ses, server, 2185 &rqst, &resp_buftype, flags, &rsp_iov); 2186 cifs_small_buf_release(req); 2187 rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base; 2188 trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc); 2189 if ((rc != 0) || (rsp == NULL)) { 2190 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE); 2191 tcon->need_reconnect = true; 2192 goto tcon_error_exit; 2193 } 2194 2195 switch (rsp->ShareType) { 2196 case SMB2_SHARE_TYPE_DISK: 2197 cifs_dbg(FYI, "connection to disk share\n"); 2198 break; 2199 case SMB2_SHARE_TYPE_PIPE: 2200 tcon->pipe = true; 2201 cifs_dbg(FYI, "connection to pipe share\n"); 2202 break; 2203 case SMB2_SHARE_TYPE_PRINT: 2204 tcon->print = true; 2205 cifs_dbg(FYI, "connection to printer\n"); 2206 break; 2207 default: 2208 cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType); 2209 rc = -EOPNOTSUPP; 2210 goto tcon_error_exit; 2211 } 2212 2213 tcon->share_flags = le32_to_cpu(rsp->ShareFlags); 2214 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */ 2215 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); 2216 tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId); 2217 strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name)); 2218 2219 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) && 2220 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) 2221 cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n"); 2222 2223 if (tcon->seal && 2224 !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) 2225 cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n"); 2226 2227 init_copy_chunk_defaults(tcon); 2228 if (server->ops->validate_negotiate) 2229 rc = server->ops->validate_negotiate(xid, tcon); 2230 if (rc == 0) /* See MS-SMB2 2.2.10 and 3.2.5.5 */ 2231 if (tcon->share_flags & SMB2_SHAREFLAG_ISOLATED_TRANSPORT) 2232 server->nosharesock = true; 2233 tcon_exit: 2234 2235 free_rsp_buf(resp_buftype, rsp); 2236 kfree(unc_path); 2237 return rc; 2238 2239 tcon_error_exit: 2240 if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) 2241 cifs_dbg(VFS | ONCE, "BAD_NETWORK_NAME: %s\n", tree); 2242 goto tcon_exit; 2243 } 2244 2245 int 2246 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) 2247 { 2248 struct smb_rqst rqst; 2249 struct smb2_tree_disconnect_req *req; /* response is trivial */ 2250 int rc = 0; 2251 struct cifs_ses *ses = tcon->ses; 2252 struct TCP_Server_Info *server = cifs_pick_channel(ses); 2253 int flags = 0; 2254 unsigned int total_len; 2255 struct kvec iov[1]; 2256 struct kvec rsp_iov; 2257 int resp_buf_type; 2258 2259 cifs_dbg(FYI, "Tree Disconnect\n"); 2260 2261 if (!ses || !(ses->server)) 2262 return smb_EIO(smb_eio_trace_null_pointers); 2263 2264 trace_smb3_tdis_enter(xid, tcon->tid, ses->Suid, tcon->tree_name); 2265 spin_lock(&ses->chan_lock); 2266 if ((tcon->need_reconnect) || 2267 (CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) { 2268 spin_unlock(&ses->chan_lock); 2269 return 0; 2270 } 2271 spin_unlock(&ses->chan_lock); 2272 2273 invalidate_all_cached_dirs(tcon, true); 2274 2275 rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, server, 2276 (void **) &req, 2277 &total_len); 2278 if (rc) 2279 return rc; 2280 2281 if (smb3_encryption_required(tcon)) 2282 flags |= CIFS_TRANSFORM_REQ; 2283 2284 flags |= CIFS_NO_RSP_BUF; 2285 2286 iov[0].iov_base = (char *)req; 2287 iov[0].iov_len = total_len; 2288 2289 memset(&rqst, 0, sizeof(struct smb_rqst)); 2290 rqst.rq_iov = iov; 2291 rqst.rq_nvec = 1; 2292 2293 rc = cifs_send_recv(xid, ses, server, 2294 &rqst, &resp_buf_type, flags, &rsp_iov); 2295 cifs_small_buf_release(req); 2296 if (rc) { 2297 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE); 2298 trace_smb3_tdis_err(xid, tcon->tid, ses->Suid, rc); 2299 } 2300 trace_smb3_tdis_done(xid, tcon->tid, ses->Suid); 2301 2302 return rc; 2303 } 2304 2305 static create_durable_req_t * 2306 create_durable_buf(void) 2307 { 2308 create_durable_req_t *buf; 2309 2310 buf = kzalloc_obj(create_durable_req_t); 2311 if (!buf) 2312 return NULL; 2313 2314 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2315 (create_durable_req_t, Data)); 2316 buf->ccontext.DataLength = cpu_to_le32(16); 2317 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2318 (create_durable_req_t, Name)); 2319 buf->ccontext.NameLength = cpu_to_le16(4); 2320 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */ 2321 buf->Name[0] = 'D'; 2322 buf->Name[1] = 'H'; 2323 buf->Name[2] = 'n'; 2324 buf->Name[3] = 'Q'; 2325 return buf; 2326 } 2327 2328 static create_durable_req_t * 2329 create_reconnect_durable_buf(struct cifs_fid *fid) 2330 { 2331 create_durable_req_t *buf; 2332 2333 buf = kzalloc_obj(create_durable_req_t); 2334 if (!buf) 2335 return NULL; 2336 2337 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2338 (create_durable_req_t, Data)); 2339 buf->ccontext.DataLength = cpu_to_le32(16); 2340 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2341 (create_durable_req_t, Name)); 2342 buf->ccontext.NameLength = cpu_to_le16(4); 2343 buf->Data.Fid.PersistentFileId = fid->persistent_fid; 2344 buf->Data.Fid.VolatileFileId = fid->volatile_fid; 2345 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */ 2346 buf->Name[0] = 'D'; 2347 buf->Name[1] = 'H'; 2348 buf->Name[2] = 'n'; 2349 buf->Name[3] = 'C'; 2350 return buf; 2351 } 2352 2353 static void 2354 parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf) 2355 { 2356 struct create_disk_id_rsp *pdisk_id = (struct create_disk_id_rsp *)cc; 2357 2358 cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n", 2359 pdisk_id->DiskFileId, pdisk_id->VolumeId); 2360 buf->IndexNumber = pdisk_id->DiskFileId; 2361 } 2362 2363 static void 2364 parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info, 2365 struct create_posix_rsp *posix) 2366 { 2367 int sid_len; 2368 u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset); 2369 u8 *end = beg + le32_to_cpu(cc->DataLength); 2370 u8 *sid; 2371 2372 memset(posix, 0, sizeof(*posix)); 2373 2374 posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0)); 2375 posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4)); 2376 posix->mode = le32_to_cpu(*(__le32 *)(beg + 8)); 2377 2378 sid = beg + 12; 2379 sid_len = posix_info_sid_size(sid, end); 2380 if (sid_len < 0) { 2381 cifs_dbg(VFS, "bad owner sid in posix create response\n"); 2382 return; 2383 } 2384 memcpy(&posix->owner, sid, sid_len); 2385 2386 sid = sid + sid_len; 2387 sid_len = posix_info_sid_size(sid, end); 2388 if (sid_len < 0) { 2389 cifs_dbg(VFS, "bad group sid in posix create response\n"); 2390 return; 2391 } 2392 memcpy(&posix->group, sid, sid_len); 2393 2394 cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n", 2395 posix->nlink, posix->mode, posix->reparse_tag); 2396 } 2397 2398 int smb2_parse_contexts(struct TCP_Server_Info *server, 2399 struct kvec *rsp_iov, 2400 __u16 *epoch, 2401 char *lease_key, __u8 *oplock, 2402 struct smb2_file_all_info *buf, 2403 struct create_posix_rsp *posix) 2404 { 2405 struct smb2_create_rsp *rsp = rsp_iov->iov_base; 2406 struct create_context *cc; 2407 size_t rem, off, len; 2408 size_t doff, dlen; 2409 size_t noff, nlen; 2410 char *name; 2411 static const char smb3_create_tag_posix[] = { 2412 0x93, 0xAD, 0x25, 0x50, 0x9C, 2413 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83, 2414 0xDE, 0x96, 0x8B, 0xCD, 0x7C 2415 }; 2416 2417 *oplock = 0; 2418 2419 off = le32_to_cpu(rsp->CreateContextsOffset); 2420 rem = le32_to_cpu(rsp->CreateContextsLength); 2421 if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len) 2422 return -EINVAL; 2423 cc = (struct create_context *)((u8 *)rsp + off); 2424 2425 /* Initialize inode number to 0 in case no valid data in qfid context */ 2426 if (buf) 2427 buf->IndexNumber = 0; 2428 2429 while (rem >= sizeof(*cc)) { 2430 doff = le16_to_cpu(cc->DataOffset); 2431 dlen = le32_to_cpu(cc->DataLength); 2432 if (check_add_overflow(doff, dlen, &len) || len > rem) 2433 return -EINVAL; 2434 2435 noff = le16_to_cpu(cc->NameOffset); 2436 nlen = le16_to_cpu(cc->NameLength); 2437 if (noff + nlen > doff) 2438 return -EINVAL; 2439 2440 name = (char *)cc + noff; 2441 switch (nlen) { 2442 case 4: 2443 if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) { 2444 *oplock = server->ops->parse_lease_buf(cc, epoch, 2445 lease_key); 2446 } else if (buf && 2447 !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) { 2448 parse_query_id_ctxt(cc, buf); 2449 } 2450 break; 2451 case 16: 2452 if (posix && !memcmp(name, smb3_create_tag_posix, 16)) 2453 parse_posix_ctxt(cc, buf, posix); 2454 break; 2455 default: 2456 cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n", 2457 __func__, nlen, dlen); 2458 if (IS_ENABLED(CONFIG_CIFS_DEBUG2)) 2459 cifs_dump_mem("context data: ", cc, dlen); 2460 break; 2461 } 2462 2463 off = le32_to_cpu(cc->Next); 2464 if (!off) 2465 break; 2466 if (check_sub_overflow(rem, off, &rem)) 2467 return -EINVAL; 2468 cc = (struct create_context *)((u8 *)cc + off); 2469 } 2470 2471 if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) 2472 *oplock = rsp->OplockLevel; 2473 2474 return 0; 2475 } 2476 2477 static int 2478 add_lease_context(struct TCP_Server_Info *server, 2479 struct smb2_create_req *req, 2480 struct kvec *iov, 2481 unsigned int *num_iovec, 2482 u8 *lease_key, 2483 __u8 *oplock, 2484 u8 *parent_lease_key, 2485 __le32 flags) 2486 { 2487 unsigned int num = *num_iovec; 2488 2489 iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock, 2490 parent_lease_key, flags); 2491 if (iov[num].iov_base == NULL) 2492 return -ENOMEM; 2493 iov[num].iov_len = server->vals->create_lease_size; 2494 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE; 2495 *num_iovec = num + 1; 2496 return 0; 2497 } 2498 2499 static struct create_durable_req_v2 * 2500 create_durable_v2_buf(struct cifs_open_parms *oparms) 2501 { 2502 struct cifs_fid *pfid = oparms->fid; 2503 struct create_durable_req_v2 *buf; 2504 2505 buf = kzalloc_obj(struct create_durable_req_v2); 2506 if (!buf) 2507 return NULL; 2508 2509 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2510 (struct create_durable_req_v2, dcontext)); 2511 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2_req)); 2512 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2513 (struct create_durable_req_v2, Name)); 2514 buf->ccontext.NameLength = cpu_to_le16(4); 2515 2516 /* 2517 * NB: Handle timeout defaults to 0, which allows server to choose 2518 * (most servers default to 120 seconds) and most clients default to 0. 2519 * This can be overridden at mount ("handletimeout=") if the user wants 2520 * a different persistent (or resilient) handle timeout for all opens 2521 * on a particular SMB3 mount. 2522 */ 2523 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout); 2524 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); 2525 2526 /* for replay, we should not overwrite the existing create guid */ 2527 if (!oparms->replay) { 2528 generate_random_uuid(buf->dcontext.CreateGuid); 2529 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); 2530 } else 2531 memcpy(buf->dcontext.CreateGuid, pfid->create_guid, 16); 2532 2533 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */ 2534 buf->Name[0] = 'D'; 2535 buf->Name[1] = 'H'; 2536 buf->Name[2] = '2'; 2537 buf->Name[3] = 'Q'; 2538 return buf; 2539 } 2540 2541 static struct create_durable_handle_reconnect_v2 * 2542 create_reconnect_durable_v2_buf(struct cifs_fid *fid) 2543 { 2544 struct create_durable_handle_reconnect_v2 *buf; 2545 2546 buf = kzalloc_obj(struct create_durable_handle_reconnect_v2); 2547 if (!buf) 2548 return NULL; 2549 2550 buf->ccontext.DataOffset = 2551 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2, 2552 dcontext)); 2553 buf->ccontext.DataLength = 2554 cpu_to_le32(sizeof(struct durable_reconnect_context_v2)); 2555 buf->ccontext.NameOffset = 2556 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2, 2557 Name)); 2558 buf->ccontext.NameLength = cpu_to_le16(4); 2559 2560 buf->dcontext.Fid.PersistentFileId = fid->persistent_fid; 2561 buf->dcontext.Fid.VolatileFileId = fid->volatile_fid; 2562 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); 2563 memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16); 2564 2565 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */ 2566 buf->Name[0] = 'D'; 2567 buf->Name[1] = 'H'; 2568 buf->Name[2] = '2'; 2569 buf->Name[3] = 'C'; 2570 return buf; 2571 } 2572 2573 static int 2574 add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec, 2575 struct cifs_open_parms *oparms) 2576 { 2577 unsigned int num = *num_iovec; 2578 2579 iov[num].iov_base = create_durable_v2_buf(oparms); 2580 if (iov[num].iov_base == NULL) 2581 return -ENOMEM; 2582 iov[num].iov_len = sizeof(struct create_durable_req_v2); 2583 *num_iovec = num + 1; 2584 return 0; 2585 } 2586 2587 static int 2588 add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec, 2589 struct cifs_open_parms *oparms) 2590 { 2591 unsigned int num = *num_iovec; 2592 2593 /* indicate that we don't need to relock the file */ 2594 oparms->reconnect = false; 2595 2596 iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid); 2597 if (iov[num].iov_base == NULL) 2598 return -ENOMEM; 2599 iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2); 2600 *num_iovec = num + 1; 2601 return 0; 2602 } 2603 2604 static int 2605 add_durable_context(struct kvec *iov, unsigned int *num_iovec, 2606 struct cifs_open_parms *oparms, bool use_persistent) 2607 { 2608 unsigned int num = *num_iovec; 2609 2610 if (use_persistent) { 2611 if (oparms->reconnect) 2612 return add_durable_reconnect_v2_context(iov, num_iovec, 2613 oparms); 2614 else 2615 return add_durable_v2_context(iov, num_iovec, oparms); 2616 } 2617 2618 if (oparms->reconnect) { 2619 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid); 2620 /* indicate that we don't need to relock the file */ 2621 oparms->reconnect = false; 2622 } else 2623 iov[num].iov_base = create_durable_buf(); 2624 if (iov[num].iov_base == NULL) 2625 return -ENOMEM; 2626 iov[num].iov_len = sizeof(create_durable_req_t); 2627 *num_iovec = num + 1; 2628 return 0; 2629 } 2630 2631 /* See MS-SMB2 2.2.13.2.7 */ 2632 static struct crt_twarp_ctxt * 2633 create_twarp_buf(__u64 timewarp) 2634 { 2635 struct crt_twarp_ctxt *buf; 2636 2637 buf = kzalloc_obj(struct crt_twarp_ctxt); 2638 if (!buf) 2639 return NULL; 2640 2641 buf->ccontext.DataOffset = cpu_to_le16(offsetof 2642 (struct crt_twarp_ctxt, Timestamp)); 2643 buf->ccontext.DataLength = cpu_to_le32(8); 2644 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2645 (struct crt_twarp_ctxt, Name)); 2646 buf->ccontext.NameLength = cpu_to_le16(4); 2647 /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */ 2648 buf->Name[0] = 'T'; 2649 buf->Name[1] = 'W'; 2650 buf->Name[2] = 'r'; 2651 buf->Name[3] = 'p'; 2652 buf->Timestamp = cpu_to_le64(timewarp); 2653 return buf; 2654 } 2655 2656 /* See MS-SMB2 2.2.13.2.7 */ 2657 static int 2658 add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp) 2659 { 2660 unsigned int num = *num_iovec; 2661 2662 iov[num].iov_base = create_twarp_buf(timewarp); 2663 if (iov[num].iov_base == NULL) 2664 return -ENOMEM; 2665 iov[num].iov_len = sizeof(struct crt_twarp_ctxt); 2666 *num_iovec = num + 1; 2667 return 0; 2668 } 2669 2670 /* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ 2671 static void setup_owner_group_sids(char *buf) 2672 { 2673 struct owner_group_sids *sids = (struct owner_group_sids *)buf; 2674 2675 /* Populate the user ownership fields S-1-5-88-1 */ 2676 sids->owner.Revision = 1; 2677 sids->owner.NumAuth = 3; 2678 sids->owner.Authority[5] = 5; 2679 sids->owner.SubAuthorities[0] = cpu_to_le32(88); 2680 sids->owner.SubAuthorities[1] = cpu_to_le32(1); 2681 sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val); 2682 2683 /* Populate the group ownership fields S-1-5-88-2 */ 2684 sids->group.Revision = 1; 2685 sids->group.NumAuth = 3; 2686 sids->group.Authority[5] = 5; 2687 sids->group.SubAuthorities[0] = cpu_to_le32(88); 2688 sids->group.SubAuthorities[1] = cpu_to_le32(2); 2689 sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val); 2690 2691 cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val); 2692 } 2693 2694 /* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */ 2695 static struct crt_sd_ctxt * 2696 create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) 2697 { 2698 struct crt_sd_ctxt *buf; 2699 __u8 *ptr, *aclptr; 2700 unsigned int acelen, acl_size, ace_count; 2701 unsigned int owner_offset = 0; 2702 unsigned int group_offset = 0; 2703 struct smb3_acl acl = {}; 2704 2705 *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct smb_ace) * 4), 8); 2706 2707 if (set_owner) { 2708 /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */ 2709 *len += sizeof(struct owner_group_sids); 2710 } 2711 2712 buf = kzalloc(*len, GFP_KERNEL); 2713 if (buf == NULL) 2714 return buf; 2715 2716 ptr = (__u8 *)&buf[1]; 2717 if (set_owner) { 2718 /* offset fields are from beginning of security descriptor not of create context */ 2719 owner_offset = ptr - (__u8 *)&buf->sd; 2720 buf->sd.OffsetOwner = cpu_to_le32(owner_offset); 2721 group_offset = owner_offset + offsetof(struct owner_group_sids, group); 2722 buf->sd.OffsetGroup = cpu_to_le32(group_offset); 2723 2724 setup_owner_group_sids(ptr); 2725 ptr += sizeof(struct owner_group_sids); 2726 } else { 2727 buf->sd.OffsetOwner = 0; 2728 buf->sd.OffsetGroup = 0; 2729 } 2730 2731 buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd)); 2732 buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name)); 2733 buf->ccontext.NameLength = cpu_to_le16(4); 2734 /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */ 2735 buf->Name[0] = 'S'; 2736 buf->Name[1] = 'e'; 2737 buf->Name[2] = 'c'; 2738 buf->Name[3] = 'D'; 2739 buf->sd.Revision = 1; /* Must be one see MS-DTYP 2.4.6 */ 2740 2741 /* 2742 * ACL is "self relative" ie ACL is stored in contiguous block of memory 2743 * and "DP" ie the DACL is present 2744 */ 2745 buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP); 2746 2747 /* offset owner, group and Sbz1 and SACL are all zero */ 2748 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2749 /* Ship the ACL for now. we will copy it into buf later. */ 2750 aclptr = ptr; 2751 ptr += sizeof(struct smb3_acl); 2752 2753 /* create one ACE to hold the mode embedded in reserved special SID */ 2754 acelen = setup_special_mode_ACE((struct smb_ace *)ptr, false, (__u64)mode); 2755 ptr += acelen; 2756 acl_size = acelen + sizeof(struct smb3_acl); 2757 ace_count = 1; 2758 2759 if (set_owner) { 2760 /* we do not need to reallocate buffer to add the two more ACEs. plenty of space */ 2761 acelen = setup_special_user_owner_ACE((struct smb_ace *)ptr); 2762 ptr += acelen; 2763 acl_size += acelen; 2764 ace_count += 1; 2765 } 2766 2767 /* and one more ACE to allow access for authenticated users */ 2768 acelen = setup_authusers_ACE((struct smb_ace *)ptr); 2769 ptr += acelen; 2770 acl_size += acelen; 2771 ace_count += 1; 2772 2773 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */ 2774 acl.AclSize = cpu_to_le16(acl_size); 2775 acl.AceCount = cpu_to_le16(ace_count); 2776 /* acl.Sbz1 and Sbz2 MBZ so are not set here, but initialized above */ 2777 memcpy(aclptr, &acl, sizeof(struct smb3_acl)); 2778 2779 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2780 *len = round_up((unsigned int)(ptr - (__u8 *)buf), 8); 2781 2782 return buf; 2783 } 2784 2785 static int 2786 add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner) 2787 { 2788 unsigned int num = *num_iovec; 2789 unsigned int len = 0; 2790 2791 iov[num].iov_base = create_sd_buf(mode, set_owner, &len); 2792 if (iov[num].iov_base == NULL) 2793 return -ENOMEM; 2794 iov[num].iov_len = len; 2795 *num_iovec = num + 1; 2796 return 0; 2797 } 2798 2799 static struct crt_query_id_ctxt * 2800 create_query_id_buf(void) 2801 { 2802 struct crt_query_id_ctxt *buf; 2803 2804 buf = kzalloc_obj(struct crt_query_id_ctxt); 2805 if (!buf) 2806 return NULL; 2807 2808 buf->ccontext.DataOffset = cpu_to_le16(0); 2809 buf->ccontext.DataLength = cpu_to_le32(0); 2810 buf->ccontext.NameOffset = cpu_to_le16(offsetof 2811 (struct crt_query_id_ctxt, Name)); 2812 buf->ccontext.NameLength = cpu_to_le16(4); 2813 /* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */ 2814 buf->Name[0] = 'Q'; 2815 buf->Name[1] = 'F'; 2816 buf->Name[2] = 'i'; 2817 buf->Name[3] = 'd'; 2818 return buf; 2819 } 2820 2821 /* See MS-SMB2 2.2.13.2.9 */ 2822 static int 2823 add_query_id_context(struct kvec *iov, unsigned int *num_iovec) 2824 { 2825 unsigned int num = *num_iovec; 2826 2827 iov[num].iov_base = create_query_id_buf(); 2828 if (iov[num].iov_base == NULL) 2829 return -ENOMEM; 2830 iov[num].iov_len = sizeof(struct crt_query_id_ctxt); 2831 *num_iovec = num + 1; 2832 return 0; 2833 } 2834 2835 static void add_ea_context(struct cifs_open_parms *oparms, 2836 struct kvec *rq_iov, unsigned int *num_iovs) 2837 { 2838 struct kvec *iov = oparms->ea_cctx; 2839 2840 if (iov && iov->iov_base && iov->iov_len) { 2841 rq_iov[(*num_iovs)++] = *iov; 2842 memset(iov, 0, sizeof(*iov)); 2843 } 2844 } 2845 2846 static int 2847 alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len, 2848 const char *treename, const __le16 *path) 2849 { 2850 int treename_len, path_len; 2851 struct nls_table *cp; 2852 const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)}; 2853 2854 /* 2855 * skip leading "\\" 2856 */ 2857 treename_len = strlen(treename); 2858 if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\')) 2859 return -EINVAL; 2860 2861 treename += 2; 2862 treename_len -= 2; 2863 2864 path_len = UniStrnlen((wchar_t *)path, PATH_MAX); 2865 2866 /* make room for one path separator only if @path isn't empty */ 2867 *out_len = treename_len + (path[0] ? 1 : 0) + path_len; 2868 2869 /* 2870 * final path needs to be 8-byte aligned as specified in 2871 * MS-SMB2 2.2.13 SMB2 CREATE Request. 2872 */ 2873 *out_size = round_up(*out_len * sizeof(__le16), 8); 2874 *out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL); 2875 if (!*out_path) 2876 return -ENOMEM; 2877 2878 cp = load_nls_default(); 2879 cifs_strtoUTF16(*out_path, treename, treename_len, cp); 2880 2881 /* Do not append the separator if the path is empty */ 2882 if (path[0] != cpu_to_le16(0x0000)) { 2883 UniStrcat((wchar_t *)*out_path, (wchar_t *)sep); 2884 UniStrcat((wchar_t *)*out_path, (wchar_t *)path); 2885 } 2886 2887 unload_nls(cp); 2888 2889 return 0; 2890 } 2891 2892 int smb311_posix_mkdir(const unsigned int xid, struct inode *inode, 2893 umode_t mode, struct cifs_tcon *tcon, 2894 const char *full_path, 2895 struct cifs_sb_info *cifs_sb) 2896 { 2897 struct smb_rqst rqst; 2898 struct smb2_create_req *req; 2899 struct smb2_create_rsp *rsp = NULL; 2900 struct cifs_ses *ses = tcon->ses; 2901 struct kvec iov[3]; /* make sure at least one for each open context */ 2902 struct kvec rsp_iov = {NULL, 0}; 2903 int resp_buftype; 2904 int uni_path_len; 2905 __le16 *copy_path = NULL; 2906 int copy_size; 2907 int rc = 0; 2908 unsigned int n_iov = 2; 2909 __u32 file_attributes = 0; 2910 char *pc_buf = NULL; 2911 int flags = 0; 2912 unsigned int total_len; 2913 __le16 *utf16_path = NULL; 2914 struct TCP_Server_Info *server; 2915 int retries = 0, cur_sleep = 0; 2916 2917 replay_again: 2918 /* reinitialize for possible replay */ 2919 pc_buf = NULL; 2920 flags = 0; 2921 n_iov = 2; 2922 server = cifs_pick_channel(ses); 2923 2924 cifs_dbg(FYI, "mkdir\n"); 2925 2926 /* resource #1: path allocation */ 2927 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb); 2928 if (!utf16_path) 2929 return -ENOMEM; 2930 2931 if (!ses || !server) { 2932 rc = smb_EIO(smb_eio_trace_null_pointers); 2933 goto err_free_path; 2934 } 2935 2936 /* resource #2: request */ 2937 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server, 2938 (void **) &req, &total_len); 2939 if (rc) 2940 goto err_free_path; 2941 2942 2943 if (smb3_encryption_required(tcon)) 2944 flags |= CIFS_TRANSFORM_REQ; 2945 2946 req->ImpersonationLevel = IL_IMPERSONATION; 2947 req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES); 2948 /* File attributes ignored on open (used in create though) */ 2949 req->FileAttributes = cpu_to_le32(file_attributes); 2950 req->ShareAccess = FILE_SHARE_ALL_LE; 2951 req->CreateDisposition = cpu_to_le32(FILE_CREATE); 2952 req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE); 2953 2954 iov[0].iov_base = (char *)req; 2955 /* -1 since last byte is buf[0] which is sent below (path) */ 2956 iov[0].iov_len = total_len - 1; 2957 2958 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)); 2959 2960 /* [MS-SMB2] 2.2.13 NameOffset: 2961 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of 2962 * the SMB2 header, the file name includes a prefix that will 2963 * be processed during DFS name normalization as specified in 2964 * section 3.3.5.9. Otherwise, the file name is relative to 2965 * the share that is identified by the TreeId in the SMB2 2966 * header. 2967 */ 2968 if (tcon->share_flags & SHI1005_FLAGS_DFS) { 2969 int name_len; 2970 2971 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS; 2972 rc = alloc_path_with_tree_prefix(©_path, ©_size, 2973 &name_len, 2974 tcon->tree_name, utf16_path); 2975 if (rc) 2976 goto err_free_req; 2977 2978 req->NameLength = cpu_to_le16(name_len * 2); 2979 uni_path_len = copy_size; 2980 /* free before overwriting resource */ 2981 kfree(utf16_path); 2982 utf16_path = copy_path; 2983 } else { 2984 uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2; 2985 /* MUST set path len (NameLength) to 0 opening root of share */ 2986 req->NameLength = cpu_to_le16(uni_path_len - 2); 2987 if (uni_path_len % 8 != 0) { 2988 copy_size = roundup(uni_path_len, 8); 2989 copy_path = kzalloc(copy_size, GFP_KERNEL); 2990 if (!copy_path) { 2991 rc = -ENOMEM; 2992 goto err_free_req; 2993 } 2994 memcpy((char *)copy_path, (const char *)utf16_path, 2995 uni_path_len); 2996 uni_path_len = copy_size; 2997 /* free before overwriting resource */ 2998 kfree(utf16_path); 2999 utf16_path = copy_path; 3000 } 3001 } 3002 3003 iov[1].iov_len = uni_path_len; 3004 iov[1].iov_base = utf16_path; 3005 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE; 3006 3007 if (tcon->posix_extensions) { 3008 /* resource #3: posix buf */ 3009 rc = add_posix_context(iov, &n_iov, mode); 3010 if (rc) 3011 goto err_free_req; 3012 req->CreateContextsOffset = cpu_to_le32( 3013 sizeof(struct smb2_create_req) + 3014 iov[1].iov_len); 3015 le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len); 3016 pc_buf = iov[n_iov-1].iov_base; 3017 } 3018 3019 3020 memset(&rqst, 0, sizeof(struct smb_rqst)); 3021 rqst.rq_iov = iov; 3022 rqst.rq_nvec = n_iov; 3023 3024 /* no need to inc num_remote_opens because we close it just below */ 3025 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE, 3026 FILE_WRITE_ATTRIBUTES); 3027 3028 if (retries) { 3029 /* Back-off before retry */ 3030 if (cur_sleep) 3031 msleep(cur_sleep); 3032 smb2_set_replay(server, &rqst); 3033 } 3034 3035 /* resource #4: response buffer */ 3036 rc = cifs_send_recv(xid, ses, server, 3037 &rqst, &resp_buftype, flags, &rsp_iov); 3038 if (rc) { 3039 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); 3040 trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid, 3041 CREATE_NOT_FILE, 3042 FILE_WRITE_ATTRIBUTES, rc); 3043 goto err_free_rsp_buf; 3044 } 3045 3046 /* 3047 * Although unlikely to be possible for rsp to be null and rc not set, 3048 * adding check below is slightly safer long term (and quiets Coverity 3049 * warning) 3050 */ 3051 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base; 3052 if (rsp == NULL) { 3053 rc = smb_EIO(smb_eio_trace_mkdir_no_rsp); 3054 kfree(pc_buf); 3055 goto err_free_req; 3056 } 3057 3058 trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid, 3059 CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, 3060 rsp->OplockLevel); 3061 3062 SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId); 3063 3064 /* Eventually save off posix specific response info and timestamps */ 3065 3066 err_free_rsp_buf: 3067 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 3068 kfree(pc_buf); 3069 err_free_req: 3070 cifs_small_buf_release(req); 3071 err_free_path: 3072 kfree(utf16_path); 3073 3074 if (is_replayable_error(rc) && 3075 smb2_should_replay(tcon, &retries, &cur_sleep)) 3076 goto replay_again; 3077 3078 return rc; 3079 } 3080 3081 int 3082 SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3083 struct smb_rqst *rqst, __u8 *oplock, 3084 struct cifs_open_parms *oparms, __le16 *path) 3085 { 3086 struct smb2_create_req *req; 3087 unsigned int n_iov = 2; 3088 __u32 file_attributes = 0; 3089 int copy_size; 3090 int uni_path_len; 3091 unsigned int total_len; 3092 struct kvec *iov = rqst->rq_iov; 3093 __le16 *copy_path; 3094 int rc; 3095 3096 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server, 3097 (void **) &req, &total_len); 3098 if (rc) 3099 return rc; 3100 3101 iov[0].iov_base = (char *)req; 3102 /* -1 since last byte is buf[0] which is sent below (path) */ 3103 iov[0].iov_len = total_len - 1; 3104 3105 if (oparms->create_options & CREATE_OPTION_READONLY) 3106 file_attributes |= ATTR_READONLY; 3107 if (oparms->create_options & CREATE_OPTION_SPECIAL) 3108 file_attributes |= ATTR_SYSTEM; 3109 3110 req->ImpersonationLevel = IL_IMPERSONATION; 3111 req->DesiredAccess = cpu_to_le32(oparms->desired_access); 3112 /* File attributes ignored on open (used in create though) */ 3113 req->FileAttributes = cpu_to_le32(file_attributes); 3114 req->ShareAccess = FILE_SHARE_ALL_LE; 3115 3116 req->CreateDisposition = cpu_to_le32(oparms->disposition); 3117 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK); 3118 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)); 3119 3120 /* [MS-SMB2] 2.2.13 NameOffset: 3121 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of 3122 * the SMB2 header, the file name includes a prefix that will 3123 * be processed during DFS name normalization as specified in 3124 * section 3.3.5.9. Otherwise, the file name is relative to 3125 * the share that is identified by the TreeId in the SMB2 3126 * header. 3127 */ 3128 if (tcon->share_flags & SHI1005_FLAGS_DFS) { 3129 int name_len; 3130 3131 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS; 3132 rc = alloc_path_with_tree_prefix(©_path, ©_size, 3133 &name_len, 3134 tcon->tree_name, path); 3135 if (rc) 3136 return rc; 3137 req->NameLength = cpu_to_le16(name_len * 2); 3138 uni_path_len = copy_size; 3139 path = copy_path; 3140 } else { 3141 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2; 3142 /* MUST set path len (NameLength) to 0 opening root of share */ 3143 req->NameLength = cpu_to_le16(uni_path_len - 2); 3144 copy_size = round_up(uni_path_len, 8); 3145 copy_path = kzalloc(copy_size, GFP_KERNEL); 3146 if (!copy_path) 3147 return -ENOMEM; 3148 memcpy((char *)copy_path, (const char *)path, 3149 uni_path_len); 3150 uni_path_len = copy_size; 3151 path = copy_path; 3152 } 3153 3154 iov[1].iov_len = uni_path_len; 3155 iov[1].iov_base = path; 3156 3157 if ((!server->oplocks) || (tcon->no_lease)) 3158 *oplock = SMB2_OPLOCK_LEVEL_NONE; 3159 3160 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) || 3161 *oplock == SMB2_OPLOCK_LEVEL_NONE) 3162 req->RequestedOplockLevel = *oplock; 3163 else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) && 3164 (oparms->create_options & CREATE_NOT_FILE)) 3165 req->RequestedOplockLevel = *oplock; /* no srv lease support */ 3166 else { 3167 rc = add_lease_context(server, req, iov, &n_iov, 3168 oparms->fid->lease_key, oplock, 3169 oparms->fid->parent_lease_key, 3170 oparms->lease_flags); 3171 if (rc) 3172 return rc; 3173 } 3174 3175 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) { 3176 rc = add_durable_context(iov, &n_iov, oparms, 3177 tcon->use_persistent); 3178 if (rc) 3179 return rc; 3180 } 3181 3182 if (tcon->posix_extensions) { 3183 rc = add_posix_context(iov, &n_iov, oparms->mode); 3184 if (rc) 3185 return rc; 3186 } 3187 3188 if (tcon->snapshot_time) { 3189 cifs_dbg(FYI, "adding snapshot context\n"); 3190 rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time); 3191 if (rc) 3192 return rc; 3193 } 3194 3195 if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) { 3196 unsigned int sbflags = cifs_sb_flags(oparms->cifs_sb); 3197 bool set_mode; 3198 bool set_owner; 3199 3200 if ((sbflags & CIFS_MOUNT_MODE_FROM_SID) && 3201 oparms->mode != ACL_NO_MODE) { 3202 set_mode = true; 3203 } else { 3204 set_mode = false; 3205 oparms->mode = ACL_NO_MODE; 3206 } 3207 3208 set_owner = sbflags & CIFS_MOUNT_UID_FROM_ACL; 3209 if (set_owner | set_mode) { 3210 cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode); 3211 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner); 3212 if (rc) 3213 return rc; 3214 } 3215 } 3216 3217 add_query_id_context(iov, &n_iov); 3218 add_ea_context(oparms, iov, &n_iov); 3219 3220 if (n_iov > 2) { 3221 /* 3222 * We have create contexts behind iov[1] (the file 3223 * name), point at them from the main create request 3224 */ 3225 req->CreateContextsOffset = cpu_to_le32( 3226 sizeof(struct smb2_create_req) + 3227 iov[1].iov_len); 3228 req->CreateContextsLength = 0; 3229 3230 for (unsigned int i = 2; i < (n_iov-1); i++) { 3231 struct kvec *v = &iov[i]; 3232 size_t len = v->iov_len; 3233 struct create_context *cctx = 3234 (struct create_context *)v->iov_base; 3235 3236 cctx->Next = cpu_to_le32(len); 3237 le32_add_cpu(&req->CreateContextsLength, len); 3238 } 3239 le32_add_cpu(&req->CreateContextsLength, 3240 iov[n_iov-1].iov_len); 3241 } 3242 3243 rqst->rq_nvec = n_iov; 3244 return 0; 3245 } 3246 3247 /* rq_iov[0] is the request and is released by cifs_small_buf_release(). 3248 * All other vectors are freed by kfree(). 3249 */ 3250 void 3251 SMB2_open_free(struct smb_rqst *rqst) 3252 { 3253 int i; 3254 3255 if (rqst && rqst->rq_iov) { 3256 cifs_small_buf_release(rqst->rq_iov[0].iov_base); 3257 for (i = 1; i < rqst->rq_nvec; i++) 3258 if (rqst->rq_iov[i].iov_base != smb2_padding) 3259 kfree(rqst->rq_iov[i].iov_base); 3260 } 3261 } 3262 3263 int 3264 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, 3265 __u8 *oplock, struct smb2_file_all_info *buf, 3266 struct create_posix_rsp *posix, 3267 struct kvec *err_iov, int *buftype) 3268 { 3269 struct smb_rqst rqst; 3270 struct smb2_create_rsp *rsp = NULL; 3271 struct cifs_tcon *tcon = oparms->tcon; 3272 struct cifs_ses *ses = tcon->ses; 3273 struct TCP_Server_Info *server; 3274 struct kvec iov[SMB2_CREATE_IOV_SIZE]; 3275 struct kvec rsp_iov = {NULL, 0}; 3276 int resp_buftype = CIFS_NO_BUFFER; 3277 int rc = 0; 3278 int flags = 0; 3279 int retries = 0, cur_sleep = 0; 3280 3281 replay_again: 3282 /* reinitialize for possible replay */ 3283 flags = 0; 3284 server = cifs_pick_channel(ses); 3285 oparms->replay = !!(retries); 3286 3287 cifs_dbg(FYI, "create/open\n"); 3288 if (!ses || !server) 3289 return smb_EIO(smb_eio_trace_null_pointers); 3290 3291 if (smb3_encryption_required(tcon)) 3292 flags |= CIFS_TRANSFORM_REQ; 3293 3294 memset(&rqst, 0, sizeof(struct smb_rqst)); 3295 memset(&iov, 0, sizeof(iov)); 3296 rqst.rq_iov = iov; 3297 rqst.rq_nvec = SMB2_CREATE_IOV_SIZE; 3298 3299 rc = SMB2_open_init(tcon, server, 3300 &rqst, oplock, oparms, path); 3301 if (rc) 3302 goto creat_exit; 3303 3304 trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path, 3305 oparms->create_options, oparms->desired_access); 3306 3307 if (retries) { 3308 /* Back-off before retry */ 3309 if (cur_sleep) 3310 msleep(cur_sleep); 3311 smb2_set_replay(server, &rqst); 3312 } 3313 3314 rc = cifs_send_recv(xid, ses, server, 3315 &rqst, &resp_buftype, flags, 3316 &rsp_iov); 3317 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base; 3318 3319 if (rc != 0) { 3320 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); 3321 if (err_iov && rsp) { 3322 *err_iov = rsp_iov; 3323 *buftype = resp_buftype; 3324 resp_buftype = CIFS_NO_BUFFER; 3325 rsp = NULL; 3326 } 3327 trace_smb3_open_err(xid, tcon->tid, ses->Suid, 3328 oparms->create_options, oparms->desired_access, rc); 3329 if (rc == -EREMCHG) { 3330 pr_warn_once("server share %s deleted\n", 3331 tcon->tree_name); 3332 tcon->need_reconnect = true; 3333 } 3334 goto creat_exit; 3335 } else if (rsp == NULL) /* unlikely to happen, but safer to check */ 3336 goto creat_exit; 3337 3338 atomic_inc(&tcon->num_remote_opens); 3339 oparms->fid->persistent_fid = rsp->PersistentFileId; 3340 oparms->fid->volatile_fid = rsp->VolatileFileId; 3341 oparms->fid->access = oparms->desired_access; 3342 #ifdef CONFIG_CIFS_DEBUG2 3343 oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId); 3344 #endif /* CIFS_DEBUG2 */ 3345 3346 if (buf) { 3347 buf->CreationTime = rsp->CreationTime; 3348 buf->LastAccessTime = rsp->LastAccessTime; 3349 buf->LastWriteTime = rsp->LastWriteTime; 3350 buf->ChangeTime = rsp->ChangeTime; 3351 buf->AllocationSize = rsp->AllocationSize; 3352 buf->EndOfFile = rsp->EndofFile; 3353 buf->Attributes = rsp->FileAttributes; 3354 buf->NumberOfLinks = cpu_to_le32(1); 3355 buf->DeletePending = 0; /* successful open = not delete pending */ 3356 } 3357 3358 3359 rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch, 3360 oparms->fid->lease_key, oplock, buf, posix); 3361 3362 trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid, 3363 oparms->create_options, oparms->desired_access, 3364 *oplock); 3365 creat_exit: 3366 SMB2_open_free(&rqst); 3367 free_rsp_buf(resp_buftype, rsp); 3368 3369 if (is_replayable_error(rc) && 3370 smb2_should_replay(tcon, &retries, &cur_sleep)) 3371 goto replay_again; 3372 3373 return rc; 3374 } 3375 3376 int 3377 SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3378 struct smb_rqst *rqst, 3379 u64 persistent_fid, u64 volatile_fid, u32 opcode, 3380 char *in_data, u32 indatalen, 3381 __u32 max_response_size) 3382 { 3383 struct smb2_ioctl_req *req; 3384 struct kvec *iov = rqst->rq_iov; 3385 unsigned int total_len; 3386 int rc; 3387 char *in_data_buf; 3388 3389 rc = smb2_ioctl_req_init(opcode, tcon, server, 3390 (void **) &req, &total_len); 3391 if (rc) 3392 return rc; 3393 3394 if (indatalen) { 3395 /* 3396 * indatalen is usually small at a couple of bytes max, so 3397 * just allocate through generic pool 3398 */ 3399 in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS); 3400 if (!in_data_buf) { 3401 cifs_small_buf_release(req); 3402 return -ENOMEM; 3403 } 3404 } 3405 3406 req->CtlCode = cpu_to_le32(opcode); 3407 req->PersistentFileId = persistent_fid; 3408 req->VolatileFileId = volatile_fid; 3409 3410 iov[0].iov_base = (char *)req; 3411 /* 3412 * If no input data, the size of ioctl struct in 3413 * protocol spec still includes a 1 byte data buffer, 3414 * but if input data passed to ioctl, we do not 3415 * want to double count this, so we do not send 3416 * the dummy one byte of data in iovec[0] if sending 3417 * input data (in iovec[1]). 3418 */ 3419 if (indatalen) { 3420 req->InputCount = cpu_to_le32(indatalen); 3421 /* do not set InputOffset if no input data */ 3422 req->InputOffset = 3423 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer)); 3424 rqst->rq_nvec = 2; 3425 iov[0].iov_len = total_len - 1; 3426 iov[1].iov_base = in_data_buf; 3427 iov[1].iov_len = indatalen; 3428 } else { 3429 rqst->rq_nvec = 1; 3430 iov[0].iov_len = total_len; 3431 } 3432 3433 req->OutputOffset = 0; 3434 req->OutputCount = 0; /* MBZ */ 3435 3436 /* 3437 * In most cases max_response_size is set to 16K (CIFSMaxBufSize) 3438 * We Could increase default MaxOutputResponse, but that could require 3439 * more credits. Windows typically sets this smaller, but for some 3440 * ioctls it may be useful to allow server to send more. No point 3441 * limiting what the server can send as long as fits in one credit 3442 * We can not handle more than CIFS_MAX_BUF_SIZE yet but may want 3443 * to increase this limit up in the future. 3444 * Note that for snapshot queries that servers like Azure expect that 3445 * the first query be minimal size (and just used to get the number/size 3446 * of previous versions) so response size must be specified as EXACTLY 3447 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple 3448 * of eight bytes. Currently that is the only case where we set max 3449 * response size smaller. 3450 */ 3451 req->MaxOutputResponse = cpu_to_le32(max_response_size); 3452 req->hdr.CreditCharge = 3453 cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), 3454 SMB2_MAX_BUFFER_SIZE)); 3455 /* always an FSCTL (for now) */ 3456 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); 3457 3458 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ 3459 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) 3460 req->hdr.Flags |= SMB2_FLAGS_SIGNED; 3461 3462 return 0; 3463 } 3464 3465 void 3466 SMB2_ioctl_free(struct smb_rqst *rqst) 3467 { 3468 int i; 3469 3470 if (rqst && rqst->rq_iov) { 3471 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3472 for (i = 1; i < rqst->rq_nvec; i++) 3473 if (rqst->rq_iov[i].iov_base != smb2_padding) 3474 kfree(rqst->rq_iov[i].iov_base); 3475 } 3476 } 3477 3478 3479 /* 3480 * SMB2 IOCTL is used for both IOCTLs and FSCTLs 3481 */ 3482 int 3483 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 3484 u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen, 3485 u32 max_out_data_len, char **out_data, 3486 u32 *plen /* returned data len */) 3487 { 3488 struct smb_rqst rqst; 3489 struct smb2_ioctl_rsp *rsp = NULL; 3490 struct cifs_ses *ses; 3491 struct TCP_Server_Info *server; 3492 struct kvec iov[SMB2_IOCTL_IOV_SIZE]; 3493 struct kvec rsp_iov = {NULL, 0}; 3494 int resp_buftype = CIFS_NO_BUFFER; 3495 int rc = 0; 3496 int flags = 0; 3497 int retries = 0, cur_sleep = 0; 3498 3499 if (!tcon) 3500 return smb_EIO(smb_eio_trace_null_pointers); 3501 3502 ses = tcon->ses; 3503 if (!ses) 3504 return smb_EIO(smb_eio_trace_null_pointers); 3505 3506 replay_again: 3507 /* reinitialize for possible replay */ 3508 flags = 0; 3509 server = cifs_pick_channel(ses); 3510 3511 if (!server) 3512 return smb_EIO(smb_eio_trace_null_pointers); 3513 3514 cifs_dbg(FYI, "SMB2 IOCTL\n"); 3515 3516 if (out_data != NULL) 3517 *out_data = NULL; 3518 3519 /* zero out returned data len, in case of error */ 3520 if (plen) 3521 *plen = 0; 3522 3523 if (smb3_encryption_required(tcon)) 3524 flags |= CIFS_TRANSFORM_REQ; 3525 3526 memset(&rqst, 0, sizeof(struct smb_rqst)); 3527 memset(&iov, 0, sizeof(iov)); 3528 rqst.rq_iov = iov; 3529 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE; 3530 3531 rc = SMB2_ioctl_init(tcon, server, 3532 &rqst, persistent_fid, volatile_fid, opcode, 3533 in_data, indatalen, max_out_data_len); 3534 if (rc) 3535 goto ioctl_exit; 3536 3537 if (retries) { 3538 /* Back-off before retry */ 3539 if (cur_sleep) 3540 msleep(cur_sleep); 3541 smb2_set_replay(server, &rqst); 3542 } 3543 3544 rc = cifs_send_recv(xid, ses, server, 3545 &rqst, &resp_buftype, flags, 3546 &rsp_iov); 3547 rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base; 3548 3549 if (rc != 0) 3550 trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid, 3551 ses->Suid, 0, opcode, rc); 3552 3553 if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) { 3554 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 3555 goto ioctl_exit; 3556 } else if (rc == -EINVAL) { 3557 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) && 3558 (opcode != FSCTL_SRV_COPYCHUNK)) { 3559 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 3560 goto ioctl_exit; 3561 } 3562 } else if (rc == -E2BIG) { 3563 if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) { 3564 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); 3565 goto ioctl_exit; 3566 } 3567 } 3568 3569 /* check if caller wants to look at return data or just return rc */ 3570 if ((plen == NULL) || (out_data == NULL)) 3571 goto ioctl_exit; 3572 3573 /* 3574 * Although unlikely to be possible for rsp to be null and rc not set, 3575 * adding check below is slightly safer long term (and quiets Coverity 3576 * warning) 3577 */ 3578 if (rsp == NULL) { 3579 rc = smb_EIO(smb_eio_trace_ioctl_no_rsp); 3580 goto ioctl_exit; 3581 } 3582 3583 *plen = le32_to_cpu(rsp->OutputCount); 3584 3585 /* We check for obvious errors in the output buffer length and offset */ 3586 if (*plen == 0) 3587 goto ioctl_exit; /* server returned no data */ 3588 else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) { 3589 cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen); 3590 rc = smb_EIO2(smb_eio_trace_ioctl_data_len, *plen, rsp_iov.iov_len); 3591 *plen = 0; 3592 goto ioctl_exit; 3593 } 3594 3595 u32 outoff = le32_to_cpu(rsp->OutputOffset); 3596 3597 if (rsp_iov.iov_len - *plen < outoff) { 3598 cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", 3599 *plen, outoff); 3600 rc = smb_EIO2(smb_eio_trace_ioctl_out_off, rsp_iov.iov_len - *plen, outoff); 3601 *plen = 0; 3602 goto ioctl_exit; 3603 } 3604 3605 *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset), 3606 *plen, GFP_KERNEL); 3607 if (*out_data == NULL) { 3608 rc = -ENOMEM; 3609 goto ioctl_exit; 3610 } 3611 3612 ioctl_exit: 3613 SMB2_ioctl_free(&rqst); 3614 free_rsp_buf(resp_buftype, rsp); 3615 3616 if (is_replayable_error(rc) && 3617 smb2_should_replay(tcon, &retries, &cur_sleep)) 3618 goto replay_again; 3619 3620 return rc; 3621 } 3622 3623 /* 3624 * Individual callers to ioctl worker function follow 3625 */ 3626 3627 int 3628 SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, 3629 u64 persistent_fid, u64 volatile_fid) 3630 { 3631 int rc; 3632 struct compress_ioctl fsctl_input; 3633 char *ret_data = NULL; 3634 3635 fsctl_input.CompressionState = 3636 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); 3637 3638 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, 3639 FSCTL_SET_COMPRESSION, 3640 (char *)&fsctl_input /* data input */, 3641 2 /* in data len */, CIFSMaxBufSize /* max out data */, 3642 &ret_data /* out data */, NULL); 3643 3644 cifs_dbg(FYI, "set compression rc %d\n", rc); 3645 3646 return rc; 3647 } 3648 3649 int 3650 SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3651 struct smb_rqst *rqst, 3652 u64 persistent_fid, u64 volatile_fid, bool query_attrs) 3653 { 3654 struct smb2_close_req *req; 3655 struct kvec *iov = rqst->rq_iov; 3656 unsigned int total_len; 3657 int rc; 3658 3659 rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server, 3660 (void **) &req, &total_len); 3661 if (rc) 3662 return rc; 3663 3664 req->PersistentFileId = persistent_fid; 3665 req->VolatileFileId = volatile_fid; 3666 if (query_attrs) 3667 req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB; 3668 else 3669 req->Flags = 0; 3670 iov[0].iov_base = (char *)req; 3671 iov[0].iov_len = total_len; 3672 3673 return 0; 3674 } 3675 3676 void 3677 SMB2_close_free(struct smb_rqst *rqst) 3678 { 3679 if (rqst && rqst->rq_iov) 3680 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3681 } 3682 3683 int 3684 __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, 3685 u64 persistent_fid, u64 volatile_fid, 3686 struct smb2_file_network_open_info *pbuf) 3687 { 3688 struct smb_rqst rqst; 3689 struct smb2_close_rsp *rsp = NULL; 3690 struct cifs_ses *ses = tcon->ses; 3691 struct TCP_Server_Info *server; 3692 struct kvec iov[1]; 3693 struct kvec rsp_iov; 3694 int resp_buftype = CIFS_NO_BUFFER; 3695 int rc = 0; 3696 int flags = 0; 3697 bool query_attrs = false; 3698 int retries = 0, cur_sleep = 0; 3699 3700 replay_again: 3701 /* reinitialize for possible replay */ 3702 flags = 0; 3703 query_attrs = false; 3704 server = cifs_pick_channel(ses); 3705 3706 cifs_dbg(FYI, "Close\n"); 3707 3708 if (!ses || !server) 3709 return smb_EIO(smb_eio_trace_null_pointers); 3710 3711 if (smb3_encryption_required(tcon)) 3712 flags |= CIFS_TRANSFORM_REQ; 3713 3714 memset(&rqst, 0, sizeof(struct smb_rqst)); 3715 memset(&iov, 0, sizeof(iov)); 3716 rqst.rq_iov = iov; 3717 rqst.rq_nvec = 1; 3718 3719 /* check if need to ask server to return timestamps in close response */ 3720 if (pbuf) 3721 query_attrs = true; 3722 3723 trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid); 3724 rc = SMB2_close_init(tcon, server, 3725 &rqst, persistent_fid, volatile_fid, 3726 query_attrs); 3727 if (rc) 3728 goto close_exit; 3729 3730 if (retries) { 3731 /* Back-off before retry */ 3732 if (cur_sleep) 3733 msleep(cur_sleep); 3734 smb2_set_replay(server, &rqst); 3735 } 3736 3737 rc = cifs_send_recv(xid, ses, server, 3738 &rqst, &resp_buftype, flags, &rsp_iov); 3739 rsp = (struct smb2_close_rsp *)rsp_iov.iov_base; 3740 3741 if (rc != 0) { 3742 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE); 3743 trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid, 3744 rc); 3745 goto close_exit; 3746 } else { 3747 trace_smb3_close_done(xid, persistent_fid, tcon->tid, 3748 ses->Suid); 3749 if (pbuf) 3750 memcpy(&pbuf->network_open_info, 3751 &rsp->network_open_info, 3752 sizeof(pbuf->network_open_info)); 3753 atomic_dec(&tcon->num_remote_opens); 3754 } 3755 3756 close_exit: 3757 SMB2_close_free(&rqst); 3758 free_rsp_buf(resp_buftype, rsp); 3759 3760 /* retry close in a worker thread if this one is interrupted */ 3761 if (is_interrupt_error(rc)) { 3762 int tmp_rc; 3763 3764 tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid, 3765 volatile_fid); 3766 if (tmp_rc) 3767 cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n", 3768 persistent_fid, tmp_rc); 3769 } 3770 3771 if (is_replayable_error(rc) && 3772 smb2_should_replay(tcon, &retries, &cur_sleep)) 3773 goto replay_again; 3774 3775 return rc; 3776 } 3777 3778 int 3779 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, 3780 u64 persistent_fid, u64 volatile_fid) 3781 { 3782 return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL); 3783 } 3784 3785 int 3786 smb2_validate_iov(unsigned int offset, unsigned int buffer_length, 3787 struct kvec *iov, unsigned int min_buf_size) 3788 { 3789 unsigned int smb_len = iov->iov_len; 3790 char *end_of_smb = smb_len + (char *)iov->iov_base; 3791 char *begin_of_buf = offset + (char *)iov->iov_base; 3792 char *end_of_buf = begin_of_buf + buffer_length; 3793 3794 3795 if (buffer_length < min_buf_size) { 3796 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n", 3797 buffer_length, min_buf_size); 3798 return -EINVAL; 3799 } 3800 3801 /* check if beyond RFC1001 maximum length */ 3802 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) { 3803 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n", 3804 buffer_length, smb_len); 3805 return -EINVAL; 3806 } 3807 3808 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) { 3809 cifs_dbg(VFS, "Invalid server response, bad offset to data\n"); 3810 return -EINVAL; 3811 } 3812 3813 return 0; 3814 } 3815 3816 /* 3817 * If SMB buffer fields are valid, copy into temporary buffer to hold result. 3818 * Caller must free buffer. 3819 */ 3820 int 3821 smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length, 3822 struct kvec *iov, unsigned int minbufsize, 3823 char *data) 3824 { 3825 char *begin_of_buf = offset + (char *)iov->iov_base; 3826 int rc; 3827 3828 if (!data) 3829 return -EINVAL; 3830 3831 rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize); 3832 if (rc) 3833 return rc; 3834 3835 memcpy(data, begin_of_buf, minbufsize); 3836 3837 return 0; 3838 } 3839 3840 int 3841 SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 3842 struct smb_rqst *rqst, 3843 u64 persistent_fid, u64 volatile_fid, 3844 u8 info_class, u8 info_type, u32 additional_info, 3845 size_t output_len, size_t input_len, void *input) 3846 { 3847 struct smb2_query_info_req *req; 3848 struct kvec *iov = rqst->rq_iov; 3849 unsigned int total_len; 3850 size_t len; 3851 int rc; 3852 3853 if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) || 3854 len > CIFSMaxBufSize)) 3855 return -EINVAL; 3856 3857 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server, 3858 (void **) &req, &total_len); 3859 if (rc) 3860 return rc; 3861 3862 req->InfoType = info_type; 3863 req->FileInfoClass = info_class; 3864 req->PersistentFileId = persistent_fid; 3865 req->VolatileFileId = volatile_fid; 3866 req->AdditionalInformation = cpu_to_le32(additional_info); 3867 3868 req->OutputBufferLength = cpu_to_le32(output_len); 3869 if (input_len) { 3870 req->InputBufferLength = cpu_to_le32(input_len); 3871 /* total_len for smb query request never close to le16 max */ 3872 req->InputBufferOffset = cpu_to_le16(total_len - 1); 3873 memcpy(req->Buffer, input, input_len); 3874 } 3875 3876 iov[0].iov_base = (char *)req; 3877 /* 1 for Buffer */ 3878 iov[0].iov_len = len; 3879 return 0; 3880 } 3881 3882 void 3883 SMB2_query_info_free(struct smb_rqst *rqst) 3884 { 3885 if (rqst && rqst->rq_iov) 3886 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */ 3887 } 3888 3889 static int 3890 query_info(const unsigned int xid, struct cifs_tcon *tcon, 3891 u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type, 3892 u32 additional_info, size_t output_len, size_t min_len, void **data, 3893 u32 *dlen) 3894 { 3895 struct smb_rqst rqst; 3896 struct smb2_query_info_rsp *rsp = NULL; 3897 struct kvec iov[1]; 3898 struct kvec rsp_iov; 3899 int rc = 0; 3900 int resp_buftype = CIFS_NO_BUFFER; 3901 struct cifs_ses *ses = tcon->ses; 3902 struct TCP_Server_Info *server; 3903 int flags = 0; 3904 bool allocated = false; 3905 int retries = 0, cur_sleep = 0; 3906 3907 cifs_dbg(FYI, "Query Info\n"); 3908 3909 if (!ses) 3910 return smb_EIO(smb_eio_trace_null_pointers); 3911 3912 replay_again: 3913 /* reinitialize for possible replay */ 3914 flags = 0; 3915 allocated = false; 3916 server = cifs_pick_channel(ses); 3917 3918 if (!server) 3919 return smb_EIO(smb_eio_trace_null_pointers); 3920 3921 if (smb3_encryption_required(tcon)) 3922 flags |= CIFS_TRANSFORM_REQ; 3923 3924 memset(&rqst, 0, sizeof(struct smb_rqst)); 3925 memset(&iov, 0, sizeof(iov)); 3926 rqst.rq_iov = iov; 3927 rqst.rq_nvec = 1; 3928 3929 rc = SMB2_query_info_init(tcon, server, 3930 &rqst, persistent_fid, volatile_fid, 3931 info_class, info_type, additional_info, 3932 output_len, 0, NULL); 3933 if (rc) 3934 goto qinf_exit; 3935 3936 trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid, 3937 ses->Suid, info_class, (__u32)info_type); 3938 3939 if (retries) { 3940 /* Back-off before retry */ 3941 if (cur_sleep) 3942 msleep(cur_sleep); 3943 smb2_set_replay(server, &rqst); 3944 } 3945 3946 rc = cifs_send_recv(xid, ses, server, 3947 &rqst, &resp_buftype, flags, &rsp_iov); 3948 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; 3949 3950 if (rc) { 3951 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); 3952 trace_smb3_query_info_err(xid, persistent_fid, tcon->tid, 3953 ses->Suid, info_class, (__u32)info_type, rc); 3954 goto qinf_exit; 3955 } 3956 3957 trace_smb3_query_info_done(xid, persistent_fid, tcon->tid, 3958 ses->Suid, info_class, (__u32)info_type); 3959 3960 if (dlen) { 3961 *dlen = le32_to_cpu(rsp->OutputBufferLength); 3962 if (!*data) { 3963 *data = kmalloc(*dlen, GFP_KERNEL); 3964 if (!*data) { 3965 cifs_tcon_dbg(VFS, 3966 "Error %d allocating memory for acl\n", 3967 rc); 3968 *dlen = 0; 3969 rc = -ENOMEM; 3970 goto qinf_exit; 3971 } 3972 allocated = true; 3973 } 3974 } 3975 3976 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset), 3977 le32_to_cpu(rsp->OutputBufferLength), 3978 &rsp_iov, dlen ? *dlen : min_len, *data); 3979 if (rc && allocated) { 3980 kfree(*data); 3981 *data = NULL; 3982 *dlen = 0; 3983 } 3984 3985 qinf_exit: 3986 SMB2_query_info_free(&rqst); 3987 free_rsp_buf(resp_buftype, rsp); 3988 3989 if (is_replayable_error(rc) && 3990 smb2_should_replay(tcon, &retries, &cur_sleep)) 3991 goto replay_again; 3992 3993 return rc; 3994 } 3995 3996 int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, 3997 u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data) 3998 { 3999 return query_info(xid, tcon, persistent_fid, volatile_fid, 4000 FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0, 4001 sizeof(struct smb2_file_all_info) + PATH_MAX * 2, 4002 sizeof(struct smb2_file_all_info), (void **)&data, 4003 NULL); 4004 } 4005 4006 int 4007 SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon, 4008 u64 persistent_fid, u64 volatile_fid, 4009 void **data, u32 *plen, u32 extra_info) 4010 { 4011 *plen = 0; 4012 4013 return query_info(xid, tcon, persistent_fid, volatile_fid, 4014 0, SMB2_O_INFO_SECURITY, extra_info, 4015 SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen); 4016 } 4017 4018 int 4019 SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon, 4020 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid) 4021 { 4022 return query_info(xid, tcon, persistent_fid, volatile_fid, 4023 FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0, 4024 sizeof(struct smb2_file_internal_info), 4025 sizeof(struct smb2_file_internal_info), 4026 (void **)&uniqueid, NULL); 4027 } 4028 4029 /* 4030 * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory 4031 * See MS-SMB2 2.2.35 and 2.2.36 4032 */ 4033 4034 static int 4035 SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst, 4036 struct cifs_tcon *tcon, struct TCP_Server_Info *server, 4037 u64 persistent_fid, u64 volatile_fid, 4038 u32 completion_filter, bool watch_tree) 4039 { 4040 struct smb2_change_notify_req *req; 4041 struct kvec *iov = rqst->rq_iov; 4042 unsigned int total_len; 4043 int rc; 4044 4045 rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server, 4046 (void **) &req, &total_len); 4047 if (rc) 4048 return rc; 4049 4050 req->PersistentFileId = persistent_fid; 4051 req->VolatileFileId = volatile_fid; 4052 /* See note 354 of MS-SMB2, 64K max */ 4053 req->OutputBufferLength = 4054 cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE); 4055 req->CompletionFilter = cpu_to_le32(completion_filter); 4056 if (watch_tree) 4057 req->Flags = cpu_to_le16(SMB2_WATCH_TREE); 4058 else 4059 req->Flags = 0; 4060 4061 iov[0].iov_base = (char *)req; 4062 iov[0].iov_len = total_len; 4063 4064 return 0; 4065 } 4066 4067 int 4068 SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, 4069 u64 persistent_fid, u64 volatile_fid, bool watch_tree, 4070 u32 completion_filter, u32 max_out_data_len, char **out_data, 4071 u32 *plen /* returned data len */) 4072 { 4073 struct cifs_ses *ses = tcon->ses; 4074 struct TCP_Server_Info *server; 4075 struct smb_rqst rqst; 4076 struct smb2_change_notify_rsp *smb_rsp; 4077 struct kvec iov[1]; 4078 struct kvec rsp_iov = {NULL, 0}; 4079 int resp_buftype = CIFS_NO_BUFFER; 4080 int flags = 0; 4081 int rc = 0; 4082 int retries = 0, cur_sleep = 0; 4083 4084 replay_again: 4085 /* reinitialize for possible replay */ 4086 flags = 0; 4087 server = cifs_pick_channel(ses); 4088 4089 cifs_dbg(FYI, "change notify\n"); 4090 if (!ses || !server) 4091 return smb_EIO(smb_eio_trace_null_pointers); 4092 4093 if (smb3_encryption_required(tcon)) 4094 flags |= CIFS_TRANSFORM_REQ; 4095 4096 memset(&rqst, 0, sizeof(struct smb_rqst)); 4097 memset(&iov, 0, sizeof(iov)); 4098 if (plen) 4099 *plen = 0; 4100 4101 rqst.rq_iov = iov; 4102 rqst.rq_nvec = 1; 4103 4104 rc = SMB2_notify_init(xid, &rqst, tcon, server, 4105 persistent_fid, volatile_fid, 4106 completion_filter, watch_tree); 4107 if (rc) 4108 goto cnotify_exit; 4109 4110 trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid, 4111 (u8)watch_tree, completion_filter); 4112 4113 if (retries) { 4114 /* Back-off before retry */ 4115 if (cur_sleep) 4116 msleep(cur_sleep); 4117 smb2_set_replay(server, &rqst); 4118 } 4119 4120 rc = cifs_send_recv(xid, ses, server, 4121 &rqst, &resp_buftype, flags, &rsp_iov); 4122 4123 if (rc != 0) { 4124 cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE); 4125 trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid, 4126 (u8)watch_tree, completion_filter, rc); 4127 } else { 4128 trace_smb3_notify_done(xid, persistent_fid, tcon->tid, 4129 ses->Suid, (u8)watch_tree, completion_filter); 4130 /* validate that notify information is plausible */ 4131 if ((rsp_iov.iov_base == NULL) || 4132 (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp) + 1)) 4133 goto cnotify_exit; 4134 4135 smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base; 4136 4137 rc = smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset), 4138 le32_to_cpu(smb_rsp->OutputBufferLength), 4139 &rsp_iov, 4140 sizeof(struct file_notify_information)); 4141 if (rc) 4142 goto cnotify_exit; 4143 4144 *out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset), 4145 le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL); 4146 if (*out_data == NULL) { 4147 rc = -ENOMEM; 4148 goto cnotify_exit; 4149 } else if (plen) 4150 *plen = le32_to_cpu(smb_rsp->OutputBufferLength); 4151 } 4152 4153 cnotify_exit: 4154 if (rqst.rq_iov) 4155 cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */ 4156 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4157 4158 if (is_replayable_error(rc) && 4159 smb2_should_replay(tcon, &retries, &cur_sleep)) 4160 goto replay_again; 4161 4162 return rc; 4163 } 4164 4165 4166 4167 /* 4168 * This is a no-op for now. We're not really interested in the reply, but 4169 * rather in the fact that the server sent one and that server->lstrp 4170 * gets updated. 4171 * 4172 * FIXME: maybe we should consider checking that the reply matches request? 4173 */ 4174 static void 4175 smb2_echo_callback(struct TCP_Server_Info *server, struct mid_q_entry *mid) 4176 { 4177 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; 4178 struct cifs_credits credits = { .value = 0, .instance = 0 }; 4179 4180 if (mid->mid_state == MID_RESPONSE_RECEIVED 4181 || mid->mid_state == MID_RESPONSE_MALFORMED) { 4182 credits.value = le16_to_cpu(rsp->hdr.CreditRequest); 4183 credits.instance = server->reconnect_instance; 4184 } 4185 4186 release_mid(server, mid); 4187 add_credits(server, &credits, CIFS_ECHO_OP); 4188 } 4189 4190 static void cifs_renegotiate_iosize(struct TCP_Server_Info *server, 4191 struct cifs_tcon *tcon) 4192 { 4193 struct cifs_sb_info *cifs_sb; 4194 4195 if (server == NULL || tcon == NULL) 4196 return; 4197 4198 spin_lock(&tcon->sb_list_lock); 4199 list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link) 4200 cifs_negotiate_iosize(server, cifs_sb->ctx, tcon); 4201 spin_unlock(&tcon->sb_list_lock); 4202 } 4203 4204 void smb2_reconnect_server(struct work_struct *work) 4205 { 4206 struct TCP_Server_Info *server = container_of(work, 4207 struct TCP_Server_Info, reconnect.work); 4208 struct TCP_Server_Info *pserver; 4209 struct cifs_ses *ses, *ses2; 4210 struct cifs_tcon *tcon, *tcon2; 4211 struct list_head tmp_list, tmp_ses_list; 4212 bool ses_exist = false; 4213 bool tcon_selected = false; 4214 int rc; 4215 bool resched = false; 4216 4217 /* first check if ref count has reached 0, if not inc ref count */ 4218 spin_lock(&cifs_tcp_ses_lock); 4219 if (!server->srv_count) { 4220 spin_unlock(&cifs_tcp_ses_lock); 4221 return; 4222 } 4223 server->srv_count++; 4224 spin_unlock(&cifs_tcp_ses_lock); 4225 4226 /* If server is a channel, select the primary channel */ 4227 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; 4228 4229 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ 4230 mutex_lock(&pserver->reconnect_mutex); 4231 4232 /* if the server is marked for termination, drop the ref count here */ 4233 if (server->terminate) { 4234 cifs_put_tcp_session(server, true); 4235 mutex_unlock(&pserver->reconnect_mutex); 4236 return; 4237 } 4238 4239 INIT_LIST_HEAD(&tmp_list); 4240 INIT_LIST_HEAD(&tmp_ses_list); 4241 cifs_dbg(FYI, "Reconnecting tcons and channels\n"); 4242 4243 spin_lock(&cifs_tcp_ses_lock); 4244 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 4245 spin_lock(&ses->ses_lock); 4246 if (ses->ses_status == SES_EXITING) { 4247 spin_unlock(&ses->ses_lock); 4248 continue; 4249 } 4250 spin_unlock(&ses->ses_lock); 4251 4252 tcon_selected = false; 4253 4254 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 4255 if (tcon->need_reconnect || tcon->need_reopen_files) { 4256 spin_lock(&tcon->tc_lock); 4257 tcon->tc_count++; 4258 spin_unlock(&tcon->tc_lock); 4259 trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, 4260 netfs_trace_tcon_ref_get_reconnect_server); 4261 list_add_tail(&tcon->rlist, &tmp_list); 4262 tcon_selected = true; 4263 } 4264 } 4265 /* 4266 * IPC has the same lifetime as its session and uses its 4267 * refcount. 4268 */ 4269 if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) { 4270 list_add_tail(&ses->tcon_ipc->rlist, &tmp_list); 4271 tcon_selected = true; 4272 cifs_smb_ses_inc_refcount(ses); 4273 } 4274 /* 4275 * handle the case where channel needs to reconnect 4276 * binding session, but tcon is healthy (some other channel 4277 * is active) 4278 */ 4279 spin_lock(&ses->chan_lock); 4280 if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) { 4281 list_add_tail(&ses->rlist, &tmp_ses_list); 4282 ses_exist = true; 4283 cifs_smb_ses_inc_refcount(ses); 4284 } 4285 spin_unlock(&ses->chan_lock); 4286 } 4287 spin_unlock(&cifs_tcp_ses_lock); 4288 4289 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { 4290 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true); 4291 if (!rc) { 4292 cifs_renegotiate_iosize(server, tcon); 4293 cifs_reopen_persistent_handles(tcon); 4294 } else 4295 resched = true; 4296 list_del_init(&tcon->rlist); 4297 if (tcon->ipc) 4298 cifs_put_smb_ses(tcon->ses); 4299 else 4300 cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_reconnect_server); 4301 } 4302 4303 if (!ses_exist) 4304 goto done; 4305 4306 /* allocate a dummy tcon struct used for reconnect */ 4307 tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_reconnect_server); 4308 if (!tcon) { 4309 resched = true; 4310 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { 4311 list_del_init(&ses->rlist); 4312 cifs_put_smb_ses(ses); 4313 } 4314 goto done; 4315 } 4316 tcon->status = TID_GOOD; 4317 tcon->dummy = true; 4318 4319 /* now reconnect sessions for necessary channels */ 4320 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { 4321 tcon->ses = ses; 4322 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true); 4323 if (rc) 4324 resched = true; 4325 list_del_init(&ses->rlist); 4326 cifs_put_smb_ses(ses); 4327 } 4328 tconInfoFree(tcon, netfs_trace_tcon_ref_free_reconnect_server); 4329 4330 done: 4331 cifs_dbg(FYI, "Reconnecting tcons and channels finished\n"); 4332 if (resched) 4333 cifs_requeue_server_reconn(server); 4334 mutex_unlock(&pserver->reconnect_mutex); 4335 4336 /* now we can safely release srv struct */ 4337 cifs_put_tcp_session(server, true); 4338 } 4339 4340 int 4341 SMB2_echo(struct TCP_Server_Info *server) 4342 { 4343 struct smb2_echo_req *req; 4344 int rc = 0; 4345 struct kvec iov[1]; 4346 struct smb_rqst rqst = { .rq_iov = iov, 4347 .rq_nvec = 1 }; 4348 unsigned int total_len; 4349 4350 cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id); 4351 4352 spin_lock(&server->srv_lock); 4353 if (server->ops->need_neg && 4354 server->ops->need_neg(server)) { 4355 spin_unlock(&server->srv_lock); 4356 /* No need to send echo on newly established connections */ 4357 cifs_queue_server_reconn(server); 4358 return rc; 4359 } 4360 spin_unlock(&server->srv_lock); 4361 4362 rc = smb2_plain_req_init(SMB2_ECHO, NULL, server, 4363 (void **)&req, &total_len); 4364 if (rc) 4365 return rc; 4366 4367 req->hdr.CreditRequest = cpu_to_le16(1); 4368 4369 iov[0].iov_len = total_len; 4370 iov[0].iov_base = (char *)req; 4371 4372 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL, 4373 server, CIFS_ECHO_OP, NULL); 4374 if (rc) 4375 cifs_dbg(FYI, "Echo request failed: %d\n", rc); 4376 4377 cifs_small_buf_release(req); 4378 return rc; 4379 } 4380 4381 void 4382 SMB2_flush_free(struct smb_rqst *rqst) 4383 { 4384 if (rqst && rqst->rq_iov) 4385 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 4386 } 4387 4388 int 4389 SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst, 4390 struct cifs_tcon *tcon, struct TCP_Server_Info *server, 4391 u64 persistent_fid, u64 volatile_fid) 4392 { 4393 struct smb2_flush_req *req; 4394 struct kvec *iov = rqst->rq_iov; 4395 unsigned int total_len; 4396 int rc; 4397 4398 rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server, 4399 (void **) &req, &total_len); 4400 if (rc) 4401 return rc; 4402 4403 req->PersistentFileId = persistent_fid; 4404 req->VolatileFileId = volatile_fid; 4405 4406 iov[0].iov_base = (char *)req; 4407 iov[0].iov_len = total_len; 4408 4409 return 0; 4410 } 4411 4412 int 4413 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 4414 u64 volatile_fid) 4415 { 4416 struct cifs_ses *ses = tcon->ses; 4417 struct smb_rqst rqst; 4418 struct kvec iov[1]; 4419 struct kvec rsp_iov = {NULL, 0}; 4420 struct TCP_Server_Info *server; 4421 int resp_buftype = CIFS_NO_BUFFER; 4422 int flags = 0; 4423 int rc = 0; 4424 int retries = 0, cur_sleep = 0; 4425 4426 replay_again: 4427 /* reinitialize for possible replay */ 4428 flags = 0; 4429 server = cifs_pick_channel(ses); 4430 4431 cifs_dbg(FYI, "flush\n"); 4432 if (!ses || !(ses->server)) 4433 return smb_EIO(smb_eio_trace_null_pointers); 4434 4435 if (smb3_encryption_required(tcon)) 4436 flags |= CIFS_TRANSFORM_REQ; 4437 4438 memset(&rqst, 0, sizeof(struct smb_rqst)); 4439 memset(&iov, 0, sizeof(iov)); 4440 rqst.rq_iov = iov; 4441 rqst.rq_nvec = 1; 4442 4443 rc = SMB2_flush_init(xid, &rqst, tcon, server, 4444 persistent_fid, volatile_fid); 4445 if (rc) 4446 goto flush_exit; 4447 4448 trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid); 4449 4450 if (retries) { 4451 /* Back-off before retry */ 4452 if (cur_sleep) 4453 msleep(cur_sleep); 4454 smb2_set_replay(server, &rqst); 4455 } 4456 4457 rc = cifs_send_recv(xid, ses, server, 4458 &rqst, &resp_buftype, flags, &rsp_iov); 4459 4460 if (rc != 0) { 4461 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); 4462 trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid, 4463 rc); 4464 } else 4465 trace_smb3_flush_done(xid, persistent_fid, tcon->tid, 4466 ses->Suid); 4467 4468 flush_exit: 4469 SMB2_flush_free(&rqst); 4470 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4471 4472 if (is_replayable_error(rc) && 4473 smb2_should_replay(tcon, &retries, &cur_sleep)) 4474 goto replay_again; 4475 4476 return rc; 4477 } 4478 4479 #ifdef CONFIG_CIFS_SMB_DIRECT 4480 static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms) 4481 { 4482 struct TCP_Server_Info *server = io_parms->server; 4483 struct cifs_tcon *tcon = io_parms->tcon; 4484 4485 /* we can only offload if we're connected */ 4486 if (!server || !tcon) 4487 return false; 4488 4489 /* we can only offload on an rdma connection */ 4490 if (!server->rdma || !server->smbd_conn) 4491 return false; 4492 4493 /* we don't support signed offload yet */ 4494 if (server->sign) 4495 return false; 4496 4497 /* we don't support encrypted offload yet */ 4498 if (smb3_encryption_required(tcon)) 4499 return false; 4500 4501 /* offload also has its overhead, so only do it if desired */ 4502 if (io_parms->length < server->rdma_readwrite_threshold) 4503 return false; 4504 4505 return true; 4506 } 4507 #endif /* CONFIG_CIFS_SMB_DIRECT */ 4508 4509 /* 4510 * To form a chain of read requests, any read requests after the first should 4511 * have the end_of_chain boolean set to true. 4512 */ 4513 static int 4514 smb2_new_read_req(void **buf, unsigned int *total_len, 4515 struct cifs_io_parms *io_parms, struct cifs_io_subrequest *rdata, 4516 unsigned int remaining_bytes, int request_type) 4517 { 4518 int rc = -EACCES; 4519 struct smb2_read_req *req = NULL; 4520 struct smb2_hdr *shdr; 4521 struct TCP_Server_Info *server = io_parms->server; 4522 4523 rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server, 4524 (void **) &req, total_len); 4525 if (rc) 4526 return rc; 4527 4528 if (server == NULL) 4529 return -ECONNABORTED; 4530 4531 shdr = &req->hdr; 4532 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); 4533 4534 req->PersistentFileId = io_parms->persistent_fid; 4535 req->VolatileFileId = io_parms->volatile_fid; 4536 req->ReadChannelInfoOffset = 0; /* reserved */ 4537 req->ReadChannelInfoLength = 0; /* reserved */ 4538 req->Channel = 0; /* reserved */ 4539 req->MinimumCount = 0; 4540 req->Length = cpu_to_le32(io_parms->length); 4541 req->Offset = cpu_to_le64(io_parms->offset); 4542 4543 trace_smb3_read_enter(rdata ? rdata->rreq->debug_id : 0, 4544 rdata ? rdata->subreq.debug_index : 0, 4545 rdata ? rdata->xid : 0, 4546 io_parms->persistent_fid, 4547 io_parms->tcon->tid, io_parms->tcon->ses->Suid, 4548 io_parms->offset, io_parms->length); 4549 #ifdef CONFIG_CIFS_SMB_DIRECT 4550 /* 4551 * If we want to do a RDMA write, fill in and append 4552 * smbdirect_buffer_descriptor_v1 to the end of read request 4553 */ 4554 if (rdata && smb3_use_rdma_offload(io_parms)) { 4555 struct smbdirect_buffer_descriptor_v1 *v1; 4556 bool need_invalidate = server->dialect == SMB30_PROT_ID; 4557 4558 rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->subreq.io_iter, 4559 true, need_invalidate); 4560 if (!rdata->mr) 4561 return -EAGAIN; 4562 4563 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; 4564 if (need_invalidate) 4565 req->Channel = SMB2_CHANNEL_RDMA_V1; 4566 req->ReadChannelInfoOffset = 4567 cpu_to_le16(offsetof(struct smb2_read_req, Buffer)); 4568 req->ReadChannelInfoLength = 4569 cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1)); 4570 v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0]; 4571 smbd_mr_fill_buffer_descriptor(rdata->mr, v1); 4572 4573 *total_len += sizeof(*v1) - 1; 4574 } 4575 #endif 4576 if (request_type & CHAINED_REQUEST) { 4577 if (!(request_type & END_OF_CHAIN)) { 4578 /* next 8-byte aligned request */ 4579 *total_len = ALIGN(*total_len, 8); 4580 shdr->NextCommand = cpu_to_le32(*total_len); 4581 } else /* END_OF_CHAIN */ 4582 shdr->NextCommand = 0; 4583 if (request_type & RELATED_REQUEST) { 4584 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS; 4585 /* 4586 * Related requests use info from previous read request 4587 * in chain. 4588 */ 4589 shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF); 4590 shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF); 4591 req->PersistentFileId = (u64)-1; 4592 req->VolatileFileId = (u64)-1; 4593 } 4594 } 4595 if (remaining_bytes > io_parms->length) 4596 req->RemainingBytes = cpu_to_le32(remaining_bytes); 4597 else 4598 req->RemainingBytes = 0; 4599 4600 *buf = req; 4601 return rc; 4602 } 4603 4604 static void 4605 smb2_readv_callback(struct TCP_Server_Info *server, struct mid_q_entry *mid) 4606 { 4607 struct cifs_io_subrequest *rdata = mid->callback_data; 4608 struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode); 4609 struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink); 4610 struct smb2_hdr *shdr = (struct smb2_hdr *)rdata->iov[0].iov_base; 4611 struct cifs_credits credits = { 4612 .value = 0, 4613 .instance = 0, 4614 .rreq_debug_id = rdata->rreq->debug_id, 4615 .rreq_debug_index = rdata->subreq.debug_index, 4616 }; 4617 struct smb_rqst rqst = { .rq_iov = &rdata->iov[0], .rq_nvec = 1 }; 4618 unsigned int rreq_debug_id = rdata->rreq->debug_id; 4619 unsigned int subreq_debug_index = rdata->subreq.debug_index; 4620 4621 if (rdata->got_bytes) { 4622 rqst.rq_iter = rdata->subreq.io_iter; 4623 } 4624 4625 WARN_ONCE(rdata->server != server, 4626 "rdata server %p != mid server %p", 4627 rdata->server, server); 4628 4629 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu/%zu\n", 4630 __func__, mid->mid, mid->mid_state, rdata->result, 4631 rdata->got_bytes, rdata->subreq.len - rdata->subreq.transferred); 4632 4633 switch (mid->mid_state) { 4634 case MID_RESPONSE_RECEIVED: 4635 credits.value = le16_to_cpu(shdr->CreditRequest); 4636 credits.instance = server->reconnect_instance; 4637 /* result already set, check signature */ 4638 if (server->sign && !mid->decrypted) { 4639 int rc; 4640 4641 iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes); 4642 rc = smb2_verify_signature(&rqst, server); 4643 if (rc) { 4644 cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n", 4645 rc); 4646 rdata->subreq.error = rc; 4647 rdata->result = rc; 4648 4649 if (is_replayable_error(rc)) { 4650 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_retry_needed); 4651 __set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags); 4652 } else 4653 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_bad); 4654 } else 4655 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress); 4656 } 4657 /* FIXME: should this be counted toward the initiating task? */ 4658 task_io_account_read(rdata->got_bytes); 4659 cifs_stats_bytes_read(tcon, rdata->got_bytes); 4660 break; 4661 case MID_REQUEST_SUBMITTED: 4662 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_req_submitted); 4663 goto do_retry; 4664 case MID_RETRY_NEEDED: 4665 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_retry_needed); 4666 do_retry: 4667 __set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags); 4668 rdata->result = -EAGAIN; 4669 if (server->sign && rdata->got_bytes) 4670 /* reset bytes number since we can not check a sign */ 4671 rdata->got_bytes = 0; 4672 /* FIXME: should this be counted toward the initiating task? */ 4673 task_io_account_read(rdata->got_bytes); 4674 cifs_stats_bytes_read(tcon, rdata->got_bytes); 4675 break; 4676 case MID_RESPONSE_MALFORMED: 4677 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_malformed); 4678 credits.value = le16_to_cpu(shdr->CreditRequest); 4679 credits.instance = server->reconnect_instance; 4680 rdata->result = smb_EIO(smb_eio_trace_read_rsp_malformed); 4681 break; 4682 default: 4683 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_unknown); 4684 rdata->result = smb_EIO1(smb_eio_trace_read_mid_state_unknown, 4685 mid->mid_state); 4686 break; 4687 } 4688 #ifdef CONFIG_CIFS_SMB_DIRECT 4689 /* 4690 * If this rdata has a memory registered, the MR can be freed 4691 * MR needs to be freed as soon as I/O finishes to prevent deadlock 4692 * because they have limited number and are used for future I/Os 4693 */ 4694 if (rdata->mr) { 4695 smbd_deregister_mr(rdata->mr); 4696 rdata->mr = NULL; 4697 } 4698 #endif 4699 if (rdata->result && rdata->result != -ENODATA) { 4700 cifs_stats_fail_inc(tcon, SMB2_READ_HE); 4701 trace_smb3_read_err(rdata->rreq->debug_id, 4702 rdata->subreq.debug_index, 4703 rdata->xid, 4704 rdata->req->cfile->fid.persistent_fid, 4705 tcon->tid, tcon->ses->Suid, 4706 rdata->subreq.start + rdata->subreq.transferred, 4707 rdata->subreq.len - rdata->subreq.transferred, 4708 rdata->result); 4709 } else 4710 trace_smb3_read_done(rdata->rreq->debug_id, 4711 rdata->subreq.debug_index, 4712 rdata->xid, 4713 rdata->req->cfile->fid.persistent_fid, 4714 tcon->tid, tcon->ses->Suid, 4715 rdata->subreq.start + rdata->subreq.transferred, 4716 rdata->got_bytes); 4717 4718 if (rdata->result == -ENODATA) { 4719 __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags); 4720 rdata->result = 0; 4721 } else { 4722 size_t trans = rdata->subreq.transferred + rdata->got_bytes; 4723 if (trans < rdata->subreq.len && 4724 rdata->subreq.start + trans >= ictx->remote_i_size) { 4725 __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags); 4726 rdata->result = 0; 4727 } 4728 if (rdata->got_bytes) 4729 __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags); 4730 } 4731 4732 /* see if we need to retry */ 4733 if (is_replayable_error(rdata->result) && 4734 smb2_should_replay(tcon, 4735 &rdata->retries, 4736 &rdata->cur_sleep)) 4737 rdata->replay = true; 4738 4739 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value, 4740 server->credits, server->in_flight, 4741 0, cifs_trace_rw_credits_read_response_clear); 4742 rdata->credits.value = 0; 4743 rdata->subreq.error = rdata->result; 4744 rdata->subreq.transferred += rdata->got_bytes; 4745 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress); 4746 netfs_read_subreq_terminated(&rdata->subreq); 4747 release_mid(server, mid); 4748 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0, 4749 server->credits, server->in_flight, 4750 credits.value, cifs_trace_rw_credits_read_response_add); 4751 add_credits(server, &credits, 0); 4752 } 4753 4754 /* smb2_async_readv - send an async read, and set up mid to handle result */ 4755 int 4756 smb2_async_readv(struct cifs_io_subrequest *rdata) 4757 { 4758 int rc, flags = 0; 4759 char *buf; 4760 struct netfs_io_subrequest *subreq = &rdata->subreq; 4761 struct smb2_hdr *shdr; 4762 struct cifs_io_parms io_parms; 4763 struct smb_rqst rqst = { .rq_iov = rdata->iov, 4764 .rq_nvec = 1 }; 4765 struct TCP_Server_Info *server; 4766 struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink); 4767 unsigned int total_len; 4768 int credit_request; 4769 4770 cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n", 4771 __func__, subreq->start, subreq->len); 4772 4773 if (!rdata->server) 4774 rdata->server = cifs_pick_channel(tcon->ses); 4775 4776 io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink); 4777 io_parms.server = server = rdata->server; 4778 io_parms.offset = subreq->start + subreq->transferred; 4779 io_parms.length = subreq->len - subreq->transferred; 4780 io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid; 4781 io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid; 4782 io_parms.pid = rdata->req->pid; 4783 4784 rc = smb2_new_read_req( 4785 (void **) &buf, &total_len, &io_parms, rdata, 0, 0); 4786 if (rc) 4787 goto out; 4788 4789 if (smb3_encryption_required(io_parms.tcon)) 4790 flags |= CIFS_TRANSFORM_REQ; 4791 4792 rdata->iov[0].iov_base = buf; 4793 rdata->iov[0].iov_len = total_len; 4794 rdata->got_bytes = 0; 4795 rdata->result = 0; 4796 4797 shdr = (struct smb2_hdr *)buf; 4798 4799 if (rdata->replay) { 4800 /* Back-off before retry */ 4801 if (rdata->cur_sleep) 4802 msleep(rdata->cur_sleep); 4803 smb2_set_replay(server, &rqst); 4804 } 4805 4806 if (rdata->credits.value > 0) { 4807 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(io_parms.length, 4808 SMB2_MAX_BUFFER_SIZE)); 4809 credit_request = le16_to_cpu(shdr->CreditCharge) + 8; 4810 if (server->credits >= server->max_credits) 4811 shdr->CreditRequest = cpu_to_le16(0); 4812 else 4813 shdr->CreditRequest = cpu_to_le16( 4814 min_t(int, server->max_credits - 4815 server->credits, credit_request)); 4816 4817 rc = adjust_credits(server, rdata, cifs_trace_rw_credits_call_readv_adjust); 4818 if (rc) 4819 goto async_readv_out; 4820 4821 flags |= CIFS_HAS_CREDITS; 4822 } 4823 4824 rc = cifs_call_async(server, &rqst, 4825 cifs_readv_receive, smb2_readv_callback, 4826 smb3_handle_read_data, rdata, flags, 4827 &rdata->credits); 4828 if (rc) { 4829 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); 4830 trace_smb3_read_err(rdata->rreq->debug_id, 4831 subreq->debug_index, 4832 rdata->xid, io_parms.persistent_fid, 4833 io_parms.tcon->tid, 4834 io_parms.tcon->ses->Suid, 4835 io_parms.offset, 4836 subreq->len - subreq->transferred, rc); 4837 } 4838 4839 async_readv_out: 4840 cifs_small_buf_release(buf); 4841 4842 out: 4843 /* if the send error is retryable, let netfs know about it */ 4844 if (is_replayable_error(rc) && 4845 smb2_should_replay(tcon, 4846 &rdata->retries, 4847 &rdata->cur_sleep)) { 4848 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_retry_needed); 4849 __set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags); 4850 } 4851 4852 return rc; 4853 } 4854 4855 int 4856 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, 4857 unsigned int *nbytes, char **buf, int *buf_type) 4858 { 4859 struct smb_rqst rqst; 4860 int resp_buftype, rc; 4861 struct smb2_read_req *req = NULL; 4862 struct smb2_read_rsp *rsp = NULL; 4863 struct kvec iov[1]; 4864 struct kvec rsp_iov; 4865 unsigned int total_len; 4866 int flags = CIFS_LOG_ERROR; 4867 struct cifs_ses *ses = io_parms->tcon->ses; 4868 4869 if (!io_parms->server) 4870 io_parms->server = cifs_pick_channel(io_parms->tcon->ses); 4871 4872 *nbytes = 0; 4873 rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0); 4874 if (rc) 4875 return rc; 4876 4877 if (smb3_encryption_required(io_parms->tcon)) 4878 flags |= CIFS_TRANSFORM_REQ; 4879 4880 iov[0].iov_base = (char *)req; 4881 iov[0].iov_len = total_len; 4882 4883 memset(&rqst, 0, sizeof(struct smb_rqst)); 4884 rqst.rq_iov = iov; 4885 rqst.rq_nvec = 1; 4886 4887 rc = cifs_send_recv(xid, ses, io_parms->server, 4888 &rqst, &resp_buftype, flags, &rsp_iov); 4889 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; 4890 4891 if (rc) { 4892 if (rc != -ENODATA) { 4893 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); 4894 cifs_dbg(VFS, "Send error in read = %d\n", rc); 4895 trace_smb3_read_err(0, 0, xid, 4896 req->PersistentFileId, 4897 io_parms->tcon->tid, ses->Suid, 4898 io_parms->offset, io_parms->length, 4899 rc); 4900 } else 4901 trace_smb3_read_done(0, 0, xid, 4902 req->PersistentFileId, io_parms->tcon->tid, 4903 ses->Suid, io_parms->offset, 0); 4904 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4905 cifs_small_buf_release(req); 4906 return rc == -ENODATA ? 0 : rc; 4907 } else 4908 trace_smb3_read_done(0, 0, xid, 4909 req->PersistentFileId, 4910 io_parms->tcon->tid, ses->Suid, 4911 io_parms->offset, io_parms->length); 4912 4913 cifs_small_buf_release(req); 4914 4915 *nbytes = le32_to_cpu(rsp->DataLength); 4916 if ((*nbytes > CIFS_MAX_MSGSIZE) || 4917 (*nbytes > io_parms->length)) { 4918 cifs_dbg(FYI, "bad length %d for count %d\n", 4919 *nbytes, io_parms->length); 4920 rc = smb_EIO2(smb_eio_trace_read_overlarge, 4921 *nbytes, io_parms->length); 4922 *nbytes = 0; 4923 } 4924 4925 if (*buf) { 4926 memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes); 4927 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 4928 } else if (resp_buftype != CIFS_NO_BUFFER) { 4929 *buf = rsp_iov.iov_base; 4930 if (resp_buftype == CIFS_SMALL_BUFFER) 4931 *buf_type = CIFS_SMALL_BUFFER; 4932 else if (resp_buftype == CIFS_LARGE_BUFFER) 4933 *buf_type = CIFS_LARGE_BUFFER; 4934 } 4935 return rc; 4936 } 4937 4938 /* 4939 * Check the mid_state and signature on received buffer (if any), and queue the 4940 * workqueue completion task. 4941 */ 4942 static void 4943 smb2_writev_callback(struct TCP_Server_Info *server, struct mid_q_entry *mid) 4944 { 4945 struct cifs_io_subrequest *wdata = mid->callback_data; 4946 struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink); 4947 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; 4948 struct cifs_credits credits = { 4949 .value = 0, 4950 .instance = 0, 4951 .rreq_debug_id = wdata->rreq->debug_id, 4952 .rreq_debug_index = wdata->subreq.debug_index, 4953 }; 4954 unsigned int rreq_debug_id = wdata->rreq->debug_id; 4955 unsigned int subreq_debug_index = wdata->subreq.debug_index; 4956 ssize_t result = 0; 4957 size_t written; 4958 4959 WARN_ONCE(wdata->server != server, 4960 "wdata server %p != mid server %p", 4961 wdata->server, server); 4962 4963 switch (mid->mid_state) { 4964 case MID_RESPONSE_RECEIVED: 4965 credits.value = le16_to_cpu(rsp->hdr.CreditRequest); 4966 credits.instance = server->reconnect_instance; 4967 result = smb2_check_receive(mid, server, 0); 4968 if (result != 0) { 4969 if (is_replayable_error(result)) { 4970 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_retry_needed); 4971 __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags); 4972 } else { 4973 wdata->subreq.error = result; 4974 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_bad); 4975 } 4976 break; 4977 } 4978 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress); 4979 4980 written = le32_to_cpu(rsp->DataLength); 4981 /* 4982 * Mask off high 16 bits when bytes written as returned 4983 * by the server is greater than bytes requested by the 4984 * client. OS/2 servers are known to set incorrect 4985 * CountHigh values. 4986 */ 4987 if (written > wdata->subreq.len) 4988 written &= 0xFFFF; 4989 4990 cifs_stats_bytes_written(tcon, written); 4991 4992 if (written < wdata->subreq.len) { 4993 result = -ENOSPC; 4994 } else if (written > 0) { 4995 wdata->subreq.len = written; 4996 __set_bit(NETFS_SREQ_MADE_PROGRESS, &wdata->subreq.flags); 4997 } 4998 break; 4999 case MID_REQUEST_SUBMITTED: 5000 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_req_submitted); 5001 __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags); 5002 result = -EAGAIN; 5003 break; 5004 case MID_RETRY_NEEDED: 5005 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_retry_needed); 5006 __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags); 5007 result = -EAGAIN; 5008 break; 5009 case MID_RESPONSE_MALFORMED: 5010 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_malformed); 5011 credits.value = le16_to_cpu(rsp->hdr.CreditRequest); 5012 credits.instance = server->reconnect_instance; 5013 result = smb_EIO(smb_eio_trace_write_rsp_malformed); 5014 break; 5015 default: 5016 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_unknown); 5017 result = smb_EIO1(smb_eio_trace_write_mid_state_unknown, 5018 mid->mid_state); 5019 break; 5020 } 5021 #ifdef CONFIG_CIFS_SMB_DIRECT 5022 /* 5023 * If this wdata has a memory registered, the MR can be freed 5024 * The number of MRs available is limited, it's important to recover 5025 * used MR as soon as I/O is finished. Hold MR longer in the later 5026 * I/O process can possibly result in I/O deadlock due to lack of MR 5027 * to send request on I/O retry 5028 */ 5029 if (wdata->mr) { 5030 smbd_deregister_mr(wdata->mr); 5031 wdata->mr = NULL; 5032 } 5033 #endif 5034 if (result) { 5035 wdata->result = result; 5036 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 5037 trace_smb3_write_err(wdata->rreq->debug_id, 5038 wdata->subreq.debug_index, 5039 wdata->xid, 5040 wdata->req->cfile->fid.persistent_fid, 5041 tcon->tid, tcon->ses->Suid, wdata->subreq.start, 5042 wdata->subreq.len, wdata->result); 5043 if (wdata->result == -ENOSPC) 5044 pr_warn_once("Out of space writing to %s\n", 5045 tcon->tree_name); 5046 } else 5047 trace_smb3_write_done(wdata->rreq->debug_id, 5048 wdata->subreq.debug_index, 5049 wdata->xid, 5050 wdata->req->cfile->fid.persistent_fid, 5051 tcon->tid, tcon->ses->Suid, 5052 wdata->subreq.start, wdata->subreq.len); 5053 5054 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, wdata->credits.value, 5055 server->credits, server->in_flight, 5056 0, cifs_trace_rw_credits_write_response_clear); 5057 wdata->credits.value = 0; 5058 5059 /* see if we need to retry */ 5060 if (is_replayable_error(wdata->result) && 5061 smb2_should_replay(tcon, 5062 &wdata->retries, 5063 &wdata->cur_sleep)) 5064 wdata->replay = true; 5065 5066 cifs_write_subrequest_terminated(wdata, result ?: written); 5067 release_mid(server, mid); 5068 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0, 5069 server->credits, server->in_flight, 5070 credits.value, cifs_trace_rw_credits_write_response_add); 5071 add_credits(server, &credits, 0); 5072 } 5073 5074 /* smb2_async_writev - send an async write, and set up mid to handle result */ 5075 void 5076 smb2_async_writev(struct cifs_io_subrequest *wdata) 5077 { 5078 int rc = -EACCES, flags = 0; 5079 struct smb2_write_req *req = NULL; 5080 struct smb2_hdr *shdr; 5081 struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink); 5082 struct TCP_Server_Info *server = wdata->server; 5083 struct kvec iov[1]; 5084 struct smb_rqst rqst = { }; 5085 unsigned int total_len, xid = wdata->xid; 5086 struct cifs_io_parms _io_parms; 5087 struct cifs_io_parms *io_parms = NULL; 5088 int credit_request; 5089 5090 /* 5091 * in future we may get cifs_io_parms passed in from the caller, 5092 * but for now we construct it here... 5093 */ 5094 _io_parms = (struct cifs_io_parms) { 5095 .tcon = tcon, 5096 .server = server, 5097 .offset = wdata->subreq.start, 5098 .length = wdata->subreq.len, 5099 .persistent_fid = wdata->req->cfile->fid.persistent_fid, 5100 .volatile_fid = wdata->req->cfile->fid.volatile_fid, 5101 .pid = wdata->req->pid, 5102 }; 5103 io_parms = &_io_parms; 5104 5105 rc = smb2_plain_req_init(SMB2_WRITE, tcon, server, 5106 (void **) &req, &total_len); 5107 if (rc) 5108 goto out; 5109 5110 rqst.rq_iov = iov; 5111 rqst.rq_iter = wdata->subreq.io_iter; 5112 5113 rqst.rq_iov[0].iov_len = total_len - 1; 5114 rqst.rq_iov[0].iov_base = (char *)req; 5115 rqst.rq_nvec += 1; 5116 5117 if (smb3_encryption_required(tcon)) 5118 flags |= CIFS_TRANSFORM_REQ; 5119 5120 shdr = (struct smb2_hdr *)req; 5121 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); 5122 5123 req->PersistentFileId = io_parms->persistent_fid; 5124 req->VolatileFileId = io_parms->volatile_fid; 5125 req->WriteChannelInfoOffset = 0; 5126 req->WriteChannelInfoLength = 0; 5127 req->Channel = SMB2_CHANNEL_NONE; 5128 req->Length = cpu_to_le32(io_parms->length); 5129 req->Offset = cpu_to_le64(io_parms->offset); 5130 req->DataOffset = cpu_to_le16( 5131 offsetof(struct smb2_write_req, Buffer)); 5132 req->RemainingBytes = 0; 5133 5134 trace_smb3_write_enter(wdata->rreq->debug_id, 5135 wdata->subreq.debug_index, 5136 wdata->xid, 5137 io_parms->persistent_fid, 5138 io_parms->tcon->tid, 5139 io_parms->tcon->ses->Suid, 5140 io_parms->offset, 5141 io_parms->length); 5142 5143 #ifdef CONFIG_CIFS_SMB_DIRECT 5144 /* 5145 * If we want to do a server RDMA read, fill in and append 5146 * smbdirect_buffer_descriptor_v1 to the end of write request 5147 */ 5148 if (smb3_use_rdma_offload(io_parms)) { 5149 struct smbdirect_buffer_descriptor_v1 *v1; 5150 bool need_invalidate = server->dialect == SMB30_PROT_ID; 5151 5152 wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter, 5153 false, need_invalidate); 5154 if (!wdata->mr) { 5155 rc = -EAGAIN; 5156 goto async_writev_out; 5157 } 5158 /* For RDMA read, I/O size is in RemainingBytes not in Length */ 5159 req->RemainingBytes = req->Length; 5160 req->Length = 0; 5161 req->DataOffset = 0; 5162 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; 5163 if (need_invalidate) 5164 req->Channel = SMB2_CHANNEL_RDMA_V1; 5165 req->WriteChannelInfoOffset = 5166 cpu_to_le16(offsetof(struct smb2_write_req, Buffer)); 5167 req->WriteChannelInfoLength = 5168 cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1)); 5169 v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0]; 5170 smbd_mr_fill_buffer_descriptor(wdata->mr, v1); 5171 5172 rqst.rq_iov[0].iov_len += sizeof(*v1); 5173 5174 /* 5175 * We keep wdata->subreq.io_iter, 5176 * but we have to truncate rqst.rq_iter 5177 */ 5178 iov_iter_truncate(&rqst.rq_iter, 0); 5179 } 5180 #endif 5181 5182 if (wdata->replay) { 5183 /* Back-off before retry */ 5184 if (wdata->cur_sleep) 5185 msleep(wdata->cur_sleep); 5186 smb2_set_replay(server, &rqst); 5187 } 5188 5189 cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n", 5190 io_parms->offset, io_parms->length, iov_iter_count(&wdata->subreq.io_iter)); 5191 5192 if (wdata->credits.value > 0) { 5193 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->subreq.len, 5194 SMB2_MAX_BUFFER_SIZE)); 5195 credit_request = le16_to_cpu(shdr->CreditCharge) + 8; 5196 if (server->credits >= server->max_credits) 5197 shdr->CreditRequest = cpu_to_le16(0); 5198 else 5199 shdr->CreditRequest = cpu_to_le16( 5200 min_t(int, server->max_credits - 5201 server->credits, credit_request)); 5202 5203 rc = adjust_credits(server, wdata, cifs_trace_rw_credits_call_writev_adjust); 5204 if (rc) 5205 goto async_writev_out; 5206 5207 flags |= CIFS_HAS_CREDITS; 5208 } 5209 5210 /* XXX: compression + encryption is unsupported for now */ 5211 if (((flags & CIFS_TRANSFORM_REQ) != CIFS_TRANSFORM_REQ) && should_compress(tcon, &rqst)) 5212 flags |= CIFS_COMPRESS_REQ; 5213 5214 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL, 5215 wdata, flags, &wdata->credits); 5216 /* Can't touch wdata if rc == 0 */ 5217 if (rc) { 5218 trace_smb3_write_err(wdata->rreq->debug_id, 5219 wdata->subreq.debug_index, 5220 xid, 5221 io_parms->persistent_fid, 5222 io_parms->tcon->tid, 5223 io_parms->tcon->ses->Suid, 5224 io_parms->offset, 5225 io_parms->length, 5226 rc); 5227 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 5228 } 5229 5230 async_writev_out: 5231 cifs_small_buf_release(req); 5232 out: 5233 /* if the send error is retryable, let netfs know about it */ 5234 if (is_replayable_error(rc) && 5235 smb2_should_replay(tcon, 5236 &wdata->retries, 5237 &wdata->cur_sleep)) { 5238 wdata->replay = true; 5239 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_retry_needed); 5240 __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags); 5241 } 5242 5243 if (rc) { 5244 trace_smb3_rw_credits(wdata->rreq->debug_id, 5245 wdata->subreq.debug_index, 5246 wdata->credits.value, 5247 server->credits, server->in_flight, 5248 -(int)wdata->credits.value, 5249 cifs_trace_rw_credits_write_response_clear); 5250 add_credits_and_wake_if(wdata->server, &wdata->credits, 0); 5251 cifs_write_subrequest_terminated(wdata, rc); 5252 } 5253 } 5254 5255 /* 5256 * SMB2_write function gets iov pointer to kvec array with n_vec as a length. 5257 * The length field from io_parms must be at least 1 and indicates a number of 5258 * elements with data to write that begins with position 1 in iov array. All 5259 * data length is specified by count. 5260 */ 5261 int 5262 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, 5263 unsigned int *nbytes, struct kvec *iov, int n_vec) 5264 { 5265 struct smb_rqst rqst; 5266 int rc = 0; 5267 struct smb2_write_req *req = NULL; 5268 struct smb2_write_rsp *rsp = NULL; 5269 int resp_buftype; 5270 struct kvec rsp_iov; 5271 int flags = 0; 5272 unsigned int total_len; 5273 struct TCP_Server_Info *server; 5274 int retries = 0, cur_sleep = 0; 5275 5276 replay_again: 5277 /* reinitialize for possible replay */ 5278 flags = 0; 5279 *nbytes = 0; 5280 if (!io_parms->server) 5281 io_parms->server = cifs_pick_channel(io_parms->tcon->ses); 5282 server = io_parms->server; 5283 if (server == NULL) 5284 return -ECONNABORTED; 5285 5286 if (n_vec < 1) 5287 return rc; 5288 5289 rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server, 5290 (void **) &req, &total_len); 5291 if (rc) 5292 return rc; 5293 5294 if (smb3_encryption_required(io_parms->tcon)) 5295 flags |= CIFS_TRANSFORM_REQ; 5296 5297 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid); 5298 5299 req->PersistentFileId = io_parms->persistent_fid; 5300 req->VolatileFileId = io_parms->volatile_fid; 5301 req->WriteChannelInfoOffset = 0; 5302 req->WriteChannelInfoLength = 0; 5303 req->Channel = 0; 5304 req->Length = cpu_to_le32(io_parms->length); 5305 req->Offset = cpu_to_le64(io_parms->offset); 5306 req->DataOffset = cpu_to_le16( 5307 offsetof(struct smb2_write_req, Buffer)); 5308 req->RemainingBytes = 0; 5309 5310 trace_smb3_write_enter(0, 0, xid, io_parms->persistent_fid, 5311 io_parms->tcon->tid, io_parms->tcon->ses->Suid, 5312 io_parms->offset, io_parms->length); 5313 5314 iov[0].iov_base = (char *)req; 5315 /* 1 for Buffer */ 5316 iov[0].iov_len = total_len - 1; 5317 5318 memset(&rqst, 0, sizeof(struct smb_rqst)); 5319 rqst.rq_iov = iov; 5320 /* iov[0] is the SMB header; move payload to rq_iter for encryption safety */ 5321 rqst.rq_nvec = 1; 5322 iov_iter_kvec(&rqst.rq_iter, ITER_SOURCE, &iov[1], n_vec, 5323 io_parms->length); 5324 5325 if (retries) { 5326 /* Back-off before retry */ 5327 if (cur_sleep) 5328 msleep(cur_sleep); 5329 smb2_set_replay(server, &rqst); 5330 } 5331 5332 rc = cifs_send_recv(xid, io_parms->tcon->ses, server, 5333 &rqst, 5334 &resp_buftype, flags, &rsp_iov); 5335 rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; 5336 5337 if (rc) { 5338 trace_smb3_write_err(0, 0, xid, 5339 req->PersistentFileId, 5340 io_parms->tcon->tid, 5341 io_parms->tcon->ses->Suid, 5342 io_parms->offset, io_parms->length, rc); 5343 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE); 5344 cifs_dbg(VFS, "Send error in write = %d\n", rc); 5345 } else { 5346 *nbytes = le32_to_cpu(rsp->DataLength); 5347 cifs_stats_bytes_written(io_parms->tcon, *nbytes); 5348 trace_smb3_write_done(0, 0, xid, 5349 req->PersistentFileId, 5350 io_parms->tcon->tid, 5351 io_parms->tcon->ses->Suid, 5352 io_parms->offset, *nbytes); 5353 } 5354 5355 cifs_small_buf_release(req); 5356 free_rsp_buf(resp_buftype, rsp); 5357 5358 if (is_replayable_error(rc) && 5359 smb2_should_replay(io_parms->tcon, &retries, &cur_sleep)) 5360 goto replay_again; 5361 5362 return rc; 5363 } 5364 5365 int posix_info_sid_size(const void *beg, const void *end) 5366 { 5367 size_t subauth; 5368 int total; 5369 5370 if (beg + 1 > end) 5371 return -1; 5372 5373 subauth = *(u8 *)(beg+1); 5374 if (subauth < 1 || subauth > 15) 5375 return -1; 5376 5377 total = 1 + 1 + 6 + 4*subauth; 5378 if (beg + total > end) 5379 return -1; 5380 5381 return total; 5382 } 5383 5384 int posix_info_parse(const void *beg, const void *end, 5385 struct smb2_posix_info_parsed *out) 5386 5387 { 5388 int total_len = 0; 5389 int owner_len, group_len; 5390 int name_len; 5391 const void *owner_sid; 5392 const void *group_sid; 5393 const void *name; 5394 5395 /* if no end bound given, assume payload to be correct */ 5396 if (!end) { 5397 const struct smb2_posix_info *p = beg; 5398 5399 end = beg + le32_to_cpu(p->NextEntryOffset); 5400 /* last element will have a 0 offset, pick a sensible bound */ 5401 if (end == beg) 5402 end += 0xFFFF; 5403 } 5404 5405 /* check base buf */ 5406 if (beg + sizeof(struct smb2_posix_info) > end) 5407 return -1; 5408 total_len = sizeof(struct smb2_posix_info); 5409 5410 /* check owner sid */ 5411 owner_sid = beg + total_len; 5412 owner_len = posix_info_sid_size(owner_sid, end); 5413 if (owner_len < 0) 5414 return -1; 5415 total_len += owner_len; 5416 5417 /* check group sid */ 5418 group_sid = beg + total_len; 5419 group_len = posix_info_sid_size(group_sid, end); 5420 if (group_len < 0) 5421 return -1; 5422 total_len += group_len; 5423 5424 /* check name len */ 5425 if (beg + total_len + 4 > end) 5426 return -1; 5427 name_len = le32_to_cpu(*(__le32 *)(beg + total_len)); 5428 if (name_len < 1 || name_len > 0xFFFF) 5429 return -1; 5430 total_len += 4; 5431 5432 /* check name */ 5433 name = beg + total_len; 5434 if (name + name_len > end) 5435 return -1; 5436 total_len += name_len; 5437 5438 if (out) { 5439 out->base = beg; 5440 out->size = total_len; 5441 out->name_len = name_len; 5442 out->name = name; 5443 memcpy(&out->owner, owner_sid, owner_len); 5444 memcpy(&out->group, group_sid, group_len); 5445 } 5446 return total_len; 5447 } 5448 5449 static int posix_info_extra_size(const void *beg, const void *end) 5450 { 5451 int len = posix_info_parse(beg, end, NULL); 5452 5453 if (len < 0) 5454 return -1; 5455 return len - sizeof(struct smb2_posix_info); 5456 } 5457 5458 static unsigned int 5459 num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry, 5460 size_t size) 5461 { 5462 int len; 5463 unsigned int entrycount = 0; 5464 unsigned int next_offset = 0; 5465 char *entryptr; 5466 FILE_DIRECTORY_INFO *dir_info; 5467 5468 if (bufstart == NULL) 5469 return 0; 5470 5471 entryptr = bufstart; 5472 5473 while (1) { 5474 if (entryptr + next_offset < entryptr || 5475 entryptr + next_offset > end_of_buf || 5476 entryptr + next_offset + size > end_of_buf) { 5477 cifs_dbg(VFS, "malformed search entry would overflow\n"); 5478 break; 5479 } 5480 5481 entryptr = entryptr + next_offset; 5482 dir_info = (FILE_DIRECTORY_INFO *)entryptr; 5483 5484 if (infotype == SMB_FIND_FILE_POSIX_INFO) 5485 len = posix_info_extra_size(entryptr, end_of_buf); 5486 else 5487 len = le32_to_cpu(dir_info->FileNameLength); 5488 5489 if (len < 0 || 5490 entryptr + len < entryptr || 5491 entryptr + len > end_of_buf || 5492 entryptr + len + size > end_of_buf) { 5493 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n", 5494 end_of_buf); 5495 break; 5496 } 5497 5498 *lastentry = entryptr; 5499 entrycount++; 5500 5501 next_offset = le32_to_cpu(dir_info->NextEntryOffset); 5502 if (!next_offset) 5503 break; 5504 } 5505 5506 return entrycount; 5507 } 5508 5509 /* 5510 * Readdir/FindFirst 5511 */ 5512 int SMB2_query_directory_init(const unsigned int xid, 5513 struct cifs_tcon *tcon, 5514 struct TCP_Server_Info *server, 5515 struct smb_rqst *rqst, 5516 u64 persistent_fid, u64 volatile_fid, 5517 int index, int info_level) 5518 { 5519 struct smb2_query_directory_req *req; 5520 unsigned char *bufptr; 5521 __le16 asteriks = cpu_to_le16('*'); 5522 unsigned int output_size = CIFSMaxBufSize - 5523 MAX_SMB2_CREATE_RESPONSE_SIZE - 5524 MAX_SMB2_CLOSE_RESPONSE_SIZE; 5525 unsigned int total_len; 5526 struct kvec *iov = rqst->rq_iov; 5527 int len, rc; 5528 5529 rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server, 5530 (void **) &req, &total_len); 5531 if (rc) 5532 return rc; 5533 5534 switch (info_level) { 5535 case SMB_FIND_FILE_DIRECTORY_INFO: 5536 req->FileInformationClass = FILE_DIRECTORY_INFORMATION; 5537 break; 5538 case SMB_FIND_FILE_ID_FULL_DIR_INFO: 5539 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION; 5540 break; 5541 case SMB_FIND_FILE_POSIX_INFO: 5542 req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO; 5543 break; 5544 case SMB_FIND_FILE_FULL_DIRECTORY_INFO: 5545 req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION; 5546 break; 5547 default: 5548 cifs_tcon_dbg(VFS, "info level %u isn't supported\n", 5549 info_level); 5550 return -EINVAL; 5551 } 5552 5553 req->FileIndex = cpu_to_le32(index); 5554 req->PersistentFileId = persistent_fid; 5555 req->VolatileFileId = volatile_fid; 5556 5557 len = 0x2; 5558 bufptr = req->Buffer; 5559 memcpy(bufptr, &asteriks, len); 5560 5561 req->FileNameOffset = 5562 cpu_to_le16(sizeof(struct smb2_query_directory_req)); 5563 req->FileNameLength = cpu_to_le16(len); 5564 /* 5565 * BB could be 30 bytes or so longer if we used SMB2 specific 5566 * buffer lengths, but this is safe and close enough. 5567 */ 5568 output_size = min_t(unsigned int, output_size, server->maxBuf); 5569 output_size = min_t(unsigned int, output_size, 2 << 15); 5570 req->OutputBufferLength = cpu_to_le32(output_size); 5571 5572 iov[0].iov_base = (char *)req; 5573 /* 1 for Buffer */ 5574 iov[0].iov_len = total_len - 1; 5575 5576 iov[1].iov_base = (char *)(req->Buffer); 5577 iov[1].iov_len = len; 5578 5579 trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid, 5580 tcon->ses->Suid, index, output_size); 5581 5582 return 0; 5583 } 5584 5585 void SMB2_query_directory_free(struct smb_rqst *rqst) 5586 { 5587 if (rqst && rqst->rq_iov) { 5588 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 5589 } 5590 } 5591 5592 int 5593 smb2_parse_query_directory(struct cifs_tcon *tcon, 5594 struct kvec *rsp_iov, 5595 int resp_buftype, 5596 struct cifs_search_info *srch_inf) 5597 { 5598 struct smb2_query_directory_rsp *rsp; 5599 size_t info_buf_size; 5600 char *end_of_smb; 5601 int rc; 5602 5603 rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base; 5604 5605 switch (srch_inf->info_level) { 5606 case SMB_FIND_FILE_DIRECTORY_INFO: 5607 info_buf_size = sizeof(FILE_DIRECTORY_INFO); 5608 break; 5609 case SMB_FIND_FILE_ID_FULL_DIR_INFO: 5610 info_buf_size = sizeof(FILE_ID_FULL_DIR_INFO); 5611 break; 5612 case SMB_FIND_FILE_POSIX_INFO: 5613 /* note that posix payload are variable size */ 5614 info_buf_size = sizeof(struct smb2_posix_info); 5615 break; 5616 case SMB_FIND_FILE_FULL_DIRECTORY_INFO: 5617 info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO); 5618 break; 5619 default: 5620 cifs_tcon_dbg(VFS, "info level %u isn't supported\n", 5621 srch_inf->info_level); 5622 return -EINVAL; 5623 } 5624 5625 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), 5626 le32_to_cpu(rsp->OutputBufferLength), rsp_iov, 5627 info_buf_size); 5628 if (rc) { 5629 cifs_tcon_dbg(VFS, "bad info payload"); 5630 return rc; 5631 } 5632 5633 srch_inf->unicode = true; 5634 5635 if (srch_inf->ntwrk_buf_start) { 5636 if (srch_inf->smallBuf) 5637 cifs_small_buf_release(srch_inf->ntwrk_buf_start); 5638 else 5639 cifs_buf_release(srch_inf->ntwrk_buf_start); 5640 } 5641 srch_inf->ntwrk_buf_start = (char *)rsp; 5642 srch_inf->srch_entries_start = srch_inf->last_entry = 5643 (char *)rsp + le16_to_cpu(rsp->OutputBufferOffset); 5644 end_of_smb = rsp_iov->iov_len + (char *)rsp; 5645 5646 srch_inf->entries_in_buffer = num_entries( 5647 srch_inf->info_level, 5648 srch_inf->srch_entries_start, 5649 end_of_smb, 5650 &srch_inf->last_entry, 5651 info_buf_size); 5652 5653 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer; 5654 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n", 5655 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry, 5656 srch_inf->srch_entries_start, srch_inf->last_entry); 5657 if (resp_buftype == CIFS_LARGE_BUFFER) 5658 srch_inf->smallBuf = false; 5659 else if (resp_buftype == CIFS_SMALL_BUFFER) 5660 srch_inf->smallBuf = true; 5661 else 5662 cifs_tcon_dbg(VFS, "Invalid search buffer type\n"); 5663 5664 return 0; 5665 } 5666 5667 int 5668 SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, 5669 u64 persistent_fid, u64 volatile_fid, int index, 5670 struct cifs_search_info *srch_inf) 5671 { 5672 struct smb_rqst rqst; 5673 struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE]; 5674 struct smb2_query_directory_rsp *rsp = NULL; 5675 int resp_buftype = CIFS_NO_BUFFER; 5676 struct kvec rsp_iov; 5677 int rc = 0; 5678 struct cifs_ses *ses = tcon->ses; 5679 struct TCP_Server_Info *server; 5680 int flags = 0; 5681 int retries = 0, cur_sleep = 0; 5682 5683 replay_again: 5684 /* reinitialize for possible replay */ 5685 flags = 0; 5686 server = cifs_pick_channel(ses); 5687 5688 if (!ses || !(ses->server)) 5689 return smb_EIO(smb_eio_trace_null_pointers); 5690 5691 if (smb3_encryption_required(tcon)) 5692 flags |= CIFS_TRANSFORM_REQ; 5693 5694 memset(&rqst, 0, sizeof(struct smb_rqst)); 5695 memset(&iov, 0, sizeof(iov)); 5696 rqst.rq_iov = iov; 5697 rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE; 5698 5699 rc = SMB2_query_directory_init(xid, tcon, server, 5700 &rqst, persistent_fid, 5701 volatile_fid, index, 5702 srch_inf->info_level); 5703 if (rc) 5704 goto qdir_exit; 5705 5706 if (retries) { 5707 /* Back-off before retry */ 5708 if (cur_sleep) 5709 msleep(cur_sleep); 5710 smb2_set_replay(server, &rqst); 5711 } 5712 5713 rc = cifs_send_recv(xid, ses, server, 5714 &rqst, &resp_buftype, flags, &rsp_iov); 5715 rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base; 5716 5717 if (rc) { 5718 if (rc == -ENODATA && 5719 rsp->hdr.Status == STATUS_NO_MORE_FILES) { 5720 trace_smb3_query_dir_done(xid, persistent_fid, 5721 tcon->tid, tcon->ses->Suid, index, 0); 5722 srch_inf->endOfSearch = true; 5723 rc = 0; 5724 } else { 5725 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid, 5726 tcon->ses->Suid, index, 0, rc); 5727 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); 5728 } 5729 goto qdir_exit; 5730 } 5731 5732 rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype, 5733 srch_inf); 5734 if (rc) { 5735 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid, 5736 tcon->ses->Suid, index, 0, rc); 5737 goto qdir_exit; 5738 } 5739 resp_buftype = CIFS_NO_BUFFER; 5740 5741 trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid, 5742 tcon->ses->Suid, index, srch_inf->entries_in_buffer); 5743 5744 qdir_exit: 5745 SMB2_query_directory_free(&rqst); 5746 free_rsp_buf(resp_buftype, rsp); 5747 5748 if (is_replayable_error(rc) && 5749 smb2_should_replay(tcon, &retries, &cur_sleep)) 5750 goto replay_again; 5751 5752 return rc; 5753 } 5754 5755 int 5756 SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, 5757 struct smb_rqst *rqst, 5758 u64 persistent_fid, u64 volatile_fid, u32 pid, 5759 u8 info_class, u8 info_type, u32 additional_info, 5760 void **data, unsigned int *size) 5761 { 5762 struct smb2_set_info_req *req; 5763 struct kvec *iov = rqst->rq_iov; 5764 unsigned int i, total_len; 5765 int rc; 5766 5767 rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server, 5768 (void **) &req, &total_len); 5769 if (rc) 5770 return rc; 5771 5772 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid); 5773 req->InfoType = info_type; 5774 req->FileInfoClass = info_class; 5775 req->PersistentFileId = persistent_fid; 5776 req->VolatileFileId = volatile_fid; 5777 req->AdditionalInformation = cpu_to_le32(additional_info); 5778 5779 req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req)); 5780 req->BufferLength = cpu_to_le32(*size); 5781 5782 memcpy(req->Buffer, *data, *size); 5783 total_len += *size; 5784 5785 iov[0].iov_base = (char *)req; 5786 /* 1 for Buffer */ 5787 iov[0].iov_len = total_len - 1; 5788 5789 for (i = 1; i < rqst->rq_nvec; i++) { 5790 le32_add_cpu(&req->BufferLength, size[i]); 5791 iov[i].iov_base = (char *)data[i]; 5792 iov[i].iov_len = size[i]; 5793 } 5794 5795 return 0; 5796 } 5797 5798 void 5799 SMB2_set_info_free(struct smb_rqst *rqst) 5800 { 5801 if (rqst && rqst->rq_iov) 5802 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */ 5803 } 5804 5805 static int 5806 send_set_info(const unsigned int xid, struct cifs_tcon *tcon, 5807 u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class, 5808 u8 info_type, u32 additional_info, unsigned int num, 5809 void **data, unsigned int *size) 5810 { 5811 struct smb_rqst rqst; 5812 struct smb2_set_info_rsp *rsp = NULL; 5813 struct kvec *iov; 5814 struct kvec rsp_iov; 5815 int rc = 0; 5816 int resp_buftype; 5817 struct cifs_ses *ses = tcon->ses; 5818 struct TCP_Server_Info *server; 5819 int flags = 0; 5820 int retries = 0, cur_sleep = 0; 5821 5822 replay_again: 5823 /* reinitialize for possible replay */ 5824 flags = 0; 5825 server = cifs_pick_channel(ses); 5826 5827 if (!ses || !server) 5828 return smb_EIO(smb_eio_trace_null_pointers); 5829 5830 if (!num) 5831 return -EINVAL; 5832 5833 if (smb3_encryption_required(tcon)) 5834 flags |= CIFS_TRANSFORM_REQ; 5835 5836 iov = kmalloc_objs(struct kvec, num); 5837 if (!iov) 5838 return -ENOMEM; 5839 5840 memset(&rqst, 0, sizeof(struct smb_rqst)); 5841 rqst.rq_iov = iov; 5842 rqst.rq_nvec = num; 5843 5844 rc = SMB2_set_info_init(tcon, server, 5845 &rqst, persistent_fid, volatile_fid, pid, 5846 info_class, info_type, additional_info, 5847 data, size); 5848 if (rc) { 5849 kfree(iov); 5850 return rc; 5851 } 5852 5853 if (retries) { 5854 /* Back-off before retry */ 5855 if (cur_sleep) 5856 msleep(cur_sleep); 5857 smb2_set_replay(server, &rqst); 5858 } 5859 5860 rc = cifs_send_recv(xid, ses, server, 5861 &rqst, &resp_buftype, flags, 5862 &rsp_iov); 5863 SMB2_set_info_free(&rqst); 5864 rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; 5865 5866 if (rc != 0) { 5867 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE); 5868 trace_smb3_set_info_err(xid, persistent_fid, tcon->tid, 5869 ses->Suid, info_class, (__u32)info_type, rc); 5870 } 5871 5872 free_rsp_buf(resp_buftype, rsp); 5873 kfree(iov); 5874 5875 if (is_replayable_error(rc) && 5876 smb2_should_replay(tcon, &retries, &cur_sleep)) 5877 goto replay_again; 5878 5879 return rc; 5880 } 5881 5882 int 5883 SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 5884 u64 volatile_fid, u32 pid, loff_t new_eof) 5885 { 5886 struct smb2_file_eof_info info; 5887 void *data; 5888 unsigned int size; 5889 5890 info.EndOfFile = cpu_to_le64(new_eof); 5891 5892 data = &info; 5893 size = sizeof(struct smb2_file_eof_info); 5894 5895 trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, new_eof); 5896 5897 return send_set_info(xid, tcon, persistent_fid, volatile_fid, 5898 pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE, 5899 0, 1, &data, &size); 5900 } 5901 5902 int 5903 SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon, 5904 u64 persistent_fid, u64 volatile_fid, 5905 struct smb_ntsd *pnntsd, int pacllen, int aclflag) 5906 { 5907 return send_set_info(xid, tcon, persistent_fid, volatile_fid, 5908 current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag, 5909 1, (void **)&pnntsd, &pacllen); 5910 } 5911 5912 int 5913 SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, 5914 u64 persistent_fid, u64 volatile_fid, 5915 struct smb2_file_full_ea_info *buf, int len) 5916 { 5917 return send_set_info(xid, tcon, persistent_fid, volatile_fid, 5918 current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 5919 0, 1, (void **)&buf, &len); 5920 } 5921 5922 int 5923 SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, 5924 const u64 persistent_fid, const u64 volatile_fid, 5925 __u8 oplock_level) 5926 { 5927 struct smb_rqst rqst; 5928 int rc; 5929 struct smb2_oplock_break *req = NULL; 5930 struct cifs_ses *ses = tcon->ses; 5931 struct TCP_Server_Info *server; 5932 int flags = CIFS_OBREAK_OP; 5933 unsigned int total_len; 5934 struct kvec iov[1]; 5935 struct kvec rsp_iov; 5936 int resp_buf_type; 5937 int retries = 0, cur_sleep = 0; 5938 5939 replay_again: 5940 /* reinitialize for possible replay */ 5941 flags = CIFS_OBREAK_OP; 5942 server = cifs_pick_channel(ses); 5943 5944 cifs_dbg(FYI, "SMB2_oplock_break\n"); 5945 rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server, 5946 (void **) &req, &total_len); 5947 if (rc) 5948 return rc; 5949 5950 if (smb3_encryption_required(tcon)) 5951 flags |= CIFS_TRANSFORM_REQ; 5952 5953 req->VolatileFid = volatile_fid; 5954 req->PersistentFid = persistent_fid; 5955 req->OplockLevel = oplock_level; 5956 req->hdr.CreditRequest = cpu_to_le16(1); 5957 5958 flags |= CIFS_NO_RSP_BUF; 5959 5960 iov[0].iov_base = (char *)req; 5961 iov[0].iov_len = total_len; 5962 5963 memset(&rqst, 0, sizeof(struct smb_rqst)); 5964 rqst.rq_iov = iov; 5965 rqst.rq_nvec = 1; 5966 5967 if (retries) { 5968 /* Back-off before retry */ 5969 if (cur_sleep) 5970 msleep(cur_sleep); 5971 smb2_set_replay(server, &rqst); 5972 } 5973 5974 rc = cifs_send_recv(xid, ses, server, 5975 &rqst, &resp_buf_type, flags, &rsp_iov); 5976 cifs_small_buf_release(req); 5977 if (rc) { 5978 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); 5979 cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc); 5980 } 5981 5982 if (is_replayable_error(rc) && 5983 smb2_should_replay(tcon, &retries, &cur_sleep)) 5984 goto replay_again; 5985 5986 return rc; 5987 } 5988 5989 void 5990 smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf, 5991 struct kstatfs *kst) 5992 { 5993 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * 5994 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); 5995 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); 5996 kst->f_bfree = kst->f_bavail = 5997 le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); 5998 return; 5999 } 6000 6001 static void 6002 copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data, 6003 struct kstatfs *kst) 6004 { 6005 kst->f_bsize = le32_to_cpu(response_data->BlockSize); 6006 kst->f_blocks = le64_to_cpu(response_data->TotalBlocks); 6007 kst->f_bfree = le64_to_cpu(response_data->BlocksAvail); 6008 if (response_data->UserBlocksAvail == cpu_to_le64(-1)) 6009 kst->f_bavail = kst->f_bfree; 6010 else 6011 kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail); 6012 if (response_data->TotalFileNodes != cpu_to_le64(-1)) 6013 kst->f_files = le64_to_cpu(response_data->TotalFileNodes); 6014 if (response_data->FreeFileNodes != cpu_to_le64(-1)) 6015 kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes); 6016 6017 return; 6018 } 6019 6020 static int 6021 build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, 6022 struct TCP_Server_Info *server, 6023 int level, int outbuf_len, u64 persistent_fid, 6024 u64 volatile_fid) 6025 { 6026 int rc; 6027 struct smb2_query_info_req *req; 6028 unsigned int total_len; 6029 6030 cifs_dbg(FYI, "Query FSInfo level %d\n", level); 6031 6032 if ((tcon->ses == NULL) || server == NULL) 6033 return smb_EIO(smb_eio_trace_null_pointers); 6034 6035 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server, 6036 (void **) &req, &total_len); 6037 if (rc) 6038 return rc; 6039 6040 req->InfoType = SMB2_O_INFO_FILESYSTEM; 6041 req->FileInfoClass = level; 6042 req->PersistentFileId = persistent_fid; 6043 req->VolatileFileId = volatile_fid; 6044 /* 1 for pad */ 6045 req->InputBufferOffset = 6046 cpu_to_le16(sizeof(struct smb2_query_info_req)); 6047 req->OutputBufferLength = cpu_to_le32( 6048 outbuf_len + sizeof(struct smb2_query_info_rsp)); 6049 6050 iov->iov_base = (char *)req; 6051 iov->iov_len = total_len; 6052 return 0; 6053 } 6054 6055 static inline void free_qfs_info_req(struct kvec *iov) 6056 { 6057 cifs_buf_release(iov->iov_base); 6058 } 6059 6060 int 6061 SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon, 6062 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata) 6063 { 6064 struct smb_rqst rqst; 6065 struct smb2_query_info_rsp *rsp = NULL; 6066 struct kvec iov; 6067 struct kvec rsp_iov; 6068 int rc = 0; 6069 int resp_buftype; 6070 struct cifs_ses *ses = tcon->ses; 6071 struct TCP_Server_Info *server; 6072 FILE_SYSTEM_POSIX_INFO *info = NULL; 6073 int flags = 0; 6074 int retries = 0, cur_sleep = 0; 6075 6076 replay_again: 6077 /* reinitialize for possible replay */ 6078 flags = 0; 6079 server = cifs_pick_channel(ses); 6080 6081 rc = build_qfs_info_req(&iov, tcon, server, 6082 FS_POSIX_INFORMATION, 6083 sizeof(FILE_SYSTEM_POSIX_INFO), 6084 persistent_fid, volatile_fid); 6085 if (rc) 6086 return rc; 6087 6088 if (smb3_encryption_required(tcon)) 6089 flags |= CIFS_TRANSFORM_REQ; 6090 6091 memset(&rqst, 0, sizeof(struct smb_rqst)); 6092 rqst.rq_iov = &iov; 6093 rqst.rq_nvec = 1; 6094 6095 if (retries) { 6096 /* Back-off before retry */ 6097 if (cur_sleep) 6098 msleep(cur_sleep); 6099 smb2_set_replay(server, &rqst); 6100 } 6101 6102 rc = cifs_send_recv(xid, ses, server, 6103 &rqst, &resp_buftype, flags, &rsp_iov); 6104 free_qfs_info_req(&iov); 6105 if (rc) { 6106 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); 6107 goto posix_qfsinf_exit; 6108 } 6109 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; 6110 6111 info = (FILE_SYSTEM_POSIX_INFO *)( 6112 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp); 6113 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), 6114 le32_to_cpu(rsp->OutputBufferLength), &rsp_iov, 6115 sizeof(FILE_SYSTEM_POSIX_INFO)); 6116 if (!rc) 6117 copy_posix_fs_info_to_kstatfs(info, fsdata); 6118 6119 posix_qfsinf_exit: 6120 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 6121 6122 if (is_replayable_error(rc) && 6123 smb2_should_replay(tcon, &retries, &cur_sleep)) 6124 goto replay_again; 6125 6126 return rc; 6127 } 6128 6129 int 6130 SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon, 6131 u64 persistent_fid, u64 volatile_fid, int level) 6132 { 6133 struct smb_rqst rqst; 6134 struct smb2_query_info_rsp *rsp = NULL; 6135 struct kvec iov; 6136 struct kvec rsp_iov; 6137 int rc = 0; 6138 int resp_buftype, max_len, min_len; 6139 struct cifs_ses *ses = tcon->ses; 6140 struct TCP_Server_Info *server; 6141 unsigned int rsp_len, offset; 6142 int flags = 0; 6143 int retries = 0, cur_sleep = 0; 6144 6145 replay_again: 6146 /* reinitialize for possible replay */ 6147 flags = 0; 6148 server = cifs_pick_channel(ses); 6149 6150 if (level == FS_DEVICE_INFORMATION) { 6151 max_len = sizeof(FILE_SYSTEM_DEVICE_INFO); 6152 min_len = sizeof(FILE_SYSTEM_DEVICE_INFO); 6153 } else if (level == FS_ATTRIBUTE_INFORMATION) { 6154 max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO) + MAX_FS_NAME_LEN; 6155 min_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO); 6156 } else if (level == FS_SECTOR_SIZE_INFORMATION) { 6157 max_len = sizeof(struct smb3_fs_ss_info); 6158 min_len = sizeof(struct smb3_fs_ss_info); 6159 } else if (level == FS_VOLUME_INFORMATION) { 6160 max_len = sizeof(struct filesystem_vol_info) + MAX_VOL_LABEL_LEN; 6161 min_len = sizeof(struct filesystem_vol_info); 6162 } else { 6163 cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level); 6164 return -EINVAL; 6165 } 6166 6167 rc = build_qfs_info_req(&iov, tcon, server, 6168 level, max_len, 6169 persistent_fid, volatile_fid); 6170 if (rc) 6171 return rc; 6172 6173 if (smb3_encryption_required(tcon)) 6174 flags |= CIFS_TRANSFORM_REQ; 6175 6176 memset(&rqst, 0, sizeof(struct smb_rqst)); 6177 rqst.rq_iov = &iov; 6178 rqst.rq_nvec = 1; 6179 6180 if (retries) { 6181 /* Back-off before retry */ 6182 if (cur_sleep) 6183 msleep(cur_sleep); 6184 smb2_set_replay(server, &rqst); 6185 } 6186 6187 rc = cifs_send_recv(xid, ses, server, 6188 &rqst, &resp_buftype, flags, &rsp_iov); 6189 free_qfs_info_req(&iov); 6190 if (rc) { 6191 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); 6192 goto qfsattr_exit; 6193 } 6194 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; 6195 6196 rsp_len = le32_to_cpu(rsp->OutputBufferLength); 6197 offset = le16_to_cpu(rsp->OutputBufferOffset); 6198 rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len); 6199 if (rc) 6200 goto qfsattr_exit; 6201 6202 if (level == FS_ATTRIBUTE_INFORMATION) 6203 memcpy(&tcon->fsAttrInfo, offset 6204 + (char *)rsp, min_t(unsigned int, 6205 rsp_len, min_len)); 6206 else if (level == FS_DEVICE_INFORMATION) 6207 memcpy(&tcon->fsDevInfo, offset 6208 + (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO)); 6209 else if (level == FS_SECTOR_SIZE_INFORMATION) { 6210 struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *) 6211 (offset + (char *)rsp); 6212 tcon->ss_flags = le32_to_cpu(ss_info->Flags); 6213 tcon->perf_sector_size = 6214 le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf); 6215 } else if (level == FS_VOLUME_INFORMATION) { 6216 struct filesystem_vol_info *vol_info = (struct filesystem_vol_info *) 6217 (offset + (char *)rsp); 6218 tcon->vol_serial_number = le32_to_cpu(vol_info->VolumeSerialNumber); 6219 tcon->vol_create_time = vol_info->VolumeCreationTime; 6220 } 6221 6222 qfsattr_exit: 6223 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 6224 6225 if (is_replayable_error(rc) && 6226 smb2_should_replay(tcon, &retries, &cur_sleep)) 6227 goto replay_again; 6228 6229 return rc; 6230 } 6231 6232 int 6233 smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon, 6234 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid, 6235 const __u32 num_lock, struct smb2_lock_element *buf) 6236 { 6237 struct smb_rqst rqst; 6238 int rc = 0; 6239 struct smb2_lock_req *req = NULL; 6240 struct kvec iov[2]; 6241 struct kvec rsp_iov; 6242 int resp_buf_type; 6243 unsigned int count; 6244 int flags = CIFS_NO_RSP_BUF; 6245 unsigned int total_len; 6246 struct TCP_Server_Info *server; 6247 int retries = 0, cur_sleep = 0; 6248 6249 replay_again: 6250 /* reinitialize for possible replay */ 6251 flags = CIFS_NO_RSP_BUF; 6252 server = cifs_pick_channel(tcon->ses); 6253 6254 cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock); 6255 6256 rc = smb2_plain_req_init(SMB2_LOCK, tcon, server, 6257 (void **) &req, &total_len); 6258 if (rc) 6259 return rc; 6260 6261 if (smb3_encryption_required(tcon)) 6262 flags |= CIFS_TRANSFORM_REQ; 6263 6264 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid); 6265 req->LockCount = cpu_to_le16(num_lock); 6266 6267 req->PersistentFileId = persist_fid; 6268 req->VolatileFileId = volatile_fid; 6269 6270 count = num_lock * sizeof(struct smb2_lock_element); 6271 6272 iov[0].iov_base = (char *)req; 6273 iov[0].iov_len = total_len - sizeof(struct smb2_lock_element); 6274 iov[1].iov_base = (char *)buf; 6275 iov[1].iov_len = count; 6276 6277 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); 6278 6279 memset(&rqst, 0, sizeof(struct smb_rqst)); 6280 rqst.rq_iov = iov; 6281 rqst.rq_nvec = 2; 6282 6283 if (retries) { 6284 /* Back-off before retry */ 6285 if (cur_sleep) 6286 msleep(cur_sleep); 6287 smb2_set_replay(server, &rqst); 6288 } 6289 6290 trace_smb3_lock_enter(xid, persist_fid, tcon->tid, tcon->ses->Suid, 6291 le64_to_cpu(buf[0].Offset), 6292 le64_to_cpu(buf[0].Length), 6293 le32_to_cpu(buf[0].Flags), num_lock, 0); 6294 6295 rc = cifs_send_recv(xid, tcon->ses, server, 6296 &rqst, &resp_buf_type, flags, 6297 &rsp_iov); 6298 cifs_small_buf_release(req); 6299 if (rc) { 6300 cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc); 6301 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE); 6302 trace_smb3_lock_err(xid, persist_fid, tcon->tid, 6303 tcon->ses->Suid, 6304 le64_to_cpu(buf[0].Offset), 6305 le64_to_cpu(buf[0].Length), 6306 le32_to_cpu(buf[0].Flags), num_lock, rc); 6307 } else { 6308 trace_smb3_lock_done(xid, persist_fid, tcon->tid, tcon->ses->Suid, 6309 le64_to_cpu(buf[0].Offset), 6310 le64_to_cpu(buf[0].Length), 6311 le32_to_cpu(buf[0].Flags), num_lock, 0); 6312 } 6313 6314 if (is_replayable_error(rc) && 6315 smb2_should_replay(tcon, &retries, &cur_sleep)) 6316 goto replay_again; 6317 6318 return rc; 6319 } 6320 6321 int 6322 SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon, 6323 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid, 6324 const __u64 length, const __u64 offset, const __u32 lock_flags, 6325 const bool wait) 6326 { 6327 struct smb2_lock_element lock; 6328 6329 lock.Offset = cpu_to_le64(offset); 6330 lock.Length = cpu_to_le64(length); 6331 lock.Flags = cpu_to_le32(lock_flags); 6332 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK) 6333 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY); 6334 6335 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock); 6336 } 6337 6338 int 6339 SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon, 6340 __u8 *lease_key, const __le32 lease_state) 6341 { 6342 struct smb_rqst rqst; 6343 int rc; 6344 struct smb2_lease_ack *req = NULL; 6345 struct cifs_ses *ses = tcon->ses; 6346 int flags = CIFS_OBREAK_OP; 6347 unsigned int total_len; 6348 struct kvec iov[1]; 6349 struct kvec rsp_iov; 6350 int resp_buf_type; 6351 __u64 *please_key_high; 6352 __u64 *please_key_low; 6353 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses); 6354 6355 cifs_dbg(FYI, "SMB2_lease_break\n"); 6356 rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server, 6357 (void **) &req, &total_len); 6358 if (rc) 6359 return rc; 6360 6361 if (smb3_encryption_required(tcon)) 6362 flags |= CIFS_TRANSFORM_REQ; 6363 6364 req->hdr.CreditRequest = cpu_to_le16(1); 6365 req->StructureSize = cpu_to_le16(36); 6366 total_len += 12; 6367 6368 memcpy(req->LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE); 6369 req->LeaseState = lease_state; 6370 6371 flags |= CIFS_NO_RSP_BUF; 6372 6373 iov[0].iov_base = (char *)req; 6374 iov[0].iov_len = total_len; 6375 6376 memset(&rqst, 0, sizeof(struct smb_rqst)); 6377 rqst.rq_iov = iov; 6378 rqst.rq_nvec = 1; 6379 6380 rc = cifs_send_recv(xid, ses, server, 6381 &rqst, &resp_buf_type, flags, &rsp_iov); 6382 cifs_small_buf_release(req); 6383 6384 please_key_low = (__u64 *)lease_key; 6385 please_key_high = (__u64 *)(lease_key+8); 6386 if (rc) { 6387 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); 6388 trace_smb3_lease_ack_err(le32_to_cpu(lease_state), tcon->tid, 6389 ses->Suid, *please_key_low, *please_key_high, rc); 6390 cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc); 6391 } else 6392 trace_smb3_lease_ack_done(le32_to_cpu(lease_state), tcon->tid, 6393 ses->Suid, *please_key_low, *please_key_high); 6394 6395 return rc; 6396 } 6397