// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2011
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 */
#include <linux/fs.h>
#include <linux/net.h>
#include <linux/string.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/pagevec.h>
#include <linux/freezer.h>
#include <linux/namei.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/inet.h>
#include <linux/module.h>
#include <keys/user-type.h>
#include <net/ipv6.h>
#include <linux/parser.h>
#include <linux/bvec.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "ntlmssp.h"
#include "nterr.h"
#include "rfc1002pdu.h"
#include "fscache.h"
#include "smb2proto.h"
#include "smbdirect.h"
#include "dns_resolve.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs.h"
#include "dfs_cache.h"
#endif
#include "fs_context.h"
#include "cifs_swn.h"

extern mempool_t *cifs_req_poolp;
extern bool disable_legacy_dialects;

/* FIXME: should these be tunable? */
#define TLINK_ERROR_EXPIRE	(1 * HZ)
#define TLINK_IDLE_EXPIRE	(600 * HZ)

/* Drop the connection to not overload the server */
#define MAX_STATUS_IO_TIMEOUT   5

static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
static void cifs_prune_tlinks(struct work_struct *work);

/*
 * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
 * get their ip addresses changed at some point.
 *
 * This should be called with server->srv_mutex held.
 */
static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
{
	int rc;
	int len;
	char *unc;
	struct sockaddr_storage ss;

	if (!server->hostname)
		return -EINVAL;

	/* if server hostname isn't populated, there's nothing to do here */
	if (server->hostname[0] == '\0')
		return 0;

	len = strlen(server->hostname) + 3;

	unc = kmalloc(len, GFP_KERNEL);
	if (!unc) {
		cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
		return -ENOMEM;
	}
	scnprintf(unc, len, "\\\\%s", server->hostname);

	spin_lock(&server->srv_lock);
	ss = server->dstaddr;
	spin_unlock(&server->srv_lock);

	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
	kfree(unc);

	if (rc < 0) {
		cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
			 __func__, server->hostname, rc);
	} else {
		spin_lock(&server->srv_lock);
		memcpy(&server->dstaddr, &ss, sizeof(server->dstaddr));
		spin_unlock(&server->srv_lock);
		rc = 0;
	}

	return rc;
}

static void smb2_query_server_interfaces(struct work_struct *work)
{
	int rc;
	int xid;
	struct cifs_tcon *tcon = container_of(work,
					struct cifs_tcon,
					query_interfaces.work);

	/*
	 * query server network interfaces, in case they change
	 */
	xid = get_xid();
	rc = SMB3_request_interfaces(xid, tcon, false);
	free_xid(xid);

	if (rc) {
		if (rc == -EOPNOTSUPP)
			return;

		cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
			 __func__, rc);
	}

	queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
			   (SMB_INTERFACE_POLL_INTERVAL * HZ));
}

/*
 * Update the tcpStatus for the server.
 * This is used to signal the cifsd thread to call cifs_reconnect.
 * ONLY the cifsd thread should call cifs_reconnect; for any other
 * thread, use this function.
 *
 * @server: the tcp ses for which reconnect is needed
 * @all_channels: if this needs to be done for all channels
 */
void
cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
				bool all_channels)
{
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses;
	int i;

	/* If server is a channel, select the primary channel */
	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;

	/* if we need to signal just this channel */
	if (!all_channels) {
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsExiting)
			server->tcpStatus = CifsNeedReconnect;
		spin_unlock(&server->srv_lock);
		return;
	}

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
		spin_lock(&ses->chan_lock);
		for (i = 0; i < ses->chan_count; i++) {
			if (!ses->chans[i].server)
				continue;

			spin_lock(&ses->chans[i].server->srv_lock);
			if (ses->chans[i].server->tcpStatus != CifsExiting)
				ses->chans[i].server->tcpStatus = CifsNeedReconnect;
			spin_unlock(&ses->chans[i].server->srv_lock);
		}
		spin_unlock(&ses->chan_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);
}

/*
 * Mark all sessions and tcons for reconnect.
 * IMPORTANT: make sure that this gets called only from
 * the cifsd thread. For any other thread, use
 * cifs_signal_cifsd_for_reconnect.
 *
 * @server: the tcp ses for which reconnect is needed
 * @server needs to be previously set to CifsNeedReconnect.
 * @mark_smb_session: whether even sessions need to be marked
 */
void
cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
				      bool mark_smb_session)
{
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses, *nses;
	struct cifs_tcon *tcon;

	/*
	 * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
	 * are not used until reconnected.
	 */
	cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);

	/* If server is a channel, select the primary channel */
	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;

	/*
	 * if the server has been marked for termination, there is a
	 * chance that the remaining channels all need reconnect. To be
	 * on the safer side, mark the session and trees for reconnect
	 * for this scenario. This might cause a few redundant session
	 * setup and tree connect requests, but it is better than not doing
	 * a tree connect when needed, and all following requests failing
	 */
	if (server->terminate) {
		mark_smb_session = true;
		server = pserver;
	}

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
		/* check if iface is still active */
		spin_lock(&ses->chan_lock);
		if (!cifs_chan_is_iface_active(ses, server)) {
			spin_unlock(&ses->chan_lock);
			cifs_chan_update_iface(ses, server);
			spin_lock(&ses->chan_lock);
		}

		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
			spin_unlock(&ses->chan_lock);
			continue;
		}

		if (mark_smb_session)
			CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
		else
			cifs_chan_set_need_reconnect(ses, server);

		cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
			 __func__, ses->chans_need_reconnect);

		/* If all channels need reconnect, then tcon needs reconnect */
		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
			spin_unlock(&ses->chan_lock);
			continue;
		}
		spin_unlock(&ses->chan_lock);

		spin_lock(&ses->ses_lock);
		ses->ses_status = SES_NEED_RECON;
		spin_unlock(&ses->ses_lock);

		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			tcon->need_reconnect = true;
			spin_lock(&tcon->tc_lock);
			tcon->status = TID_NEED_RECON;
			spin_unlock(&tcon->tc_lock);

			cancel_delayed_work(&tcon->query_interfaces);
		}
		if (ses->tcon_ipc) {
			ses->tcon_ipc->need_reconnect = true;
			spin_lock(&ses->tcon_ipc->tc_lock);
			ses->tcon_ipc->status = TID_NEED_RECON;
			spin_unlock(&ses->tcon_ipc->tc_lock);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}
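
/*
 * Tear down the existing socket connection: shut down and release the
 * socket, wipe the session key, and move all pending mids to a private
 * list so their callbacks can be issued with MID_RETRY_NEEDED set
 * (outside the server locks). For RDMA, the SMB Direct connection is
 * destroyed as well.
 */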
static void
cifs_abort_connection(struct TCP_Server_Info *server)
{
	struct mid_q_entry *mid, *nmid;
	struct list_head retry_list;

	server->maxBuf = 0;
	server->max_read = 0;

	/* do not want to be sending data on a socket we are freeing */
	cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
	cifs_server_lock(server);
	if (server->ssocket) {
		cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
			 server->ssocket->flags);
		kernel_sock_shutdown(server->ssocket, SHUT_WR);
		cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
			 server->ssocket->flags);
		sock_release(server->ssocket);
		server->ssocket = NULL;
	}
	server->sequence_number = 0;
	server->session_estab = false;
	kfree_sensitive(server->session_key.response);
	server->session_key.response = NULL;
	server->session_key.len = 0;
	server->lstrp = jiffies;

	/* mark submitted MIDs for retry and issue callback */
	INIT_LIST_HEAD(&retry_list);
	cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
	spin_lock(&server->mid_lock);
	list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
		kref_get(&mid->refcount);
		if (mid->mid_state == MID_REQUEST_SUBMITTED)
			mid->mid_state = MID_RETRY_NEEDED;
		list_move(&mid->qhead, &retry_list);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&server->mid_lock);
	cifs_server_unlock(server);

	cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
	list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
		list_del_init(&mid->qhead);
		mid->callback(mid);
		release_mid(mid);
	}

	if (cifs_rdma_enabled(server)) {
		cifs_server_lock(server);
		smbd_destroy(server);
		cifs_server_unlock(server);
	}
}

static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
{
	spin_lock(&server->srv_lock);
	server->nr_targets = num_targets;
	if (server->tcpStatus == CifsExiting) {
		/* the demux thread will exit normally next time through the loop */
		spin_unlock(&server->srv_lock);
		wake_up(&server->response_q);
		return false;
	}

	cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
	trace_smb3_reconnect(server->CurrentMid, server->conn_id,
			     server->hostname);
	server->tcpStatus = CifsNeedReconnect;

	spin_unlock(&server->srv_lock);
	return true;
}

/*
 * cifs tcp session reconnection
 *
 * mark tcp session as reconnecting so temporarily locked
 * mark all smb sessions as reconnecting for tcp session
 * reconnect tcp session
 * wake up waiters on reconnection? - (not needed currently)
 *
 * if mark_smb_session is passed as true, unconditionally mark
 * the smb session (and tcon) for reconnect as well. This value
 * doesn't really matter for non-multichannel scenario.
 *
 */
static int __cifs_reconnect(struct TCP_Server_Info *server,
			    bool mark_smb_session)
{
	int rc = 0;

	if (!cifs_tcp_ses_needs_reconnect(server, 1))
		return 0;

	cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);

	cifs_abort_connection(server);

	do {
		try_to_freeze();
		cifs_server_lock(server);

		if (!cifs_swn_set_server_dstaddr(server)) {
			/* resolve the hostname again to make sure that IP address is up-to-date */
			rc = reconn_set_ipaddr_from_hostname(server);
			cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
		}

		if (cifs_rdma_enabled(server))
			rc = smbd_reconnect(server);
		else
			rc = generic_ip_connect(server);
		if (rc) {
			cifs_server_unlock(server);
			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
			msleep(3000);
		} else {
			atomic_inc(&tcpSesReconnectCount);
			set_credits(server, 1);
			spin_lock(&server->srv_lock);
			if (server->tcpStatus != CifsExiting)
				server->tcpStatus = CifsNeedNegotiate;
			spin_unlock(&server->srv_lock);
			cifs_swn_reset_server_dstaddr(server);
			cifs_server_unlock(server);
			mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
		}
	} while (server->tcpStatus == CifsNeedReconnect);

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedNegotiate)
		mod_delayed_work(cifsiod_wq, &server->echo, 0);
	spin_unlock(&server->srv_lock);

	wake_up(&server->response_q);
	return rc;
}

#ifdef CONFIG_CIFS_DFS_UPCALL
static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
{
	int rc;
	char *hostname;

	if (!cifs_swn_set_server_dstaddr(server)) {
		if (server->hostname != target) {
			hostname = extract_hostname(target);
			if (!IS_ERR(hostname)) {
				spin_lock(&server->srv_lock);
				kfree(server->hostname);
				server->hostname = hostname;
				spin_unlock(&server->srv_lock);
			} else {
				cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
					 __func__, PTR_ERR(hostname));
				cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
					 server->hostname);
			}
		}
		/* resolve the hostname again to make sure that IP address is up-to-date.
		 */
		rc = reconn_set_ipaddr_from_hostname(server);
		cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
	}
	/* Reconnect the socket */
	if (cifs_rdma_enabled(server))
		rc = smbd_reconnect(server);
	else
		rc = generic_ip_connect(server);

	return rc;
}

static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
				     struct dfs_cache_tgt_iterator **target_hint)
{
	int rc;
	struct dfs_cache_tgt_iterator *tit;

	*target_hint = NULL;

	/* If dfs target list is empty, then reconnect to last server */
	tit = dfs_cache_get_tgt_iterator(tl);
	if (!tit)
		return __reconnect_target_unlocked(server, server->hostname);

	/* Otherwise, try every dfs target in @tl */
	for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
		rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
		if (!rc) {
			*target_hint = tit;
			break;
		}
	}
	return rc;
}

static int reconnect_dfs_server(struct TCP_Server_Info *server)
{
	struct dfs_cache_tgt_iterator *target_hint = NULL;
	DFS_CACHE_TGT_LIST(tl);
	int num_targets = 0;
	int rc = 0;

	/*
	 * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
	 *
	 * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
	 * targets (server->nr_targets).  It's also possible that the cached referral was cleared
	 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
	 * refreshing the referral, so, in this case, default it to 1.
	 */
	mutex_lock(&server->refpath_lock);
	if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl))
		num_targets = dfs_cache_get_nr_tgts(&tl);
	mutex_unlock(&server->refpath_lock);
	if (!num_targets)
		num_targets = 1;

	if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
		return 0;

	/*
	 * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
	 * different server or share during failover.  It could be improved by adding some logic to
	 * only do that in case it connects to a different server or share, though.
	 */
	cifs_mark_tcp_ses_conns_for_reconnect(server, true);

	cifs_abort_connection(server);

	do {
		try_to_freeze();
		cifs_server_lock(server);

		rc = reconnect_target_unlocked(server, &tl, &target_hint);
		if (rc) {
			/* Failed to reconnect socket */
			cifs_server_unlock(server);
			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
			msleep(3000);
			continue;
		}
		/*
		 * Socket was created.  Update tcp session status to CifsNeedNegotiate so that a
		 * process waiting for reconnect will know it needs to re-establish session and tcon
		 * through the reconnected target server.
		 */
		atomic_inc(&tcpSesReconnectCount);
		set_credits(server, 1);
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsExiting)
			server->tcpStatus = CifsNeedNegotiate;
		spin_unlock(&server->srv_lock);
		cifs_swn_reset_server_dstaddr(server);
		cifs_server_unlock(server);
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
	} while (server->tcpStatus == CifsNeedReconnect);

	mutex_lock(&server->refpath_lock);
	dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint);
	mutex_unlock(&server->refpath_lock);
	dfs_cache_free_tgts(&tl);

	/* Need to set up echo worker again once connection has been established */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedNegotiate)
		mod_delayed_work(cifsiod_wq, &server->echo, 0);
	spin_unlock(&server->srv_lock);

	wake_up(&server->response_q);
	return rc;
}

int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
	mutex_lock(&server->refpath_lock);
	if (!server->leaf_fullpath) {
		mutex_unlock(&server->refpath_lock);
		return __cifs_reconnect(server, mark_smb_session);
	}
	mutex_unlock(&server->refpath_lock);

	return reconnect_dfs_server(server);
}
#else
int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
	return __cifs_reconnect(server, mark_smb_session);
}
#endif

static void
cifs_echo_request(struct work_struct *work)
{
	int rc;
	struct TCP_Server_Info *server = container_of(work,
					struct TCP_Server_Info, echo.work);

	/*
	 * We cannot send an echo if it is disabled.
	 * Also, no need to ping if we got a response recently.
	 */

	if (server->tcpStatus == CifsNeedReconnect ||
	    server->tcpStatus == CifsExiting ||
	    server->tcpStatus == CifsNew ||
	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
	    time_before(jiffies, server->lstrp + server->echo_interval - HZ))
		goto requeue_echo;

	rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
	cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);

	/* Check witness registrations */
	cifs_swn_check();

requeue_echo:
	queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
}

static bool
allocate_buffers(struct TCP_Server_Info *server)
{
	if (!server->bigbuf) {
		server->bigbuf = (char *)cifs_buf_get();
		if (!server->bigbuf) {
			cifs_server_dbg(VFS, "No memory for large SMB response\n");
			msleep(3000);
			/* retry will check if exiting */
			return false;
		}
	} else if (server->large_buf) {
		/* we are reusing a dirty large buf, clear its start */
		memset(server->bigbuf, 0, HEADER_SIZE(server));
	}

	if (!server->smallbuf) {
		server->smallbuf = (char *)cifs_small_buf_get();
		if (!server->smallbuf) {
			cifs_server_dbg(VFS, "No memory for SMB response\n");
			msleep(1000);
			/* retry will check if exiting */
			return false;
		}
		/* beginning of smb buffer is cleared in our buf_get */
	} else {
		/* if existing small buf clear beginning */
		memset(server->smallbuf, 0, HEADER_SIZE(server));
	}

	return true;
}

static bool
server_unresponsive(struct TCP_Server_Info *server)
{
	/*
	 * We need to wait 3 echo intervals to make sure we handle such
	 * situations right:
	 * 1s  client sends a normal SMB request
	 * 2s  client gets a response
	 * 30s echo workqueue job pops, and decides we got a response recently
	 *     and don't need to send another
	 * ...
	 * 65s kernel_recvmsg times out, and we see that we haven't gotten
	 *     a response in >60s.
	 */
	spin_lock(&server->srv_lock);
	if ((server->tcpStatus == CifsGood ||
	    server->tcpStatus == CifsNeedNegotiate) &&
	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
	    time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
		spin_unlock(&server->srv_lock);
		cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
			 (3 * server->echo_interval) / HZ);
		cifs_reconnect(server, false);
		return true;
	}
	spin_unlock(&server->srv_lock);

	return false;
}

static inline bool
zero_credits(struct TCP_Server_Info *server)
{
	int val;

	spin_lock(&server->req_lock);
	val = server->credits + server->echo_credits + server->oplock_credits;
	if (server->in_flight == 0 && val == 0) {
		spin_unlock(&server->req_lock);
		return true;
	}
	spin_unlock(&server->req_lock);
	return false;
}

static int
cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
{
	int length = 0;
	int total_read;

	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
		try_to_freeze();

		/* reconnect if no credits and no requests in flight */
		if (zero_credits(server)) {
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}

		if (server_unresponsive(server))
			return -ECONNABORTED;
		if (cifs_rdma_enabled(server) && server->smbd_conn)
			length = smbd_recv(server->smbd_conn, smb_msg);
		else
			length = sock_recvmsg(server->ssocket, smb_msg, 0);

		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsExiting) {
			spin_unlock(&server->srv_lock);
			return -ESHUTDOWN;
		}

		if (server->tcpStatus == CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}
		spin_unlock(&server->srv_lock);

		if (length == -ERESTARTSYS ||
		    length == -EAGAIN ||
		    length == -EINTR) {
			/*
			 * Minimum sleep to prevent looping, allowing socket
			 * to clear and app threads to set tcpStatus
			 * CifsNeedReconnect if server hung.
			 */
			usleep_range(1000, 2000);
			length = 0;
			continue;
		}

		if (length <= 0) {
			cifs_dbg(FYI, "Received no data or error: %d\n", length);
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}
	}
	return total_read;
}

int
cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
		      unsigned int to_read)
{
	struct msghdr smb_msg = {};
	struct kvec iov = {.iov_base = buf, .iov_len = to_read};

	iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);

	return cifs_readv_from_socket(server, &smb_msg);
}

ssize_t
cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
{
	struct msghdr smb_msg = {};

	/*
	 *  iov_iter_discard already sets smb_msg.type and count and iov_offset
	 *  and cifs_readv_from_socket sets msg_control and msg_controllen
	 *  so little to initialize in struct msghdr
	 */
	iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read);

	return cifs_readv_from_socket(server, &smb_msg);
}

int
cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
			   unsigned int page_offset, unsigned int to_read)
{
	struct msghdr smb_msg = {};
	struct bio_vec bv;

	bvec_set_page(&bv, page, to_read, page_offset);
	iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read);
	return cifs_readv_from_socket(server, &smb_msg);
}

int
cifs_read_iter_from_socket(struct TCP_Server_Info *server, struct iov_iter *iter,
			   unsigned int to_read)
{
	struct msghdr smb_msg = { .msg_iter = *iter };
	int ret;

	iov_iter_truncate(&smb_msg.msg_iter, to_read);
	ret = cifs_readv_from_socket(server, &smb_msg);
	if (ret > 0)
		iov_iter_advance(iter, ret);
	return ret;
}

static bool
is_smb_response(struct TCP_Server_Info *server, unsigned char type)
{
	/*
	 * The first byte big endian of the length field,
	 * is actually not part of the length but the type
	 * with the most common, zero, as regular data.
	 */
	switch (type) {
	case RFC1002_SESSION_MESSAGE:
		/* Regular SMB response */
		return true;
	case RFC1002_SESSION_KEEP_ALIVE:
		cifs_dbg(FYI, "RFC 1002 session keep alive\n");
		break;
	case RFC1002_POSITIVE_SESSION_RESPONSE:
		cifs_dbg(FYI, "RFC 1002 positive session response\n");
		break;
	case RFC1002_NEGATIVE_SESSION_RESPONSE:
		/*
		 * We get this from Windows 98 instead of an error on
		 * SMB negprot response.
		 */
		cifs_dbg(FYI, "RFC 1002 negative session response\n");
		/* give server a second to clean up */
		msleep(1000);
		/*
		 * Always try 445 first on reconnect since we get NACK
		 * on some if we ever connected to port 139 (the NACK
		 * is since we do not begin with RFC1001 session
		 * initialize frame).
		 */
		cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
		cifs_reconnect(server, true);
		break;
	default:
		cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
		cifs_reconnect(server, true);
	}

	return false;
}

void
dequeue_mid(struct mid_q_entry *mid, bool malformed)
{
#ifdef CONFIG_CIFS_STATS2
	mid->when_received = jiffies;
#endif
	spin_lock(&mid->server->mid_lock);
	if (!malformed)
		mid->mid_state = MID_RESPONSE_RECEIVED;
	else
		mid->mid_state = MID_RESPONSE_MALFORMED;
	/*
	 * Trying to handle/dequeue a mid after the send_recv()
	 * function has finished processing it is a bug.
	 */
	if (mid->mid_flags & MID_DELETED) {
		spin_unlock(&mid->server->mid_lock);
		pr_warn_once("trying to dequeue a deleted mid\n");
	} else {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
		spin_unlock(&mid->server->mid_lock);
	}
}

static unsigned int
smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
{
	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;

	/*
	 * SMB1 does not use credits.
	 */
	if (is_smb1(server))
		return 0;

	return le16_to_cpu(shdr->CreditRequest);
}

static void
handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
	   char *buf, int malformed)
{
	if (server->ops->check_trans2 &&
	    server->ops->check_trans2(mid, server, buf, malformed))
		return;
	mid->credits_received = smb2_get_credits_from_hdr(buf, server);
	mid->resp_buf = buf;
	mid->large_buf = server->large_buf;
	/* Was previous buf put in mpx struct for multi-rsp? */
	if (!mid->multiRsp) {
		/* smb buffer will be freed by user thread */
		if (server->large_buf)
			server->bigbuf = NULL;
		else
			server->smallbuf = NULL;
	}
	dequeue_mid(mid, malformed);
}

int
cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
{
	bool srv_sign_required = server->sec_mode & server->vals->signing_required;
	bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
	bool mnt_sign_enabled;

	/*
	 * Is signing required by mnt options? If not then check
	 * global_secflags to see if it is there.
	 */
	if (!mnt_sign_required)
		mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
						CIFSSEC_MUST_SIGN);

	/*
	 * If signing is required then it's automatically enabled too,
	 * otherwise, check to see if the secflags allow it.
	 */
	mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
				(global_secflags & CIFSSEC_MAY_SIGN);

	/* If server requires signing, does client allow it? */
	if (srv_sign_required) {
		if (!mnt_sign_enabled) {
			cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
			return -EOPNOTSUPP;
		}
		server->sign = true;
	}

	/* If client requires signing, does server allow it? */
	if (mnt_sign_required) {
		if (!srv_sign_enabled) {
			cifs_dbg(VFS, "Server does not support signing!\n");
			return -EOPNOTSUPP;
		}
		server->sign = true;
	}

	if (cifs_rdma_enabled(server) && server->sign)
		cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");

	return 0;
}

static noinline_for_stack void
clean_demultiplex_info(struct TCP_Server_Info *server)
{
	int length;

	/* take it off the list, if it's not already */
	spin_lock(&server->srv_lock);
	list_del_init(&server->tcp_ses_list);
	spin_unlock(&server->srv_lock);

	cancel_delayed_work_sync(&server->echo);

	spin_lock(&server->srv_lock);
	server->tcpStatus = CifsExiting;
	spin_unlock(&server->srv_lock);
	wake_up_all(&server->response_q);

	/* check if we have blocked requests that need to free */
	spin_lock(&server->req_lock);
	if (server->credits <= 0)
		server->credits = 1;
	spin_unlock(&server->req_lock);
	/*
	 * Although there should not be any requests blocked on this queue it
	 * can not hurt to be paranoid and try to wake up requests that may
	 * have been blocked when more than 50 at a time were on the wire to
	 * the same server - they now will see the session is in exit state
	 * and get out of SendReceive.
	 */
	wake_up_all(&server->request_q);
	/* give those requests time to exit */
	msleep(125);
	if (cifs_rdma_enabled(server))
		smbd_destroy(server);
	if (server->ssocket) {
		sock_release(server->ssocket);
		server->ssocket = NULL;
	}

	if (!list_empty(&server->pending_mid_q)) {
		struct list_head dispose_list;
		struct mid_q_entry *mid_entry;
		struct list_head *tmp, *tmp2;

		INIT_LIST_HEAD(&dispose_list);
		spin_lock(&server->mid_lock);
		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
			kref_get(&mid_entry->refcount);
			mid_entry->mid_state = MID_SHUTDOWN;
			list_move(&mid_entry->qhead, &dispose_list);
			mid_entry->mid_flags |= MID_DELETED;
		}
		spin_unlock(&server->mid_lock);

		/* now walk dispose list and issue callbacks */
		list_for_each_safe(tmp, tmp2, &dispose_list) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
			list_del_init(&mid_entry->qhead);
			mid_entry->callback(mid_entry);
			release_mid(mid_entry);
		}
		/* 1/8th of sec is more than enough time for them to exit */
		msleep(125);
	}

	if (!list_empty(&server->pending_mid_q)) {
		/*
		 * mpx threads have not exited yet give them at least the smb
		 * send timeout time for long ops.
		 *
		 * Due to delays on oplock break requests, we need to wait at
		 * least 45 seconds before giving up on a request getting a
		 * response and going ahead and killing cifsd.
		 */
		cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
		msleep(46000);
		/*
		 * If threads still have not exited they are probably never
		 * coming home, so not much else we can do but free the memory.
		 */
	}

	kfree(server->leaf_fullpath);
	kfree(server);

	length = atomic_dec_return(&tcpSesAllocCount);
	if (length > 0)
		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
}

static int
standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length;
	char *buf = server->smallbuf;
	unsigned int pdu_length = server->pdu_size;

	/* make sure this will fit in a large buffer */
	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
	    HEADER_PREAMBLE_SIZE(server)) {
		cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
		cifs_reconnect(server, true);
		return -ECONNABORTED;
	}

	/* switch to large buffer if too big for a small one */
	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
		server->large_buf = true;
		memcpy(server->bigbuf, buf, server->total_read);
		buf = server->bigbuf;
	}

	/* now read the rest */
	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
				       pdu_length - MID_HEADER_SIZE(server));

	if (length < 0)
		return length;
	server->total_read += length;

	dump_smb(buf, server->total_read);

	return cifs_handle_standard(server, mid);
}

int
cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
	int rc;

	/*
	 * We know that we received enough to get to the MID as we
	 * checked the pdu_length earlier. Now check to see
	 * if the rest of the header is OK.
	 *
	 * 48 bytes is enough to display the header and a little bit
	 * into the payload for debugging purposes.
	 */
	rc = server->ops->check_message(buf, server->total_read, server);
	if (rc)
		cifs_dump_mem("Bad SMB: ", buf,
			min_t(unsigned int, server->total_read, 48));

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server))
		return -1;

	if (!mid)
		return rc;

	handle_mid(mid, server, buf, rc);
	return 0;
}

static void
smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
{
	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
	int scredits, in_flight;

	/*
	 * SMB1 does not use credits.
	 */
	if (is_smb1(server))
		return;

	if (shdr->CreditRequest) {
		spin_lock(&server->req_lock);
		server->credits += le16_to_cpu(shdr->CreditRequest);
		scredits = server->credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);
		wake_up(&server->request_q);

		trace_smb3_hdr_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits,
				le16_to_cpu(shdr->CreditRequest), in_flight);
		cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
				__func__, le16_to_cpu(shdr->CreditRequest),
				scredits);
	}
}
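
/*
 * Main demultiplex thread ("cifsd") for a TCP connection: reads frames from
 * the socket, matches them to pending mids and issues their callbacks, and
 * triggers reconnects on malformed, oversized or timed-out responses. Runs
 * until tcpStatus becomes CifsExiting, then tears down the connection state.
 */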
static int
cifs_demultiplex_thread(void *p)
{
	int i, num_mids, length;
	struct TCP_Server_Info *server = p;
	unsigned int pdu_length;
	unsigned int next_offset;
	char *buf = NULL;
	struct task_struct *task_to_wake = NULL;
	struct mid_q_entry *mids[MAX_COMPOUND];
	char *bufs[MAX_COMPOUND];
	unsigned int noreclaim_flag, num_io_timeout = 0;
	bool pending_reconnect = false;

	noreclaim_flag = memalloc_noreclaim_save();
	cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));

	length = atomic_inc_return(&tcpSesAllocCount);
	if (length > 1)
		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);

	set_freezable();
	allow_kernel_signal(SIGKILL);
	while (server->tcpStatus != CifsExiting) {
		if (try_to_freeze())
			continue;

		if (!allocate_buffers(server))
			continue;

		server->large_buf = false;
		buf = server->smallbuf;
		pdu_length = 4; /* enough to get RFC1001 header */

		length = cifs_read_from_socket(server, buf, pdu_length);
		if (length < 0)
			continue;

		if (is_smb1(server))
			server->total_read = length;
		else
			server->total_read = 0;

		/*
		 * The right amount was read from socket - 4 bytes,
		 * so we can now interpret the length field.
		 */
		pdu_length = get_rfc1002_length(buf);

		cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
		if (!is_smb_response(server, buf[0]))
			continue;

		pending_reconnect = false;
next_pdu:
		server->pdu_size = pdu_length;

		/* make sure we have enough to get to the MID */
		if (server->pdu_size < MID_HEADER_SIZE(server)) {
			cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
				 server->pdu_size);
			cifs_reconnect(server, true);
			continue;
		}

		/* read down to the MID */
		length = cifs_read_from_socket(server,
				 buf + HEADER_PREAMBLE_SIZE(server),
				 MID_HEADER_SIZE(server));
		if (length < 0)
			continue;
		server->total_read += length;

		if (server->ops->next_header) {
			if (server->ops->next_header(server, buf, &next_offset)) {
				cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n",
					 __func__, next_offset);
				cifs_reconnect(server, true);
				continue;
			}
			if (next_offset)
				server->pdu_size = next_offset;
		}

		memset(mids, 0, sizeof(mids));
		memset(bufs, 0, sizeof(bufs));
		num_mids = 0;

		if (server->ops->is_transform_hdr &&
		    server->ops->receive_transform &&
		    server->ops->is_transform_hdr(buf)) {
			length = server->ops->receive_transform(server,
								mids,
								bufs,
								&num_mids);
		} else {
			mids[0] = server->ops->find_mid(server, buf);
			bufs[0] = buf;
			num_mids = 1;

			if (!mids[0] || !mids[0]->receive)
				length = standard_receive3(server, mids[0]);
			else
				length = mids[0]->receive(server, mids[0]);
		}

		if (length < 0) {
			for (i = 0; i < num_mids; i++)
				if (mids[i])
					release_mid(mids[i]);
			continue;
		}

		if (server->ops->is_status_io_timeout &&
		    server->ops->is_status_io_timeout(buf)) {
			num_io_timeout++;
			if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) {
				cifs_server_dbg(VFS,
					"Number of request timeouts exceeded %d. Reconnecting",
					MAX_STATUS_IO_TIMEOUT);

				pending_reconnect = true;
				num_io_timeout = 0;
			}
		}

		server->lstrp = jiffies;

		for (i = 0; i < num_mids; i++) {
			if (mids[i] != NULL) {
				mids[i]->resp_buf_size = server->pdu_size;

				if (bufs[i] != NULL) {
					if (server->ops->is_network_name_deleted &&
					    server->ops->is_network_name_deleted(bufs[i],
										 server)) {
						cifs_server_dbg(FYI,
								"Share deleted. Reconnect needed");
					}
				}

				if (!mids[i]->multiRsp || mids[i]->multiEnd)
					mids[i]->callback(mids[i]);

				release_mid(mids[i]);
			} else if (server->ops->is_oplock_break &&
				   server->ops->is_oplock_break(bufs[i],
								 server)) {
				smb2_add_credits_from_hdr(bufs[i], server);
				cifs_dbg(FYI, "Received oplock break\n");
			} else {
				cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
						atomic_read(&mid_count));
				cifs_dump_mem("Received Data is: ", bufs[i],
					      HEADER_SIZE(server));
				smb2_add_credits_from_hdr(bufs[i], server);
#ifdef CONFIG_CIFS_DEBUG2
				if (server->ops->dump_detail)
					server->ops->dump_detail(bufs[i],
								 server);
				cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
			}
		}

		if (pdu_length > server->pdu_size) {
			if (!allocate_buffers(server))
				continue;
			pdu_length -= server->pdu_size;
			server->total_read = 0;
			server->large_buf = false;
			buf = server->smallbuf;
			goto next_pdu;
		}

		/* do this reconnect at the very end after processing all MIDs */
		if (pending_reconnect)
			cifs_reconnect(server, true);

	} /* end while !EXITING */

	/* buffer usually freed in free_mid - need to free it here on exit */
	cifs_buf_release(server->bigbuf);
	if (server->smallbuf) /* no sense logging a debug message if NULL */
		cifs_small_buf_release(server->smallbuf);

	task_to_wake = xchg(&server->tsk, NULL);
	clean_demultiplex_info(server);

	/* if server->tsk was NULL then wait for a signal before exiting */
	if (!task_to_wake) {
		set_current_state(TASK_INTERRUPTIBLE);
		while (!signal_pending(current)) {
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
		set_current_state(TASK_RUNNING);
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	module_put_and_kthread_exit(0);
}

int
cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs)
{
	struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
	struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
	struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
	struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;

	switch (srcaddr->sa_family) {
	case AF_UNSPEC:
		switch (rhs->sa_family) {
		case AF_UNSPEC:
			return 0;
		case AF_INET:
		case AF_INET6:
			return 1;
		default:
			return -1;
		}
	case AF_INET: {
		switch (rhs->sa_family) {
		case AF_UNSPEC:
			return -1;
		case AF_INET:
			return memcmp(saddr4, vaddr4,
				      sizeof(struct sockaddr_in));
		case AF_INET6:
			return 1;
		default:
			return -1;
		}
	}
	case AF_INET6: {
		switch (rhs->sa_family) {
		case AF_UNSPEC:
		case AF_INET:
			return -1;
		case AF_INET6:
			return memcmp(saddr6,
				      vaddr6,
				      sizeof(struct sockaddr_in6));
		default:
			return -1;
		}
	}
	default:
		return -1; /* don't expect to be here */
	}
}

/*
 * Returns true if srcaddr isn't specified and rhs isn't specified, or
 * if srcaddr is specified and matches the IP address of the rhs argument
 */
bool
cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
{
	switch (srcaddr->sa_family) {
	case AF_UNSPEC:
		return (rhs->sa_family == AF_UNSPEC);
	case AF_INET: {
		struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
		struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;

		return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
	}
	case AF_INET6: {
		struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
		struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;

		return (ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr)
			&& saddr6->sin6_scope_id == vaddr6->sin6_scope_id);
	}
	default:
		WARN_ON(1);
		return false; /* don't expect to be here */
	}
}

/*
 * If no port is specified in addr structure, we try to match with 445 port
 * and if it fails - with 139 ports. It should be called only if address
 * families of server and addr are equal.
 */
static bool
match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
{
	__be16 port, *sport;

	/* SMBDirect manages its own ports, don't match it here */
	if (server->rdma)
		return true;

	switch (addr->sa_family) {
	case AF_INET:
		sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
		port = ((struct sockaddr_in *) addr)->sin_port;
		break;
	case AF_INET6:
		sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
		port = ((struct sockaddr_in6 *) addr)->sin6_port;
		break;
	default:
		WARN_ON(1);
		return false;
	}

	if (!port) {
		port = htons(CIFS_PORT);
		if (port == *sport)
			return true;

		port = htons(RFC1001_PORT);
	}

	return port == *sport;
}

static bool match_server_address(struct TCP_Server_Info *server, struct sockaddr *addr)
{
	if (!cifs_match_ipaddr(addr, (struct sockaddr *)&server->dstaddr))
		return false;

	return true;
}

static bool
match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
	/*
	 * The select_sectype function should either return the ctx->sectype
	 * that was specified, or "Unspecified" if that sectype was not
	 * compatible with the given NEGOTIATE request.
	 */
	if (server->ops->select_sectype(server, ctx->sectype)
	     == Unspecified)
		return false;

	/*
	 * Now check if signing mode is acceptable. No need to check
	 * global_secflags at this point since if MUST_SIGN is set then
	 * the server->sign had better be too.
	 */
	if (ctx->sign && !server->sign)
		return false;

	return true;
}

/* this function must be called with srv_lock held */
static int match_server(struct TCP_Server_Info *server,
			struct smb3_fs_context *ctx,
			bool match_super)
{
	struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;

	lockdep_assert_held(&server->srv_lock);

	if (ctx->nosharesock)
		return 0;

	/* this server does not share socket */
	if (server->nosharesock)
		return 0;

	/* If multidialect negotiation see if existing sessions match one */
	if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
		if (server->vals->protocol_id < SMB30_PROT_ID)
			return 0;
	} else if (strcmp(ctx->vals->version_string,
			  SMBDEFAULT_VERSION_STRING) == 0) {
		if (server->vals->protocol_id < SMB21_PROT_ID)
			return 0;
	} else if ((server->vals != ctx->vals) || (server->ops != ctx->ops))
		return 0;

	if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
		return 0;

	if (!cifs_match_ipaddr((struct sockaddr *)&ctx->srcaddr,
			       (struct sockaddr *)&server->srcaddr))
		return 0;
	/*
	 * When matching cifs.ko superblocks (@match_super == true), we can't
	 * really match either @server->leaf_fullpath or @server->dstaddr
	 * directly since this @server might belong to a completely different
	 * server -- in case of domain-based DFS referrals or DFS links -- as
	 * provided earlier by mount(2) through 'source' and 'ip' options.
	 *
	 * Otherwise, match the DFS referral in @server->leaf_fullpath or the
	 * destination address in @server->dstaddr.
	 *
	 * When using 'nodfs' mount option, we avoid sharing it with DFS
	 * connections as they might failover.
	 */
	if (!match_super) {
		if (!ctx->nodfs) {
			if (server->leaf_fullpath) {
				if (!ctx->leaf_fullpath ||
				    strcasecmp(server->leaf_fullpath,
					       ctx->leaf_fullpath))
					return 0;
			} else if (ctx->leaf_fullpath) {
				return 0;
			}
		} else if (server->leaf_fullpath) {
			return 0;
		}
	}

	/*
	 * Match for a regular connection (address/hostname/port) which has no
	 * DFS referrals set.
	 */
	if (!server->leaf_fullpath &&
	    (strcasecmp(server->hostname, ctx->server_hostname) ||
	     !match_server_address(server, addr) ||
	     !match_port(server, addr)))
		return 0;

	if (!match_security(server, ctx))
		return 0;

	if (server->echo_interval != ctx->echo_interval * HZ)
		return 0;

	if (server->rdma != ctx->rdma)
		return 0;

	if (server->ignore_signature != ctx->ignore_signature)
		return 0;

	if (server->min_offload != ctx->min_offload)
		return 0;

	return 1;
}

struct TCP_Server_Info *
cifs_find_tcp_session(struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		spin_lock(&server->srv_lock);
		/*
		 * Skip ses channels since they're only handled in lower layers
		 * (e.g. cifs_send_recv).
		 */
		if (SERVER_IS_CHAN(server) ||
		    !match_server(server, ctx, false)) {
			spin_unlock(&server->srv_lock);
			continue;
		}
		spin_unlock(&server->srv_lock);

		++server->srv_count;
		spin_unlock(&cifs_tcp_ses_lock);
		cifs_dbg(FYI, "Existing tcp session with server found\n");
		return server;
	}
	spin_unlock(&cifs_tcp_ses_lock);
	return NULL;
}

void
cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
{
	struct task_struct *task;

	spin_lock(&cifs_tcp_ses_lock);
	if (--server->srv_count > 0) {
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}

	/* srv_count can never go negative */
	WARN_ON(server->srv_count < 0);

	put_net(cifs_net_ns(server));

	list_del_init(&server->tcp_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	cancel_delayed_work_sync(&server->echo);

	if (from_reconnect)
		/*
		 * Avoid deadlock here: reconnect work calls
		 * cifs_put_tcp_session() at its end. Need to be sure
		 * that reconnect work does nothing with server pointer after
		 * that step.
		 */
		cancel_delayed_work(&server->reconnect);
	else
		cancel_delayed_work_sync(&server->reconnect);

	/* For secondary channels, we pick up ref-count on the primary server */
	if (SERVER_IS_CHAN(server))
		cifs_put_tcp_session(server->primary_server, from_reconnect);

	spin_lock(&server->srv_lock);
	server->tcpStatus = CifsExiting;
	spin_unlock(&server->srv_lock);

	cifs_crypto_secmech_release(server);

	kfree_sensitive(server->session_key.response);
	server->session_key.response = NULL;
	server->session_key.len = 0;
	kfree(server->hostname);
	server->hostname = NULL;

	task = xchg(&server->tsk, NULL);
	if (task)
		send_sig(SIGKILL, task, 1);
}
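
/*
 * Find an existing TCP session that matches @ctx, or set up a new one:
 * allocate and initialize the server structure, connect the socket (or the
 * SMB Direct connection), and spawn the cifsd demultiplex thread.
 */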
struct TCP_Server_Info *
cifs_get_tcp_session(struct smb3_fs_context *ctx,
		     struct TCP_Server_Info *primary_server)
{
	struct TCP_Server_Info *tcp_ses = NULL;
	int rc;

	cifs_dbg(FYI, "UNC: %s\n", ctx->UNC);

	/* see if we already have a matching tcp_ses */
	tcp_ses = cifs_find_tcp_session(ctx);
	if (tcp_ses)
		return tcp_ses;

	tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
	if (!tcp_ses) {
		rc = -ENOMEM;
		goto out_err;
	}

	tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL);
	if (!tcp_ses->hostname) {
		rc = -ENOMEM;
		goto out_err;
	}

	if (ctx->leaf_fullpath) {
		tcp_ses->leaf_fullpath = kstrdup(ctx->leaf_fullpath, GFP_KERNEL);
		if (!tcp_ses->leaf_fullpath) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	if (ctx->nosharesock)
		tcp_ses->nosharesock = true;

	tcp_ses->ops = ctx->ops;
	tcp_ses->vals = ctx->vals;
	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));

	tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
	tcp_ses->noblockcnt = ctx->rootfs;
	tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
	tcp_ses->noautotune = ctx->noautotune;
	tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay;
	tcp_ses->rdma = ctx->rdma;
	tcp_ses->in_flight = 0;
	tcp_ses->max_in_flight = 0;
	tcp_ses->credits = 1;
	if (primary_server) {
		spin_lock(&cifs_tcp_ses_lock);
		++primary_server->srv_count;
		spin_unlock(&cifs_tcp_ses_lock);
		tcp_ses->primary_server = primary_server;
	}
	init_waitqueue_head(&tcp_ses->response_q);
	init_waitqueue_head(&tcp_ses->request_q);
	INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
	mutex_init(&tcp_ses->_srv_mutex);
	memcpy(tcp_ses->workstation_RFC1001_name,
		ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
	memcpy(tcp_ses->server_RFC1001_name,
		ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
	tcp_ses->session_estab = false;
	tcp_ses->sequence_number = 0;
	tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
	tcp_ses->reconnect_instance = 1;
	tcp_ses->lstrp = jiffies;
	tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
	spin_lock_init(&tcp_ses->req_lock);
	spin_lock_init(&tcp_ses->srv_lock);
	spin_lock_init(&tcp_ses->mid_lock);
	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
	mutex_init(&tcp_ses->reconnect_mutex);
#ifdef CONFIG_CIFS_DFS_UPCALL
	mutex_init(&tcp_ses->refpath_lock);
#endif
	memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
	       sizeof(tcp_ses->srcaddr));
	memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
	       sizeof(tcp_ses->dstaddr));
	if (ctx->use_client_guid)
		memcpy(tcp_ses->client_guid, ctx->client_guid,
		       SMB2_CLIENT_GUID_SIZE);
	else
		generate_random_uuid(tcp_ses->client_guid);
	/*
	 * at this point we are the only ones with the pointer
	 * to the struct since the kernel thread not created yet
	 * no need to spinlock this init of tcpStatus or srv_count
	 */
	tcp_ses->tcpStatus = CifsNew;
	++tcp_ses->srv_count;

	if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
	    ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX)
		tcp_ses->echo_interval = ctx->echo_interval * HZ;
	else
		tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
	if (tcp_ses->rdma) {
#ifndef CONFIG_CIFS_SMB_DIRECT
		cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
		rc = -ENOENT;
		goto out_err_crypto_release;
#endif
		tcp_ses->smbd_conn = smbd_get_connection(
			tcp_ses, (struct sockaddr *)&ctx->dstaddr);
		if (tcp_ses->smbd_conn) {
			cifs_dbg(VFS, "RDMA transport established\n");
			rc = 0;
			goto smbd_connected;
		} else {
			rc = -ENOENT;
			goto out_err_crypto_release;
		}
	}
	rc = ip_connect(tcp_ses);
	if (rc < 0) {
		cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
		goto out_err_crypto_release;
	}
smbd_connected:
	/*
	 * since we're in a cifs function already, we know that
	 * this will succeed. No need for try_module_get().
	 */
	__module_get(THIS_MODULE);
	tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
				   tcp_ses, "cifsd");
	if (IS_ERR(tcp_ses->tsk)) {
		rc = PTR_ERR(tcp_ses->tsk);
		cifs_dbg(VFS, "error %d create cifsd thread\n", rc);
		module_put(THIS_MODULE);
		goto out_err_crypto_release;
	}
	tcp_ses->min_offload = ctx->min_offload;
	/*
	 * at this point we are the only ones with the pointer
	 * to the struct since the kernel thread not created yet
	 * no need to spinlock this update of tcpStatus
	 */
	spin_lock(&tcp_ses->srv_lock);
	tcp_ses->tcpStatus = CifsNeedNegotiate;
	spin_unlock(&tcp_ses->srv_lock);

	if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
		tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
	else
		tcp_ses->max_credits = ctx->max_credits;

	tcp_ses->nr_targets = 1;
	tcp_ses->ignore_signature = ctx->ignore_signature;
	/* thread spawned, put it on the list */
	spin_lock(&cifs_tcp_ses_lock);
	list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	/* queue echo request delayed work */
	queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);

	return tcp_ses;

out_err_crypto_release:
	cifs_crypto_secmech_release(tcp_ses);

	put_net(cifs_net_ns(tcp_ses));

out_err:
	if (tcp_ses) {
		if (SERVER_IS_CHAN(tcp_ses))
			cifs_put_tcp_session(tcp_ses->primary_server, false);
		kfree(tcp_ses->hostname);
		kfree(tcp_ses->leaf_fullpath);
		if (tcp_ses->ssocket)
			sock_release(tcp_ses->ssocket);
		kfree(tcp_ses);
	}
	return ERR_PTR(rc);
}

/* this function must be called with ses_lock and chan_lock held */
static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
	if (ctx->sectype != Unspecified &&
	    ctx->sectype != ses->sectype)
		return 0;

	/*
	 * If an existing session is limited to fewer channels than
	 * requested, it should not be reused
	 */
	if (ses->chan_max < ctx->max_channels)
		return 0;

	switch (ses->sectype) {
	case Kerberos:
		if (!uid_eq(ctx->cred_uid, ses->cred_uid))
			return 0;
		break;
	default:
		/* NULL username means anonymous session */
		if (ses->user_name == NULL) {
			if (!ctx->nullauth)
				return 0;
			break;
		}

		/* anything else takes username/password */
		if (strncmp(ses->user_name,
			    ctx->username ? ctx->username : "",
			    CIFS_MAX_USERNAME_LEN))
			return 0;
		if ((ctx->username && strlen(ctx->username) != 0) &&
		    ses->password != NULL &&
		    strncmp(ses->password,
			    ctx->password ? ctx->password : "",
			    CIFS_MAX_PASSWORD_LEN))
			return 0;
	}

	if (strcmp(ctx->local_nls->charset, ses->local_nls->charset))
		return 0;

	return 1;
}

/**
 * cifs_setup_ipc - helper to setup the IPC tcon for the session
 * @ses: smb session to issue the request on
 * @ctx: the superblock configuration context to use for building the
 *       new tree connection for the IPC (interprocess communication RPC)
 *
 * A new IPC connection is made and stored in the session
 * tcon_ipc. The IPC tcon has the same lifetime as the session.
 */
static int
cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
	int rc = 0, xid;
	struct cifs_tcon *tcon;
	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
	bool seal = false;
	struct TCP_Server_Info *server = ses->server;

	/*
	 * If the mount request that resulted in the creation of the
	 * session requires encryption, force IPC to be encrypted too.
	 */
	if (ctx->seal) {
		if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
			seal = true;
		else {
			cifs_server_dbg(VFS,
				 "IPC: server doesn't support encryption\n");
			return -EOPNOTSUPP;
		}
	}

	/* no need to setup directory caching on IPC share, so pass in false */
	tcon = tcon_info_alloc(false);
	if (tcon == NULL)
		return -ENOMEM;

	spin_lock(&server->srv_lock);
	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
	spin_unlock(&server->srv_lock);

	xid = get_xid();
	tcon->ses = ses;
	tcon->ipc = true;
	tcon->seal = seal;
	rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls);
	free_xid(xid);

	if (rc) {
		cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
		tconInfoFree(tcon);
		goto out;
	}

	cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);

	spin_lock(&tcon->tc_lock);
	tcon->status = TID_GOOD;
	spin_unlock(&tcon->tc_lock);
	ses->tcon_ipc = tcon;
out:
	return rc;
}

/**
 * cifs_free_ipc - helper to release the session IPC tcon
 * @ses: smb session to unmount the IPC from
 *
 * Needs to be called every time a session is destroyed.
 *
 * On session close, the IPC is closed and the server must release all tcons of the session.
 * No need to send a tree disconnect here.
 *
 * Besides, it will prevent the server from closing durable and resilient files on session close,
 * as specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
1962 */ 1963 static int 1964 cifs_free_ipc(struct cifs_ses *ses) 1965 { 1966 struct cifs_tcon *tcon = ses->tcon_ipc; 1967 1968 if (tcon == NULL) 1969 return 0; 1970 1971 tconInfoFree(tcon); 1972 ses->tcon_ipc = NULL; 1973 return 0; 1974 } 1975 1976 static struct cifs_ses * 1977 cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) 1978 { 1979 struct cifs_ses *ses, *ret = NULL; 1980 1981 spin_lock(&cifs_tcp_ses_lock); 1982 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 1983 spin_lock(&ses->ses_lock); 1984 if (ses->ses_status == SES_EXITING) { 1985 spin_unlock(&ses->ses_lock); 1986 continue; 1987 } 1988 spin_lock(&ses->chan_lock); 1989 if (match_session(ses, ctx)) { 1990 spin_unlock(&ses->chan_lock); 1991 spin_unlock(&ses->ses_lock); 1992 ret = ses; 1993 break; 1994 } 1995 spin_unlock(&ses->chan_lock); 1996 spin_unlock(&ses->ses_lock); 1997 } 1998 if (ret) 1999 cifs_smb_ses_inc_refcount(ret); 2000 spin_unlock(&cifs_tcp_ses_lock); 2001 return ret; 2002 } 2003 2004 void __cifs_put_smb_ses(struct cifs_ses *ses) 2005 { 2006 struct TCP_Server_Info *server = ses->server; 2007 unsigned int xid; 2008 size_t i; 2009 int rc; 2010 2011 spin_lock(&ses->ses_lock); 2012 if (ses->ses_status == SES_EXITING) { 2013 spin_unlock(&ses->ses_lock); 2014 return; 2015 } 2016 spin_unlock(&ses->ses_lock); 2017 2018 cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count); 2019 cifs_dbg(FYI, 2020 "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE"); 2021 2022 spin_lock(&cifs_tcp_ses_lock); 2023 if (--ses->ses_count > 0) { 2024 spin_unlock(&cifs_tcp_ses_lock); 2025 return; 2026 } 2027 spin_lock(&ses->ses_lock); 2028 if (ses->ses_status == SES_GOOD) 2029 ses->ses_status = SES_EXITING; 2030 spin_unlock(&ses->ses_lock); 2031 spin_unlock(&cifs_tcp_ses_lock); 2032 2033 /* ses_count can never go negative */ 2034 WARN_ON(ses->ses_count < 0); 2035 2036 spin_lock(&ses->ses_lock); 2037 if (ses->ses_status == SES_EXITING && server->ops->logoff) { 2038 spin_unlock(&ses->ses_lock); 2039 cifs_free_ipc(ses); 2040 xid = get_xid(); 2041 rc = server->ops->logoff(xid, ses); 2042 if (rc) 2043 cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n", 2044 __func__, rc); 2045 _free_xid(xid); 2046 } else { 2047 spin_unlock(&ses->ses_lock); 2048 cifs_free_ipc(ses); 2049 } 2050 2051 spin_lock(&cifs_tcp_ses_lock); 2052 list_del_init(&ses->smb_ses_list); 2053 spin_unlock(&cifs_tcp_ses_lock); 2054 2055 /* close any extra channels */ 2056 for (i = 1; i < ses->chan_count; i++) { 2057 if (ses->chans[i].iface) { 2058 kref_put(&ses->chans[i].iface->refcount, release_iface); 2059 ses->chans[i].iface = NULL; 2060 } 2061 cifs_put_tcp_session(ses->chans[i].server, 0); 2062 ses->chans[i].server = NULL; 2063 } 2064 2065 /* we now account for primary channel in iface->refcount */ 2066 if (ses->chans[0].iface) { 2067 kref_put(&ses->chans[0].iface->refcount, release_iface); 2068 ses->chans[0].server = NULL; 2069 } 2070 2071 sesInfoFree(ses); 2072 cifs_put_tcp_session(server, 0); 2073 } 2074 2075 #ifdef CONFIG_KEYS 2076 2077 /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */ 2078 #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1) 2079 2080 /* Populate username and pw fields from keyring if possible */ 2081 static int 2082 cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses) 2083 { 2084 int rc = 0; 2085 int is_domain = 0; 2086 const char *delim, *payload; 2087 char *desc; 2088 ssize_t len; 2089 struct key *key; 2090 struct TCP_Server_Info *server = 
ses->server; 2091 struct sockaddr_in *sa; 2092 struct sockaddr_in6 *sa6; 2093 const struct user_key_payload *upayload; 2094 2095 desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL); 2096 if (!desc) 2097 return -ENOMEM; 2098 2099 /* try to find an address key first */ 2100 switch (server->dstaddr.ss_family) { 2101 case AF_INET: 2102 sa = (struct sockaddr_in *)&server->dstaddr; 2103 sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr); 2104 break; 2105 case AF_INET6: 2106 sa6 = (struct sockaddr_in6 *)&server->dstaddr; 2107 sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr); 2108 break; 2109 default: 2110 cifs_dbg(FYI, "Bad ss_family (%hu)\n", 2111 server->dstaddr.ss_family); 2112 rc = -EINVAL; 2113 goto out_err; 2114 } 2115 2116 cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); 2117 key = request_key(&key_type_logon, desc, ""); 2118 if (IS_ERR(key)) { 2119 if (!ses->domainName) { 2120 cifs_dbg(FYI, "domainName is NULL\n"); 2121 rc = PTR_ERR(key); 2122 goto out_err; 2123 } 2124 2125 /* didn't work, try to find a domain key */ 2126 sprintf(desc, "cifs:d:%s", ses->domainName); 2127 cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); 2128 key = request_key(&key_type_logon, desc, ""); 2129 if (IS_ERR(key)) { 2130 rc = PTR_ERR(key); 2131 goto out_err; 2132 } 2133 is_domain = 1; 2134 } 2135 2136 down_read(&key->sem); 2137 upayload = user_key_payload_locked(key); 2138 if (IS_ERR_OR_NULL(upayload)) { 2139 rc = upayload ? PTR_ERR(upayload) : -EINVAL; 2140 goto out_key_put; 2141 } 2142 2143 /* find first : in payload */ 2144 payload = upayload->data; 2145 delim = strnchr(payload, upayload->datalen, ':'); 2146 cifs_dbg(FYI, "payload=%s\n", payload); 2147 if (!delim) { 2148 cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n", 2149 upayload->datalen); 2150 rc = -EINVAL; 2151 goto out_key_put; 2152 } 2153 2154 len = delim - payload; 2155 if (len > CIFS_MAX_USERNAME_LEN || len <= 0) { 2156 cifs_dbg(FYI, "Bad value from username search (len=%zd)\n", 2157 len); 2158 rc = -EINVAL; 2159 goto out_key_put; 2160 } 2161 2162 ctx->username = kstrndup(payload, len, GFP_KERNEL); 2163 if (!ctx->username) { 2164 cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n", 2165 len); 2166 rc = -ENOMEM; 2167 goto out_key_put; 2168 } 2169 cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username); 2170 2171 len = key->datalen - (len + 1); 2172 if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) { 2173 cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len); 2174 rc = -EINVAL; 2175 kfree(ctx->username); 2176 ctx->username = NULL; 2177 goto out_key_put; 2178 } 2179 2180 ++delim; 2181 ctx->password = kstrndup(delim, len, GFP_KERNEL); 2182 if (!ctx->password) { 2183 cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n", 2184 len); 2185 rc = -ENOMEM; 2186 kfree(ctx->username); 2187 ctx->username = NULL; 2188 goto out_key_put; 2189 } 2190 2191 /* 2192 * If we have a domain key then we must set the domainName in the 2193 * for the request. 
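 *
 * Note: whichever logon key matches (address-based "cifs:a:..." or
 * domain-based "cifs:d:..."), its payload is parsed above as
 * "<username>:<password>". For illustration only (hypothetical address
 * and credentials), such a key could be added with keyutils roughly as:
 *   keyctl add logon cifs:a:192.168.1.50 'user1:pass1' @u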
2194 */ 2195 if (is_domain && ses->domainName) { 2196 ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL); 2197 if (!ctx->domainname) { 2198 cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n", 2199 len); 2200 rc = -ENOMEM; 2201 kfree(ctx->username); 2202 ctx->username = NULL; 2203 kfree_sensitive(ctx->password); 2204 ctx->password = NULL; 2205 goto out_key_put; 2206 } 2207 } 2208 2209 strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name)); 2210 2211 out_key_put: 2212 up_read(&key->sem); 2213 key_put(key); 2214 out_err: 2215 kfree(desc); 2216 cifs_dbg(FYI, "%s: returning %d\n", __func__, rc); 2217 return rc; 2218 } 2219 #else /* ! CONFIG_KEYS */ 2220 static inline int 2221 cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)), 2222 struct cifs_ses *ses __attribute__((unused))) 2223 { 2224 return -ENOSYS; 2225 } 2226 #endif /* CONFIG_KEYS */ 2227 2228 /** 2229 * cifs_get_smb_ses - get a session matching @ctx data from @server 2230 * @server: server to setup the session to 2231 * @ctx: superblock configuration context to use to setup the session 2232 * 2233 * This function assumes it is being called from cifs_mount() where we 2234 * already got a server reference (server refcount +1). See 2235 * cifs_get_tcon() for refcount explanations. 2236 */ 2237 struct cifs_ses * 2238 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) 2239 { 2240 int rc = 0; 2241 unsigned int xid; 2242 struct cifs_ses *ses; 2243 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; 2244 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; 2245 2246 xid = get_xid(); 2247 2248 ses = cifs_find_smb_ses(server, ctx); 2249 if (ses) { 2250 cifs_dbg(FYI, "Existing smb sess found (status=%d)\n", 2251 ses->ses_status); 2252 2253 spin_lock(&ses->chan_lock); 2254 if (cifs_chan_needs_reconnect(ses, server)) { 2255 spin_unlock(&ses->chan_lock); 2256 cifs_dbg(FYI, "Session needs reconnect\n"); 2257 2258 mutex_lock(&ses->session_mutex); 2259 rc = cifs_negotiate_protocol(xid, ses, server); 2260 if (rc) { 2261 mutex_unlock(&ses->session_mutex); 2262 /* problem -- put our ses reference */ 2263 cifs_put_smb_ses(ses); 2264 free_xid(xid); 2265 return ERR_PTR(rc); 2266 } 2267 2268 rc = cifs_setup_session(xid, ses, server, 2269 ctx->local_nls); 2270 if (rc) { 2271 mutex_unlock(&ses->session_mutex); 2272 /* problem -- put our reference */ 2273 cifs_put_smb_ses(ses); 2274 free_xid(xid); 2275 return ERR_PTR(rc); 2276 } 2277 mutex_unlock(&ses->session_mutex); 2278 2279 spin_lock(&ses->chan_lock); 2280 } 2281 spin_unlock(&ses->chan_lock); 2282 2283 /* existing SMB ses has a server reference already */ 2284 cifs_put_tcp_session(server, 0); 2285 free_xid(xid); 2286 return ses; 2287 } 2288 2289 rc = -ENOMEM; 2290 2291 cifs_dbg(FYI, "Existing smb sess not found\n"); 2292 ses = sesInfoAlloc(); 2293 if (ses == NULL) 2294 goto get_ses_fail; 2295 2296 /* new SMB session uses our server ref */ 2297 ses->server = server; 2298 if (server->dstaddr.ss_family == AF_INET6) 2299 sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr); 2300 else 2301 sprintf(ses->ip_addr, "%pI4", &addr->sin_addr); 2302 2303 if (ctx->username) { 2304 ses->user_name = kstrdup(ctx->username, GFP_KERNEL); 2305 if (!ses->user_name) 2306 goto get_ses_fail; 2307 } 2308 2309 /* ctx->password freed at unmount */ 2310 if (ctx->password) { 2311 ses->password = kstrdup(ctx->password, GFP_KERNEL); 2312 if (!ses->password) 2313 goto get_ses_fail; 2314 } 2315 if (ctx->domainname) { 2316 
ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL); 2317 if (!ses->domainName) 2318 goto get_ses_fail; 2319 } 2320 2321 strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name)); 2322 2323 if (ctx->domainauto) 2324 ses->domainAuto = ctx->domainauto; 2325 ses->cred_uid = ctx->cred_uid; 2326 ses->linux_uid = ctx->linux_uid; 2327 2328 ses->sectype = ctx->sectype; 2329 ses->sign = ctx->sign; 2330 ses->local_nls = load_nls(ctx->local_nls->charset); 2331 2332 /* add server as first channel */ 2333 spin_lock(&ses->chan_lock); 2334 ses->chans[0].server = server; 2335 ses->chan_count = 1; 2336 ses->chan_max = ctx->multichannel ? ctx->max_channels:1; 2337 ses->chans_need_reconnect = 1; 2338 spin_unlock(&ses->chan_lock); 2339 2340 mutex_lock(&ses->session_mutex); 2341 rc = cifs_negotiate_protocol(xid, ses, server); 2342 if (!rc) 2343 rc = cifs_setup_session(xid, ses, server, ctx->local_nls); 2344 mutex_unlock(&ses->session_mutex); 2345 2346 /* each channel uses a different signing key */ 2347 spin_lock(&ses->chan_lock); 2348 memcpy(ses->chans[0].signkey, ses->smb3signingkey, 2349 sizeof(ses->smb3signingkey)); 2350 spin_unlock(&ses->chan_lock); 2351 2352 if (rc) 2353 goto get_ses_fail; 2354 2355 /* 2356 * success, put it on the list and add it as first channel 2357 * note: the session becomes active soon after this. So you'll 2358 * need to lock before changing something in the session. 2359 */ 2360 spin_lock(&cifs_tcp_ses_lock); 2361 ses->dfs_root_ses = ctx->dfs_root_ses; 2362 if (ses->dfs_root_ses) 2363 ses->dfs_root_ses->ses_count++; 2364 list_add(&ses->smb_ses_list, &server->smb_ses_list); 2365 spin_unlock(&cifs_tcp_ses_lock); 2366 2367 cifs_setup_ipc(ses, ctx); 2368 2369 free_xid(xid); 2370 2371 return ses; 2372 2373 get_ses_fail: 2374 sesInfoFree(ses); 2375 free_xid(xid); 2376 return ERR_PTR(rc); 2377 } 2378 2379 /* this function must be called with tc_lock held */ 2380 static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) 2381 { 2382 struct TCP_Server_Info *server = tcon->ses->server; 2383 2384 if (tcon->status == TID_EXITING) 2385 return 0; 2386 2387 if (tcon->origin_fullpath) { 2388 if (!ctx->source || 2389 !dfs_src_pathname_equal(ctx->source, 2390 tcon->origin_fullpath)) 2391 return 0; 2392 } else if (!server->leaf_fullpath && 2393 strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE)) { 2394 return 0; 2395 } 2396 if (tcon->seal != ctx->seal) 2397 return 0; 2398 if (tcon->snapshot_time != ctx->snapshot_time) 2399 return 0; 2400 if (tcon->handle_timeout != ctx->handle_timeout) 2401 return 0; 2402 if (tcon->no_lease != ctx->no_lease) 2403 return 0; 2404 if (tcon->nodelete != ctx->nodelete) 2405 return 0; 2406 return 1; 2407 } 2408 2409 static struct cifs_tcon * 2410 cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) 2411 { 2412 struct cifs_tcon *tcon; 2413 2414 spin_lock(&cifs_tcp_ses_lock); 2415 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 2416 spin_lock(&tcon->tc_lock); 2417 if (!match_tcon(tcon, ctx)) { 2418 spin_unlock(&tcon->tc_lock); 2419 continue; 2420 } 2421 ++tcon->tc_count; 2422 spin_unlock(&tcon->tc_lock); 2423 spin_unlock(&cifs_tcp_ses_lock); 2424 return tcon; 2425 } 2426 spin_unlock(&cifs_tcp_ses_lock); 2427 return NULL; 2428 } 2429 2430 void 2431 cifs_put_tcon(struct cifs_tcon *tcon) 2432 { 2433 unsigned int xid; 2434 struct cifs_ses *ses; 2435 2436 /* 2437 * IPC tcon share the lifetime of their session and are 2438 * destroyed in the session put function 2439 */ 2440 if (tcon == NULL || tcon->ipc) 2441 
return; 2442 2443 ses = tcon->ses; 2444 cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count); 2445 spin_lock(&cifs_tcp_ses_lock); 2446 spin_lock(&tcon->tc_lock); 2447 if (--tcon->tc_count > 0) { 2448 spin_unlock(&tcon->tc_lock); 2449 spin_unlock(&cifs_tcp_ses_lock); 2450 return; 2451 } 2452 2453 /* tc_count can never go negative */ 2454 WARN_ON(tcon->tc_count < 0); 2455 2456 list_del_init(&tcon->tcon_list); 2457 tcon->status = TID_EXITING; 2458 spin_unlock(&tcon->tc_lock); 2459 spin_unlock(&cifs_tcp_ses_lock); 2460 2461 /* cancel polling of interfaces */ 2462 cancel_delayed_work_sync(&tcon->query_interfaces); 2463 #ifdef CONFIG_CIFS_DFS_UPCALL 2464 cancel_delayed_work_sync(&tcon->dfs_cache_work); 2465 #endif 2466 2467 if (tcon->use_witness) { 2468 int rc; 2469 2470 rc = cifs_swn_unregister(tcon); 2471 if (rc < 0) { 2472 cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n", 2473 __func__, rc); 2474 } 2475 } 2476 2477 xid = get_xid(); 2478 if (ses->server->ops->tree_disconnect) 2479 ses->server->ops->tree_disconnect(xid, tcon); 2480 _free_xid(xid); 2481 2482 cifs_fscache_release_super_cookie(tcon); 2483 tconInfoFree(tcon); 2484 cifs_put_smb_ses(ses); 2485 } 2486 2487 /** 2488 * cifs_get_tcon - get a tcon matching @ctx data from @ses 2489 * @ses: smb session to issue the request on 2490 * @ctx: the superblock configuration context to use for building the new tree connection 2491 * 2492 * - tcon refcount is the number of mount points using the tcon. 2493 * - ses refcount is the number of tcons using the session. 2494 * 2495 * 1. This function assumes it is being called from cifs_mount() where 2496 * we already got a session reference (ses refcount +1). 2497 * 2498 * 2. Since we're in the context of adding a mount point, the end 2499 * result should be either: 2500 * 2501 * a) a new tcon already allocated with refcount=1 (1 mount point) and 2502 * its session refcount incremented (1 new tcon). This +1 was 2503 * already done in (1). 2504 * 2505 * b) an existing tcon with refcount+1 (add a mount point to it) and 2506 * identical ses refcount (no new tcon). Because of (1) we need to 2507 * decrement the ses refcount.
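 *
 * On failure an ERR_PTR() is returned and the extra ses reference from
 * (1) is not dropped here; the caller's error path (e.g.
 * cifs_mount_put_conns()) is expected to release it.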
2508 */ 2509 static struct cifs_tcon * 2510 cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) 2511 { 2512 struct cifs_tcon *tcon; 2513 bool nohandlecache; 2514 int rc, xid; 2515 2516 tcon = cifs_find_tcon(ses, ctx); 2517 if (tcon) { 2518 /* 2519 * tcon has refcount already incremented but we need to 2520 * decrement extra ses reference gotten by caller (case b) 2521 */ 2522 cifs_dbg(FYI, "Found match on UNC path\n"); 2523 cifs_put_smb_ses(ses); 2524 return tcon; 2525 } 2526 2527 if (!ses->server->ops->tree_connect) { 2528 rc = -ENOSYS; 2529 goto out_fail; 2530 } 2531 2532 if (ses->server->dialect >= SMB20_PROT_ID && 2533 (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)) 2534 nohandlecache = ctx->nohandlecache; 2535 else 2536 nohandlecache = true; 2537 tcon = tcon_info_alloc(!nohandlecache); 2538 if (tcon == NULL) { 2539 rc = -ENOMEM; 2540 goto out_fail; 2541 } 2542 tcon->nohandlecache = nohandlecache; 2543 2544 if (ctx->snapshot_time) { 2545 if (ses->server->vals->protocol_id == 0) { 2546 cifs_dbg(VFS, 2547 "Use SMB2 or later for snapshot mount option\n"); 2548 rc = -EOPNOTSUPP; 2549 goto out_fail; 2550 } else 2551 tcon->snapshot_time = ctx->snapshot_time; 2552 } 2553 2554 if (ctx->handle_timeout) { 2555 if (ses->server->vals->protocol_id == 0) { 2556 cifs_dbg(VFS, 2557 "Use SMB2.1 or later for handle timeout option\n"); 2558 rc = -EOPNOTSUPP; 2559 goto out_fail; 2560 } else 2561 tcon->handle_timeout = ctx->handle_timeout; 2562 } 2563 2564 tcon->ses = ses; 2565 if (ctx->password) { 2566 tcon->password = kstrdup(ctx->password, GFP_KERNEL); 2567 if (!tcon->password) { 2568 rc = -ENOMEM; 2569 goto out_fail; 2570 } 2571 } 2572 2573 if (ctx->seal) { 2574 if (ses->server->vals->protocol_id == 0) { 2575 cifs_dbg(VFS, 2576 "SMB3 or later required for encryption\n"); 2577 rc = -EOPNOTSUPP; 2578 goto out_fail; 2579 } else if (tcon->ses->server->capabilities & 2580 SMB2_GLOBAL_CAP_ENCRYPTION) 2581 tcon->seal = true; 2582 else { 2583 cifs_dbg(VFS, "Encryption is not supported on share\n"); 2584 rc = -EOPNOTSUPP; 2585 goto out_fail; 2586 } 2587 } 2588 2589 if (ctx->linux_ext) { 2590 if (ses->server->posix_ext_supported) { 2591 tcon->posix_extensions = true; 2592 pr_warn_once("SMB3.11 POSIX Extensions are experimental\n"); 2593 } else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) || 2594 (strcmp(ses->server->vals->version_string, 2595 SMB3ANY_VERSION_STRING) == 0) || 2596 (strcmp(ses->server->vals->version_string, 2597 SMBDEFAULT_VERSION_STRING) == 0)) { 2598 cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n"); 2599 rc = -EOPNOTSUPP; 2600 goto out_fail; 2601 } else { 2602 cifs_dbg(VFS, "Check vers= mount option. 
SMB3.11 " 2603 "disabled but required for POSIX extensions\n"); 2604 rc = -EOPNOTSUPP; 2605 goto out_fail; 2606 } 2607 } 2608 2609 xid = get_xid(); 2610 rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon, 2611 ctx->local_nls); 2612 free_xid(xid); 2613 cifs_dbg(FYI, "Tcon rc = %d\n", rc); 2614 if (rc) 2615 goto out_fail; 2616 2617 tcon->use_persistent = false; 2618 /* check if SMB2 or later, CIFS does not support persistent handles */ 2619 if (ctx->persistent) { 2620 if (ses->server->vals->protocol_id == 0) { 2621 cifs_dbg(VFS, 2622 "SMB3 or later required for persistent handles\n"); 2623 rc = -EOPNOTSUPP; 2624 goto out_fail; 2625 } else if (ses->server->capabilities & 2626 SMB2_GLOBAL_CAP_PERSISTENT_HANDLES) 2627 tcon->use_persistent = true; 2628 else /* persistent handles requested but not supported */ { 2629 cifs_dbg(VFS, 2630 "Persistent handles not supported on share\n"); 2631 rc = -EOPNOTSUPP; 2632 goto out_fail; 2633 } 2634 } else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY) 2635 && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES) 2636 && (ctx->nopersistent == false)) { 2637 cifs_dbg(FYI, "enabling persistent handles\n"); 2638 tcon->use_persistent = true; 2639 } else if (ctx->resilient) { 2640 if (ses->server->vals->protocol_id == 0) { 2641 cifs_dbg(VFS, 2642 "SMB2.1 or later required for resilient handles\n"); 2643 rc = -EOPNOTSUPP; 2644 goto out_fail; 2645 } 2646 tcon->use_resilient = true; 2647 } 2648 2649 tcon->use_witness = false; 2650 if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) { 2651 if (ses->server->vals->protocol_id >= SMB30_PROT_ID) { 2652 if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) { 2653 /* 2654 * Set witness in use flag in first place 2655 * to retry registration in the echo task 2656 */ 2657 tcon->use_witness = true; 2658 /* And try to register immediately */ 2659 rc = cifs_swn_register(tcon); 2660 if (rc < 0) { 2661 cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc); 2662 goto out_fail; 2663 } 2664 } else { 2665 /* TODO: try to extend for non-cluster uses (eg multichannel) */ 2666 cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n"); 2667 rc = -EOPNOTSUPP; 2668 goto out_fail; 2669 } 2670 } else { 2671 cifs_dbg(VFS, "SMB3 or later required for witness option\n"); 2672 rc = -EOPNOTSUPP; 2673 goto out_fail; 2674 } 2675 } 2676 2677 /* If the user really knows what they are doing they can override */ 2678 if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) { 2679 if (ctx->cache_ro) 2680 cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n"); 2681 else if (ctx->cache_rw) 2682 cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n"); 2683 } 2684 2685 if (ctx->no_lease) { 2686 if (ses->server->vals->protocol_id == 0) { 2687 cifs_dbg(VFS, 2688 "SMB2 or later required for nolease option\n"); 2689 rc = -EOPNOTSUPP; 2690 goto out_fail; 2691 } else 2692 tcon->no_lease = ctx->no_lease; 2693 } 2694 2695 /* 2696 * We can have only one retry value for a connection to a share so for 2697 * resources mounted more than once to the same server share the last 2698 * value passed in for the retry flag is used. 
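 * (tcon->retry corresponds to the hard/soft mount options, so in effect
 * the hard/soft choice of the most recent mount of the share wins.)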
2699 */ 2700 tcon->retry = ctx->retry; 2701 tcon->nocase = ctx->nocase; 2702 tcon->broken_sparse_sup = ctx->no_sparse; 2703 tcon->max_cached_dirs = ctx->max_cached_dirs; 2704 tcon->nodelete = ctx->nodelete; 2705 tcon->local_lease = ctx->local_lease; 2706 INIT_LIST_HEAD(&tcon->pending_opens); 2707 tcon->status = TID_GOOD; 2708 2709 INIT_DELAYED_WORK(&tcon->query_interfaces, 2710 smb2_query_server_interfaces); 2711 if (ses->server->dialect >= SMB30_PROT_ID && 2712 (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) { 2713 /* schedule query interfaces poll */ 2714 queue_delayed_work(cifsiod_wq, &tcon->query_interfaces, 2715 (SMB_INTERFACE_POLL_INTERVAL * HZ)); 2716 } 2717 #ifdef CONFIG_CIFS_DFS_UPCALL 2718 INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh); 2719 #endif 2720 spin_lock(&cifs_tcp_ses_lock); 2721 list_add(&tcon->tcon_list, &ses->tcon_list); 2722 spin_unlock(&cifs_tcp_ses_lock); 2723 2724 return tcon; 2725 2726 out_fail: 2727 tconInfoFree(tcon); 2728 return ERR_PTR(rc); 2729 } 2730 2731 void 2732 cifs_put_tlink(struct tcon_link *tlink) 2733 { 2734 if (!tlink || IS_ERR(tlink)) 2735 return; 2736 2737 if (!atomic_dec_and_test(&tlink->tl_count) || 2738 test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) { 2739 tlink->tl_time = jiffies; 2740 return; 2741 } 2742 2743 if (!IS_ERR(tlink_tcon(tlink))) 2744 cifs_put_tcon(tlink_tcon(tlink)); 2745 kfree(tlink); 2746 return; 2747 } 2748 2749 static int 2750 compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) 2751 { 2752 struct cifs_sb_info *old = CIFS_SB(sb); 2753 struct cifs_sb_info *new = mnt_data->cifs_sb; 2754 unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK; 2755 unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK; 2756 2757 if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK)) 2758 return 0; 2759 2760 if (old->mnt_cifs_serverino_autodisabled) 2761 newflags &= ~CIFS_MOUNT_SERVER_INUM; 2762 2763 if (oldflags != newflags) 2764 return 0; 2765 2766 /* 2767 * We want to share sb only if we don't specify an r/wsize or 2768 * specified r/wsize is greater than or equal to existing one. 
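 * For example, a superblock mounted with wsize=65536 can be reused by a
 * new mount that asks for wsize=65536 or larger (or does not set wsize
 * at all), but not by one that asks for wsize=32768.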
2769 */ 2770 if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize) 2771 return 0; 2772 2773 if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize) 2774 return 0; 2775 2776 if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) || 2777 !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid)) 2778 return 0; 2779 2780 if (old->ctx->file_mode != new->ctx->file_mode || 2781 old->ctx->dir_mode != new->ctx->dir_mode) 2782 return 0; 2783 2784 if (strcmp(old->local_nls->charset, new->local_nls->charset)) 2785 return 0; 2786 2787 if (old->ctx->acregmax != new->ctx->acregmax) 2788 return 0; 2789 if (old->ctx->acdirmax != new->ctx->acdirmax) 2790 return 0; 2791 if (old->ctx->closetimeo != new->ctx->closetimeo) 2792 return 0; 2793 2794 return 1; 2795 } 2796 2797 static int match_prepath(struct super_block *sb, 2798 struct cifs_tcon *tcon, 2799 struct cifs_mnt_data *mnt_data) 2800 { 2801 struct smb3_fs_context *ctx = mnt_data->ctx; 2802 struct cifs_sb_info *old = CIFS_SB(sb); 2803 struct cifs_sb_info *new = mnt_data->cifs_sb; 2804 bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && 2805 old->prepath; 2806 bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && 2807 new->prepath; 2808 2809 if (tcon->origin_fullpath && 2810 dfs_src_pathname_equal(tcon->origin_fullpath, ctx->source)) 2811 return 1; 2812 2813 if (old_set && new_set && !strcmp(new->prepath, old->prepath)) 2814 return 1; 2815 else if (!old_set && !new_set) 2816 return 1; 2817 2818 return 0; 2819 } 2820 2821 int 2822 cifs_match_super(struct super_block *sb, void *data) 2823 { 2824 struct cifs_mnt_data *mnt_data = data; 2825 struct smb3_fs_context *ctx; 2826 struct cifs_sb_info *cifs_sb; 2827 struct TCP_Server_Info *tcp_srv; 2828 struct cifs_ses *ses; 2829 struct cifs_tcon *tcon; 2830 struct tcon_link *tlink; 2831 int rc = 0; 2832 2833 spin_lock(&cifs_tcp_ses_lock); 2834 cifs_sb = CIFS_SB(sb); 2835 2836 /* We do not want to use a superblock that has been shutdown */ 2837 if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) { 2838 spin_unlock(&cifs_tcp_ses_lock); 2839 return 0; 2840 } 2841 2842 tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); 2843 if (IS_ERR_OR_NULL(tlink)) { 2844 pr_warn_once("%s: skip super matching due to bad tlink(%p)\n", 2845 __func__, tlink); 2846 spin_unlock(&cifs_tcp_ses_lock); 2847 return 0; 2848 } 2849 tcon = tlink_tcon(tlink); 2850 ses = tcon->ses; 2851 tcp_srv = ses->server; 2852 2853 ctx = mnt_data->ctx; 2854 2855 spin_lock(&tcp_srv->srv_lock); 2856 spin_lock(&ses->ses_lock); 2857 spin_lock(&ses->chan_lock); 2858 spin_lock(&tcon->tc_lock); 2859 if (!match_server(tcp_srv, ctx, true) || 2860 !match_session(ses, ctx) || 2861 !match_tcon(tcon, ctx) || 2862 !match_prepath(sb, tcon, mnt_data)) { 2863 rc = 0; 2864 goto out; 2865 } 2866 2867 rc = compare_mount_options(sb, mnt_data); 2868 out: 2869 spin_unlock(&tcon->tc_lock); 2870 spin_unlock(&ses->chan_lock); 2871 spin_unlock(&ses->ses_lock); 2872 spin_unlock(&tcp_srv->srv_lock); 2873 2874 spin_unlock(&cifs_tcp_ses_lock); 2875 cifs_put_tlink(tlink); 2876 return rc; 2877 } 2878 2879 #ifdef CONFIG_DEBUG_LOCK_ALLOC 2880 static struct lock_class_key cifs_key[2]; 2881 static struct lock_class_key cifs_slock_key[2]; 2882 2883 static inline void 2884 cifs_reclassify_socket4(struct socket *sock) 2885 { 2886 struct sock *sk = sock->sk; 2887 BUG_ON(!sock_allow_reclassification(sk)); 2888 sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS", 2889 &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]); 2890 } 2891 2892 static inline void 2893 
cifs_reclassify_socket6(struct socket *sock) 2894 { 2895 struct sock *sk = sock->sk; 2896 BUG_ON(!sock_allow_reclassification(sk)); 2897 sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS", 2898 &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]); 2899 } 2900 #else 2901 static inline void 2902 cifs_reclassify_socket4(struct socket *sock) 2903 { 2904 } 2905 2906 static inline void 2907 cifs_reclassify_socket6(struct socket *sock) 2908 { 2909 } 2910 #endif 2911 2912 /* See RFC1001 section 14 on representation of Netbios names */ 2913 static void rfc1002mangle(char *target, char *source, unsigned int length) 2914 { 2915 unsigned int i, j; 2916 2917 for (i = 0, j = 0; i < (length); i++) { 2918 /* mask a nibble at a time and encode */ 2919 target[j] = 'A' + (0x0F & (source[i] >> 4)); 2920 target[j+1] = 'A' + (0x0F & source[i]); 2921 j += 2; 2922 } 2923 2924 } 2925 2926 static int 2927 bind_socket(struct TCP_Server_Info *server) 2928 { 2929 int rc = 0; 2930 if (server->srcaddr.ss_family != AF_UNSPEC) { 2931 /* Bind to the specified local IP address */ 2932 struct socket *socket = server->ssocket; 2933 rc = kernel_bind(socket, 2934 (struct sockaddr *) &server->srcaddr, 2935 sizeof(server->srcaddr)); 2936 if (rc < 0) { 2937 struct sockaddr_in *saddr4; 2938 struct sockaddr_in6 *saddr6; 2939 saddr4 = (struct sockaddr_in *)&server->srcaddr; 2940 saddr6 = (struct sockaddr_in6 *)&server->srcaddr; 2941 if (saddr6->sin6_family == AF_INET6) 2942 cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n", 2943 &saddr6->sin6_addr, rc); 2944 else 2945 cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n", 2946 &saddr4->sin_addr.s_addr, rc); 2947 } 2948 } 2949 return rc; 2950 } 2951 2952 static int 2953 ip_rfc1001_connect(struct TCP_Server_Info *server) 2954 { 2955 int rc = 0; 2956 /* 2957 * some servers require RFC1001 sessinit before sending 2958 * negprot - BB check reconnection in case where second 2959 * sessinit is sent but no second negprot 2960 */ 2961 struct rfc1002_session_packet req = {}; 2962 struct smb_hdr *smb_buf = (struct smb_hdr *)&req; 2963 unsigned int len; 2964 2965 req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name); 2966 2967 if (server->server_RFC1001_name[0] != 0) 2968 rfc1002mangle(req.trailer.session_req.called_name, 2969 server->server_RFC1001_name, 2970 RFC1001_NAME_LEN_WITH_NULL); 2971 else 2972 rfc1002mangle(req.trailer.session_req.called_name, 2973 DEFAULT_CIFS_CALLED_NAME, 2974 RFC1001_NAME_LEN_WITH_NULL); 2975 2976 req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name); 2977 2978 /* calling name ends in null (byte 16) from old smb convention */ 2979 if (server->workstation_RFC1001_name[0] != 0) 2980 rfc1002mangle(req.trailer.session_req.calling_name, 2981 server->workstation_RFC1001_name, 2982 RFC1001_NAME_LEN_WITH_NULL); 2983 else 2984 rfc1002mangle(req.trailer.session_req.calling_name, 2985 "LINUX_CIFS_CLNT", 2986 RFC1001_NAME_LEN_WITH_NULL); 2987 2988 /* 2989 * As per rfc1002, @len must be the number of bytes that follows the 2990 * length field of a rfc1002 session request payload. 2991 */ 2992 len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req); 2993 2994 smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len); 2995 rc = smb_send(server, smb_buf, len); 2996 /* 2997 * RFC1001 layer in at least one server requires very short break before 2998 * negprot presumably because not expecting negprot to follow so fast. 
2999 * This is a simple solution that works without complicating the code 3000 * and causes no significant slowing down on mount for everyone else 3001 */ 3002 usleep_range(1000, 2000); 3003 3004 return rc; 3005 } 3006 3007 static int 3008 generic_ip_connect(struct TCP_Server_Info *server) 3009 { 3010 struct sockaddr *saddr; 3011 struct socket *socket; 3012 int slen, sfamily; 3013 __be16 sport; 3014 int rc = 0; 3015 3016 saddr = (struct sockaddr *) &server->dstaddr; 3017 3018 if (server->dstaddr.ss_family == AF_INET6) { 3019 struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr; 3020 3021 sport = ipv6->sin6_port; 3022 slen = sizeof(struct sockaddr_in6); 3023 sfamily = AF_INET6; 3024 cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr, 3025 ntohs(sport)); 3026 } else { 3027 struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr; 3028 3029 sport = ipv4->sin_port; 3030 slen = sizeof(struct sockaddr_in); 3031 sfamily = AF_INET; 3032 cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr, 3033 ntohs(sport)); 3034 } 3035 3036 if (server->ssocket) { 3037 socket = server->ssocket; 3038 } else { 3039 rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM, 3040 IPPROTO_TCP, &server->ssocket, 1); 3041 if (rc < 0) { 3042 cifs_server_dbg(VFS, "Error %d creating socket\n", rc); 3043 return rc; 3044 } 3045 3046 /* BB other socket options to set KEEPALIVE, NODELAY? */ 3047 cifs_dbg(FYI, "Socket created\n"); 3048 socket = server->ssocket; 3049 socket->sk->sk_allocation = GFP_NOFS; 3050 socket->sk->sk_use_task_frag = false; 3051 if (sfamily == AF_INET6) 3052 cifs_reclassify_socket6(socket); 3053 else 3054 cifs_reclassify_socket4(socket); 3055 } 3056 3057 rc = bind_socket(server); 3058 if (rc < 0) 3059 return rc; 3060 3061 /* 3062 * Eventually check for other socket options to change from 3063 * the default. sock_setsockopt not used because it expects 3064 * user space buffer 3065 */ 3066 socket->sk->sk_rcvtimeo = 7 * HZ; 3067 socket->sk->sk_sndtimeo = 5 * HZ; 3068 3069 /* make the bufsizes depend on wsize/rsize and max requests */ 3070 if (server->noautotune) { 3071 if (socket->sk->sk_sndbuf < (200 * 1024)) 3072 socket->sk->sk_sndbuf = 200 * 1024; 3073 if (socket->sk->sk_rcvbuf < (140 * 1024)) 3074 socket->sk->sk_rcvbuf = 140 * 1024; 3075 } 3076 3077 if (server->tcp_nodelay) 3078 tcp_sock_set_nodelay(socket->sk); 3079 3080 cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n", 3081 socket->sk->sk_sndbuf, 3082 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo); 3083 3084 rc = kernel_connect(socket, saddr, slen, 3085 server->noblockcnt ? O_NONBLOCK : 0); 3086 /* 3087 * When mounting SMB root file systems, we do not want to block in 3088 * connect. Otherwise bail out and then let cifs_reconnect() perform 3089 * reconnect failover - if possible. 
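 * A non-blocking connect that returns -EINPROGRESS is therefore treated
 * as success below; the handshake completes asynchronously and later
 * failures are handled by the reconnect path.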
3090 */ 3091 if (server->noblockcnt && rc == -EINPROGRESS) 3092 rc = 0; 3093 if (rc < 0) { 3094 cifs_dbg(FYI, "Error %d connecting to server\n", rc); 3095 trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc); 3096 sock_release(socket); 3097 server->ssocket = NULL; 3098 return rc; 3099 } 3100 trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr); 3101 if (sport == htons(RFC1001_PORT)) 3102 rc = ip_rfc1001_connect(server); 3103 3104 return rc; 3105 } 3106 3107 static int 3108 ip_connect(struct TCP_Server_Info *server) 3109 { 3110 __be16 *sport; 3111 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; 3112 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; 3113 3114 if (server->dstaddr.ss_family == AF_INET6) 3115 sport = &addr6->sin6_port; 3116 else 3117 sport = &addr->sin_port; 3118 3119 if (*sport == 0) { 3120 int rc; 3121 3122 /* try with 445 port at first */ 3123 *sport = htons(CIFS_PORT); 3124 3125 rc = generic_ip_connect(server); 3126 if (rc >= 0) 3127 return rc; 3128 3129 /* if it failed, try with 139 port */ 3130 *sport = htons(RFC1001_PORT); 3131 } 3132 3133 return generic_ip_connect(server); 3134 } 3135 3136 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 3137 void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, 3138 struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) 3139 { 3140 /* 3141 * If we are reconnecting then should we check to see if 3142 * any requested capabilities changed locally e.g. via 3143 * remount but we can not do much about it here 3144 * if they have (even if we could detect it by the following) 3145 * Perhaps we could add a backpointer to array of sb from tcon 3146 * or if we change to make all sb to same share the same 3147 * sb as NFS - then we only have one backpointer to sb. 3148 * What if we wanted to mount the server share twice once with 3149 * and once without posixacls or posix paths? 
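 *
 * In short: re-query FS_UNIX_INFO, mask the advertised capabilities
 * against the mount options (and, on reconnect, against the originally
 * negotiated set), then push the resulting mask back to the server with
 * SetFSUnixInfo.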
3150 */ 3151 __u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); 3152 3153 if (ctx && ctx->no_linux_ext) { 3154 tcon->fsUnixInfo.Capability = 0; 3155 tcon->unix_ext = 0; /* Unix Extensions disabled */ 3156 cifs_dbg(FYI, "Linux protocol extensions disabled\n"); 3157 return; 3158 } else if (ctx) 3159 tcon->unix_ext = 1; /* Unix Extensions supported */ 3160 3161 if (!tcon->unix_ext) { 3162 cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n"); 3163 return; 3164 } 3165 3166 if (!CIFSSMBQFSUnixInfo(xid, tcon)) { 3167 __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability); 3168 cifs_dbg(FYI, "unix caps which server supports %lld\n", cap); 3169 /* 3170 * check for reconnect case in which we do not 3171 * want to change the mount behavior if we can avoid it 3172 */ 3173 if (ctx == NULL) { 3174 /* 3175 * turn off POSIX ACL and PATHNAMES if not set 3176 * originally at mount time 3177 */ 3178 if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0) 3179 cap &= ~CIFS_UNIX_POSIX_ACL_CAP; 3180 if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { 3181 if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) 3182 cifs_dbg(VFS, "POSIXPATH support change\n"); 3183 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; 3184 } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { 3185 cifs_dbg(VFS, "possible reconnect error\n"); 3186 cifs_dbg(VFS, "server disabled POSIX path support\n"); 3187 } 3188 } 3189 3190 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) 3191 cifs_dbg(VFS, "per-share encryption not supported yet\n"); 3192 3193 cap &= CIFS_UNIX_CAP_MASK; 3194 if (ctx && ctx->no_psx_acl) 3195 cap &= ~CIFS_UNIX_POSIX_ACL_CAP; 3196 else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { 3197 cifs_dbg(FYI, "negotiated posix acl support\n"); 3198 if (cifs_sb) 3199 cifs_sb->mnt_cifs_flags |= 3200 CIFS_MOUNT_POSIXACL; 3201 } 3202 3203 if (ctx && ctx->posix_paths == 0) 3204 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; 3205 else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { 3206 cifs_dbg(FYI, "negotiate posix pathnames\n"); 3207 if (cifs_sb) 3208 cifs_sb->mnt_cifs_flags |= 3209 CIFS_MOUNT_POSIX_PATHS; 3210 } 3211 3212 cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap); 3213 #ifdef CONFIG_CIFS_DEBUG2 3214 if (cap & CIFS_UNIX_FCNTL_CAP) 3215 cifs_dbg(FYI, "FCNTL cap\n"); 3216 if (cap & CIFS_UNIX_EXTATTR_CAP) 3217 cifs_dbg(FYI, "EXTATTR cap\n"); 3218 if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) 3219 cifs_dbg(FYI, "POSIX path cap\n"); 3220 if (cap & CIFS_UNIX_XATTR_CAP) 3221 cifs_dbg(FYI, "XATTR cap\n"); 3222 if (cap & CIFS_UNIX_POSIX_ACL_CAP) 3223 cifs_dbg(FYI, "POSIX ACL cap\n"); 3224 if (cap & CIFS_UNIX_LARGE_READ_CAP) 3225 cifs_dbg(FYI, "very large read cap\n"); 3226 if (cap & CIFS_UNIX_LARGE_WRITE_CAP) 3227 cifs_dbg(FYI, "very large write cap\n"); 3228 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP) 3229 cifs_dbg(FYI, "transport encryption cap\n"); 3230 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) 3231 cifs_dbg(FYI, "mandatory transport encryption cap\n"); 3232 #endif /* CIFS_DEBUG2 */ 3233 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { 3234 if (ctx == NULL) 3235 cifs_dbg(FYI, "resetting capabilities failed\n"); 3236 else 3237 cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. 
Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n"); 3238 3239 } 3240 } 3241 } 3242 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 3243 3244 int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb) 3245 { 3246 struct smb3_fs_context *ctx = cifs_sb->ctx; 3247 3248 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); 3249 3250 spin_lock_init(&cifs_sb->tlink_tree_lock); 3251 cifs_sb->tlink_tree = RB_ROOT; 3252 3253 cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n", 3254 ctx->file_mode, ctx->dir_mode); 3255 3256 /* this is needed for ASCII cp to Unicode converts */ 3257 if (ctx->iocharset == NULL) { 3258 /* load_nls_default cannot return null */ 3259 cifs_sb->local_nls = load_nls_default(); 3260 } else { 3261 cifs_sb->local_nls = load_nls(ctx->iocharset); 3262 if (cifs_sb->local_nls == NULL) { 3263 cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n", 3264 ctx->iocharset); 3265 return -ELIBACC; 3266 } 3267 } 3268 ctx->local_nls = cifs_sb->local_nls; 3269 3270 smb3_update_mnt_flags(cifs_sb); 3271 3272 if (ctx->direct_io) 3273 cifs_dbg(FYI, "mounting share using direct i/o\n"); 3274 if (ctx->cache_ro) { 3275 cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n"); 3276 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE; 3277 } else if (ctx->cache_rw) { 3278 cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n"); 3279 cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE | 3280 CIFS_MOUNT_RW_CACHE); 3281 } 3282 3283 if ((ctx->cifs_acl) && (ctx->dynperm)) 3284 cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n"); 3285 3286 if (ctx->prepath) { 3287 cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL); 3288 if (cifs_sb->prepath == NULL) 3289 return -ENOMEM; 3290 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 3291 } 3292 3293 return 0; 3294 } 3295 3296 /* Release all succeed connections */ 3297 void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx) 3298 { 3299 int rc = 0; 3300 3301 if (mnt_ctx->tcon) 3302 cifs_put_tcon(mnt_ctx->tcon); 3303 else if (mnt_ctx->ses) 3304 cifs_put_smb_ses(mnt_ctx->ses); 3305 else if (mnt_ctx->server) 3306 cifs_put_tcp_session(mnt_ctx->server, 0); 3307 mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS; 3308 free_xid(mnt_ctx->xid); 3309 } 3310 3311 int cifs_mount_get_session(struct cifs_mount_ctx *mnt_ctx) 3312 { 3313 struct TCP_Server_Info *server = NULL; 3314 struct smb3_fs_context *ctx; 3315 struct cifs_ses *ses = NULL; 3316 unsigned int xid; 3317 int rc = 0; 3318 3319 xid = get_xid(); 3320 3321 if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->fs_ctx)) { 3322 rc = -EINVAL; 3323 goto out; 3324 } 3325 ctx = mnt_ctx->fs_ctx; 3326 3327 /* get a reference to a tcp session */ 3328 server = cifs_get_tcp_session(ctx, NULL); 3329 if (IS_ERR(server)) { 3330 rc = PTR_ERR(server); 3331 server = NULL; 3332 goto out; 3333 } 3334 3335 /* get a reference to a SMB session */ 3336 ses = cifs_get_smb_ses(server, ctx); 3337 if (IS_ERR(ses)) { 3338 rc = PTR_ERR(ses); 3339 ses = NULL; 3340 goto out; 3341 } 3342 3343 if ((ctx->persistent == true) && (!(ses->server->capabilities & 3344 SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) { 3345 cifs_server_dbg(VFS, "persistent handles not supported by server\n"); 3346 rc = -EOPNOTSUPP; 3347 } 3348 3349 out: 3350 mnt_ctx->xid = xid; 3351 mnt_ctx->server = server; 3352 mnt_ctx->ses = ses; 3353 mnt_ctx->tcon = NULL; 3354 3355 
return rc; 3356 } 3357 3358 int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx) 3359 { 3360 struct TCP_Server_Info *server; 3361 struct cifs_sb_info *cifs_sb; 3362 struct smb3_fs_context *ctx; 3363 struct cifs_tcon *tcon = NULL; 3364 int rc = 0; 3365 3366 if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->server || !mnt_ctx->ses || !mnt_ctx->fs_ctx || 3367 !mnt_ctx->cifs_sb)) { 3368 rc = -EINVAL; 3369 goto out; 3370 } 3371 server = mnt_ctx->server; 3372 ctx = mnt_ctx->fs_ctx; 3373 cifs_sb = mnt_ctx->cifs_sb; 3374 3375 /* search for existing tcon to this server share */ 3376 tcon = cifs_get_tcon(mnt_ctx->ses, ctx); 3377 if (IS_ERR(tcon)) { 3378 rc = PTR_ERR(tcon); 3379 tcon = NULL; 3380 goto out; 3381 } 3382 3383 /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */ 3384 if (tcon->posix_extensions) 3385 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; 3386 3387 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 3388 /* tell server which Unix caps we support */ 3389 if (cap_unix(tcon->ses)) { 3390 /* 3391 * reset of caps checks mount to see if unix extensions disabled 3392 * for just this mount. 3393 */ 3394 reset_cifs_unix_caps(mnt_ctx->xid, tcon, cifs_sb, ctx); 3395 spin_lock(&tcon->ses->server->srv_lock); 3396 if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && 3397 (le64_to_cpu(tcon->fsUnixInfo.Capability) & 3398 CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { 3399 spin_unlock(&tcon->ses->server->srv_lock); 3400 rc = -EACCES; 3401 goto out; 3402 } 3403 spin_unlock(&tcon->ses->server->srv_lock); 3404 } else 3405 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 3406 tcon->unix_ext = 0; /* server does not support them */ 3407 3408 /* do not care if a following call succeed - informational */ 3409 if (!tcon->pipe && server->ops->qfs_tcon) { 3410 server->ops->qfs_tcon(mnt_ctx->xid, tcon, cifs_sb); 3411 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) { 3412 if (tcon->fsDevInfo.DeviceCharacteristics & 3413 cpu_to_le32(FILE_READ_ONLY_DEVICE)) 3414 cifs_dbg(VFS, "mounted to read only share\n"); 3415 else if ((cifs_sb->mnt_cifs_flags & 3416 CIFS_MOUNT_RW_CACHE) == 0) 3417 cifs_dbg(VFS, "read only mount of RW share\n"); 3418 /* no need to log a RW mount of a typical RW share */ 3419 } 3420 } 3421 3422 /* 3423 * Clamp the rsize/wsize mount arguments if they are too big for the server 3424 * and set the rsize/wsize to the negotiated values if not passed in by 3425 * the user on mount 3426 */ 3427 if ((cifs_sb->ctx->wsize == 0) || 3428 (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx))) 3429 cifs_sb->ctx->wsize = server->ops->negotiate_wsize(tcon, ctx); 3430 if ((cifs_sb->ctx->rsize == 0) || 3431 (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx))) 3432 cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx); 3433 3434 /* 3435 * The cookie is initialized from volume info returned above. 3436 * Inside cifs_fscache_get_super_cookie it checks 3437 * that we do not get super cookie twice. 
3438 */ 3439 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) 3440 cifs_fscache_get_super_cookie(tcon); 3441 3442 out: 3443 mnt_ctx->tcon = tcon; 3444 return rc; 3445 } 3446 3447 static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, 3448 struct cifs_tcon *tcon) 3449 { 3450 struct tcon_link *tlink; 3451 3452 /* hang the tcon off of the superblock */ 3453 tlink = kzalloc(sizeof(*tlink), GFP_KERNEL); 3454 if (tlink == NULL) 3455 return -ENOMEM; 3456 3457 tlink->tl_uid = ses->linux_uid; 3458 tlink->tl_tcon = tcon; 3459 tlink->tl_time = jiffies; 3460 set_bit(TCON_LINK_MASTER, &tlink->tl_flags); 3461 set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); 3462 3463 cifs_sb->master_tlink = tlink; 3464 spin_lock(&cifs_sb->tlink_tree_lock); 3465 tlink_rb_insert(&cifs_sb->tlink_tree, tlink); 3466 spin_unlock(&cifs_sb->tlink_tree_lock); 3467 3468 queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks, 3469 TLINK_IDLE_EXPIRE); 3470 return 0; 3471 } 3472 3473 static int 3474 cifs_are_all_path_components_accessible(struct TCP_Server_Info *server, 3475 unsigned int xid, 3476 struct cifs_tcon *tcon, 3477 struct cifs_sb_info *cifs_sb, 3478 char *full_path, 3479 int added_treename) 3480 { 3481 int rc; 3482 char *s; 3483 char sep, tmp; 3484 int skip = added_treename ? 1 : 0; 3485 3486 sep = CIFS_DIR_SEP(cifs_sb); 3487 s = full_path; 3488 3489 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, ""); 3490 while (rc == 0) { 3491 /* skip separators */ 3492 while (*s == sep) 3493 s++; 3494 if (!*s) 3495 break; 3496 /* next separator */ 3497 while (*s && *s != sep) 3498 s++; 3499 /* 3500 * if the treename is added, we then have to skip the first 3501 * part within the separators 3502 */ 3503 if (skip) { 3504 skip = 0; 3505 continue; 3506 } 3507 /* 3508 * temporarily null-terminate the path at the end of 3509 * the current component 3510 */ 3511 tmp = *s; 3512 *s = 0; 3513 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, 3514 full_path); 3515 *s = tmp; 3516 } 3517 return rc; 3518 } 3519 3520 /* 3521 * Check if path is remote (i.e. a DFS share). 3522 * 3523 * Return -EREMOTE if it is, otherwise 0 or -errno. 
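 *
 * If the full path is reachable but an intermediate component cannot be
 * queried (e.g. a hypothetical mount of //server/share/a/b where "a" is
 * not accessible), the code below falls back to
 * CIFS_MOUNT_USE_PREFIX_PATH rather than failing the mount.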
3524 */ 3525 int cifs_is_path_remote(struct cifs_mount_ctx *mnt_ctx) 3526 { 3527 int rc; 3528 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; 3529 struct TCP_Server_Info *server = mnt_ctx->server; 3530 unsigned int xid = mnt_ctx->xid; 3531 struct cifs_tcon *tcon = mnt_ctx->tcon; 3532 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; 3533 char *full_path; 3534 3535 if (!server->ops->is_path_accessible) 3536 return -EOPNOTSUPP; 3537 3538 /* 3539 * cifs_build_path_to_root works only when we have a valid tcon 3540 */ 3541 full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon, 3542 tcon->Flags & SMB_SHARE_IS_IN_DFS); 3543 if (full_path == NULL) 3544 return -ENOMEM; 3545 3546 cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path); 3547 3548 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, 3549 full_path); 3550 if (rc != 0 && rc != -EREMOTE) 3551 goto out; 3552 3553 if (rc != -EREMOTE) { 3554 rc = cifs_are_all_path_components_accessible(server, xid, tcon, 3555 cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS); 3556 if (rc != 0) { 3557 cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); 3558 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 3559 rc = 0; 3560 } 3561 } 3562 3563 out: 3564 kfree(full_path); 3565 return rc; 3566 } 3567 3568 #ifdef CONFIG_CIFS_DFS_UPCALL 3569 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) 3570 { 3571 struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, }; 3572 bool isdfs; 3573 int rc; 3574 3575 INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list); 3576 3577 rc = dfs_mount_share(&mnt_ctx, &isdfs); 3578 if (rc) 3579 goto error; 3580 if (!isdfs) 3581 goto out; 3582 3583 /* 3584 * After reconnecting to a different server, unique ids won't match anymore, so we disable 3585 * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE). 3586 */ 3587 cifs_autodisable_serverino(cifs_sb); 3588 /* 3589 * Force the use of prefix path to support failover on DFS paths that resolve to targets 3590 * that have different prefix paths. 3591 */ 3592 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 3593 kfree(cifs_sb->prepath); 3594 cifs_sb->prepath = ctx->prepath; 3595 ctx->prepath = NULL; 3596 3597 out: 3598 cifs_try_adding_channels(mnt_ctx.ses); 3599 rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon); 3600 if (rc) 3601 goto error; 3602 3603 free_xid(mnt_ctx.xid); 3604 return rc; 3605 3606 error: 3607 dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list); 3608 cifs_mount_put_conns(&mnt_ctx); 3609 return rc; 3610 } 3611 #else 3612 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) 3613 { 3614 int rc = 0; 3615 struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, }; 3616 3617 rc = cifs_mount_get_session(&mnt_ctx); 3618 if (rc) 3619 goto error; 3620 3621 rc = cifs_mount_get_tcon(&mnt_ctx); 3622 if (rc) 3623 goto error; 3624 3625 rc = cifs_is_path_remote(&mnt_ctx); 3626 if (rc == -EREMOTE) 3627 rc = -EOPNOTSUPP; 3628 if (rc) 3629 goto error; 3630 3631 rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon); 3632 if (rc) 3633 goto error; 3634 3635 free_xid(mnt_ctx.xid); 3636 return rc; 3637 3638 error: 3639 cifs_mount_put_conns(&mnt_ctx); 3640 return rc; 3641 } 3642 #endif 3643 3644 /* 3645 * Issue a TREE_CONNECT request. 
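 *
 * This is the legacy (SMB1) tree connect: build an
 * SMB_COM_TREE_CONNECT_ANDX request for @tree (a UNC such as
 * \\server\share), send it, and on success record the returned tid,
 * service type and native filesystem name in the tcon.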
3646 */ 3647 int 3648 CIFSTCon(const unsigned int xid, struct cifs_ses *ses, 3649 const char *tree, struct cifs_tcon *tcon, 3650 const struct nls_table *nls_codepage) 3651 { 3652 struct smb_hdr *smb_buffer; 3653 struct smb_hdr *smb_buffer_response; 3654 TCONX_REQ *pSMB; 3655 TCONX_RSP *pSMBr; 3656 unsigned char *bcc_ptr; 3657 int rc = 0; 3658 int length; 3659 __u16 bytes_left, count; 3660 3661 if (ses == NULL) 3662 return -EIO; 3663 3664 smb_buffer = cifs_buf_get(); 3665 if (smb_buffer == NULL) 3666 return -ENOMEM; 3667 3668 smb_buffer_response = smb_buffer; 3669 3670 header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, 3671 NULL /*no tid */ , 4 /*wct */ ); 3672 3673 smb_buffer->Mid = get_next_mid(ses->server); 3674 smb_buffer->Uid = ses->Suid; 3675 pSMB = (TCONX_REQ *) smb_buffer; 3676 pSMBr = (TCONX_RSP *) smb_buffer_response; 3677 3678 pSMB->AndXCommand = 0xFF; 3679 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); 3680 bcc_ptr = &pSMB->Password[0]; 3681 3682 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ 3683 *bcc_ptr = 0; /* password is null byte */ 3684 bcc_ptr++; /* skip password */ 3685 /* already aligned so no need to do it below */ 3686 3687 if (ses->server->sign) 3688 smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 3689 3690 if (ses->capabilities & CAP_STATUS32) { 3691 smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS; 3692 } 3693 if (ses->capabilities & CAP_DFS) { 3694 smb_buffer->Flags2 |= SMBFLG2_DFS; 3695 } 3696 if (ses->capabilities & CAP_UNICODE) { 3697 smb_buffer->Flags2 |= SMBFLG2_UNICODE; 3698 length = 3699 cifs_strtoUTF16((__le16 *) bcc_ptr, tree, 3700 6 /* max utf8 char length in bytes */ * 3701 (/* server len*/ + 256 /* share len */), nls_codepage); 3702 bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */ 3703 bcc_ptr += 2; /* skip trailing null */ 3704 } else { /* ASCII */ 3705 strcpy(bcc_ptr, tree); 3706 bcc_ptr += strlen(tree) + 1; 3707 } 3708 strcpy(bcc_ptr, "?????"); 3709 bcc_ptr += strlen("?????"); 3710 bcc_ptr += 1; 3711 count = bcc_ptr - &pSMB->Password[0]; 3712 be32_add_cpu(&pSMB->hdr.smb_buf_length, count); 3713 pSMB->ByteCount = cpu_to_le16(count); 3714 3715 rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, 3716 0); 3717 3718 /* above now done in SendReceive */ 3719 if (rc == 0) { 3720 bool is_unicode; 3721 3722 tcon->tid = smb_buffer_response->Tid; 3723 bcc_ptr = pByteArea(smb_buffer_response); 3724 bytes_left = get_bcc(smb_buffer_response); 3725 length = strnlen(bcc_ptr, bytes_left - 2); 3726 if (smb_buffer->Flags2 & SMBFLG2_UNICODE) 3727 is_unicode = true; 3728 else 3729 is_unicode = false; 3730 3731 3732 /* skip service field (NB: this field is always ASCII) */ 3733 if (length == 3) { 3734 if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') && 3735 (bcc_ptr[2] == 'C')) { 3736 cifs_dbg(FYI, "IPC connection\n"); 3737 tcon->ipc = true; 3738 tcon->pipe = true; 3739 } 3740 } else if (length == 2) { 3741 if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) { 3742 /* the most common case */ 3743 cifs_dbg(FYI, "disk share connection\n"); 3744 } 3745 } 3746 bcc_ptr += length + 1; 3747 bytes_left -= (length + 1); 3748 strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name)); 3749 3750 /* mostly informational -- no need to fail on error here */ 3751 kfree(tcon->nativeFileSystem); 3752 tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr, 3753 bytes_left, is_unicode, 3754 nls_codepage); 3755 3756 cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem); 3757 3758 if ((smb_buffer_response->WordCount == 3) || 3759 
(smb_buffer_response->WordCount == 7)) 3760 /* field is in same location */ 3761 tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); 3762 else 3763 tcon->Flags = 0; 3764 cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags); 3765 } 3766 3767 cifs_buf_release(smb_buffer); 3768 return rc; 3769 } 3770 3771 static void delayed_free(struct rcu_head *p) 3772 { 3773 struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu); 3774 3775 unload_nls(cifs_sb->local_nls); 3776 smb3_cleanup_fs_context(cifs_sb->ctx); 3777 kfree(cifs_sb); 3778 } 3779 3780 void 3781 cifs_umount(struct cifs_sb_info *cifs_sb) 3782 { 3783 struct rb_root *root = &cifs_sb->tlink_tree; 3784 struct rb_node *node; 3785 struct tcon_link *tlink; 3786 3787 cancel_delayed_work_sync(&cifs_sb->prune_tlinks); 3788 3789 spin_lock(&cifs_sb->tlink_tree_lock); 3790 while ((node = rb_first(root))) { 3791 tlink = rb_entry(node, struct tcon_link, tl_rbnode); 3792 cifs_get_tlink(tlink); 3793 clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); 3794 rb_erase(node, root); 3795 3796 spin_unlock(&cifs_sb->tlink_tree_lock); 3797 cifs_put_tlink(tlink); 3798 spin_lock(&cifs_sb->tlink_tree_lock); 3799 } 3800 spin_unlock(&cifs_sb->tlink_tree_lock); 3801 3802 kfree(cifs_sb->prepath); 3803 call_rcu(&cifs_sb->rcu, delayed_free); 3804 } 3805 3806 int 3807 cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, 3808 struct TCP_Server_Info *server) 3809 { 3810 int rc = 0; 3811 3812 if (!server->ops->need_neg || !server->ops->negotiate) 3813 return -ENOSYS; 3814 3815 /* only send once per connect */ 3816 spin_lock(&server->srv_lock); 3817 if (server->tcpStatus != CifsGood && 3818 server->tcpStatus != CifsNew && 3819 server->tcpStatus != CifsNeedNegotiate) { 3820 spin_unlock(&server->srv_lock); 3821 return -EHOSTDOWN; 3822 } 3823 3824 if (!server->ops->need_neg(server) && 3825 server->tcpStatus == CifsGood) { 3826 spin_unlock(&server->srv_lock); 3827 return 0; 3828 } 3829 3830 server->tcpStatus = CifsInNegotiate; 3831 spin_unlock(&server->srv_lock); 3832 3833 rc = server->ops->negotiate(xid, ses, server); 3834 if (rc == 0) { 3835 spin_lock(&server->srv_lock); 3836 if (server->tcpStatus == CifsInNegotiate) 3837 server->tcpStatus = CifsGood; 3838 else 3839 rc = -EHOSTDOWN; 3840 spin_unlock(&server->srv_lock); 3841 } else { 3842 spin_lock(&server->srv_lock); 3843 if (server->tcpStatus == CifsInNegotiate) 3844 server->tcpStatus = CifsNeedNegotiate; 3845 spin_unlock(&server->srv_lock); 3846 } 3847 3848 return rc; 3849 } 3850 3851 int 3852 cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, 3853 struct TCP_Server_Info *server, 3854 struct nls_table *nls_info) 3855 { 3856 int rc = -ENOSYS; 3857 struct TCP_Server_Info *pserver = SERVER_IS_CHAN(server) ? 
int
cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   struct nls_table *nls_info)
{
	int rc = -ENOSYS;
	struct TCP_Server_Info *pserver = SERVER_IS_CHAN(server) ?
		server->primary_server : server;
	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
	struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
	bool is_binding = false;

	spin_lock(&ses->ses_lock);
	cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
		 __func__, ses->chans_need_reconnect);

	if (ses->ses_status != SES_GOOD &&
	    ses->ses_status != SES_NEW &&
	    ses->ses_status != SES_NEED_RECON) {
		spin_unlock(&ses->ses_lock);
		return -EHOSTDOWN;
	}

	/* only send once per connect */
	spin_lock(&ses->chan_lock);
	if (CIFS_ALL_CHANS_GOOD(ses)) {
		if (ses->ses_status == SES_NEED_RECON)
			ses->ses_status = SES_GOOD;
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
		return 0;
	}

	cifs_chan_set_in_reconnect(ses, server);
	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
	spin_unlock(&ses->chan_lock);

	if (!is_binding) {
		ses->ses_status = SES_IN_SETUP;

		/* force iface_list refresh */
		ses->iface_last_update = 0;
	}
	spin_unlock(&ses->ses_lock);

	/* update ses ip_addr only for primary chan */
	if (server == pserver) {
		if (server->dstaddr.ss_family == AF_INET6)
			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
		else
			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
	}

	if (!is_binding) {
		ses->capabilities = server->capabilities;
		if (!linuxExtEnabled)
			ses->capabilities &= (~server->vals->cap_unix);

		if (ses->auth_key.response) {
			cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
				 ses->auth_key.response);
			kfree_sensitive(ses->auth_key.response);
			ses->auth_key.response = NULL;
			ses->auth_key.len = 0;
		}
	}

	cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
		 server->sec_mode, server->capabilities, server->timeAdj);

	if (server->ops->sess_setup)
		rc = server->ops->sess_setup(xid, ses, server, nls_info);

	if (rc) {
		cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
		spin_lock(&ses->ses_lock);
		if (ses->ses_status == SES_IN_SETUP)
			ses->ses_status = SES_NEED_RECON;
		spin_lock(&ses->chan_lock);
		cifs_chan_clear_in_reconnect(ses, server);
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
	} else {
		spin_lock(&ses->ses_lock);
		if (ses->ses_status == SES_IN_SETUP)
			ses->ses_status = SES_GOOD;
		spin_lock(&ses->chan_lock);
		cifs_chan_clear_in_reconnect(ses, server);
		cifs_chan_clear_need_reconnect(ses, server);
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
	}

	return rc;
}

static int
cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
{
	ctx->sectype = ses->sectype;

	/* krb5 is special, since we don't need username or pw */
	if (ctx->sectype == Kerberos)
		return 0;

	return cifs_set_cifscreds(ctx, ses);
}
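
/*
 * Construct a tcon for the given fsuid on a multiuser mount: clone the
 * relevant settings from the master tcon into a temporary fs_context,
 * pick up that user's credentials, and establish a session and tree
 * connection with them.
 */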
static struct cifs_tcon *
cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
	int rc;
	struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
	struct cifs_ses *ses;
	struct cifs_tcon *tcon = NULL;
	struct smb3_fs_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ctx->local_nls = cifs_sb->local_nls;
	ctx->linux_uid = fsuid;
	ctx->cred_uid = fsuid;
	ctx->UNC = master_tcon->tree_name;
	ctx->retry = master_tcon->retry;
	ctx->nocase = master_tcon->nocase;
	ctx->nohandlecache = master_tcon->nohandlecache;
	ctx->local_lease = master_tcon->local_lease;
	ctx->no_lease = master_tcon->no_lease;
	ctx->resilient = master_tcon->use_resilient;
	ctx->persistent = master_tcon->use_persistent;
	ctx->handle_timeout = master_tcon->handle_timeout;
	ctx->no_linux_ext = !master_tcon->unix_ext;
	ctx->linux_ext = master_tcon->posix_extensions;
	ctx->sectype = master_tcon->ses->sectype;
	ctx->sign = master_tcon->ses->sign;
	ctx->seal = master_tcon->seal;
	ctx->witness = master_tcon->use_witness;

	rc = cifs_set_vol_auth(ctx, master_tcon->ses);
	if (rc) {
		tcon = ERR_PTR(rc);
		goto out;
	}

	/* get a reference for the same TCP session */
	spin_lock(&cifs_tcp_ses_lock);
	++master_tcon->ses->server->srv_count;
	spin_unlock(&cifs_tcp_ses_lock);

	ses = cifs_get_smb_ses(master_tcon->ses->server, ctx);
	if (IS_ERR(ses)) {
		tcon = (struct cifs_tcon *)ses;
		cifs_put_tcp_session(master_tcon->ses->server, 0);
		goto out;
	}

	tcon = cifs_get_tcon(ses, ctx);
	if (IS_ERR(tcon)) {
		cifs_put_smb_ses(ses);
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(ses))
		reset_cifs_unix_caps(0, tcon, NULL, ctx);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

out:
	kfree(ctx->username);
	kfree_sensitive(ctx->password);
	kfree(ctx);

	return tcon;
}

struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
	return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
}
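
/*
 * Multiuser mounts keep a per-superblock rbtree of tcon_link structures,
 * keyed by the owning fsuid (tl_uid) and protected by tlink_tree_lock.
 * The helpers below look up and insert entries in that tree.
 */
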
/* find and return a tlink with given uid */
static struct tcon_link *
tlink_rb_search(struct rb_root *root, kuid_t uid)
{
	struct rb_node *node = root->rb_node;
	struct tcon_link *tlink;

	while (node) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);

		if (uid_gt(tlink->tl_uid, uid))
			node = node->rb_left;
		else if (uid_lt(tlink->tl_uid, uid))
			node = node->rb_right;
		else
			return tlink;
	}
	return NULL;
}

/* insert a tcon_link into the tree */
static void
tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct tcon_link *tlink;

	while (*new) {
		tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
		parent = *new;

		if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_tlink->tl_rbnode, parent, new);
	rb_insert_color(&new_tlink->tl_rbnode, root);
}

/*
 * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
 * current task.
 *
 * If the superblock doesn't refer to a multiuser mount, then just return
 * the master tcon for the mount.
 *
 * First, search the rbtree for an existing tcon for this fsuid. If one
 * exists, then check to see if it's pending construction. If it is then wait
 * for construction to complete. Once it's no longer pending, check to see if
 * it failed and either return an error or retry construction, depending on
 * the timeout.
 *
 * If one doesn't exist then insert a new tcon_link struct into the tree and
 * try to construct a new one.
 */
struct tcon_link *
cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
{
	int ret;
	kuid_t fsuid = current_fsuid();
	struct tcon_link *tlink, *newtlink;

	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));

	spin_lock(&cifs_sb->tlink_tree_lock);
	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
	if (tlink)
		cifs_get_tlink(tlink);
	spin_unlock(&cifs_sb->tlink_tree_lock);

	if (tlink == NULL) {
		newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
		if (newtlink == NULL)
			return ERR_PTR(-ENOMEM);
		newtlink->tl_uid = fsuid;
		newtlink->tl_tcon = ERR_PTR(-EACCES);
		set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
		set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
		cifs_get_tlink(newtlink);

		spin_lock(&cifs_sb->tlink_tree_lock);
		/* was one inserted after previous search? */
		tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
		if (tlink) {
			cifs_get_tlink(tlink);
			spin_unlock(&cifs_sb->tlink_tree_lock);
			kfree(newtlink);
			goto wait_for_construction;
		}
		tlink = newtlink;
		tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
		spin_unlock(&cifs_sb->tlink_tree_lock);
	} else {
wait_for_construction:
		ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
				  TASK_INTERRUPTIBLE);
		if (ret) {
			cifs_put_tlink(tlink);
			return ERR_PTR(-ERESTARTSYS);
		}

		/* if it's good, return it */
		if (!IS_ERR(tlink->tl_tcon))
			return tlink;

		/* return error if we tried this already recently */
		if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
			cifs_put_tlink(tlink);
			return ERR_PTR(-EACCES);
		}

		if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
			goto wait_for_construction;
	}

	tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
	clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
	wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);

	if (IS_ERR(tlink->tl_tcon)) {
		cifs_put_tlink(tlink);
		return ERR_PTR(-EACCES);
	}

	return tlink;
}
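
/*
 * Typical use of cifs_sb_tlink() by code issuing a request on behalf of the
 * current task (illustrative, error handling trimmed):
 *
 *	tlink = cifs_sb_tlink(cifs_sb);
 *	if (IS_ERR(tlink))
 *		return PTR_ERR(tlink);
 *	tcon = tlink_tcon(tlink);
 *	... send the request against tcon ...
 *	cifs_put_tlink(tlink);
 */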

/*
 * periodic workqueue job that scans tcon_tree for a superblock and closes
 * out tcons.
 */
static void
cifs_prune_tlinks(struct work_struct *work)
{
	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
						    prune_tlinks.work);
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct rb_node *tmp;
	struct tcon_link *tlink;

	/*
	 * Because we drop the spinlock in the loop in order to put the tlink
	 * it's not guarded against removal of links from the tree. The only
	 * places that remove entries from the tree are this function and
	 * umounts. Because this function is non-reentrant and is canceled
	 * before umount can proceed, this is safe.
	 */
	spin_lock(&cifs_sb->tlink_tree_lock);
	node = rb_first(root);
	while (node != NULL) {
		tmp = node;
		node = rb_next(tmp);
		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);

		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
		    atomic_read(&tlink->tl_count) != 0 ||
		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
			continue;

		cifs_get_tlink(tlink);
		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
		rb_erase(tmp, root);

		spin_unlock(&cifs_sb->tlink_tree_lock);
		cifs_put_tlink(tlink);
		spin_lock(&cifs_sb->tlink_tree_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
			   TLINK_IDLE_EXPIRE);
}

#ifndef CONFIG_CIFS_DFS_UPCALL
int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
{
	int rc;
	const struct smb_version_operations *ops = tcon->ses->server->ops;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_GOOD) {
		spin_unlock(&tcon->tc_lock);
		return 0;
	}

	if (tcon->status != TID_NEW &&
	    tcon->status != TID_NEED_TCON) {
		spin_unlock(&tcon->tc_lock);
		return -EHOSTDOWN;
	}

	tcon->status = TID_IN_TCON;
	spin_unlock(&tcon->tc_lock);

	rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, nlsc);
	if (rc) {
		spin_lock(&tcon->tc_lock);
		if (tcon->status == TID_IN_TCON)
			tcon->status = TID_NEED_TCON;
		spin_unlock(&tcon->tc_lock);
	} else {
		spin_lock(&tcon->tc_lock);
		if (tcon->status == TID_IN_TCON)
			tcon->status = TID_GOOD;
		tcon->need_reconnect = false;
		spin_unlock(&tcon->tc_lock);
	}

	return rc;
}
#endif
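
/*
 * Note: when CONFIG_CIFS_DFS_UPCALL is enabled, a DFS-aware implementation
 * of cifs_tree_connect() is provided by the DFS code instead of the one
 * above (hence the #ifndef), so that referrals can be handled on reconnect.
 */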