// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
 * Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/module.h>

#include "server.h"
#include "smb_common.h"
#include "mgmt/ksmbd_ida.h"
#include "connection.h"
#include "transport_tcp.h"
#include "transport_rdma.h"
#include "misc.h"

/* Serializes transport subsystem init/destroy (see transport_init/destroy). */
static DEFINE_MUTEX(init_lock);

/* Callbacks installed by the server core via ksmbd_conn_init_server_callbacks(). */
static struct ksmbd_conn_ops default_conn_ops;

/* All live connections, hashed by conn->hlist; guarded by conn_list_lock. */
DEFINE_HASHTABLE(conn_list, CONN_HASH_BITS);
DECLARE_RWSEM(conn_list_lock);

#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_clients;

/*
 * proc_show_clients() - seq_file show callback listing every live connection:
 * peer address, negotiated dialect, credits, open file count, in-flight
 * request count and the wall-clock time of the last activity.
 */
static int proc_show_clients(struct seq_file *m, void *v)
{
	struct ksmbd_conn *conn;
	struct timespec64 now, t;
	int i;

	seq_printf(m, "#%-20s %-10s %-10s %-10s %-10s %-10s\n",
		   "<name>", "<dialect>", "<credits>", "<open files>",
		   "<requests>", "<last active>");

	down_read(&conn_list_lock);
	hash_for_each(conn_list, i, conn, hlist) {
		/*
		 * last_active is in jiffies; convert the elapsed time since
		 * it and subtract from the current wall clock to print an
		 * absolute "last active" timestamp via %ptT.
		 */
		jiffies_to_timespec64(jiffies - conn->last_active, &t);
		ktime_get_real_ts64(&now);
		t = timespec64_sub(now, t);
#if IS_ENABLED(CONFIG_IPV6)
		/* inet_addr == 0 means the peer connected over IPv6. */
		if (!conn->inet_addr)
			seq_printf(m, "%-20pI6c", &conn->inet6_addr);
		else
#endif
			seq_printf(m, "%-20pI4", &conn->inet_addr);
		seq_printf(m, " 0x%-10x %-10u %-12d %-10d %ptT\n",
			   conn->dialect,
			   conn->total_credits,
			   atomic_read(&conn->stats.open_files_count),
			   atomic_read(&conn->req_running),
			   &t);
	}
	up_read(&conn_list_lock);
	return 0;
}

/* Create /proc "clients" entry. Return: 0 on success, -ENOMEM on failure. */
static int create_proc_clients(void)
{
	proc_clients = ksmbd_proc_create("clients",
					 proc_show_clients, NULL);
	if (!proc_clients)
		return -ENOMEM;
	return 0;
}

/* Remove the /proc "clients" entry if it was created. */
static void delete_proc_clients(void)
{
	if (proc_clients) {
		proc_remove(proc_clients);
		proc_clients = NULL;
	}
}
#else
/* No-op stubs when procfs support is compiled out. */
static int create_proc_clients(void) { return 0; }
static void delete_proc_clients(void) {}
#endif

/**
 * ksmbd_conn_free() - free resources of the connection instance
 *
 * @conn: connection instance to be cleaned up
 *
 * During the thread termination, the corresponding conn instance
 * resources(sock/memory) are released and finally the conn object is freed.
 */
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
	down_write(&conn_list_lock);
	hash_del(&conn->hlist);
	up_write(&conn_list_lock);

	xa_destroy(&conn->sessions);
	kvfree(conn->request_buf);
	kfree(conn->preauth_info);
	kfree(conn->mechToken);
	if (atomic_dec_and_test(&conn->refcnt)) {
		/*
		 * async_ida is embedded in struct ksmbd_conn, so pair
		 * ida_destroy() with the final kfree() rather than with
		 * the unconditional field teardown above. This keeps
		 * the IDA valid for the entire lifetime of the struct,
		 * even while other refcount holders (oplock / vfs
		 * durable handles) still reference the connection.
		 */
		ida_destroy(&conn->async_ida);
		conn->transport->ops->free_transport(conn->transport);
		kfree(conn);
	}
}

/**
 * ksmbd_conn_alloc() - initialize a new connection instance
 *
 * Return: ksmbd_conn struct on success, otherwise NULL
 */
struct ksmbd_conn *ksmbd_conn_alloc(void)
{
	struct ksmbd_conn *conn;

	conn = kzalloc_obj(struct ksmbd_conn, KSMBD_DEFAULT_GFP);
	if (!conn)
		return NULL;

	/* A fresh connection must negotiate a dialect first. */
	conn->need_neg = true;
	ksmbd_conn_set_new(conn);
	/* Prefer UTF-8 for on-the-wire name conversion; fall back to default. */
	conn->local_nls = load_nls("utf8");
	if (!conn->local_nls)
		conn->local_nls = load_nls_default();
	if (IS_ENABLED(CONFIG_UNICODE))
		conn->um = utf8_load(UNICODE_AGE(12, 1, 0));
	else
		conn->um = ERR_PTR(-EOPNOTSUPP);
	/* A NULL unicode map is tolerated; consumers must check it. */
	if (IS_ERR(conn->um))
		conn->um = NULL;
	atomic_set(&conn->req_running, 0);
	atomic_set(&conn->r_count, 0);
	/* Initial reference, dropped by ksmbd_conn_free(). */
	atomic_set(&conn->refcnt, 1);
	/* SMB2 credit accounting starts with a single granted credit. */
	conn->total_credits = 1;
	conn->outstanding_credits = 0;

	init_waitqueue_head(&conn->req_running_q);
	init_waitqueue_head(&conn->r_count_q);
	INIT_LIST_HEAD(&conn->requests);
	INIT_LIST_HEAD(&conn->async_requests);
	spin_lock_init(&conn->request_lock);
	spin_lock_init(&conn->credits_lock);
	ida_init(&conn->async_ida);
	xa_init(&conn->sessions);

	spin_lock_init(&conn->llist_lock);
	INIT_LIST_HEAD(&conn->lock_list);

	init_rwsem(&conn->session_lock);

	return conn;
}

/**
 * ksmbd_conn_lookup_dialect() - check for another connection with the
 *		same client GUID
 * @c: connection whose ClientGUID is searched for in conn_list
 *
 * Return: true if any connection in conn_list carries a matching
 * ClientGUID, otherwise false. Note that @c itself is on conn_list, so
 * the match may be @c — callers must account for that.
 */
bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
{
	struct ksmbd_conn *t;
	int bkt;
	bool ret = false;

	down_read(&conn_list_lock);
	hash_for_each(conn_list, bkt, t, hlist) {
		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
			continue;

		ret = true;
		break;
	}
	up_read(&conn_list_lock);
	return ret;
}

/**
 * ksmbd_conn_enqueue_request() - account a new in-flight request
 * @work: work item being started on its connection
 *
 * Always bumps req_running; all requests except SMB2_CANCEL are also
 * linked on conn->requests so a later cancel can find them.
 */
void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	struct list_head *requests_queue = NULL;

	/* CANCEL is counted but never queued — it cannot be cancelled itself. */
	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
		requests_queue = &conn->requests;

	atomic_inc(&conn->req_running);
	if (requests_queue) {
		spin_lock(&conn->request_lock);
		list_add_tail(&work->request_entry, requests_queue);
		spin_unlock(&conn->request_lock);
	}
}

/**
 * ksmbd_conn_try_dequeue_request() - drop in-flight accounting for @work
 * @work: completed (or aborted) work item
 *
 * Decrements req_running and wakes any waiter; if the work was linked on
 * the request/async lists, unlinks it and releases its async state.
 */
void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;

	atomic_dec(&conn->req_running);
	if (waitqueue_active(&conn->req_running_q))
		wake_up(&conn->req_running_q);

	/* Never queued (e.g. SMB2_CANCEL) — nothing further to unlink. */
	if (list_empty(&work->request_entry) &&
	    list_empty(&work->async_request_entry))
		return;

	spin_lock(&conn->request_lock);
	list_del_init(&work->request_entry);
	spin_unlock(&conn->request_lock);
	if (work->asynchronous)
		release_async_work(work);

	wake_up_all(&conn->req_running_q);
}

/* Serialize response transmission on @conn (see ksmbd_conn_write()). */
void ksmbd_conn_lock(struct ksmbd_conn *conn)
{
	mutex_lock(&conn->srv_mutex);
}

void ksmbd_conn_unlock(struct ksmbd_conn *conn)
{
	mutex_unlock(&conn->srv_mutex);
}

/**
 * ksmbd_all_conn_set_status() - set @status on every connection bound to
 *		a session
 * @sess_id: session id to match in each connection's session xarray
 * @status: new connection status value
 *
 * Connections currently in multichannel binding are updated as well.
 */
void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
{
	struct ksmbd_conn *conn;
	int bkt;

	down_read(&conn_list_lock);
	hash_for_each(conn_list, bkt, conn, hlist) {
		if (conn->binding || xa_load(&conn->sessions, sess_id))
			WRITE_ONCE(conn->status, status);
	}
	up_read(&conn_list_lock);
}

/*
 * Block until req_running drops below 2, i.e. presumably until only the
 * caller's own request remains in flight — NOTE(review): threshold of 2
 * assumes the caller holds one req_running slot; confirm against callers.
 */
void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
{
	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
}

/**
 * ksmbd_conn_wait_idle_sess_id() - wait until all connections bound to
 *		@sess_id are idle
 * @curr_conn: the connection issuing the wait (allowed one running request)
 * @sess_id: session id whose bound connections must drain
 *
 * Retries in 1s (HZ) slices; gives up after @max_timeout (120) failed
 * waits. Each timeout restarts the scan from the top because the lock is
 * dropped while sleeping.
 *
 * Return: 0 when idle, -EIO if the 120-retry budget is exhausted.
 */
int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id)
{
	struct ksmbd_conn *conn;
	int rc, retry_count = 0, max_timeout = 120;
	int rcount, bkt;

retry_idle:
	if (retry_count >= max_timeout)
		return -EIO;

	down_read(&conn_list_lock);
	hash_for_each(conn_list, bkt, conn, hlist) {
		if (conn->binding || xa_load(&conn->sessions, sess_id)) {
			/* The current connection keeps its own request running. */
			rcount = (conn == curr_conn) ? 2 : 1;
			if (atomic_read(&conn->req_running) >= rcount) {
				rc = wait_event_timeout(conn->req_running_q,
					atomic_read(&conn->req_running) < rcount,
					HZ);
				if (!rc) {
					up_read(&conn_list_lock);
					retry_count++;
					goto retry_idle;
				}
			}
		}
	}
	up_read(&conn_list_lock);

	return 0;
}

/**
 * ksmbd_conn_write() - transmit the assembled response for @work
 * @work: work item whose iov[] holds the response
 *
 * Writes are serialized per connection via srv_mutex. The total length
 * sent is the RFC1002 payload length plus the 4-byte length field.
 *
 * Return: 0 on success (including the deliberate no-response case),
 * -EINVAL on a missing/empty response, or the transport's negative
 * error code.
 */
int ksmbd_conn_write(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	int sent;

	if (!work->response_buf) {
		pr_err("NULL response header\n");
		return -EINVAL;
	}

	if (work->send_no_response)
		return 0;

	if (!work->iov_idx)
		return -EINVAL;

	ksmbd_conn_lock(conn);
	sent = conn->transport->ops->writev(conn->transport, work->iov,
			work->iov_cnt,
			get_rfc1002_len(work->iov[0].iov_base) + 4,
			work->need_invalidate_rkey,
			work->remote_key);
	ksmbd_conn_unlock(conn);

	if (sent < 0) {
		pr_err("Failed to send message: %d\n", sent);
		return sent;
	}

	return 0;
}

/*
 * RDMA read into @buf as described by the client-supplied buffer
 * descriptor(s). Return: transport result, or -EINVAL if the transport
 * has no rdma_read operation (e.g. plain TCP).
 */
int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
			 void *buf, unsigned int buflen,
			 struct smbdirect_buffer_descriptor_v1 *desc,
			 unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_read)
		ret = conn->transport->ops->rdma_read(conn->transport,
						      buf, buflen,
						      desc, desc_len);
	return ret;
}

/* RDMA-write counterpart of ksmbd_conn_rdma_read(); same -EINVAL fallback. */
int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
			  void *buf, unsigned int buflen,
			  struct smbdirect_buffer_descriptor_v1 *desc,
			  unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_write)
		ret = conn->transport->ops->rdma_write(conn->transport,
						       buf, buflen,
						       desc, desc_len);
	return ret;
}

/**
 * ksmbd_conn_alive() - should the connection handler keep running?
 * @conn: connection to check
 *
 * Return: false when the server is shutting down, the connection is
 * exiting, the kthread was asked to stop, or the idle deadtime expired
 * with no open files; true otherwise.
 */
bool ksmbd_conn_alive(struct ksmbd_conn *conn)
{
	if (!ksmbd_server_running())
		return false;

	if (ksmbd_conn_exiting(conn))
		return false;

	if (kthread_should_stop())
		return false;

	/* Never reap a connection that still has files open. */
	if (atomic_read(&conn->stats.open_files_count) > 0)
		return true;

	/*
	 * Stop current session if the time that get last request from client
	 * is bigger than deadtime user configured and opening file count is
	 * zero.
	 */
	if (server_conf.deadtime > 0 &&
	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
			    server_conf.deadtime / SMB_ECHO_INTERVAL);
		return false;
	}
	return true;
}

/* "+2" for BCC field (ByteCount, 2 bytes) */
#define SMB1_MIN_SUPPORTED_PDU_SIZE (sizeof(struct smb_hdr) + 2)
#define SMB2_MIN_SUPPORTED_PDU_SIZE (sizeof(struct smb2_pdu))

/**
 * ksmbd_conn_handler_loop() - session thread to listen on new smb requests
 * @p: connection instance
 *
 * One thread each per connection
 *
 * Return: 0 on success
 */
int ksmbd_conn_handler_loop(void *p)
{
	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
	struct ksmbd_transport *t = conn->transport;
	unsigned int pdu_size, max_allowed_pdu_size, max_req;
	char hdr_buf[4] = {0,};
	int size;

	mutex_init(&conn->srv_mutex);
	/* Pin the module while a connection thread is running. */
	__module_get(THIS_MODULE);

	max_req = server_conf.max_inflight_req;
	conn->last_active = jiffies;
	set_freezable();
	while (ksmbd_conn_alive(conn)) {
		if (try_to_freeze())
			continue;

		/* Previous request buffer, if any, is consumed by now. */
		kvfree(conn->request_buf);
		conn->request_buf = NULL;

recheck:
		/* Throttle: don't read a new PDU past the in-flight limit. */
		if (atomic_read(&conn->req_running) + 1 > max_req) {
			wait_event_interruptible(conn->req_running_q,
				atomic_read(&conn->req_running) < max_req);
			goto recheck;
		}

		/* First read the 4-byte RFC1002 length header. */
		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
		if (size != sizeof(hdr_buf))
			break;

		pdu_size = get_rfc1002_len(hdr_buf);
		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);

		/*
		 * Before negotiation completes, only allow the smaller
		 * base message size; afterwards large writes are legal.
		 */
		if (ksmbd_conn_good(conn))
			max_allowed_pdu_size =
				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
		else
			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;

		if (pdu_size > max_allowed_pdu_size) {
			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n",
					   pdu_size, max_allowed_pdu_size,
					   READ_ONCE(conn->status));
			break;
		}

		/*
		 * Check maximum pdu size(0x00FFFFFF).
		 */
		if (pdu_size > MAX_STREAM_PROT_LEN)
			break;

		if (pdu_size < SMB1_MIN_SUPPORTED_PDU_SIZE)
			break;

		/* 4 for rfc1002 length field */
		/* 1 for implied bcc[0] */
		size = pdu_size + 4 + 1;
		conn->request_buf = kvmalloc(size, KSMBD_DEFAULT_GFP);
		if (!conn->request_buf)
			break;

		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));

		/*
		 * We already read 4 bytes to find out PDU size, now
		 * read in PDU
		 */
		size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
		if (size < 0) {
			pr_err("sock_read failed: %d\n", size);
			break;
		}

		/* Short read: drop this PDU but keep the connection. */
		if (size != pdu_size) {
			pr_err("PDU error. Read: %d, Expected: %d\n",
			       size, pdu_size);
			continue;
		}

		if (!ksmbd_smb_request(conn))
			break;

		if (((struct smb2_hdr *)smb_get_msg(conn->request_buf))->ProtocolId ==
		    SMB2_PROTO_NUMBER) {
			if (pdu_size < SMB2_MIN_SUPPORTED_PDU_SIZE)
				break;
		}

		if (!default_conn_ops.process_fn) {
			pr_err("No connection request callback\n");
			break;
		}

		if (default_conn_ops.process_fn(conn)) {
			pr_err("Cannot handle request\n");
			break;
		}
	}

	ksmbd_conn_set_releasing(conn);
	/* Wait till all reference dropped to the Server object*/
	ksmbd_debug(CONN, "Wait for all pending requests(%d)\n", atomic_read(&conn->r_count));
	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);

	if (IS_ENABLED(CONFIG_UNICODE))
		utf8_unload(conn->um);
	unload_nls(conn->local_nls);
	if (default_conn_ops.terminate_fn)
		default_conn_ops.terminate_fn(conn);
	t->ops->disconnect(t);
	module_put(THIS_MODULE);
	return 0;
}

/* Install the server core's per-request process/terminate callbacks. */
void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
{
	default_conn_ops.process_fn = ops->process_fn;
	default_conn_ops.terminate_fn = ops->terminate_fn;
}

/* Take a pending-request reference; paired with ksmbd_conn_r_count_dec(). */
void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn)
{
	atomic_inc(&conn->r_count);
}

/*
 * Drop a pending-request reference and wake the handler thread waiting in
 * ksmbd_conn_handler_loop() once r_count reaches zero. A temporary refcnt
 * is held across the wakeup so @conn cannot be freed underneath us;
 * NOTE(review): if that temporary reference turns out to be the last one,
 * this path kfree()s @conn directly without the transport teardown that
 * ksmbd_conn_free() performs — confirm this asymmetry is intentional.
 */
void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn)
{
	/*
	 * Checking waitqueue to dropping pending requests on
	 * disconnection. waitqueue_active is safe because it
	 * uses atomic operation for condition.
	 */
	atomic_inc(&conn->refcnt);
	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
		wake_up(&conn->r_count_q);

	if (atomic_dec_and_test(&conn->refcnt))
		kfree(conn);
}

/*
 * Bring up the TCP and RDMA transports under init_lock, then create the
 * procfs "clients" entry. NOTE(review): the create_proc_clients() return
 * value is ignored — procfs exposure is treated as best-effort, and a
 * failed RDMA init still runs it; confirm this is intended.
 *
 * Return: 0 on success, otherwise the failing transport's error code.
 */
int ksmbd_conn_transport_init(void)
{
	int ret;

	mutex_lock(&init_lock);
	ret = ksmbd_tcp_init();
	if (ret) {
		pr_err("Failed to init TCP subsystem: %d\n", ret);
		goto out;
	}

	ret = ksmbd_rdma_init();
	if (ret) {
		pr_err("Failed to init RDMA subsystem: %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&init_lock);
	create_proc_clients();
	return ret;
}

/*
 * Mark every live connection as exiting and shut its transport down,
 * then poll (100ms sleep) until conn_list drains. The rwsem is dropped
 * around each shutdown call since it may sleep; the scan restarts from
 * the top after each full pass.
 */
static void stop_sessions(void)
{
	struct ksmbd_conn *conn;
	struct ksmbd_transport *t;
	int bkt;

again:
	down_read(&conn_list_lock);
	hash_for_each(conn_list, bkt, conn, hlist) {
		t = conn->transport;
		ksmbd_conn_set_exiting(conn);
		if (t->ops->shutdown) {
			up_read(&conn_list_lock);
			t->ops->shutdown(t);
			down_read(&conn_list_lock);
		}
	}
	up_read(&conn_list_lock);

	if (!hash_empty(conn_list)) {
		msleep(100);
		goto again;
	}
}

/*
 * Tear down in reverse: remove procfs entry, stop accepting new TCP/RDMA
 * connections, then force existing sessions to exit — all under init_lock
 * to exclude a concurrent ksmbd_conn_transport_init().
 */
void ksmbd_conn_transport_destroy(void)
{
	delete_proc_clients();
	mutex_lock(&init_lock);
	ksmbd_tcp_destroy();
	ksmbd_rdma_stop_listening();
	stop_sessions();
	mutex_unlock(&init_lock);
}