// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
 *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/module.h>

#include "server.h"
#include "smb_common.h"
#include "mgmt/ksmbd_ida.h"
#include "connection.h"
#include "transport_tcp.h"
#include "transport_rdma.h"
#include "misc.h"

/* Serializes transport subsystem init/teardown. */
static DEFINE_MUTEX(init_lock);

/* process_fn/terminate_fn callbacks registered by the server core. */
static struct ksmbd_conn_ops default_conn_ops;

/* Table of all live connections; guarded by conn_list_lock. */
DEFINE_HASHTABLE(conn_list, CONN_HASH_BITS);
DECLARE_RWSEM(conn_list_lock);

#ifdef CONFIG_PROC_FS
/* /proc entry listing connected clients; NULL when not created. */
static struct proc_dir_entry *proc_clients;

/**
 * proc_show_clients() - seq_file show callback for the "clients" proc entry
 * @m:	seq_file to print into
 * @v:	iterator cookie (unused)
 *
 * Prints one line per connection: peer address, negotiated dialect,
 * granted credits, open file count, in-flight request count and the
 * wall-clock time of the connection's last activity.
 *
 * Return: always 0.
 */
static int proc_show_clients(struct seq_file *m, void *v)
{
	struct ksmbd_conn *conn;
	struct timespec64 now, t;
	int i;

	seq_printf(m, "#%-20s %-10s %-10s %-10s %-10s %-10s\n",
		   "<name>", "<dialect>", "<credits>", "<open files>",
		   "<requests>", "<last active>");

	down_read(&conn_list_lock);
	hash_for_each(conn_list, i, conn, hlist) {
		/*
		 * Convert "jiffies since last activity" into an absolute
		 * wall-clock timestamp (now - idle interval).
		 */
		jiffies_to_timespec64(jiffies - conn->last_active, &t);
		ktime_get_real_ts64(&now);
		t = timespec64_sub(now, t);
#if IS_ENABLED(CONFIG_IPV6)
		/* inet_addr == 0 is treated as "peer is IPv6" here. */
		if (!conn->inet_addr)
			seq_printf(m, "%-20pI6c", &conn->inet6_addr);
		else
#endif
			seq_printf(m, "%-20pI4", &conn->inet_addr);
		seq_printf(m, " 0x%-10x %-10u %-12d %-10d %ptT\n",
			   conn->dialect,
			   conn->total_credits,
			   atomic_read(&conn->stats.open_files_count),
			   atomic_read(&conn->req_running),
			   &t);
	}
	up_read(&conn_list_lock);
	return 0;
}

/* Create the "clients" proc entry. Return: 0 on success, -ENOMEM on failure. */
static int create_proc_clients(void)
{
	proc_clients = ksmbd_proc_create("clients",
					 proc_show_clients, NULL);
	if (!proc_clients)
		return -ENOMEM;
	return 0;
}

/* Remove the "clients" proc entry if it was created. */
static void delete_proc_clients(void)
{
	if (proc_clients) {
		proc_remove(proc_clients);
		proc_clients = NULL;
	}
}
#else
/* No-op stubs when procfs support is compiled out. */
static int create_proc_clients(void) { return 0; }
static void delete_proc_clients(void) {}
#endif

/**
 * ksmbd_conn_free() - free resources of the connection instance
 *
 * @conn:	connection instance to be cleaned up
 *
 * During the thread termination, the corresponding conn instance
 * resources(sock/memory) are released and finally the conn object is freed.
 */
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
	down_write(&conn_list_lock);
	hash_del(&conn->hlist);
	up_write(&conn_list_lock);

	xa_destroy(&conn->sessions);
	kvfree(conn->request_buf);
	kfree(conn->preauth_info);
	kfree(conn->mechToken);
	/*
	 * Drop the reference taken in ksmbd_conn_alloc(). Only the last
	 * holder frees the transport and the conn object itself;
	 * ksmbd_conn_r_count_dec() may still hold a transient reference.
	 */
	if (atomic_dec_and_test(&conn->refcnt)) {
		conn->transport->ops->free_transport(conn->transport);
		kfree(conn);
	}
}

/**
 * ksmbd_conn_alloc() - initialize a new connection instance
 *
 * Allocates a zeroed conn, marks it as new/needing negotiation, loads the
 * charset tables and initializes all locks, queues and counters.
 *
 * Return: ksmbd_conn struct on success, otherwise NULL
 */
struct ksmbd_conn *ksmbd_conn_alloc(void)
{
	struct ksmbd_conn *conn;

	conn = kzalloc_obj(struct ksmbd_conn, KSMBD_DEFAULT_GFP);
	if (!conn)
		return NULL;

	conn->need_neg = true;
	ksmbd_conn_set_new(conn);
	/* Prefer UTF-8; fall back to the kernel's default NLS table. */
	conn->local_nls = load_nls("utf8");
	if (!conn->local_nls)
		conn->local_nls = load_nls_default();
	if (IS_ENABLED(CONFIG_UNICODE))
		conn->um = utf8_load(UNICODE_AGE(12, 1, 0));
	else
		conn->um = ERR_PTR(-EOPNOTSUPP);
	/* A NULL um means "no unicode map available"; callers must cope. */
	if (IS_ERR(conn->um))
		conn->um = NULL;
	atomic_set(&conn->req_running, 0);
	atomic_set(&conn->r_count, 0);
	/* Initial reference; dropped in ksmbd_conn_free(). */
	atomic_set(&conn->refcnt, 1);
	/* SMB2 negotiation starts with a single credit. */
	conn->total_credits = 1;
	conn->outstanding_credits = 0;

	init_waitqueue_head(&conn->req_running_q);
	init_waitqueue_head(&conn->r_count_q);
	INIT_LIST_HEAD(&conn->requests);
	INIT_LIST_HEAD(&conn->async_requests);
	spin_lock_init(&conn->request_lock);
	spin_lock_init(&conn->credits_lock);
	ida_init(&conn->async_ida);
	xa_init(&conn->sessions);

	spin_lock_init(&conn->llist_lock);
	INIT_LIST_HEAD(&conn->lock_list);

	init_rwsem(&conn->session_lock);

	return conn;
}

/**
 * ksmbd_conn_lookup_dialect() - check whether another connection shares
 *				 the same client GUID
 * @c:	connection whose ClientGUID is searched for
 *
 * Used for SMB3 multichannel: a binding request must match an existing
 * connection's ClientGUID.
 *
 * Return: true if any connection in conn_list has the same ClientGUID.
 */
bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
{
	struct ksmbd_conn *t;
	int bkt;
	bool ret = false;

	down_read(&conn_list_lock);
	hash_for_each(conn_list, bkt, t, hlist) {
		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
			continue;

		ret = true;
		break;
	}
	up_read(&conn_list_lock);
	return ret;
}

/**
 * ksmbd_conn_enqueue_request() - account a new in-flight request
 * @work:	work item for the incoming request
 *
 * Bumps req_running and, for everything except SMB2 CANCEL (which must
 * not be cancellable itself), links the work into conn->requests so a
 * later CANCEL can find it.
 */
void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	struct list_head *requests_queue = NULL;

	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
		requests_queue = &conn->requests;

	atomic_inc(&conn->req_running);
	if (requests_queue) {
		spin_lock(&conn->request_lock);
		list_add_tail(&work->request_entry, requests_queue);
		spin_unlock(&conn->request_lock);
	}
}

/**
 * ksmbd_conn_try_dequeue_request() - drop a request from the in-flight
 *				      accounting
 * @work:	work item being retired
 *
 * Decrements req_running and wakes any waiter (e.g. the handler loop
 * throttling on max_inflight_req). If the work was never queued (CANCEL),
 * both list entries are empty and we return early.
 */
void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;

	atomic_dec(&conn->req_running);
	if (waitqueue_active(&conn->req_running_q))
		wake_up(&conn->req_running_q);

	if (list_empty(&work->request_entry) &&
	    list_empty(&work->async_request_entry))
		return;

	spin_lock(&conn->request_lock);
	list_del_init(&work->request_entry);
	spin_unlock(&conn->request_lock);
	if (work->asynchronous)
		release_async_work(work);

	wake_up_all(&conn->req_running_q);
}

/* Serialize response writes on this connection (see ksmbd_conn_write()). */
void ksmbd_conn_lock(struct ksmbd_conn *conn)
{
	mutex_lock(&conn->srv_mutex);
}

void ksmbd_conn_unlock(struct ksmbd_conn *conn)
{
	mutex_unlock(&conn->srv_mutex);
}

/**
 * ksmbd_all_conn_set_status() - set status on every connection bound to
 *				 a session
 * @sess_id:	session id to match
 * @status:	new KSMBD_SESS_* status value
 *
 * Also matches connections currently in the middle of a session binding.
 */
void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
{
	struct ksmbd_conn *conn;
	int bkt;

	down_read(&conn_list_lock);
	hash_for_each(conn_list, bkt, conn, hlist) {
		if (conn->binding || xa_load(&conn->sessions, sess_id))
			WRITE_ONCE(conn->status, status);
	}
	up_read(&conn_list_lock);
}

/*
 * Wait until this connection has at most one request in flight
 * (the caller's own request counts as one).
 */
void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
{
	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
}

/**
 * ksmbd_conn_wait_idle_sess_id() - wait until all connections of a session
 *				    are idle
 * @curr_conn:	the connection issuing the wait (its own request is allowed
 *		to remain in flight)
 * @sess_id:	session id whose connections must drain
 *
 * Retries in 1s slices; each timed-out slice restarts the whole scan
 * because the rwsem must be dropped while sleeping.
 *
 * Return: 0 on success, -EIO if the session did not drain within
 * max_timeout (120) retries.
 */
int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id)
{
	struct ksmbd_conn *conn;
	int rc, retry_count = 0, max_timeout = 120;
	int rcount = 1, bkt;

retry_idle:
	if (retry_count >= max_timeout)
		return -EIO;

	down_read(&conn_list_lock);
	hash_for_each(conn_list, bkt, conn, hlist) {
		if (conn->binding || xa_load(&conn->sessions, sess_id)) {
			/* Our own in-flight request must not block us. */
			if (conn == curr_conn)
				rcount = 2;
			if (atomic_read(&conn->req_running) >= rcount) {
				rc = wait_event_timeout(conn->req_running_q,
					atomic_read(&conn->req_running) < rcount,
					HZ);
				if (!rc) {
					up_read(&conn_list_lock);
					retry_count++;
					goto retry_idle;
				}
			}
		}
	}
	up_read(&conn_list_lock);

	return 0;
}

/**
 * ksmbd_conn_write() - send the prepared response for a work item
 * @work:	work item whose iov[] holds the response
 *
 * Serialized per connection via srv_mutex. The total length sent is the
 * RFC1002 payload length plus the 4-byte RFC1002 header itself.
 *
 * Return: 0 on success, -EINVAL on missing/empty response, or the
 * negative error from the transport writev.
 */
int ksmbd_conn_write(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	int sent;

	if (!work->response_buf) {
		pr_err("NULL response header\n");
		return -EINVAL;
	}

	if (work->send_no_response)
		return 0;

	if (!work->iov_idx)
		return -EINVAL;

	ksmbd_conn_lock(conn);
	sent = conn->transport->ops->writev(conn->transport, work->iov,
			work->iov_cnt,
			get_rfc1002_len(work->iov[0].iov_base) + 4,
			work->need_invalidate_rkey,
			work->remote_key);
	ksmbd_conn_unlock(conn);

	if (sent < 0) {
		pr_err("Failed to send message: %d\n", sent);
		return sent;
	}

	return 0;
}

/**
 * ksmbd_conn_rdma_read() - RDMA-read from client-described buffers
 * @conn:	connection to read on
 * @buf:	local destination buffer
 * @buflen:	size of @buf
 * @desc:	client-supplied buffer descriptors
 * @desc_len:	total length of @desc
 *
 * Return: transport result, or -EINVAL if the transport has no
 * rdma_read operation (i.e. not an RDMA transport).
 */
int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
			 void *buf, unsigned int buflen,
			 struct smbdirect_buffer_descriptor_v1 *desc,
			 unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_read)
		ret = conn->transport->ops->rdma_read(conn->transport,
						      buf, buflen,
						      desc, desc_len);
	return ret;
}

/**
 * ksmbd_conn_rdma_write() - RDMA-write into client-described buffers
 * @conn:	connection to write on
 * @buf:	local source buffer
 * @buflen:	size of @buf
 * @desc:	client-supplied buffer descriptors
 * @desc_len:	total length of @desc
 *
 * Return: transport result, or -EINVAL if the transport has no
 * rdma_write operation (i.e. not an RDMA transport).
 */
int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
			  void *buf, unsigned int buflen,
			  struct smbdirect_buffer_descriptor_v1 *desc,
			  unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_write)
		ret = conn->transport->ops->rdma_write(conn->transport,
						      buf, buflen,
						      desc, desc_len);
	return ret;
}

/**
 * ksmbd_conn_alive() - should the handler loop keep servicing this conn?
 * @conn:	connection to check
 *
 * Return: false when the server is stopping, the connection is exiting,
 * the kthread was asked to stop, or the idle deadtime expired with no
 * open files; true otherwise.
 */
bool ksmbd_conn_alive(struct ksmbd_conn *conn)
{
	if (!ksmbd_server_running())
		return false;

	if (ksmbd_conn_exiting(conn))
		return false;

	if (kthread_should_stop())
		return false;

	/* Never drop a connection that still has files open. */
	if (atomic_read(&conn->stats.open_files_count) > 0)
		return true;

	/*
	 * Stop current session if the time that get last request from client
	 * is bigger than deadtime user configured and opening file count is
	 * zero.
	 */
	if (server_conf.deadtime > 0 &&
	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
			    server_conf.deadtime / SMB_ECHO_INTERVAL);
		return false;
	}
	return true;
}

/* "+2" for BCC field (ByteCount, 2 bytes) */
#define SMB1_MIN_SUPPORTED_PDU_SIZE (sizeof(struct smb_hdr) + 2)
#define SMB2_MIN_SUPPORTED_PDU_SIZE (sizeof(struct smb2_pdu))

/**
 * ksmbd_conn_handler_loop() - session thread to listen on new smb requests
 * @p:		connection instance
 *
 * One thread each per connection
 *
 * Reads the 4-byte RFC1002 header to learn the PDU size, validates the
 * size, reads the full PDU and hands it to the registered process_fn.
 * Exits on read error, oversized/undersized PDU or dead connection, then
 * waits for all outstanding request workers before disconnecting.
 *
 * Return: 0 on success
 */
int ksmbd_conn_handler_loop(void *p)
{
	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
	struct ksmbd_transport *t = conn->transport;
	unsigned int pdu_size, max_allowed_pdu_size, max_req;
	char hdr_buf[4] = {0,};
	int size;

	mutex_init(&conn->srv_mutex);
	/* Pin the module while this per-connection thread runs. */
	__module_get(THIS_MODULE);

	max_req = server_conf.max_inflight_req;
	conn->last_active = jiffies;
	set_freezable();
	while (ksmbd_conn_alive(conn)) {
		if (try_to_freeze())
			continue;

		/* Buffer of the previous iteration is no longer needed. */
		kvfree(conn->request_buf);
		conn->request_buf = NULL;

recheck:
		/* Throttle: don't exceed the configured in-flight limit. */
		if (atomic_read(&conn->req_running) + 1 > max_req) {
			wait_event_interruptible(conn->req_running_q,
				atomic_read(&conn->req_running) < max_req);
			goto recheck;
		}

		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
		if (size != sizeof(hdr_buf))
			break;

		pdu_size = get_rfc1002_len(hdr_buf);
		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);

		/*
		 * Before negotiation completes only the base message size
		 * is acceptable; afterwards large writes are allowed.
		 */
		if (ksmbd_conn_good(conn))
			max_allowed_pdu_size =
				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
		else
			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;

		if (pdu_size > max_allowed_pdu_size) {
			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n",
					pdu_size, max_allowed_pdu_size,
					READ_ONCE(conn->status));
			break;
		}

		/*
		 * Check maximum pdu size(0x00FFFFFF).
		 */
		if (pdu_size > MAX_STREAM_PROT_LEN)
			break;

		if (pdu_size < SMB1_MIN_SUPPORTED_PDU_SIZE)
			break;

		/* 4 for rfc1002 length field */
		/* 1 for implied bcc[0] */
		size = pdu_size + 4 + 1;
		conn->request_buf = kvmalloc(size, KSMBD_DEFAULT_GFP);
		if (!conn->request_buf)
			break;

		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));

		/*
		 * We already read 4 bytes to find out PDU size, now
		 * read in PDU
		 */
		size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
		if (size < 0) {
			pr_err("sock_read failed: %d\n", size);
			break;
		}

		if (size != pdu_size) {
			pr_err("PDU error. Read: %d, Expected: %d\n",
			       size, pdu_size);
			continue;
		}

		if (!ksmbd_smb_request(conn))
			break;

		/* SMB2 PDUs have a larger minimum size than SMB1. */
		if (((struct smb2_hdr *)smb_get_msg(conn->request_buf))->ProtocolId ==
		    SMB2_PROTO_NUMBER) {
			if (pdu_size < SMB2_MIN_SUPPORTED_PDU_SIZE)
				break;
		}

		if (!default_conn_ops.process_fn) {
			pr_err("No connection request callback\n");
			break;
		}

		if (default_conn_ops.process_fn(conn)) {
			pr_err("Cannot handle request\n");
			break;
		}
	}

	ksmbd_conn_set_releasing(conn);
	/* Wait till all reference dropped to the Server object*/
	ksmbd_debug(CONN, "Wait for all pending requests(%d)\n", atomic_read(&conn->r_count));
	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);

	if (IS_ENABLED(CONFIG_UNICODE))
		utf8_unload(conn->um);
	unload_nls(conn->local_nls);
	if (default_conn_ops.terminate_fn)
		default_conn_ops.terminate_fn(conn);
	t->ops->disconnect(t);
	module_put(THIS_MODULE);
	return 0;
}

/* Register the server core's process/terminate callbacks. */
void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
{
	default_conn_ops.process_fn = ops->process_fn;
	default_conn_ops.terminate_fn = ops->terminate_fn;
}

/* Take an r_count reference for an in-flight request worker. */
void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn)
{
	atomic_inc(&conn->r_count);
}

/**
 * ksmbd_conn_r_count_dec() - drop an r_count reference and wake the
 *			      handler thread waiting for drain
 * @conn:	connection to release
 *
 * Takes a transient refcnt so the conn cannot be freed between the
 * r_count decrement and the wakeup.
 */
void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn)
{
	/*
	 * Checking waitqueue to dropping pending requests on
	 * disconnection. waitqueue_active is safe because it
	 * uses atomic operation for condition.
	 */
	atomic_inc(&conn->refcnt);
	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
		wake_up(&conn->r_count_q);

	/*
	 * NOTE(review): this final-free path only kfree()s the conn,
	 * unlike ksmbd_conn_free() which also frees the transport —
	 * confirm the transport is guaranteed released before this
	 * can be the last reference.
	 */
	if (atomic_dec_and_test(&conn->refcnt))
		kfree(conn);
}

/**
 * ksmbd_conn_transport_init() - bring up TCP and RDMA transports
 *
 * Return: 0 on success, negative error from the failing subsystem.
 */
int ksmbd_conn_transport_init(void)
{
	int ret;

	mutex_lock(&init_lock);
	ret = ksmbd_tcp_init();
	if (ret) {
		pr_err("Failed to init TCP subsystem: %d\n", ret);
		goto out;
	}

	ret = ksmbd_rdma_init();
	if (ret) {
		pr_err("Failed to init RDMA subsystem: %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&init_lock);
	/*
	 * NOTE(review): create_proc_clients() failure is ignored and it
	 * runs even when transport init failed — confirm intended.
	 */
	create_proc_clients();
	return ret;
}

/*
 * Mark every connection as exiting and shut its transport down, then
 * poll until the connection table drains. The rwsem is dropped around
 * ->shutdown() because it may sleep; the scan is simply restarted until
 * conn_list is empty.
 */
static void stop_sessions(void)
{
	struct ksmbd_conn *conn;
	struct ksmbd_transport *t;
	int bkt;

again:
	down_read(&conn_list_lock);
	hash_for_each(conn_list, bkt, conn, hlist) {
		t = conn->transport;
		ksmbd_conn_set_exiting(conn);
		if (t->ops->shutdown) {
			up_read(&conn_list_lock);
			t->ops->shutdown(t);
			down_read(&conn_list_lock);
		}
	}
	up_read(&conn_list_lock);

	if (!hash_empty(conn_list)) {
		msleep(100);
		goto again;
	}
}

/* Tear down proc entry, transports and all remaining sessions. */
void ksmbd_conn_transport_destroy(void)
{
	delete_proc_clients();
	mutex_lock(&init_lock);
	ksmbd_tcp_destroy();
	ksmbd_rdma_stop_listening();
	stop_sessions();
	mutex_unlock(&init_lock);
}