1 /* 2 * Network block device - make block devices work over TCP 3 * 4 * Note that you can not swap over this thing, yet. Seems to work but 5 * deadlocks sometimes - you can not swap over TCP in general. 6 * 7 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz> 8 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> 9 * 10 * This file is released under GPLv2 or later. 11 * 12 * (part of code stolen from loop.c) 13 */ 14 15 #include <linux/major.h> 16 17 #include <linux/blkdev.h> 18 #include <linux/module.h> 19 #include <linux/init.h> 20 #include <linux/sched.h> 21 #include <linux/sched/mm.h> 22 #include <linux/fs.h> 23 #include <linux/bio.h> 24 #include <linux/stat.h> 25 #include <linux/errno.h> 26 #include <linux/file.h> 27 #include <linux/ioctl.h> 28 #include <linux/mutex.h> 29 #include <linux/compiler.h> 30 #include <linux/err.h> 31 #include <linux/kernel.h> 32 #include <linux/slab.h> 33 #include <net/sock.h> 34 #include <linux/net.h> 35 #include <linux/kthread.h> 36 #include <linux/types.h> 37 #include <linux/debugfs.h> 38 #include <linux/blk-mq.h> 39 40 #include <linux/uaccess.h> 41 #include <asm/types.h> 42 43 #include <linux/nbd.h> 44 #include <linux/nbd-netlink.h> 45 #include <net/genetlink.h> 46 47 static DEFINE_IDR(nbd_index_idr); 48 static DEFINE_MUTEX(nbd_index_mutex); 49 static int nbd_total_devices = 0; 50 51 struct nbd_sock { 52 struct socket *sock; 53 struct mutex tx_lock; 54 struct request *pending; 55 int sent; 56 bool dead; 57 int fallback_index; 58 int cookie; 59 }; 60 61 struct recv_thread_args { 62 struct work_struct work; 63 struct nbd_device *nbd; 64 int index; 65 }; 66 67 struct link_dead_args { 68 struct work_struct work; 69 int index; 70 }; 71 72 #define NBD_TIMEDOUT 0 73 #define NBD_DISCONNECT_REQUESTED 1 74 #define NBD_DISCONNECTED 2 75 #define NBD_HAS_PID_FILE 3 76 #define NBD_HAS_CONFIG_REF 4 77 #define NBD_BOUND 5 78 #define NBD_DESTROY_ON_DISCONNECT 6 79 80 struct nbd_config { 81 u32 flags; 82 unsigned long runtime_flags; 83 u64 dead_conn_timeout; 84 85 struct nbd_sock **socks; 86 int num_connections; 87 atomic_t live_connections; 88 wait_queue_head_t conn_wait; 89 90 atomic_t recv_threads; 91 wait_queue_head_t recv_wq; 92 loff_t blksize; 93 loff_t bytesize; 94 #if IS_ENABLED(CONFIG_DEBUG_FS) 95 struct dentry *dbg_dir; 96 #endif 97 }; 98 99 struct nbd_device { 100 struct blk_mq_tag_set tag_set; 101 102 int index; 103 refcount_t config_refs; 104 refcount_t refs; 105 struct nbd_config *config; 106 struct mutex config_lock; 107 struct gendisk *disk; 108 109 struct list_head list; 110 struct task_struct *task_recv; 111 struct task_struct *task_setup; 112 }; 113 114 struct nbd_cmd { 115 struct nbd_device *nbd; 116 int index; 117 int cookie; 118 struct completion send_complete; 119 blk_status_t status; 120 }; 121 122 #if IS_ENABLED(CONFIG_DEBUG_FS) 123 static struct dentry *nbd_dbg_dir; 124 #endif 125 126 #define nbd_name(nbd) ((nbd)->disk->disk_name) 127 128 #define NBD_MAGIC 0x68797548 129 130 static unsigned int nbds_max = 16; 131 static int max_part = 16; 132 static struct workqueue_struct *recv_workqueue; 133 static int part_shift; 134 135 static int nbd_dev_dbg_init(struct nbd_device *nbd); 136 static void nbd_dev_dbg_close(struct nbd_device *nbd); 137 static void nbd_config_put(struct nbd_device *nbd); 138 static void nbd_connect_reply(struct genl_info *info, int index); 139 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info); 140 static void nbd_dead_link_work(struct work_struct *work); 141 142 static inline struct device *nbd_to_dev(struct nbd_device *nbd) 143 { 144 return disk_to_dev(nbd->disk); 145 } 146 147 static const char *nbdcmd_to_ascii(int cmd) 148 { 149 switch (cmd) { 150 case NBD_CMD_READ: return "read"; 151 case NBD_CMD_WRITE: return "write"; 152 case NBD_CMD_DISC: return "disconnect"; 153 case NBD_CMD_FLUSH: return "flush"; 154 case NBD_CMD_TRIM: return "trim/discard"; 155 } 156 return "invalid"; 157 } 158 159 static ssize_t pid_show(struct device *dev, 160 struct device_attribute *attr, char *buf) 161 { 162 struct gendisk *disk = dev_to_disk(dev); 163 struct nbd_device *nbd = (struct nbd_device *)disk->private_data; 164 165 return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv)); 166 } 167 168 static const struct device_attribute pid_attr = { 169 .attr = { .name = "pid", .mode = S_IRUGO}, 170 .show = pid_show, 171 }; 172 173 static void nbd_dev_remove(struct nbd_device *nbd) 174 { 175 struct gendisk *disk = nbd->disk; 176 if (disk) { 177 del_gendisk(disk); 178 blk_cleanup_queue(disk->queue); 179 blk_mq_free_tag_set(&nbd->tag_set); 180 disk->private_data = NULL; 181 put_disk(disk); 182 } 183 kfree(nbd); 184 } 185 186 static void nbd_put(struct nbd_device *nbd) 187 { 188 if (refcount_dec_and_mutex_lock(&nbd->refs, 189 &nbd_index_mutex)) { 190 idr_remove(&nbd_index_idr, nbd->index); 191 mutex_unlock(&nbd_index_mutex); 192 nbd_dev_remove(nbd); 193 } 194 } 195 196 static int nbd_disconnected(struct nbd_config *config) 197 { 198 return test_bit(NBD_DISCONNECTED, &config->runtime_flags) || 199 test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags); 200 } 201 202 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, 203 int notify) 204 { 205 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) { 206 struct link_dead_args *args; 207 args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO); 208 if (args) { 209 INIT_WORK(&args->work, nbd_dead_link_work); 210 args->index = nbd->index; 211 queue_work(system_wq, &args->work); 212 } 213 } 214 if (!nsock->dead) { 215 kernel_sock_shutdown(nsock->sock, SHUT_RDWR); 216 atomic_dec(&nbd->config->live_connections); 217 } 218 nsock->dead = true; 219 nsock->pending = NULL; 220 nsock->sent = 0; 221 } 222 223 static void nbd_size_clear(struct nbd_device *nbd) 224 { 225 if (nbd->config->bytesize) { 226 set_capacity(nbd->disk, 0); 227 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); 228 } 229 } 230 231 static void nbd_size_update(struct nbd_device *nbd) 232 { 233 struct nbd_config *config = nbd->config; 234 blk_queue_logical_block_size(nbd->disk->queue, config->blksize); 235 blk_queue_physical_block_size(nbd->disk->queue, config->blksize); 236 set_capacity(nbd->disk, config->bytesize >> 9); 237 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); 238 } 239 240 static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, 241 loff_t nr_blocks) 242 { 243 struct nbd_config *config = nbd->config; 244 config->blksize = blocksize; 245 config->bytesize = blocksize * nr_blocks; 246 } 247 248 static void nbd_complete_rq(struct request *req) 249 { 250 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); 251 252 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd, 253 cmd->status ? "failed" : "done"); 254 255 blk_mq_end_request(req, cmd->status); 256 } 257 258 /* 259 * Forcibly shutdown the socket causing all listeners to error 260 */ 261 static void sock_shutdown(struct nbd_device *nbd) 262 { 263 struct nbd_config *config = nbd->config; 264 int i; 265 266 if (config->num_connections == 0) 267 return; 268 if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags)) 269 return; 270 271 for (i = 0; i < config->num_connections; i++) { 272 struct nbd_sock *nsock = config->socks[i]; 273 mutex_lock(&nsock->tx_lock); 274 nbd_mark_nsock_dead(nbd, nsock, 0); 275 mutex_unlock(&nsock->tx_lock); 276 } 277 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); 278 } 279 280 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, 281 bool reserved) 282 { 283 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); 284 struct nbd_device *nbd = cmd->nbd; 285 struct nbd_config *config; 286 287 if (!refcount_inc_not_zero(&nbd->config_refs)) { 288 cmd->status = BLK_STS_TIMEOUT; 289 return BLK_EH_HANDLED; 290 } 291 292 /* If we are waiting on our dead timer then we could get timeout 293 * callbacks for our request. For this we just want to reset the timer 294 * and let the queue side take care of everything. 295 */ 296 if (!completion_done(&cmd->send_complete)) { 297 nbd_config_put(nbd); 298 return BLK_EH_RESET_TIMER; 299 } 300 config = nbd->config; 301 302 if (config->num_connections > 1) { 303 dev_err_ratelimited(nbd_to_dev(nbd), 304 "Connection timed out, retrying\n"); 305 /* 306 * Hooray we have more connections, requeue this IO, the submit 307 * path will put it on a real connection. 308 */ 309 if (config->socks && config->num_connections > 1) { 310 if (cmd->index < config->num_connections) { 311 struct nbd_sock *nsock = 312 config->socks[cmd->index]; 313 mutex_lock(&nsock->tx_lock); 314 /* We can have multiple outstanding requests, so 315 * we don't want to mark the nsock dead if we've 316 * already reconnected with a new socket, so 317 * only mark it dead if its the same socket we 318 * were sent out on. 319 */ 320 if (cmd->cookie == nsock->cookie) 321 nbd_mark_nsock_dead(nbd, nsock, 1); 322 mutex_unlock(&nsock->tx_lock); 323 } 324 blk_mq_requeue_request(req, true); 325 nbd_config_put(nbd); 326 return BLK_EH_NOT_HANDLED; 327 } 328 } else { 329 dev_err_ratelimited(nbd_to_dev(nbd), 330 "Connection timed out\n"); 331 } 332 set_bit(NBD_TIMEDOUT, &config->runtime_flags); 333 cmd->status = BLK_STS_IOERR; 334 sock_shutdown(nbd); 335 nbd_config_put(nbd); 336 337 return BLK_EH_HANDLED; 338 } 339 340 /* 341 * Send or receive packet. 342 */ 343 static int sock_xmit(struct nbd_device *nbd, int index, int send, 344 struct iov_iter *iter, int msg_flags, int *sent) 345 { 346 struct nbd_config *config = nbd->config; 347 struct socket *sock = config->socks[index]->sock; 348 int result; 349 struct msghdr msg; 350 unsigned int noreclaim_flag; 351 352 if (unlikely(!sock)) { 353 dev_err_ratelimited(disk_to_dev(nbd->disk), 354 "Attempted %s on closed socket in sock_xmit\n", 355 (send ? "send" : "recv")); 356 return -EINVAL; 357 } 358 359 msg.msg_iter = *iter; 360 361 noreclaim_flag = memalloc_noreclaim_save(); 362 do { 363 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC; 364 msg.msg_name = NULL; 365 msg.msg_namelen = 0; 366 msg.msg_control = NULL; 367 msg.msg_controllen = 0; 368 msg.msg_flags = msg_flags | MSG_NOSIGNAL; 369 370 if (send) 371 result = sock_sendmsg(sock, &msg); 372 else 373 result = sock_recvmsg(sock, &msg, msg.msg_flags); 374 375 if (result <= 0) { 376 if (result == 0) 377 result = -EPIPE; /* short read */ 378 break; 379 } 380 if (sent) 381 *sent += result; 382 } while (msg_data_left(&msg)); 383 384 memalloc_noreclaim_restore(noreclaim_flag); 385 386 return result; 387 } 388 389 /* always call with the tx_lock held */ 390 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) 391 { 392 struct request *req = blk_mq_rq_from_pdu(cmd); 393 struct nbd_config *config = nbd->config; 394 struct nbd_sock *nsock = config->socks[index]; 395 int result; 396 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; 397 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; 398 struct iov_iter from; 399 unsigned long size = blk_rq_bytes(req); 400 struct bio *bio; 401 u32 type; 402 u32 nbd_cmd_flags = 0; 403 u32 tag = blk_mq_unique_tag(req); 404 int sent = nsock->sent, skip = 0; 405 406 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 407 408 switch (req_op(req)) { 409 case REQ_OP_DISCARD: 410 type = NBD_CMD_TRIM; 411 break; 412 case REQ_OP_FLUSH: 413 type = NBD_CMD_FLUSH; 414 break; 415 case REQ_OP_WRITE: 416 type = NBD_CMD_WRITE; 417 break; 418 case REQ_OP_READ: 419 type = NBD_CMD_READ; 420 break; 421 default: 422 return -EIO; 423 } 424 425 if (rq_data_dir(req) == WRITE && 426 (config->flags & NBD_FLAG_READ_ONLY)) { 427 dev_err_ratelimited(disk_to_dev(nbd->disk), 428 "Write on read-only\n"); 429 return -EIO; 430 } 431 432 if (req->cmd_flags & REQ_FUA) 433 nbd_cmd_flags |= NBD_CMD_FLAG_FUA; 434 435 /* We did a partial send previously, and we at least sent the whole 436 * request struct, so just go and send the rest of the pages in the 437 * request. 438 */ 439 if (sent) { 440 if (sent >= sizeof(request)) { 441 skip = sent - sizeof(request); 442 goto send_pages; 443 } 444 iov_iter_advance(&from, sent); 445 } 446 cmd->index = index; 447 cmd->cookie = nsock->cookie; 448 request.type = htonl(type | nbd_cmd_flags); 449 if (type != NBD_CMD_FLUSH) { 450 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 451 request.len = htonl(size); 452 } 453 memcpy(request.handle, &tag, sizeof(tag)); 454 455 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", 456 cmd, nbdcmd_to_ascii(type), 457 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); 458 result = sock_xmit(nbd, index, 1, &from, 459 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent); 460 if (result <= 0) { 461 if (result == -ERESTARTSYS) { 462 /* If we havne't sent anything we can just return BUSY, 463 * however if we have sent something we need to make 464 * sure we only allow this req to be sent until we are 465 * completely done. 466 */ 467 if (sent) { 468 nsock->pending = req; 469 nsock->sent = sent; 470 } 471 return BLK_STS_RESOURCE; 472 } 473 dev_err_ratelimited(disk_to_dev(nbd->disk), 474 "Send control failed (result %d)\n", result); 475 return -EAGAIN; 476 } 477 send_pages: 478 if (type != NBD_CMD_WRITE) 479 goto out; 480 481 bio = req->bio; 482 while (bio) { 483 struct bio *next = bio->bi_next; 484 struct bvec_iter iter; 485 struct bio_vec bvec; 486 487 bio_for_each_segment(bvec, bio, iter) { 488 bool is_last = !next && bio_iter_last(bvec, iter); 489 int flags = is_last ? 0 : MSG_MORE; 490 491 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", 492 cmd, bvec.bv_len); 493 iov_iter_bvec(&from, ITER_BVEC | WRITE, 494 &bvec, 1, bvec.bv_len); 495 if (skip) { 496 if (skip >= iov_iter_count(&from)) { 497 skip -= iov_iter_count(&from); 498 continue; 499 } 500 iov_iter_advance(&from, skip); 501 skip = 0; 502 } 503 result = sock_xmit(nbd, index, 1, &from, flags, &sent); 504 if (result <= 0) { 505 if (result == -ERESTARTSYS) { 506 /* We've already sent the header, we 507 * have no choice but to set pending and 508 * return BUSY. 509 */ 510 nsock->pending = req; 511 nsock->sent = sent; 512 return BLK_STS_RESOURCE; 513 } 514 dev_err(disk_to_dev(nbd->disk), 515 "Send data failed (result %d)\n", 516 result); 517 return -EAGAIN; 518 } 519 /* 520 * The completion might already have come in, 521 * so break for the last one instead of letting 522 * the iterator do it. This prevents use-after-free 523 * of the bio. 524 */ 525 if (is_last) 526 break; 527 } 528 bio = next; 529 } 530 out: 531 nsock->pending = NULL; 532 nsock->sent = 0; 533 return 0; 534 } 535 536 /* NULL returned = something went wrong, inform userspace */ 537 static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) 538 { 539 struct nbd_config *config = nbd->config; 540 int result; 541 struct nbd_reply reply; 542 struct nbd_cmd *cmd; 543 struct request *req = NULL; 544 u16 hwq; 545 u32 tag; 546 struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; 547 struct iov_iter to; 548 549 reply.magic = 0; 550 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); 551 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); 552 if (result <= 0) { 553 if (!nbd_disconnected(config)) 554 dev_err(disk_to_dev(nbd->disk), 555 "Receive control failed (result %d)\n", result); 556 return ERR_PTR(result); 557 } 558 559 if (ntohl(reply.magic) != NBD_REPLY_MAGIC) { 560 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", 561 (unsigned long)ntohl(reply.magic)); 562 return ERR_PTR(-EPROTO); 563 } 564 565 memcpy(&tag, reply.handle, sizeof(u32)); 566 567 hwq = blk_mq_unique_tag_to_hwq(tag); 568 if (hwq < nbd->tag_set.nr_hw_queues) 569 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], 570 blk_mq_unique_tag_to_tag(tag)); 571 if (!req || !blk_mq_request_started(req)) { 572 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n", 573 tag, req); 574 return ERR_PTR(-ENOENT); 575 } 576 cmd = blk_mq_rq_to_pdu(req); 577 if (ntohl(reply.error)) { 578 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", 579 ntohl(reply.error)); 580 cmd->status = BLK_STS_IOERR; 581 return cmd; 582 } 583 584 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd); 585 if (rq_data_dir(req) != WRITE) { 586 struct req_iterator iter; 587 struct bio_vec bvec; 588 589 rq_for_each_segment(bvec, req, iter) { 590 iov_iter_bvec(&to, ITER_BVEC | READ, 591 &bvec, 1, bvec.bv_len); 592 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); 593 if (result <= 0) { 594 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", 595 result); 596 /* 597 * If we've disconnected or we only have 1 598 * connection then we need to make sure we 599 * complete this request, otherwise error out 600 * and let the timeout stuff handle resubmitting 601 * this request onto another connection. 602 */ 603 if (nbd_disconnected(config) || 604 config->num_connections <= 1) { 605 cmd->status = BLK_STS_IOERR; 606 return cmd; 607 } 608 return ERR_PTR(-EIO); 609 } 610 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", 611 cmd, bvec.bv_len); 612 } 613 } else { 614 /* See the comment in nbd_queue_rq. */ 615 wait_for_completion(&cmd->send_complete); 616 } 617 return cmd; 618 } 619 620 static void recv_work(struct work_struct *work) 621 { 622 struct recv_thread_args *args = container_of(work, 623 struct recv_thread_args, 624 work); 625 struct nbd_device *nbd = args->nbd; 626 struct nbd_config *config = nbd->config; 627 struct nbd_cmd *cmd; 628 629 while (1) { 630 cmd = nbd_read_stat(nbd, args->index); 631 if (IS_ERR(cmd)) { 632 struct nbd_sock *nsock = config->socks[args->index]; 633 634 mutex_lock(&nsock->tx_lock); 635 nbd_mark_nsock_dead(nbd, nsock, 1); 636 mutex_unlock(&nsock->tx_lock); 637 break; 638 } 639 640 blk_mq_complete_request(blk_mq_rq_from_pdu(cmd)); 641 } 642 atomic_dec(&config->recv_threads); 643 wake_up(&config->recv_wq); 644 nbd_config_put(nbd); 645 kfree(args); 646 } 647 648 static void nbd_clear_req(struct request *req, void *data, bool reserved) 649 { 650 struct nbd_cmd *cmd; 651 652 if (!blk_mq_request_started(req)) 653 return; 654 cmd = blk_mq_rq_to_pdu(req); 655 cmd->status = BLK_STS_IOERR; 656 blk_mq_complete_request(req); 657 } 658 659 static void nbd_clear_que(struct nbd_device *nbd) 660 { 661 blk_mq_quiesce_queue(nbd->disk->queue); 662 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); 663 blk_mq_unquiesce_queue(nbd->disk->queue); 664 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); 665 } 666 667 static int find_fallback(struct nbd_device *nbd, int index) 668 { 669 struct nbd_config *config = nbd->config; 670 int new_index = -1; 671 struct nbd_sock *nsock = config->socks[index]; 672 int fallback = nsock->fallback_index; 673 674 if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) 675 return new_index; 676 677 if (config->num_connections <= 1) { 678 dev_err_ratelimited(disk_to_dev(nbd->disk), 679 "Attempted send on invalid socket\n"); 680 return new_index; 681 } 682 683 if (fallback >= 0 && fallback < config->num_connections && 684 !config->socks[fallback]->dead) 685 return fallback; 686 687 if (nsock->fallback_index < 0 || 688 nsock->fallback_index >= config->num_connections || 689 config->socks[nsock->fallback_index]->dead) { 690 int i; 691 for (i = 0; i < config->num_connections; i++) { 692 if (i == index) 693 continue; 694 if (!config->socks[i]->dead) { 695 new_index = i; 696 break; 697 } 698 } 699 nsock->fallback_index = new_index; 700 if (new_index < 0) { 701 dev_err_ratelimited(disk_to_dev(nbd->disk), 702 "Dead connection, failed to find a fallback\n"); 703 return new_index; 704 } 705 } 706 new_index = nsock->fallback_index; 707 return new_index; 708 } 709 710 static int wait_for_reconnect(struct nbd_device *nbd) 711 { 712 struct nbd_config *config = nbd->config; 713 if (!config->dead_conn_timeout) 714 return 0; 715 if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) 716 return 0; 717 wait_event_interruptible_timeout(config->conn_wait, 718 atomic_read(&config->live_connections), 719 config->dead_conn_timeout); 720 return atomic_read(&config->live_connections); 721 } 722 723 static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) 724 { 725 struct request *req = blk_mq_rq_from_pdu(cmd); 726 struct nbd_device *nbd = cmd->nbd; 727 struct nbd_config *config; 728 struct nbd_sock *nsock; 729 int ret; 730 731 if (!refcount_inc_not_zero(&nbd->config_refs)) { 732 dev_err_ratelimited(disk_to_dev(nbd->disk), 733 "Socks array is empty\n"); 734 return -EINVAL; 735 } 736 config = nbd->config; 737 738 if (index >= config->num_connections) { 739 dev_err_ratelimited(disk_to_dev(nbd->disk), 740 "Attempted send on invalid socket\n"); 741 nbd_config_put(nbd); 742 return -EINVAL; 743 } 744 cmd->status = BLK_STS_OK; 745 again: 746 nsock = config->socks[index]; 747 mutex_lock(&nsock->tx_lock); 748 if (nsock->dead) { 749 int old_index = index; 750 index = find_fallback(nbd, index); 751 mutex_unlock(&nsock->tx_lock); 752 if (index < 0) { 753 if (wait_for_reconnect(nbd)) { 754 index = old_index; 755 goto again; 756 } 757 /* All the sockets should already be down at this point, 758 * we just want to make sure that DISCONNECTED is set so 759 * any requests that come in that were queue'ed waiting 760 * for the reconnect timer don't trigger the timer again 761 * and instead just error out. 762 */ 763 sock_shutdown(nbd); 764 nbd_config_put(nbd); 765 return -EIO; 766 } 767 goto again; 768 } 769 770 /* Handle the case that we have a pending request that was partially 771 * transmitted that _has_ to be serviced first. We need to call requeue 772 * here so that it gets put _after_ the request that is already on the 773 * dispatch list. 774 */ 775 if (unlikely(nsock->pending && nsock->pending != req)) { 776 blk_mq_requeue_request(req, true); 777 ret = 0; 778 goto out; 779 } 780 /* 781 * Some failures are related to the link going down, so anything that 782 * returns EAGAIN can be retried on a different socket. 783 */ 784 ret = nbd_send_cmd(nbd, cmd, index); 785 if (ret == -EAGAIN) { 786 dev_err_ratelimited(disk_to_dev(nbd->disk), 787 "Request send failed trying another connection\n"); 788 nbd_mark_nsock_dead(nbd, nsock, 1); 789 mutex_unlock(&nsock->tx_lock); 790 goto again; 791 } 792 out: 793 mutex_unlock(&nsock->tx_lock); 794 nbd_config_put(nbd); 795 return ret; 796 } 797 798 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, 799 const struct blk_mq_queue_data *bd) 800 { 801 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 802 int ret; 803 804 /* 805 * Since we look at the bio's to send the request over the network we 806 * need to make sure the completion work doesn't mark this request done 807 * before we are done doing our send. This keeps us from dereferencing 808 * freed data if we have particularly fast completions (ie we get the 809 * completion before we exit sock_xmit on the last bvec) or in the case 810 * that the server is misbehaving (or there was an error) before we're 811 * done sending everything over the wire. 812 */ 813 init_completion(&cmd->send_complete); 814 blk_mq_start_request(bd->rq); 815 816 /* We can be called directly from the user space process, which means we 817 * could possibly have signals pending so our sendmsg will fail. In 818 * this case we need to return that we are busy, otherwise error out as 819 * appropriate. 820 */ 821 ret = nbd_handle_cmd(cmd, hctx->queue_num); 822 if (ret < 0) 823 ret = BLK_STS_IOERR; 824 else if (!ret) 825 ret = BLK_STS_OK; 826 complete(&cmd->send_complete); 827 828 return ret; 829 } 830 831 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, 832 bool netlink) 833 { 834 struct nbd_config *config = nbd->config; 835 struct socket *sock; 836 struct nbd_sock **socks; 837 struct nbd_sock *nsock; 838 int err; 839 840 sock = sockfd_lookup(arg, &err); 841 if (!sock) 842 return err; 843 844 if (!netlink && !nbd->task_setup && 845 !test_bit(NBD_BOUND, &config->runtime_flags)) 846 nbd->task_setup = current; 847 848 if (!netlink && 849 (nbd->task_setup != current || 850 test_bit(NBD_BOUND, &config->runtime_flags))) { 851 dev_err(disk_to_dev(nbd->disk), 852 "Device being setup by another task"); 853 sockfd_put(sock); 854 return -EBUSY; 855 } 856 857 socks = krealloc(config->socks, (config->num_connections + 1) * 858 sizeof(struct nbd_sock *), GFP_KERNEL); 859 if (!socks) { 860 sockfd_put(sock); 861 return -ENOMEM; 862 } 863 nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); 864 if (!nsock) { 865 sockfd_put(sock); 866 return -ENOMEM; 867 } 868 869 config->socks = socks; 870 871 nsock->fallback_index = -1; 872 nsock->dead = false; 873 mutex_init(&nsock->tx_lock); 874 nsock->sock = sock; 875 nsock->pending = NULL; 876 nsock->sent = 0; 877 nsock->cookie = 0; 878 socks[config->num_connections++] = nsock; 879 atomic_inc(&config->live_connections); 880 881 return 0; 882 } 883 884 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) 885 { 886 struct nbd_config *config = nbd->config; 887 struct socket *sock, *old; 888 struct recv_thread_args *args; 889 int i; 890 int err; 891 892 sock = sockfd_lookup(arg, &err); 893 if (!sock) 894 return err; 895 896 args = kzalloc(sizeof(*args), GFP_KERNEL); 897 if (!args) { 898 sockfd_put(sock); 899 return -ENOMEM; 900 } 901 902 for (i = 0; i < config->num_connections; i++) { 903 struct nbd_sock *nsock = config->socks[i]; 904 905 if (!nsock->dead) 906 continue; 907 908 mutex_lock(&nsock->tx_lock); 909 if (!nsock->dead) { 910 mutex_unlock(&nsock->tx_lock); 911 continue; 912 } 913 sk_set_memalloc(sock->sk); 914 if (nbd->tag_set.timeout) 915 sock->sk->sk_sndtimeo = nbd->tag_set.timeout; 916 atomic_inc(&config->recv_threads); 917 refcount_inc(&nbd->config_refs); 918 old = nsock->sock; 919 nsock->fallback_index = -1; 920 nsock->sock = sock; 921 nsock->dead = false; 922 INIT_WORK(&args->work, recv_work); 923 args->index = i; 924 args->nbd = nbd; 925 nsock->cookie++; 926 mutex_unlock(&nsock->tx_lock); 927 sockfd_put(old); 928 929 clear_bit(NBD_DISCONNECTED, &config->runtime_flags); 930 931 /* We take the tx_mutex in an error path in the recv_work, so we 932 * need to queue_work outside of the tx_mutex. 933 */ 934 queue_work(recv_workqueue, &args->work); 935 936 atomic_inc(&config->live_connections); 937 wake_up(&config->conn_wait); 938 return 0; 939 } 940 sockfd_put(sock); 941 kfree(args); 942 return -ENOSPC; 943 } 944 945 static void nbd_bdev_reset(struct block_device *bdev) 946 { 947 if (bdev->bd_openers > 1) 948 return; 949 bd_set_size(bdev, 0); 950 if (max_part > 0) { 951 blkdev_reread_part(bdev); 952 bdev->bd_invalidated = 1; 953 } 954 } 955 956 static void nbd_parse_flags(struct nbd_device *nbd) 957 { 958 struct nbd_config *config = nbd->config; 959 if (config->flags & NBD_FLAG_READ_ONLY) 960 set_disk_ro(nbd->disk, true); 961 else 962 set_disk_ro(nbd->disk, false); 963 if (config->flags & NBD_FLAG_SEND_TRIM) 964 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); 965 if (config->flags & NBD_FLAG_SEND_FLUSH) { 966 if (config->flags & NBD_FLAG_SEND_FUA) 967 blk_queue_write_cache(nbd->disk->queue, true, true); 968 else 969 blk_queue_write_cache(nbd->disk->queue, true, false); 970 } 971 else 972 blk_queue_write_cache(nbd->disk->queue, false, false); 973 } 974 975 static void send_disconnects(struct nbd_device *nbd) 976 { 977 struct nbd_config *config = nbd->config; 978 struct nbd_request request = { 979 .magic = htonl(NBD_REQUEST_MAGIC), 980 .type = htonl(NBD_CMD_DISC), 981 }; 982 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; 983 struct iov_iter from; 984 int i, ret; 985 986 for (i = 0; i < config->num_connections; i++) { 987 struct nbd_sock *nsock = config->socks[i]; 988 989 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 990 mutex_lock(&nsock->tx_lock); 991 ret = sock_xmit(nbd, i, 1, &from, 0, NULL); 992 if (ret <= 0) 993 dev_err(disk_to_dev(nbd->disk), 994 "Send disconnect failed %d\n", ret); 995 mutex_unlock(&nsock->tx_lock); 996 } 997 } 998 999 static int nbd_disconnect(struct nbd_device *nbd) 1000 { 1001 struct nbd_config *config = nbd->config; 1002 1003 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); 1004 set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags); 1005 send_disconnects(nbd); 1006 return 0; 1007 } 1008 1009 static void nbd_clear_sock(struct nbd_device *nbd) 1010 { 1011 sock_shutdown(nbd); 1012 nbd_clear_que(nbd); 1013 nbd->task_setup = NULL; 1014 } 1015 1016 static void nbd_config_put(struct nbd_device *nbd) 1017 { 1018 if (refcount_dec_and_mutex_lock(&nbd->config_refs, 1019 &nbd->config_lock)) { 1020 struct nbd_config *config = nbd->config; 1021 nbd_dev_dbg_close(nbd); 1022 nbd_size_clear(nbd); 1023 if (test_and_clear_bit(NBD_HAS_PID_FILE, 1024 &config->runtime_flags)) 1025 device_remove_file(disk_to_dev(nbd->disk), &pid_attr); 1026 nbd->task_recv = NULL; 1027 nbd_clear_sock(nbd); 1028 if (config->num_connections) { 1029 int i; 1030 for (i = 0; i < config->num_connections; i++) { 1031 sockfd_put(config->socks[i]->sock); 1032 kfree(config->socks[i]); 1033 } 1034 kfree(config->socks); 1035 } 1036 kfree(nbd->config); 1037 nbd->config = NULL; 1038 1039 nbd->tag_set.timeout = 0; 1040 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); 1041 1042 mutex_unlock(&nbd->config_lock); 1043 nbd_put(nbd); 1044 module_put(THIS_MODULE); 1045 } 1046 } 1047 1048 static int nbd_start_device(struct nbd_device *nbd) 1049 { 1050 struct nbd_config *config = nbd->config; 1051 int num_connections = config->num_connections; 1052 int error = 0, i; 1053 1054 if (nbd->task_recv) 1055 return -EBUSY; 1056 if (!config->socks) 1057 return -EINVAL; 1058 if (num_connections > 1 && 1059 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) { 1060 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n"); 1061 return -EINVAL; 1062 } 1063 1064 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections); 1065 nbd->task_recv = current; 1066 1067 nbd_parse_flags(nbd); 1068 1069 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr); 1070 if (error) { 1071 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); 1072 return error; 1073 } 1074 set_bit(NBD_HAS_PID_FILE, &config->runtime_flags); 1075 1076 nbd_dev_dbg_init(nbd); 1077 for (i = 0; i < num_connections; i++) { 1078 struct recv_thread_args *args; 1079 1080 args = kzalloc(sizeof(*args), GFP_KERNEL); 1081 if (!args) { 1082 sock_shutdown(nbd); 1083 return -ENOMEM; 1084 } 1085 sk_set_memalloc(config->socks[i]->sock->sk); 1086 if (nbd->tag_set.timeout) 1087 config->socks[i]->sock->sk->sk_sndtimeo = 1088 nbd->tag_set.timeout; 1089 atomic_inc(&config->recv_threads); 1090 refcount_inc(&nbd->config_refs); 1091 INIT_WORK(&args->work, recv_work); 1092 args->nbd = nbd; 1093 args->index = i; 1094 queue_work(recv_workqueue, &args->work); 1095 } 1096 nbd_size_update(nbd); 1097 return error; 1098 } 1099 1100 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev) 1101 { 1102 struct nbd_config *config = nbd->config; 1103 int ret; 1104 1105 ret = nbd_start_device(nbd); 1106 if (ret) 1107 return ret; 1108 1109 bd_set_size(bdev, config->bytesize); 1110 if (max_part) 1111 bdev->bd_invalidated = 1; 1112 mutex_unlock(&nbd->config_lock); 1113 ret = wait_event_interruptible(config->recv_wq, 1114 atomic_read(&config->recv_threads) == 0); 1115 if (ret) 1116 sock_shutdown(nbd); 1117 mutex_lock(&nbd->config_lock); 1118 bd_set_size(bdev, 0); 1119 /* user requested, ignore socket errors */ 1120 if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags)) 1121 ret = 0; 1122 if (test_bit(NBD_TIMEDOUT, &config->runtime_flags)) 1123 ret = -ETIMEDOUT; 1124 return ret; 1125 } 1126 1127 static void nbd_clear_sock_ioctl(struct nbd_device *nbd, 1128 struct block_device *bdev) 1129 { 1130 sock_shutdown(nbd); 1131 kill_bdev(bdev); 1132 nbd_bdev_reset(bdev); 1133 if (test_and_clear_bit(NBD_HAS_CONFIG_REF, 1134 &nbd->config->runtime_flags)) 1135 nbd_config_put(nbd); 1136 } 1137 1138 /* Must be called with config_lock held */ 1139 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, 1140 unsigned int cmd, unsigned long arg) 1141 { 1142 struct nbd_config *config = nbd->config; 1143 1144 switch (cmd) { 1145 case NBD_DISCONNECT: 1146 return nbd_disconnect(nbd); 1147 case NBD_CLEAR_SOCK: 1148 nbd_clear_sock_ioctl(nbd, bdev); 1149 return 0; 1150 case NBD_SET_SOCK: 1151 return nbd_add_socket(nbd, arg, false); 1152 case NBD_SET_BLKSIZE: 1153 nbd_size_set(nbd, arg, 1154 div_s64(config->bytesize, arg)); 1155 return 0; 1156 case NBD_SET_SIZE: 1157 nbd_size_set(nbd, config->blksize, 1158 div_s64(arg, config->blksize)); 1159 return 0; 1160 case NBD_SET_SIZE_BLOCKS: 1161 nbd_size_set(nbd, config->blksize, arg); 1162 return 0; 1163 case NBD_SET_TIMEOUT: 1164 if (arg) { 1165 nbd->tag_set.timeout = arg * HZ; 1166 blk_queue_rq_timeout(nbd->disk->queue, arg * HZ); 1167 } 1168 return 0; 1169 1170 case NBD_SET_FLAGS: 1171 config->flags = arg; 1172 return 0; 1173 case NBD_DO_IT: 1174 return nbd_start_device_ioctl(nbd, bdev); 1175 case NBD_CLEAR_QUE: 1176 /* 1177 * This is for compatibility only. The queue is always cleared 1178 * by NBD_DO_IT or NBD_CLEAR_SOCK. 1179 */ 1180 return 0; 1181 case NBD_PRINT_DEBUG: 1182 /* 1183 * For compatibility only, we no longer keep a list of 1184 * outstanding requests. 1185 */ 1186 return 0; 1187 } 1188 return -ENOTTY; 1189 } 1190 1191 static int nbd_ioctl(struct block_device *bdev, fmode_t mode, 1192 unsigned int cmd, unsigned long arg) 1193 { 1194 struct nbd_device *nbd = bdev->bd_disk->private_data; 1195 struct nbd_config *config = nbd->config; 1196 int error = -EINVAL; 1197 1198 if (!capable(CAP_SYS_ADMIN)) 1199 return -EPERM; 1200 1201 /* The block layer will pass back some non-nbd ioctls in case we have 1202 * special handling for them, but we don't so just return an error. 1203 */ 1204 if (_IOC_TYPE(cmd) != 0xab) 1205 return -EINVAL; 1206 1207 mutex_lock(&nbd->config_lock); 1208 1209 /* Don't allow ioctl operations on a nbd device that was created with 1210 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine. 1211 */ 1212 if (!test_bit(NBD_BOUND, &config->runtime_flags) || 1213 (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK)) 1214 error = __nbd_ioctl(bdev, nbd, cmd, arg); 1215 else 1216 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n"); 1217 mutex_unlock(&nbd->config_lock); 1218 return error; 1219 } 1220 1221 static struct nbd_config *nbd_alloc_config(void) 1222 { 1223 struct nbd_config *config; 1224 1225 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS); 1226 if (!config) 1227 return NULL; 1228 atomic_set(&config->recv_threads, 0); 1229 init_waitqueue_head(&config->recv_wq); 1230 init_waitqueue_head(&config->conn_wait); 1231 config->blksize = 1024; 1232 atomic_set(&config->live_connections, 0); 1233 try_module_get(THIS_MODULE); 1234 return config; 1235 } 1236 1237 static int nbd_open(struct block_device *bdev, fmode_t mode) 1238 { 1239 struct nbd_device *nbd; 1240 int ret = 0; 1241 1242 mutex_lock(&nbd_index_mutex); 1243 nbd = bdev->bd_disk->private_data; 1244 if (!nbd) { 1245 ret = -ENXIO; 1246 goto out; 1247 } 1248 if (!refcount_inc_not_zero(&nbd->refs)) { 1249 ret = -ENXIO; 1250 goto out; 1251 } 1252 if (!refcount_inc_not_zero(&nbd->config_refs)) { 1253 struct nbd_config *config; 1254 1255 mutex_lock(&nbd->config_lock); 1256 if (refcount_inc_not_zero(&nbd->config_refs)) { 1257 mutex_unlock(&nbd->config_lock); 1258 goto out; 1259 } 1260 config = nbd->config = nbd_alloc_config(); 1261 if (!config) { 1262 ret = -ENOMEM; 1263 mutex_unlock(&nbd->config_lock); 1264 goto out; 1265 } 1266 refcount_set(&nbd->config_refs, 1); 1267 refcount_inc(&nbd->refs); 1268 mutex_unlock(&nbd->config_lock); 1269 } 1270 out: 1271 mutex_unlock(&nbd_index_mutex); 1272 return ret; 1273 } 1274 1275 static void nbd_release(struct gendisk *disk, fmode_t mode) 1276 { 1277 struct nbd_device *nbd = disk->private_data; 1278 nbd_config_put(nbd); 1279 nbd_put(nbd); 1280 } 1281 1282 static const struct block_device_operations nbd_fops = 1283 { 1284 .owner = THIS_MODULE, 1285 .open = nbd_open, 1286 .release = nbd_release, 1287 .ioctl = nbd_ioctl, 1288 .compat_ioctl = nbd_ioctl, 1289 }; 1290 1291 #if IS_ENABLED(CONFIG_DEBUG_FS) 1292 1293 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused) 1294 { 1295 struct nbd_device *nbd = s->private; 1296 1297 if (nbd->task_recv) 1298 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv)); 1299 1300 return 0; 1301 } 1302 1303 static int nbd_dbg_tasks_open(struct inode *inode, struct file *file) 1304 { 1305 return single_open(file, nbd_dbg_tasks_show, inode->i_private); 1306 } 1307 1308 static const struct file_operations nbd_dbg_tasks_ops = { 1309 .open = nbd_dbg_tasks_open, 1310 .read = seq_read, 1311 .llseek = seq_lseek, 1312 .release = single_release, 1313 }; 1314 1315 static int nbd_dbg_flags_show(struct seq_file *s, void *unused) 1316 { 1317 struct nbd_device *nbd = s->private; 1318 u32 flags = nbd->config->flags; 1319 1320 seq_printf(s, "Hex: 0x%08x\n\n", flags); 1321 1322 seq_puts(s, "Known flags:\n"); 1323 1324 if (flags & NBD_FLAG_HAS_FLAGS) 1325 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n"); 1326 if (flags & NBD_FLAG_READ_ONLY) 1327 seq_puts(s, "NBD_FLAG_READ_ONLY\n"); 1328 if (flags & NBD_FLAG_SEND_FLUSH) 1329 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n"); 1330 if (flags & NBD_FLAG_SEND_FUA) 1331 seq_puts(s, "NBD_FLAG_SEND_FUA\n"); 1332 if (flags & NBD_FLAG_SEND_TRIM) 1333 seq_puts(s, "NBD_FLAG_SEND_TRIM\n"); 1334 1335 return 0; 1336 } 1337 1338 static int nbd_dbg_flags_open(struct inode *inode, struct file *file) 1339 { 1340 return single_open(file, nbd_dbg_flags_show, inode->i_private); 1341 } 1342 1343 static const struct file_operations nbd_dbg_flags_ops = { 1344 .open = nbd_dbg_flags_open, 1345 .read = seq_read, 1346 .llseek = seq_lseek, 1347 .release = single_release, 1348 }; 1349 1350 static int nbd_dev_dbg_init(struct nbd_device *nbd) 1351 { 1352 struct dentry *dir; 1353 struct nbd_config *config = nbd->config; 1354 1355 if (!nbd_dbg_dir) 1356 return -EIO; 1357 1358 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); 1359 if (!dir) { 1360 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", 1361 nbd_name(nbd)); 1362 return -EIO; 1363 } 1364 config->dbg_dir = dir; 1365 1366 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops); 1367 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize); 1368 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); 1369 debugfs_create_u64("blocksize", 0444, dir, &config->blksize); 1370 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); 1371 1372 return 0; 1373 } 1374 1375 static void nbd_dev_dbg_close(struct nbd_device *nbd) 1376 { 1377 debugfs_remove_recursive(nbd->config->dbg_dir); 1378 } 1379 1380 static int nbd_dbg_init(void) 1381 { 1382 struct dentry *dbg_dir; 1383 1384 dbg_dir = debugfs_create_dir("nbd", NULL); 1385 if (!dbg_dir) 1386 return -EIO; 1387 1388 nbd_dbg_dir = dbg_dir; 1389 1390 return 0; 1391 } 1392 1393 static void nbd_dbg_close(void) 1394 { 1395 debugfs_remove_recursive(nbd_dbg_dir); 1396 } 1397 1398 #else /* IS_ENABLED(CONFIG_DEBUG_FS) */ 1399 1400 static int nbd_dev_dbg_init(struct nbd_device *nbd) 1401 { 1402 return 0; 1403 } 1404 1405 static void nbd_dev_dbg_close(struct nbd_device *nbd) 1406 { 1407 } 1408 1409 static int nbd_dbg_init(void) 1410 { 1411 return 0; 1412 } 1413 1414 static void nbd_dbg_close(void) 1415 { 1416 } 1417 1418 #endif 1419 1420 static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq, 1421 unsigned int hctx_idx, unsigned int numa_node) 1422 { 1423 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); 1424 cmd->nbd = set->driver_data; 1425 return 0; 1426 } 1427 1428 static const struct blk_mq_ops nbd_mq_ops = { 1429 .queue_rq = nbd_queue_rq, 1430 .complete = nbd_complete_rq, 1431 .init_request = nbd_init_request, 1432 .timeout = nbd_xmit_timeout, 1433 }; 1434 1435 static int nbd_dev_add(int index) 1436 { 1437 struct nbd_device *nbd; 1438 struct gendisk *disk; 1439 struct request_queue *q; 1440 int err = -ENOMEM; 1441 1442 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL); 1443 if (!nbd) 1444 goto out; 1445 1446 disk = alloc_disk(1 << part_shift); 1447 if (!disk) 1448 goto out_free_nbd; 1449 1450 if (index >= 0) { 1451 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1, 1452 GFP_KERNEL); 1453 if (err == -ENOSPC) 1454 err = -EEXIST; 1455 } else { 1456 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); 1457 if (err >= 0) 1458 index = err; 1459 } 1460 if (err < 0) 1461 goto out_free_disk; 1462 1463 nbd->index = index; 1464 nbd->disk = disk; 1465 nbd->tag_set.ops = &nbd_mq_ops; 1466 nbd->tag_set.nr_hw_queues = 1; 1467 nbd->tag_set.queue_depth = 128; 1468 nbd->tag_set.numa_node = NUMA_NO_NODE; 1469 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); 1470 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | 1471 BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING; 1472 nbd->tag_set.driver_data = nbd; 1473 1474 err = blk_mq_alloc_tag_set(&nbd->tag_set); 1475 if (err) 1476 goto out_free_idr; 1477 1478 q = blk_mq_init_queue(&nbd->tag_set); 1479 if (IS_ERR(q)) { 1480 err = PTR_ERR(q); 1481 goto out_free_tags; 1482 } 1483 disk->queue = q; 1484 1485 /* 1486 * Tell the block layer that we are not a rotational device 1487 */ 1488 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); 1489 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue); 1490 disk->queue->limits.discard_granularity = 512; 1491 blk_queue_max_discard_sectors(disk->queue, UINT_MAX); 1492 blk_queue_max_segment_size(disk->queue, UINT_MAX); 1493 blk_queue_max_segments(disk->queue, USHRT_MAX); 1494 blk_queue_max_hw_sectors(disk->queue, 65536); 1495 disk->queue->limits.max_sectors = 256; 1496 1497 mutex_init(&nbd->config_lock); 1498 refcount_set(&nbd->config_refs, 0); 1499 refcount_set(&nbd->refs, 1); 1500 INIT_LIST_HEAD(&nbd->list); 1501 disk->major = NBD_MAJOR; 1502 disk->first_minor = index << part_shift; 1503 disk->fops = &nbd_fops; 1504 disk->private_data = nbd; 1505 sprintf(disk->disk_name, "nbd%d", index); 1506 add_disk(disk); 1507 nbd_total_devices++; 1508 return index; 1509 1510 out_free_tags: 1511 blk_mq_free_tag_set(&nbd->tag_set); 1512 out_free_idr: 1513 idr_remove(&nbd_index_idr, index); 1514 out_free_disk: 1515 put_disk(disk); 1516 out_free_nbd: 1517 kfree(nbd); 1518 out: 1519 return err; 1520 } 1521 1522 static int find_free_cb(int id, void *ptr, void *data) 1523 { 1524 struct nbd_device *nbd = ptr; 1525 struct nbd_device **found = data; 1526 1527 if (!refcount_read(&nbd->config_refs)) { 1528 *found = nbd; 1529 return 1; 1530 } 1531 return 0; 1532 } 1533 1534 /* Netlink interface. */ 1535 static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = { 1536 [NBD_ATTR_INDEX] = { .type = NLA_U32 }, 1537 [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 }, 1538 [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 }, 1539 [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 }, 1540 [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 }, 1541 [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 }, 1542 [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED}, 1543 [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 }, 1544 [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED}, 1545 }; 1546 1547 static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = { 1548 [NBD_SOCK_FD] = { .type = NLA_U32 }, 1549 }; 1550 1551 /* We don't use this right now since we don't parse the incoming list, but we 1552 * still want it here so userspace knows what to expect. 1553 */ 1554 static struct nla_policy __attribute__((unused)) 1555 nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = { 1556 [NBD_DEVICE_INDEX] = { .type = NLA_U32 }, 1557 [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 }, 1558 }; 1559 1560 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) 1561 { 1562 struct nbd_device *nbd = NULL; 1563 struct nbd_config *config; 1564 int index = -1; 1565 int ret; 1566 bool put_dev = false; 1567 1568 if (!netlink_capable(skb, CAP_SYS_ADMIN)) 1569 return -EPERM; 1570 1571 if (info->attrs[NBD_ATTR_INDEX]) 1572 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); 1573 if (!info->attrs[NBD_ATTR_SOCKETS]) { 1574 printk(KERN_ERR "nbd: must specify at least one socket\n"); 1575 return -EINVAL; 1576 } 1577 if (!info->attrs[NBD_ATTR_SIZE_BYTES]) { 1578 printk(KERN_ERR "nbd: must specify a size in bytes for the device\n"); 1579 return -EINVAL; 1580 } 1581 again: 1582 mutex_lock(&nbd_index_mutex); 1583 if (index == -1) { 1584 ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd); 1585 if (ret == 0) { 1586 int new_index; 1587 new_index = nbd_dev_add(-1); 1588 if (new_index < 0) { 1589 mutex_unlock(&nbd_index_mutex); 1590 printk(KERN_ERR "nbd: failed to add new device\n"); 1591 return ret; 1592 } 1593 nbd = idr_find(&nbd_index_idr, new_index); 1594 } 1595 } else { 1596 nbd = idr_find(&nbd_index_idr, index); 1597 if (!nbd) { 1598 ret = nbd_dev_add(index); 1599 if (ret < 0) { 1600 mutex_unlock(&nbd_index_mutex); 1601 printk(KERN_ERR "nbd: failed to add new device\n"); 1602 return ret; 1603 } 1604 nbd = idr_find(&nbd_index_idr, index); 1605 } 1606 } 1607 if (!nbd) { 1608 printk(KERN_ERR "nbd: couldn't find device at index %d\n", 1609 index); 1610 mutex_unlock(&nbd_index_mutex); 1611 return -EINVAL; 1612 } 1613 if (!refcount_inc_not_zero(&nbd->refs)) { 1614 mutex_unlock(&nbd_index_mutex); 1615 if (index == -1) 1616 goto again; 1617 printk(KERN_ERR "nbd: device at index %d is going down\n", 1618 index); 1619 return -EINVAL; 1620 } 1621 mutex_unlock(&nbd_index_mutex); 1622 1623 mutex_lock(&nbd->config_lock); 1624 if (refcount_read(&nbd->config_refs)) { 1625 mutex_unlock(&nbd->config_lock); 1626 nbd_put(nbd); 1627 if (index == -1) 1628 goto again; 1629 printk(KERN_ERR "nbd: nbd%d already in use\n", index); 1630 return -EBUSY; 1631 } 1632 if (WARN_ON(nbd->config)) { 1633 mutex_unlock(&nbd->config_lock); 1634 nbd_put(nbd); 1635 return -EINVAL; 1636 } 1637 config = nbd->config = nbd_alloc_config(); 1638 if (!nbd->config) { 1639 mutex_unlock(&nbd->config_lock); 1640 nbd_put(nbd); 1641 printk(KERN_ERR "nbd: couldn't allocate config\n"); 1642 return -ENOMEM; 1643 } 1644 refcount_set(&nbd->config_refs, 1); 1645 set_bit(NBD_BOUND, &config->runtime_flags); 1646 1647 if (info->attrs[NBD_ATTR_SIZE_BYTES]) { 1648 u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]); 1649 nbd_size_set(nbd, config->blksize, 1650 div64_u64(bytes, config->blksize)); 1651 } 1652 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) { 1653 u64 bsize = 1654 nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]); 1655 nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize)); 1656 } 1657 if (info->attrs[NBD_ATTR_TIMEOUT]) { 1658 u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]); 1659 nbd->tag_set.timeout = timeout * HZ; 1660 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); 1661 } 1662 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) { 1663 config->dead_conn_timeout = 1664 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]); 1665 config->dead_conn_timeout *= HZ; 1666 } 1667 if (info->attrs[NBD_ATTR_SERVER_FLAGS]) 1668 config->flags = 1669 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]); 1670 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) { 1671 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); 1672 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) { 1673 set_bit(NBD_DESTROY_ON_DISCONNECT, 1674 &config->runtime_flags); 1675 put_dev = true; 1676 } 1677 } 1678 1679 if (info->attrs[NBD_ATTR_SOCKETS]) { 1680 struct nlattr *attr; 1681 int rem, fd; 1682 1683 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS], 1684 rem) { 1685 struct nlattr *socks[NBD_SOCK_MAX+1]; 1686 1687 if (nla_type(attr) != NBD_SOCK_ITEM) { 1688 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n"); 1689 ret = -EINVAL; 1690 goto out; 1691 } 1692 ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr, 1693 nbd_sock_policy, info->extack); 1694 if (ret != 0) { 1695 printk(KERN_ERR "nbd: error processing sock list\n"); 1696 ret = -EINVAL; 1697 goto out; 1698 } 1699 if (!socks[NBD_SOCK_FD]) 1700 continue; 1701 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]); 1702 ret = nbd_add_socket(nbd, fd, true); 1703 if (ret) 1704 goto out; 1705 } 1706 } 1707 ret = nbd_start_device(nbd); 1708 out: 1709 mutex_unlock(&nbd->config_lock); 1710 if (!ret) { 1711 set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags); 1712 refcount_inc(&nbd->config_refs); 1713 nbd_connect_reply(info, nbd->index); 1714 } 1715 nbd_config_put(nbd); 1716 if (put_dev) 1717 nbd_put(nbd); 1718 return ret; 1719 } 1720 1721 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info) 1722 { 1723 struct nbd_device *nbd; 1724 int index; 1725 1726 if (!netlink_capable(skb, CAP_SYS_ADMIN)) 1727 return -EPERM; 1728 1729 if (!info->attrs[NBD_ATTR_INDEX]) { 1730 printk(KERN_ERR "nbd: must specify an index to disconnect\n"); 1731 return -EINVAL; 1732 } 1733 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); 1734 mutex_lock(&nbd_index_mutex); 1735 nbd = idr_find(&nbd_index_idr, index); 1736 if (!nbd) { 1737 mutex_unlock(&nbd_index_mutex); 1738 printk(KERN_ERR "nbd: couldn't find device at index %d\n", 1739 index); 1740 return -EINVAL; 1741 } 1742 if (!refcount_inc_not_zero(&nbd->refs)) { 1743 mutex_unlock(&nbd_index_mutex); 1744 printk(KERN_ERR "nbd: device at index %d is going down\n", 1745 index); 1746 return -EINVAL; 1747 } 1748 mutex_unlock(&nbd_index_mutex); 1749 if (!refcount_inc_not_zero(&nbd->config_refs)) { 1750 nbd_put(nbd); 1751 return 0; 1752 } 1753 mutex_lock(&nbd->config_lock); 1754 nbd_disconnect(nbd); 1755 mutex_unlock(&nbd->config_lock); 1756 if (test_and_clear_bit(NBD_HAS_CONFIG_REF, 1757 &nbd->config->runtime_flags)) 1758 nbd_config_put(nbd); 1759 nbd_config_put(nbd); 1760 nbd_put(nbd); 1761 return 0; 1762 } 1763 1764 static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) 1765 { 1766 struct nbd_device *nbd = NULL; 1767 struct nbd_config *config; 1768 int index; 1769 int ret = -EINVAL; 1770 bool put_dev = false; 1771 1772 if (!netlink_capable(skb, CAP_SYS_ADMIN)) 1773 return -EPERM; 1774 1775 if (!info->attrs[NBD_ATTR_INDEX]) { 1776 printk(KERN_ERR "nbd: must specify a device to reconfigure\n"); 1777 return -EINVAL; 1778 } 1779 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); 1780 mutex_lock(&nbd_index_mutex); 1781 nbd = idr_find(&nbd_index_idr, index); 1782 if (!nbd) { 1783 mutex_unlock(&nbd_index_mutex); 1784 printk(KERN_ERR "nbd: couldn't find a device at index %d\n", 1785 index); 1786 return -EINVAL; 1787 } 1788 if (!refcount_inc_not_zero(&nbd->refs)) { 1789 mutex_unlock(&nbd_index_mutex); 1790 printk(KERN_ERR "nbd: device at index %d is going down\n", 1791 index); 1792 return -EINVAL; 1793 } 1794 mutex_unlock(&nbd_index_mutex); 1795 1796 if (!refcount_inc_not_zero(&nbd->config_refs)) { 1797 dev_err(nbd_to_dev(nbd), 1798 "not configured, cannot reconfigure\n"); 1799 nbd_put(nbd); 1800 return -EINVAL; 1801 } 1802 1803 mutex_lock(&nbd->config_lock); 1804 config = nbd->config; 1805 if (!test_bit(NBD_BOUND, &config->runtime_flags) || 1806 !nbd->task_recv) { 1807 dev_err(nbd_to_dev(nbd), 1808 "not configured, cannot reconfigure\n"); 1809 goto out; 1810 } 1811 1812 if (info->attrs[NBD_ATTR_TIMEOUT]) { 1813 u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]); 1814 nbd->tag_set.timeout = timeout * HZ; 1815 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); 1816 } 1817 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) { 1818 config->dead_conn_timeout = 1819 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]); 1820 config->dead_conn_timeout *= HZ; 1821 } 1822 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) { 1823 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); 1824 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) { 1825 if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT, 1826 &config->runtime_flags)) 1827 put_dev = true; 1828 } else { 1829 if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT, 1830 &config->runtime_flags)) 1831 refcount_inc(&nbd->refs); 1832 } 1833 } 1834 1835 if (info->attrs[NBD_ATTR_SOCKETS]) { 1836 struct nlattr *attr; 1837 int rem, fd; 1838 1839 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS], 1840 rem) { 1841 struct nlattr *socks[NBD_SOCK_MAX+1]; 1842 1843 if (nla_type(attr) != NBD_SOCK_ITEM) { 1844 printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n"); 1845 ret = -EINVAL; 1846 goto out; 1847 } 1848 ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr, 1849 nbd_sock_policy, info->extack); 1850 if (ret != 0) { 1851 printk(KERN_ERR "nbd: error processing sock list\n"); 1852 ret = -EINVAL; 1853 goto out; 1854 } 1855 if (!socks[NBD_SOCK_FD]) 1856 continue; 1857 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]); 1858 ret = nbd_reconnect_socket(nbd, fd); 1859 if (ret) { 1860 if (ret == -ENOSPC) 1861 ret = 0; 1862 goto out; 1863 } 1864 dev_info(nbd_to_dev(nbd), "reconnected socket\n"); 1865 } 1866 } 1867 out: 1868 mutex_unlock(&nbd->config_lock); 1869 nbd_config_put(nbd); 1870 nbd_put(nbd); 1871 if (put_dev) 1872 nbd_put(nbd); 1873 return ret; 1874 } 1875 1876 static const struct genl_ops nbd_connect_genl_ops[] = { 1877 { 1878 .cmd = NBD_CMD_CONNECT, 1879 .policy = nbd_attr_policy, 1880 .doit = nbd_genl_connect, 1881 }, 1882 { 1883 .cmd = NBD_CMD_DISCONNECT, 1884 .policy = nbd_attr_policy, 1885 .doit = nbd_genl_disconnect, 1886 }, 1887 { 1888 .cmd = NBD_CMD_RECONFIGURE, 1889 .policy = nbd_attr_policy, 1890 .doit = nbd_genl_reconfigure, 1891 }, 1892 { 1893 .cmd = NBD_CMD_STATUS, 1894 .policy = nbd_attr_policy, 1895 .doit = nbd_genl_status, 1896 }, 1897 }; 1898 1899 static const struct genl_multicast_group nbd_mcast_grps[] = { 1900 { .name = NBD_GENL_MCAST_GROUP_NAME, }, 1901 }; 1902 1903 static struct genl_family nbd_genl_family __ro_after_init = { 1904 .hdrsize = 0, 1905 .name = NBD_GENL_FAMILY_NAME, 1906 .version = NBD_GENL_VERSION, 1907 .module = THIS_MODULE, 1908 .ops = nbd_connect_genl_ops, 1909 .n_ops = ARRAY_SIZE(nbd_connect_genl_ops), 1910 .maxattr = NBD_ATTR_MAX, 1911 .mcgrps = nbd_mcast_grps, 1912 .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps), 1913 }; 1914 1915 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply) 1916 { 1917 struct nlattr *dev_opt; 1918 u8 connected = 0; 1919 int ret; 1920 1921 /* This is a little racey, but for status it's ok. The 1922 * reason we don't take a ref here is because we can't 1923 * take a ref in the index == -1 case as we would need 1924 * to put under the nbd_index_mutex, which could 1925 * deadlock if we are configured to remove ourselves 1926 * once we're disconnected. 1927 */ 1928 if (refcount_read(&nbd->config_refs)) 1929 connected = 1; 1930 dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM); 1931 if (!dev_opt) 1932 return -EMSGSIZE; 1933 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index); 1934 if (ret) 1935 return -EMSGSIZE; 1936 ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED, 1937 connected); 1938 if (ret) 1939 return -EMSGSIZE; 1940 nla_nest_end(reply, dev_opt); 1941 return 0; 1942 } 1943 1944 static int status_cb(int id, void *ptr, void *data) 1945 { 1946 struct nbd_device *nbd = ptr; 1947 return populate_nbd_status(nbd, (struct sk_buff *)data); 1948 } 1949 1950 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info) 1951 { 1952 struct nlattr *dev_list; 1953 struct sk_buff *reply; 1954 void *reply_head; 1955 size_t msg_size; 1956 int index = -1; 1957 int ret = -ENOMEM; 1958 1959 if (info->attrs[NBD_ATTR_INDEX]) 1960 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); 1961 1962 mutex_lock(&nbd_index_mutex); 1963 1964 msg_size = nla_total_size(nla_attr_size(sizeof(u32)) + 1965 nla_attr_size(sizeof(u8))); 1966 msg_size *= (index == -1) ? nbd_total_devices : 1; 1967 1968 reply = genlmsg_new(msg_size, GFP_KERNEL); 1969 if (!reply) 1970 goto out; 1971 reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0, 1972 NBD_CMD_STATUS); 1973 if (!reply_head) { 1974 nlmsg_free(reply); 1975 goto out; 1976 } 1977 1978 dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST); 1979 if (index == -1) { 1980 ret = idr_for_each(&nbd_index_idr, &status_cb, reply); 1981 if (ret) { 1982 nlmsg_free(reply); 1983 goto out; 1984 } 1985 } else { 1986 struct nbd_device *nbd; 1987 nbd = idr_find(&nbd_index_idr, index); 1988 if (nbd) { 1989 ret = populate_nbd_status(nbd, reply); 1990 if (ret) { 1991 nlmsg_free(reply); 1992 goto out; 1993 } 1994 } 1995 } 1996 nla_nest_end(reply, dev_list); 1997 genlmsg_end(reply, reply_head); 1998 genlmsg_reply(reply, info); 1999 ret = 0; 2000 out: 2001 mutex_unlock(&nbd_index_mutex); 2002 return ret; 2003 } 2004 2005 static void nbd_connect_reply(struct genl_info *info, int index) 2006 { 2007 struct sk_buff *skb; 2008 void *msg_head; 2009 int ret; 2010 2011 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL); 2012 if (!skb) 2013 return; 2014 msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0, 2015 NBD_CMD_CONNECT); 2016 if (!msg_head) { 2017 nlmsg_free(skb); 2018 return; 2019 } 2020 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index); 2021 if (ret) { 2022 nlmsg_free(skb); 2023 return; 2024 } 2025 genlmsg_end(skb, msg_head); 2026 genlmsg_reply(skb, info); 2027 } 2028 2029 static void nbd_mcast_index(int index) 2030 { 2031 struct sk_buff *skb; 2032 void *msg_head; 2033 int ret; 2034 2035 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL); 2036 if (!skb) 2037 return; 2038 msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0, 2039 NBD_CMD_LINK_DEAD); 2040 if (!msg_head) { 2041 nlmsg_free(skb); 2042 return; 2043 } 2044 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index); 2045 if (ret) { 2046 nlmsg_free(skb); 2047 return; 2048 } 2049 genlmsg_end(skb, msg_head); 2050 genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL); 2051 } 2052 2053 static void nbd_dead_link_work(struct work_struct *work) 2054 { 2055 struct link_dead_args *args = container_of(work, struct link_dead_args, 2056 work); 2057 nbd_mcast_index(args->index); 2058 kfree(args); 2059 } 2060 2061 static int __init nbd_init(void) 2062 { 2063 int i; 2064 2065 BUILD_BUG_ON(sizeof(struct nbd_request) != 28); 2066 2067 if (max_part < 0) { 2068 printk(KERN_ERR "nbd: max_part must be >= 0\n"); 2069 return -EINVAL; 2070 } 2071 2072 part_shift = 0; 2073 if (max_part > 0) { 2074 part_shift = fls(max_part); 2075 2076 /* 2077 * Adjust max_part according to part_shift as it is exported 2078 * to user space so that user can know the max number of 2079 * partition kernel should be able to manage. 2080 * 2081 * Note that -1 is required because partition 0 is reserved 2082 * for the whole disk. 2083 */ 2084 max_part = (1UL << part_shift) - 1; 2085 } 2086 2087 if ((1UL << part_shift) > DISK_MAX_PARTS) 2088 return -EINVAL; 2089 2090 if (nbds_max > 1UL << (MINORBITS - part_shift)) 2091 return -EINVAL; 2092 recv_workqueue = alloc_workqueue("knbd-recv", 2093 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 2094 if (!recv_workqueue) 2095 return -ENOMEM; 2096 2097 if (register_blkdev(NBD_MAJOR, "nbd")) { 2098 destroy_workqueue(recv_workqueue); 2099 return -EIO; 2100 } 2101 2102 if (genl_register_family(&nbd_genl_family)) { 2103 unregister_blkdev(NBD_MAJOR, "nbd"); 2104 destroy_workqueue(recv_workqueue); 2105 return -EINVAL; 2106 } 2107 nbd_dbg_init(); 2108 2109 mutex_lock(&nbd_index_mutex); 2110 for (i = 0; i < nbds_max; i++) 2111 nbd_dev_add(i); 2112 mutex_unlock(&nbd_index_mutex); 2113 return 0; 2114 } 2115 2116 static int nbd_exit_cb(int id, void *ptr, void *data) 2117 { 2118 struct list_head *list = (struct list_head *)data; 2119 struct nbd_device *nbd = ptr; 2120 2121 list_add_tail(&nbd->list, list); 2122 return 0; 2123 } 2124 2125 static void __exit nbd_cleanup(void) 2126 { 2127 struct nbd_device *nbd; 2128 LIST_HEAD(del_list); 2129 2130 nbd_dbg_close(); 2131 2132 mutex_lock(&nbd_index_mutex); 2133 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list); 2134 mutex_unlock(&nbd_index_mutex); 2135 2136 while (!list_empty(&del_list)) { 2137 nbd = list_first_entry(&del_list, struct nbd_device, list); 2138 list_del_init(&nbd->list); 2139 if (refcount_read(&nbd->refs) != 1) 2140 printk(KERN_ERR "nbd: possibly leaking a device\n"); 2141 nbd_put(nbd); 2142 } 2143 2144 idr_destroy(&nbd_index_idr); 2145 genl_unregister_family(&nbd_genl_family); 2146 destroy_workqueue(recv_workqueue); 2147 unregister_blkdev(NBD_MAJOR, "nbd"); 2148 } 2149 2150 module_init(nbd_init); 2151 module_exit(nbd_cleanup); 2152 2153 MODULE_DESCRIPTION("Network Block Device"); 2154 MODULE_LICENSE("GPL"); 2155 2156 module_param(nbds_max, int, 0444); 2157 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); 2158 module_param(max_part, int, 0444); 2159 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)"); 2160