Lines Matching +full:config +full:- +full:complete +full:- +full:timeout +full:- +full:us

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Network block device - make block devices work over TCP
6 * deadlocks sometimes - you cannot swap over TCP in general.
8 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
40 #include <linux/blk-mq.h>
46 #include <linux/nbd-netlink.h>
112 static inline unsigned int nbd_blksize(struct nbd_config *config) in nbd_blksize() argument
114 return 1u << config->blksize_bits; in nbd_blksize()
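For reference, nbd_blksize() is the inverse of the assignment at line 350 below (config->blksize_bits = __ffs(blksize)); a worked round trip, assuming a power-of-two block size:

	/* Worked round trip for blksize == 4096:
	 *   config->blksize_bits = __ffs(4096);   // -> 12
	 *   nbd_blksize(config);                  // -> 1u << 12 == 4096
	 */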
123 struct nbd_config *config; member
133 pid_t pid; /* pid of nbd-client, if attached */
142 * by cmd->lock.
164 #define nbd_name(nbd) ((nbd)->disk->disk_name)
182 return disk_to_dev(nbd->disk); in nbd_to_dev()
189 lockdep_assert_held(&cmd->lock); in nbd_requeue_cmd()
198 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); in nbd_requeue_cmd()
200 if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags)) in nbd_requeue_cmd()
210 u64 cookie = cmd->cmd_cookie; in nbd_cmd_handle()
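Only the cookie line of nbd_cmd_handle() matched above. For context, a sketch of how the 64-bit on-wire handle is assumed to be packed (command cookie in the high 32 bits, blk-mq unique tag in the low 32), consistent with the nbd_handle_to_cookie() check at line 913 and the hwq lookup around line 889 below; NBD_COOKIE_BITS == 32 is inferred from that layout:

	static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
	{
		struct request *req = blk_mq_rq_from_pdu(cmd);
		u32 tag = blk_mq_unique_tag(req);	/* hwq index in high bits, tag in low */
		u64 cookie = cmd->cmd_cookie;

		return (cookie << NBD_COOKIE_BITS) | tag;
	}

	static u32 nbd_handle_to_cookie(u64 handle)
	{
		return (u32)(handle >> NBD_COOKIE_BITS);
	}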
241 struct nbd_device *nbd = disk->private_data; in pid_show()
243 return sprintf(buf, "%d\n", nbd->pid); in pid_show()
255 struct nbd_device *nbd = disk->private_data; in backend_show()
257 return sprintf(buf, "%s\n", nbd->backend ?: ""); in backend_show()
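These two read-only attributes surface in sysfs; a sketch of the resulting paths for device 0:

	/* /sys/block/nbd0/pid      - pid of the attached nbd-client, 0 if none
	 * /sys/block/nbd0/backend  - backend identifier string, empty if unset
	 */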
267 struct gendisk *disk = nbd->disk; in nbd_dev_remove()
270 blk_mq_free_tag_set(&nbd->tag_set); in nbd_dev_remove()
277 idr_remove(&nbd_index_idr, nbd->index); in nbd_dev_remove()
279 destroy_workqueue(nbd->recv_workq); in nbd_dev_remove()
290 if (!refcount_dec_and_test(&nbd->refs)) in nbd_put()
294 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags)) in nbd_put()
295 queue_work(nbd_del_wq, &nbd->remove_work); in nbd_put()
300 static int nbd_disconnected(struct nbd_config *config) in nbd_disconnected() argument
302 return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) || in nbd_disconnected()
303 test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags); in nbd_disconnected()
309 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) { in nbd_mark_nsock_dead()
313 INIT_WORK(&args->work, nbd_dead_link_work); in nbd_mark_nsock_dead()
314 args->index = nbd->index; in nbd_mark_nsock_dead()
315 queue_work(system_percpu_wq, &args->work); in nbd_mark_nsock_dead()
318 if (!nsock->dead) { in nbd_mark_nsock_dead()
319 kernel_sock_shutdown(nsock->sock, SHUT_RDWR); in nbd_mark_nsock_dead()
320 if (atomic_dec_return(&nbd->config->live_connections) == 0) { in nbd_mark_nsock_dead()
322 &nbd->config->runtime_flags)) { in nbd_mark_nsock_dead()
324 &nbd->config->runtime_flags); in nbd_mark_nsock_dead()
330 nsock->dead = true; in nbd_mark_nsock_dead()
331 nsock->pending = NULL; in nbd_mark_nsock_dead()
332 nsock->sent = 0; in nbd_mark_nsock_dead()
344 return -EINVAL; in nbd_set_size()
347 return -EINVAL; in nbd_set_size()
349 nbd->config->bytesize = bytesize; in nbd_set_size()
350 nbd->config->blksize_bits = __ffs(blksize); in nbd_set_size()
352 if (!nbd->pid) in nbd_set_size()
355 lim = queue_limits_start_update(nbd->disk->queue); in nbd_set_size()
356 if (nbd->config->flags & NBD_FLAG_SEND_TRIM) in nbd_set_size()
360 if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) { in nbd_set_size()
362 } else if (nbd->config->flags & NBD_FLAG_SEND_FUA) { in nbd_set_size()
368 if (nbd->config->flags & NBD_FLAG_ROTATIONAL) in nbd_set_size()
370 if (nbd->config->flags & NBD_FLAG_SEND_WRITE_ZEROES) in nbd_set_size()
375 error = queue_limits_commit_update_frozen(nbd->disk->queue, &lim); in nbd_set_size()
380 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); in nbd_set_size()
381 if (!set_capacity_and_notify(nbd->disk, bytesize >> 9)) in nbd_set_size()
382 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); in nbd_set_size()
390 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req, in nbd_complete_rq()
391 cmd->status ? "failed" : "done"); in nbd_complete_rq()
393 blk_mq_end_request(req, cmd->status); in nbd_complete_rq()
401 struct nbd_config *config = nbd->config; in sock_shutdown() local
404 if (config->num_connections == 0) in sock_shutdown()
406 if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) in sock_shutdown()
409 for (i = 0; i < config->num_connections; i++) { in sock_shutdown()
410 struct nbd_sock *nsock = config->socks[i]; in sock_shutdown()
411 mutex_lock(&nsock->tx_lock); in sock_shutdown()
413 mutex_unlock(&nsock->tx_lock); in sock_shutdown()
415 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); in sock_shutdown()
438 if (refcount_inc_not_zero(&nbd->config_refs)) { in nbd_get_config_unlocked()
440 * Add smp_mb__after_atomic to ensure that reading nbd->config_refs in nbd_get_config_unlocked()
441 * and reading nbd->config are ordered. Its pair is the barrier in in nbd_get_config_unlocked()
442 * nbd_alloc_and_init_config(), which keeps nbd->config_refs from in nbd_get_config_unlocked()
443 * being seen as set before nbd->config is. in nbd_get_config_unlocked()
446 return nbd->config; in nbd_get_config_unlocked()
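A minimal sketch of the publish/consume pairing with nbd_alloc_and_init_config() (lines 1695-1703 below); the writer-side smp_mb__before_atomic() is assumed, as it sits between the lines that matched the search:

	/* writer, nbd_alloc_and_init_config():     reader, nbd_get_config_unlocked():
	 *   nbd->config = config;                    if (refcount_inc_not_zero(&nbd->config_refs)) {
	 *   smp_mb__before_atomic();                     smp_mb__after_atomic();
	 *   refcount_set(&nbd->config_refs, 1);          return nbd->config;  // never NULL here
	 *                                             }
	 */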
455 struct nbd_device *nbd = cmd->nbd; in nbd_xmit_timeout()
456 struct nbd_config *config; in nbd_xmit_timeout() local
458 if (!mutex_trylock(&cmd->lock)) in nbd_xmit_timeout()
462 if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)) { in nbd_xmit_timeout()
463 mutex_unlock(&cmd->lock); in nbd_xmit_timeout()
467 if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) { in nbd_xmit_timeout()
468 mutex_unlock(&cmd->lock); in nbd_xmit_timeout()
472 config = nbd_get_config_unlocked(nbd); in nbd_xmit_timeout()
473 if (!config) { in nbd_xmit_timeout()
474 cmd->status = BLK_STS_TIMEOUT; in nbd_xmit_timeout()
475 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); in nbd_xmit_timeout()
476 mutex_unlock(&cmd->lock); in nbd_xmit_timeout()
480 if (config->num_connections > 1 || in nbd_xmit_timeout()
481 (config->num_connections == 1 && nbd->tag_set.timeout)) { in nbd_xmit_timeout()
484 atomic_read(&config->live_connections), in nbd_xmit_timeout()
485 config->num_connections); in nbd_xmit_timeout()
490 * a new connection is reconfigured or until the dead timeout fires. in nbd_xmit_timeout()
492 if (config->socks) { in nbd_xmit_timeout()
493 if (cmd->index < config->num_connections) { in nbd_xmit_timeout()
495 config->socks[cmd->index]; in nbd_xmit_timeout()
496 mutex_lock(&nsock->tx_lock); in nbd_xmit_timeout()
503 if (cmd->cookie == nsock->cookie) in nbd_xmit_timeout()
505 mutex_unlock(&nsock->tx_lock); in nbd_xmit_timeout()
508 mutex_unlock(&cmd->lock); in nbd_xmit_timeout()
514 if (!nbd->tag_set.timeout) { in nbd_xmit_timeout()
516 * Userspace sets timeout=0 to disable socket disconnection, in nbd_xmit_timeout()
519 struct nbd_sock *nsock = config->socks[cmd->index]; in nbd_xmit_timeout()
520 cmd->retries++; in nbd_xmit_timeout()
524 blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries); in nbd_xmit_timeout()
526 mutex_lock(&nsock->tx_lock); in nbd_xmit_timeout()
527 if (cmd->cookie != nsock->cookie) { in nbd_xmit_timeout()
529 mutex_unlock(&nsock->tx_lock); in nbd_xmit_timeout()
530 mutex_unlock(&cmd->lock); in nbd_xmit_timeout()
534 mutex_unlock(&nsock->tx_lock); in nbd_xmit_timeout()
535 mutex_unlock(&cmd->lock); in nbd_xmit_timeout()
541 set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags); in nbd_xmit_timeout()
542 cmd->status = BLK_STS_IOERR; in nbd_xmit_timeout()
543 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); in nbd_xmit_timeout()
544 mutex_unlock(&cmd->lock); in nbd_xmit_timeout()
561 dev_err_ratelimited(disk_to_dev(nbd->disk), in __sock_xmit()
564 return -EINVAL; in __sock_xmit()
573 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC; in __sock_xmit()
574 sock->sk->sk_use_task_frag = false; in __sock_xmit()
584 result = -EPIPE; /* short read */ in __sock_xmit()
605 struct nbd_config *config = nbd->config; in sock_xmit() local
606 struct socket *sock = config->socks[index]->sock; in sock_xmit()
612 * Different settings for sk->sk_sndtimeo can result in different return values
617 return result == -ERESTARTSYS || result == -EINTR; in was_interrupted()
625 * request may be re-dispatched with a different tag, but our header has
635 WARN_ON_ONCE(test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)); in nbd_sched_pending_work()
637 nsock->pending = req; in nbd_sched_pending_work()
638 nsock->sent = sent; in nbd_sched_pending_work()
639 set_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags); in nbd_sched_pending_work()
640 refcount_inc(&nbd->config_refs); in nbd_sched_pending_work()
641 schedule_work(&nsock->work); in nbd_sched_pending_work()
652 struct nbd_config *config = nbd->config; in nbd_send_cmd() local
653 struct nbd_sock *nsock = config->socks[index]; in nbd_send_cmd()
662 int sent = nsock->sent, skip = 0; in nbd_send_cmd()
664 lockdep_assert_held(&cmd->lock); in nbd_send_cmd()
665 lockdep_assert_held(&nsock->tx_lock); in nbd_send_cmd()
674 (config->flags & NBD_FLAG_READ_ONLY)) { in nbd_send_cmd()
675 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_send_cmd()
676 "Write on read-only\n"); in nbd_send_cmd()
680 if (req->cmd_flags & REQ_FUA) in nbd_send_cmd()
682 if ((req->cmd_flags & REQ_NOUNMAP) && (type == NBD_CMD_WRITE_ZEROES)) in nbd_send_cmd()
691 skip = sent - sizeof(request); in nbd_send_cmd()
700 cmd->cmd_cookie++; in nbd_send_cmd()
702 cmd->index = index; in nbd_send_cmd()
703 cmd->cookie = nsock->cookie; in nbd_send_cmd()
704 cmd->retries = 0; in nbd_send_cmd()
713 trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd)); in nbd_send_cmd()
732 set_bit(NBD_CMD_REQUEUED, &cmd->flags); in nbd_send_cmd()
735 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_send_cmd()
743 bio = req->bio; in nbd_send_cmd()
745 struct bio *next = bio->bi_next; in nbd_send_cmd()
758 skip -= iov_iter_count(&from); in nbd_send_cmd()
770 dev_err(disk_to_dev(nbd->disk), in nbd_send_cmd()
778 * the iterator do it. This prevents use-after-free in nbd_send_cmd()
788 nsock->pending = NULL; in nbd_send_cmd()
789 nsock->sent = 0; in nbd_send_cmd()
790 __set_bit(NBD_CMD_INFLIGHT, &cmd->flags); in nbd_send_cmd()
799 if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)) in nbd_send_cmd()
803 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_send_cmd()
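The header nbd_send_cmd() serializes is the on-wire request from <linux/nbd.h>; sketched here for reference (field names as in the UAPI header; all integer fields big-endian):

	struct nbd_request {
		__be32 magic;		/* NBD_REQUEST_MAGIC */
		__be32 type;		/* NBD_CMD_READ, NBD_CMD_WRITE, ... */
		union {
			__be64 cookie;	/* opaque, echoed back in the reply */
			char   handle[8];
		};
		__be64 from;		/* byte offset on the device */
		__be32 len;		/* payload length in bytes */
	} __attribute__((packed));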
814 struct request *req = nsock->pending; in nbd_pending_cmd_work()
816 struct nbd_device *nbd = cmd->nbd; in nbd_pending_cmd_work()
817 unsigned long deadline = READ_ONCE(req->deadline); in nbd_pending_cmd_work()
820 mutex_lock(&cmd->lock); in nbd_pending_cmd_work()
822 WARN_ON_ONCE(test_bit(NBD_CMD_REQUEUED, &cmd->flags)); in nbd_pending_cmd_work()
823 if (WARN_ON_ONCE(!test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags))) in nbd_pending_cmd_work()
826 mutex_lock(&nsock->tx_lock); in nbd_pending_cmd_work()
828 nbd_send_cmd(nbd, cmd, cmd->index); in nbd_pending_cmd_work()
829 if (!nsock->pending) in nbd_pending_cmd_work()
832 /* don't bother the timeout handler during a partial send */ in nbd_pending_cmd_work()
834 cmd->status = BLK_STS_IOERR; in nbd_pending_cmd_work()
841 mutex_unlock(&nsock->tx_lock); in nbd_pending_cmd_work()
842 clear_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags); in nbd_pending_cmd_work()
844 mutex_unlock(&cmd->lock); in nbd_pending_cmd_work()
855 reply->magic = 0; in nbd_read_reply()
859 if (!nbd_disconnected(nbd->config)) in nbd_read_reply()
860 dev_err(disk_to_dev(nbd->disk), in nbd_read_reply()
865 if (ntohl(reply->magic) != NBD_REPLY_MAGIC) { in nbd_read_reply()
866 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", in nbd_read_reply()
867 (unsigned long)ntohl(reply->magic)); in nbd_read_reply()
868 return -EPROTO; in nbd_read_reply()
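The reply being validated is the simple on-wire reply from <linux/nbd.h>; a sketch (the union'd cookie field matches the be64_to_cpu(reply->cookie) read at line 886 below):

	struct nbd_reply {
		__be32 magic;		/* NBD_REPLY_MAGIC, checked above */
		__be32 error;		/* 0 = ok, else an error value */
		union {
			__be64 cookie;	/* echoed from the matching request */
			char   handle[8];
		};
	};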
886 handle = be64_to_cpu(reply->cookie); in nbd_handle_reply()
889 if (hwq < nbd->tag_set.nr_hw_queues) in nbd_handle_reply()
890 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], in nbd_handle_reply()
893 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n", in nbd_handle_reply()
895 return ERR_PTR(-ENOENT); in nbd_handle_reply()
900 mutex_lock(&cmd->lock); in nbd_handle_reply()
901 if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) { in nbd_handle_reply()
902 dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)", in nbd_handle_reply()
903 tag, cmd->status, cmd->flags); in nbd_handle_reply()
904 ret = -ENOENT; in nbd_handle_reply()
907 if (cmd->index != index) { in nbd_handle_reply()
908 dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)", in nbd_handle_reply()
909 tag, index, cmd->index); in nbd_handle_reply()
910 ret = -ENOENT; in nbd_handle_reply()
913 if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) { in nbd_handle_reply()
914 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n", in nbd_handle_reply()
915 req, cmd->cmd_cookie, nbd_handle_to_cookie(handle)); in nbd_handle_reply()
916 ret = -ENOENT; in nbd_handle_reply()
919 if (cmd->status != BLK_STS_OK) { in nbd_handle_reply()
920 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n", in nbd_handle_reply()
922 ret = -ENOENT; in nbd_handle_reply()
925 if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) { in nbd_handle_reply()
926 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n", in nbd_handle_reply()
928 ret = -ENOENT; in nbd_handle_reply()
931 if (ntohl(reply->error)) { in nbd_handle_reply()
932 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", in nbd_handle_reply()
933 ntohl(reply->error)); in nbd_handle_reply()
934 cmd->status = BLK_STS_IOERR; in nbd_handle_reply()
948 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", in nbd_handle_reply()
952 * complete this request, otherwise error out in nbd_handle_reply()
953 * and let the timeout stuff handle resubmitting in nbd_handle_reply()
956 if (nbd_disconnected(nbd->config)) { in nbd_handle_reply()
957 cmd->status = BLK_STS_IOERR; in nbd_handle_reply()
960 ret = -EIO; in nbd_handle_reply()
969 mutex_unlock(&cmd->lock); in nbd_handle_reply()
978 struct nbd_device *nbd = args->nbd; in recv_work()
979 struct nbd_config *config = nbd->config; in recv_work() local
980 struct request_queue *q = nbd->disk->queue; in recv_work()
981 struct nbd_sock *nsock = args->nsock; in recv_work()
988 if (nbd_read_reply(nbd, nsock->sock, &reply)) in recv_work()
993 * request use-after-free is possible during nbd_handle_reply(). in recv_work()
997 if (!percpu_ref_tryget(&q->q_usage_counter)) { in recv_work()
998 dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n", in recv_work()
1003 cmd = nbd_handle_reply(nbd, args->index, &reply); in recv_work()
1005 percpu_ref_put(&q->q_usage_counter); in recv_work()
1010 if (likely(!blk_should_fake_timeout(rq->q))) { in recv_work()
1011 bool complete; in recv_work() local
1013 mutex_lock(&cmd->lock); in recv_work()
1014 complete = __test_and_clear_bit(NBD_CMD_INFLIGHT, in recv_work()
1015 &cmd->flags); in recv_work()
1016 mutex_unlock(&cmd->lock); in recv_work()
1017 if (complete) in recv_work()
1020 percpu_ref_put(&q->q_usage_counter); in recv_work()
1023 mutex_lock(&nsock->tx_lock); in recv_work()
1025 mutex_unlock(&nsock->tx_lock); in recv_work()
1028 atomic_dec(&config->recv_threads); in recv_work()
1029 wake_up(&config->recv_wq); in recv_work()
1041 mutex_lock(&cmd->lock); in nbd_clear_req()
1042 if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) { in nbd_clear_req()
1043 mutex_unlock(&cmd->lock); in nbd_clear_req()
1046 cmd->status = BLK_STS_IOERR; in nbd_clear_req()
1047 mutex_unlock(&cmd->lock); in nbd_clear_req()
1055 blk_mq_quiesce_queue(nbd->disk->queue); in nbd_clear_que()
1056 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); in nbd_clear_que()
1057 blk_mq_unquiesce_queue(nbd->disk->queue); in nbd_clear_que()
1058 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); in nbd_clear_que()
1063 struct nbd_config *config = nbd->config; in find_fallback() local
1064 int new_index = -1; in find_fallback()
1065 struct nbd_sock *nsock = config->socks[index]; in find_fallback()
1066 int fallback = nsock->fallback_index; in find_fallback()
1068 if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) in find_fallback()
1071 if (config->num_connections <= 1) { in find_fallback()
1072 dev_err_ratelimited(disk_to_dev(nbd->disk), in find_fallback()
1077 if (fallback >= 0 && fallback < config->num_connections && in find_fallback()
1078 !config->socks[fallback]->dead) in find_fallback()
1081 if (nsock->fallback_index < 0 || in find_fallback()
1082 nsock->fallback_index >= config->num_connections || in find_fallback()
1083 config->socks[nsock->fallback_index]->dead) { in find_fallback()
1085 for (i = 0; i < config->num_connections; i++) { in find_fallback()
1088 if (!config->socks[i]->dead) { in find_fallback()
1093 nsock->fallback_index = new_index; in find_fallback()
1095 dev_err_ratelimited(disk_to_dev(nbd->disk), in find_fallback()
1100 new_index = nsock->fallback_index; in find_fallback()
1106 struct nbd_config *config = nbd->config; in wait_for_reconnect() local
1107 if (!config->dead_conn_timeout) in wait_for_reconnect()
1110 if (!wait_event_timeout(config->conn_wait, in wait_for_reconnect()
1112 &config->runtime_flags) || in wait_for_reconnect()
1113 atomic_read(&config->live_connections) > 0, in wait_for_reconnect()
1114 config->dead_conn_timeout)) in wait_for_reconnect()
1117 return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags); in wait_for_reconnect()
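A note on units for this wait: dead_conn_timeout is kept in jiffies.

	/* NBD_ATTR_DEAD_CONN_TIMEOUT arrives in seconds over netlink and is
	 * multiplied by HZ (lines 2163-2165 below), e.g. 120 -> 120 * HZ.
	 * A value of zero makes wait_for_reconnect() give up immediately,
	 * so I/O errors out as soon as the last connection dies.
	 */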
1123 struct nbd_device *nbd = cmd->nbd; in nbd_handle_cmd()
1124 struct nbd_config *config; in nbd_handle_cmd() local
1128 lockdep_assert_held(&cmd->lock); in nbd_handle_cmd()
1130 config = nbd_get_config_unlocked(nbd); in nbd_handle_cmd()
1131 if (!config) { in nbd_handle_cmd()
1132 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
1137 if (index >= config->num_connections) { in nbd_handle_cmd()
1138 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
1143 cmd->status = BLK_STS_OK; in nbd_handle_cmd()
1145 nsock = config->socks[index]; in nbd_handle_cmd()
1146 mutex_lock(&nsock->tx_lock); in nbd_handle_cmd()
1147 if (nsock->dead) { in nbd_handle_cmd()
1150 mutex_unlock(&nsock->tx_lock); in nbd_handle_cmd()
1175 if (unlikely(nsock->pending && nsock->pending != req)) { in nbd_handle_cmd()
1182 mutex_unlock(&nsock->tx_lock); in nbd_handle_cmd()
1190 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); in nbd_queue_rq()
1196 * before we are done doing our send. This keeps us from dereferencing in nbd_queue_rq()
1202 mutex_lock(&cmd->lock); in nbd_queue_rq()
1203 clear_bit(NBD_CMD_REQUEUED, &cmd->flags); in nbd_queue_rq()
1210 ret = nbd_handle_cmd(cmd, hctx->queue_num); in nbd_queue_rq()
1211 mutex_unlock(&cmd->lock); in nbd_queue_rq()
1226 if (!sk_is_tcp(sock->sk) && in nbd_get_socket()
1227 !sk_is_stream_unix(sock->sk)) { in nbd_get_socket()
1228 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: should be TCP or UNIX.\n"); in nbd_get_socket()
1229 *err = -EINVAL; in nbd_get_socket()
1234 if (sock->ops->shutdown == sock_no_shutdown) { in nbd_get_socket()
1235 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n"); in nbd_get_socket()
1236 *err = -EINVAL; in nbd_get_socket()
1247 struct nbd_config *config = nbd->config; in nbd_add_socket() local
1256 return -EINVAL; in nbd_add_socket()
1263 * reallocating the ->socks array. in nbd_add_socket()
1265 memflags = blk_mq_freeze_queue(nbd->disk->queue); in nbd_add_socket()
1267 if (!netlink && !nbd->task_setup && in nbd_add_socket()
1268 !test_bit(NBD_RT_BOUND, &config->runtime_flags)) in nbd_add_socket()
1269 nbd->task_setup = current; in nbd_add_socket()
1272 (nbd->task_setup != current || in nbd_add_socket()
1273 test_bit(NBD_RT_BOUND, &config->runtime_flags))) { in nbd_add_socket()
1274 dev_err(disk_to_dev(nbd->disk), in nbd_add_socket()
1276 err = -EBUSY; in nbd_add_socket()
1282 err = -ENOMEM; in nbd_add_socket()
1286 socks = krealloc(config->socks, (config->num_connections + 1) * in nbd_add_socket()
1290 err = -ENOMEM; in nbd_add_socket()
1294 config->socks = socks; in nbd_add_socket()
1296 nsock->fallback_index = -1; in nbd_add_socket()
1297 nsock->dead = false; in nbd_add_socket()
1298 mutex_init(&nsock->tx_lock); in nbd_add_socket()
1299 nsock->sock = sock; in nbd_add_socket()
1300 nsock->pending = NULL; in nbd_add_socket()
1301 nsock->sent = 0; in nbd_add_socket()
1302 nsock->cookie = 0; in nbd_add_socket()
1303 INIT_WORK(&nsock->work, nbd_pending_cmd_work); in nbd_add_socket()
1304 socks[config->num_connections++] = nsock; in nbd_add_socket()
1305 atomic_inc(&config->live_connections); in nbd_add_socket()
1306 blk_mq_unfreeze_queue(nbd->disk->queue, memflags); in nbd_add_socket()
1311 blk_mq_unfreeze_queue(nbd->disk->queue, memflags); in nbd_add_socket()
1318 struct nbd_config *config = nbd->config; in nbd_reconnect_socket() local
1331 return -ENOMEM; in nbd_reconnect_socket()
1334 for (i = 0; i < config->num_connections; i++) { in nbd_reconnect_socket()
1335 struct nbd_sock *nsock = config->socks[i]; in nbd_reconnect_socket()
1337 if (!nsock->dead) in nbd_reconnect_socket()
1340 mutex_lock(&nsock->tx_lock); in nbd_reconnect_socket()
1341 if (!nsock->dead) { in nbd_reconnect_socket()
1342 mutex_unlock(&nsock->tx_lock); in nbd_reconnect_socket()
1345 sk_set_memalloc(sock->sk); in nbd_reconnect_socket()
1346 if (nbd->tag_set.timeout) in nbd_reconnect_socket()
1347 sock->sk->sk_sndtimeo = nbd->tag_set.timeout; in nbd_reconnect_socket()
1348 atomic_inc(&config->recv_threads); in nbd_reconnect_socket()
1349 refcount_inc(&nbd->config_refs); in nbd_reconnect_socket()
1350 old = nsock->sock; in nbd_reconnect_socket()
1351 nsock->fallback_index = -1; in nbd_reconnect_socket()
1352 nsock->sock = sock; in nbd_reconnect_socket()
1353 nsock->dead = false; in nbd_reconnect_socket()
1354 INIT_WORK(&args->work, recv_work); in nbd_reconnect_socket()
1355 args->index = i; in nbd_reconnect_socket()
1356 args->nbd = nbd; in nbd_reconnect_socket()
1357 args->nsock = nsock; in nbd_reconnect_socket()
1358 nsock->cookie++; in nbd_reconnect_socket()
1359 mutex_unlock(&nsock->tx_lock); in nbd_reconnect_socket()
1362 clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags); in nbd_reconnect_socket()
1367 queue_work(nbd->recv_workq, &args->work); in nbd_reconnect_socket()
1369 atomic_inc(&config->live_connections); in nbd_reconnect_socket()
1370 wake_up(&config->conn_wait); in nbd_reconnect_socket()
1375 return -ENOSPC; in nbd_reconnect_socket()
1380 if (disk_openers(nbd->disk) > 1) in nbd_bdev_reset()
1382 set_capacity(nbd->disk, 0); in nbd_bdev_reset()
1387 if (nbd->config->flags & NBD_FLAG_READ_ONLY) in nbd_parse_flags()
1388 set_disk_ro(nbd->disk, true); in nbd_parse_flags()
1390 set_disk_ro(nbd->disk, false); in nbd_parse_flags()
1395 struct nbd_config *config = nbd->config; in send_disconnects() local
1404 for (i = 0; i < config->num_connections; i++) { in send_disconnects()
1405 struct nbd_sock *nsock = config->socks[i]; in send_disconnects()
1408 mutex_lock(&nsock->tx_lock); in send_disconnects()
1411 dev_err(disk_to_dev(nbd->disk), in send_disconnects()
1413 mutex_unlock(&nsock->tx_lock); in send_disconnects()
1419 struct nbd_config *config = nbd->config; in nbd_disconnect() local
1421 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); in nbd_disconnect()
1422 set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags); in nbd_disconnect()
1423 set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags); in nbd_disconnect()
1432 nbd->task_setup = NULL; in nbd_clear_sock()
1437 if (refcount_dec_and_mutex_lock(&nbd->config_refs, in nbd_config_put()
1438 &nbd->config_lock)) { in nbd_config_put()
1439 struct nbd_config *config = nbd->config; in nbd_config_put() local
1441 invalidate_disk(nbd->disk); in nbd_config_put()
1442 if (nbd->config->bytesize) in nbd_config_put()
1443 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); in nbd_config_put()
1445 &config->runtime_flags)) in nbd_config_put()
1446 device_remove_file(disk_to_dev(nbd->disk), &pid_attr); in nbd_config_put()
1447 nbd->pid = 0; in nbd_config_put()
1449 &config->runtime_flags)) { in nbd_config_put()
1450 device_remove_file(disk_to_dev(nbd->disk), &backend_attr); in nbd_config_put()
1451 kfree(nbd->backend); in nbd_config_put()
1452 nbd->backend = NULL; in nbd_config_put()
1455 if (config->num_connections) { in nbd_config_put()
1457 for (i = 0; i < config->num_connections; i++) { in nbd_config_put()
1458 sockfd_put(config->socks[i]->sock); in nbd_config_put()
1459 kfree(config->socks[i]); in nbd_config_put()
1461 kfree(config->socks); in nbd_config_put()
1463 kfree(nbd->config); in nbd_config_put()
1464 nbd->config = NULL; in nbd_config_put()
1466 nbd->tag_set.timeout = 0; in nbd_config_put()
1468 mutex_unlock(&nbd->config_lock); in nbd_config_put()
1476 struct nbd_config *config = nbd->config; in nbd_start_device() local
1477 int num_connections = config->num_connections; in nbd_start_device()
1480 if (nbd->pid) in nbd_start_device()
1481 return -EBUSY; in nbd_start_device()
1482 if (!config->socks) in nbd_start_device()
1483 return -EINVAL; in nbd_start_device()
1485 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) { in nbd_start_device()
1486 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n"); in nbd_start_device()
1487 return -EINVAL; in nbd_start_device()
1491 mutex_unlock(&nbd->config_lock); in nbd_start_device()
1492 blk_mq_update_nr_hw_queues(&nbd->tag_set, num_connections); in nbd_start_device()
1493 mutex_lock(&nbd->config_lock); in nbd_start_device()
1496 if (num_connections != config->num_connections) { in nbd_start_device()
1497 num_connections = config->num_connections; in nbd_start_device()
1501 nbd->pid = task_pid_nr(current); in nbd_start_device()
1505 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr); in nbd_start_device()
1507 dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n"); in nbd_start_device()
1510 set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags); in nbd_start_device()
1528 flush_workqueue(nbd->recv_workq); in nbd_start_device()
1529 return -ENOMEM; in nbd_start_device()
1531 sk_set_memalloc(config->socks[i]->sock->sk); in nbd_start_device()
1532 if (nbd->tag_set.timeout) in nbd_start_device()
1533 config->socks[i]->sock->sk->sk_sndtimeo = in nbd_start_device()
1534 nbd->tag_set.timeout; in nbd_start_device()
1535 atomic_inc(&config->recv_threads); in nbd_start_device()
1536 refcount_inc(&nbd->config_refs); in nbd_start_device()
1537 INIT_WORK(&args->work, recv_work); in nbd_start_device()
1538 args->nbd = nbd; in nbd_start_device()
1539 args->nsock = config->socks[i]; in nbd_start_device()
1540 args->index = i; in nbd_start_device()
1541 queue_work(nbd->recv_workq, &args->work); in nbd_start_device()
1543 return nbd_set_size(nbd, config->bytesize, nbd_blksize(config)); in nbd_start_device()
1548 struct nbd_config *config = nbd->config; in nbd_start_device_ioctl() local
1556 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); in nbd_start_device_ioctl()
1557 mutex_unlock(&nbd->config_lock); in nbd_start_device_ioctl()
1558 ret = wait_event_interruptible(config->recv_wq, in nbd_start_device_ioctl()
1559 atomic_read(&config->recv_threads) == 0); in nbd_start_device_ioctl()
1565 flush_workqueue(nbd->recv_workq); in nbd_start_device_ioctl()
1566 mutex_lock(&nbd->config_lock); in nbd_start_device_ioctl()
1569 if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags)) in nbd_start_device_ioctl()
1571 if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags)) in nbd_start_device_ioctl()
1572 ret = -ETIMEDOUT; in nbd_start_device_ioctl()
1579 disk_force_media_change(nbd->disk); in nbd_clear_sock_ioctl()
1582 &nbd->config->runtime_flags)) in nbd_clear_sock_ioctl()
1586 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout) in nbd_set_cmd_timeout() argument
1588 nbd->tag_set.timeout = timeout * HZ; in nbd_set_cmd_timeout()
1589 if (timeout) in nbd_set_cmd_timeout()
1590 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); in nbd_set_cmd_timeout()
1592 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ); in nbd_set_cmd_timeout()
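A hypothetical userspace sketch of driving this path through the legacy ioctl interface; NBD_SET_TIMEOUT takes seconds, and the helper name and error handling here are illustrative only:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/nbd.h>

	/* With secs == 0 the driver keeps a 30s block-layer timer but only
	 * warns and restarts it instead of killing the socket (lines 514-524
	 * above). */
	static int set_nbd_timeout(const char *dev, unsigned long secs)
	{
		int fd = open(dev, O_RDWR);
		if (fd < 0)
			return -1;
		int ret = ioctl(fd, NBD_SET_TIMEOUT, secs);	/* e.g. 60 */
		close(fd);
		return ret;
	}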
1599 struct nbd_config *config = nbd->config; in __nbd_ioctl() local
1611 return nbd_set_size(nbd, config->bytesize, arg); in __nbd_ioctl()
1613 return nbd_set_size(nbd, arg, nbd_blksize(config)); in __nbd_ioctl()
1615 if (check_shl_overflow(arg, config->blksize_bits, &bytesize)) in __nbd_ioctl()
1616 return -EINVAL; in __nbd_ioctl()
1617 return nbd_set_size(nbd, bytesize, nbd_blksize(config)); in __nbd_ioctl()
1623 config->flags = arg; in __nbd_ioctl()
1640 return -ENOTTY; in __nbd_ioctl()
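The three size arms above (lines 1611-1617) presumably correspond to NBD_SET_BLKSIZE, NBD_SET_SIZE and NBD_SET_SIZE_BLOCKS; the case labels themselves did not match the search. A worked example at a 4096-byte block size:

	/* Assuming blksize_bits == 12 (4096-byte blocks):
	 *   NBD_SET_BLKSIZE     arg = 4096   -> new blksize, bytesize kept
	 *   NBD_SET_SIZE        arg = bytes  -> new bytesize, blksize kept
	 *   NBD_SET_SIZE_BLOCKS arg = 262144 -> bytesize = 262144 << 12 = 1 GiB
	 */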
1646 struct nbd_device *nbd = bdev->bd_disk->private_data; in nbd_ioctl()
1647 struct nbd_config *config = nbd->config; in nbd_ioctl() local
1648 int error = -EINVAL; in nbd_ioctl()
1651 return -EPERM; in nbd_ioctl()
1653 /* The block layer will pass back some non-nbd ioctls in case we have in nbd_ioctl()
1657 return -EINVAL; in nbd_ioctl()
1659 mutex_lock(&nbd->config_lock); in nbd_ioctl()
1664 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) || in nbd_ioctl()
1669 mutex_unlock(&nbd->config_lock); in nbd_ioctl()
1675 struct nbd_config *config; in nbd_alloc_and_init_config() local
1677 if (WARN_ON(nbd->config)) in nbd_alloc_and_init_config()
1678 return -EINVAL; in nbd_alloc_and_init_config()
1681 return -ENODEV; in nbd_alloc_and_init_config()
1683 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS); in nbd_alloc_and_init_config()
1684 if (!config) { in nbd_alloc_and_init_config()
1686 return -ENOMEM; in nbd_alloc_and_init_config()
1689 atomic_set(&config->recv_threads, 0); in nbd_alloc_and_init_config()
1690 init_waitqueue_head(&config->recv_wq); in nbd_alloc_and_init_config()
1691 init_waitqueue_head(&config->conn_wait); in nbd_alloc_and_init_config()
1692 config->blksize_bits = NBD_DEF_BLKSIZE_BITS; in nbd_alloc_and_init_config()
1693 atomic_set(&config->live_connections, 0); in nbd_alloc_and_init_config()
1695 nbd->config = config; in nbd_alloc_and_init_config()
1697 * Order refcount_set(&nbd->config_refs, 1) and nbd->config assignment, in nbd_alloc_and_init_config()
1699 * So nbd_get_config_unlocked() won't see nbd->config as null after in nbd_alloc_and_init_config()
1703 refcount_set(&nbd->config_refs, 1); in nbd_alloc_and_init_config()
1711 struct nbd_config *config; in nbd_open() local
1715 nbd = disk->private_data; in nbd_open()
1717 ret = -ENXIO; in nbd_open()
1720 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_open()
1721 ret = -ENXIO; in nbd_open()
1725 config = nbd_get_config_unlocked(nbd); in nbd_open()
1726 if (!config) { in nbd_open()
1727 mutex_lock(&nbd->config_lock); in nbd_open()
1728 if (refcount_inc_not_zero(&nbd->config_refs)) { in nbd_open()
1729 mutex_unlock(&nbd->config_lock); in nbd_open()
1734 mutex_unlock(&nbd->config_lock); in nbd_open()
1738 refcount_inc(&nbd->refs); in nbd_open()
1739 mutex_unlock(&nbd->config_lock); in nbd_open()
1741 set_bit(GD_NEED_PART_SCAN, &disk->state); in nbd_open()
1742 } else if (nbd_disconnected(config)) { in nbd_open()
1744 set_bit(GD_NEED_PART_SCAN, &disk->state); in nbd_open()
1753 struct nbd_device *nbd = disk->private_data; in nbd_release()
1755 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && in nbd_release()
1765 struct nbd_device *nbd = disk->private_data; in nbd_free_disk()
1784 struct nbd_device *nbd = s->private; in nbd_dbg_tasks_show()
1786 if (nbd->pid) in nbd_dbg_tasks_show()
1787 seq_printf(s, "recv: %d\n", nbd->pid); in nbd_dbg_tasks_show()
1796 struct nbd_device *nbd = s->private; in nbd_dbg_flags_show()
1797 u32 flags = nbd->config->flags; in nbd_dbg_flags_show()
1826 struct nbd_config *config = nbd->config; in nbd_dev_dbg_init() local
1829 return -EIO; in nbd_dev_dbg_init()
1835 return -EIO; in nbd_dev_dbg_init()
1837 config->dbg_dir = dir; in nbd_dev_dbg_init()
1840 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize); in nbd_dev_dbg_init()
1841 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); in nbd_dev_dbg_init()
1842 debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits); in nbd_dev_dbg_init()
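A sketch of the resulting debugfs layout for device 0, assuming debugfs is mounted at /sys/kernel/debug and the per-device directory is named after the disk:

	/* /sys/kernel/debug/nbd/nbd0/tasks           - recv pid (nbd_dbg_tasks_show)
	 * /sys/kernel/debug/nbd/nbd0/flags           - decoded server flags
	 * /sys/kernel/debug/nbd/nbd0/size_bytes
	 * /sys/kernel/debug/nbd/nbd0/timeout
	 * /sys/kernel/debug/nbd/nbd0/blocksize_bits
	 */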
1850 debugfs_remove_recursive(nbd->config->dbg_dir); in nbd_dev_dbg_close()
1859 return -EIO; in nbd_dbg_init()
1897 cmd->nbd = set->driver_data; in nbd_init_request()
1898 cmd->flags = 0; in nbd_init_request()
1899 mutex_init(&cmd->lock); in nbd_init_request()
1905 .complete = nbd_complete_rq,
1907 .timeout = nbd_xmit_timeout,
1920 int err = -ENOMEM; in nbd_dev_add()
1926 nbd->tag_set.ops = &nbd_mq_ops; in nbd_dev_add()
1927 nbd->tag_set.nr_hw_queues = 1; in nbd_dev_add()
1928 nbd->tag_set.queue_depth = 128; in nbd_dev_add()
1929 nbd->tag_set.numa_node = NUMA_NO_NODE; in nbd_dev_add()
1930 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); in nbd_dev_add()
1931 nbd->tag_set.flags = BLK_MQ_F_BLOCKING; in nbd_dev_add()
1932 nbd->tag_set.driver_data = nbd; in nbd_dev_add()
1933 INIT_WORK(&nbd->remove_work, nbd_dev_remove_work); in nbd_dev_add()
1934 nbd->backend = NULL; in nbd_dev_add()
1936 err = blk_mq_alloc_tag_set(&nbd->tag_set); in nbd_dev_add()
1944 if (err == -ENOSPC) in nbd_dev_add()
1945 err = -EEXIST; in nbd_dev_add()
1952 nbd->index = index; in nbd_dev_add()
1957 disk = blk_mq_alloc_disk(&nbd->tag_set, &lim, NULL); in nbd_dev_add()
1962 nbd->disk = disk; in nbd_dev_add()
1964 nbd->recv_workq = alloc_workqueue("nbd%d-recv", in nbd_dev_add()
1966 WQ_UNBOUND, 0, nbd->index); in nbd_dev_add()
1967 if (!nbd->recv_workq) { in nbd_dev_add()
1968 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n"); in nbd_dev_add()
1969 err = -ENOMEM; in nbd_dev_add()
1973 mutex_init(&nbd->config_lock); in nbd_dev_add()
1974 refcount_set(&nbd->config_refs, 0); in nbd_dev_add()
1979 refcount_set(&nbd->refs, 0); in nbd_dev_add()
1980 INIT_LIST_HEAD(&nbd->list); in nbd_dev_add()
1981 disk->major = NBD_MAJOR; in nbd_dev_add()
1982 disk->first_minor = index << part_shift; in nbd_dev_add()
1983 disk->minors = 1 << part_shift; in nbd_dev_add()
1984 disk->fops = &nbd_fops; in nbd_dev_add()
1985 disk->private_data = nbd; in nbd_dev_add()
1986 sprintf(disk->disk_name, "nbd%d", index); in nbd_dev_add()
1994 refcount_set(&nbd->refs, refs); in nbd_dev_add()
1999 destroy_workqueue(nbd->recv_workq); in nbd_dev_add()
2007 blk_mq_free_tag_set(&nbd->tag_set); in nbd_dev_add()
2022 if (refcount_read(&nbd->config_refs) || in nbd_find_get_unused()
2023 test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags)) in nbd_find_get_unused()
2025 if (refcount_inc_not_zero(&nbd->refs)) in nbd_find_get_unused()
2061 struct nbd_config *config = nbd->config; in nbd_genl_size_set() local
2062 u64 bsize = nbd_blksize(config); in nbd_genl_size_set()
2063 u64 bytes = config->bytesize; in nbd_genl_size_set()
2065 if (info->attrs[NBD_ATTR_SIZE_BYTES]) in nbd_genl_size_set()
2066 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]); in nbd_genl_size_set()
2068 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) in nbd_genl_size_set()
2069 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]); in nbd_genl_size_set()
2071 if (bytes != config->bytesize || bsize != nbd_blksize(config)) in nbd_genl_size_set()
2079 struct nbd_config *config; in nbd_genl_connect() local
2080 int index = -1; in nbd_genl_connect()
2085 return -EPERM; in nbd_genl_connect()
2087 if (info->attrs[NBD_ATTR_INDEX]) { in nbd_genl_connect()
2088 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); in nbd_genl_connect()
2097 return -EINVAL; in nbd_genl_connect()
2102 return -EINVAL; in nbd_genl_connect()
2106 return -EINVAL; in nbd_genl_connect()
2110 if (index == -1) { in nbd_genl_connect()
2115 if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && in nbd_genl_connect()
2116 test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) || in nbd_genl_connect()
2117 !refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_connect()
2121 return -EINVAL; in nbd_genl_connect()
2135 mutex_lock(&nbd->config_lock); in nbd_genl_connect()
2136 if (refcount_read(&nbd->config_refs)) { in nbd_genl_connect()
2137 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
2139 if (index == -1) in nbd_genl_connect()
2142 return -EBUSY; in nbd_genl_connect()
2147 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
2149 pr_err("couldn't allocate config\n"); in nbd_genl_connect()
2153 config = nbd->config; in nbd_genl_connect()
2154 set_bit(NBD_RT_BOUND, &config->runtime_flags); in nbd_genl_connect()
2159 if (info->attrs[NBD_ATTR_TIMEOUT]) in nbd_genl_connect()
2161 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT])); in nbd_genl_connect()
2162 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) { in nbd_genl_connect()
2163 config->dead_conn_timeout = in nbd_genl_connect()
2164 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]); in nbd_genl_connect()
2165 config->dead_conn_timeout *= HZ; in nbd_genl_connect()
2167 if (info->attrs[NBD_ATTR_SERVER_FLAGS]) in nbd_genl_connect()
2168 config->flags = in nbd_genl_connect()
2169 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]); in nbd_genl_connect()
2170 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) { in nbd_genl_connect()
2171 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); in nbd_genl_connect()
2176 * inherited by the config. If we already have in nbd_genl_connect()
2182 &nbd->flags)) in nbd_genl_connect()
2186 &nbd->flags)) in nbd_genl_connect()
2187 refcount_inc(&nbd->refs); in nbd_genl_connect()
2191 &config->runtime_flags); in nbd_genl_connect()
2195 if (info->attrs[NBD_ATTR_SOCKETS]) { in nbd_genl_connect()
2199 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS], in nbd_genl_connect()
2205 ret = -EINVAL; in nbd_genl_connect()
2211 info->extack); in nbd_genl_connect()
2214 ret = -EINVAL; in nbd_genl_connect()
2226 if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) { in nbd_genl_connect()
2227 nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER], in nbd_genl_connect()
2229 if (!nbd->backend) { in nbd_genl_connect()
2230 ret = -ENOMEM; in nbd_genl_connect()
2234 ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr); in nbd_genl_connect()
2236 dev_err(disk_to_dev(nbd->disk), in nbd_genl_connect()
2240 set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags); in nbd_genl_connect()
2244 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
2246 set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags); in nbd_genl_connect()
2247 refcount_inc(&nbd->config_refs); in nbd_genl_connect()
2248 nbd_connect_reply(info, nbd->index); in nbd_genl_connect()
2258 mutex_lock(&nbd->config_lock); in nbd_disconnect_and_put()
2261 wake_up(&nbd->config->conn_wait); in nbd_disconnect_and_put()
2266 flush_workqueue(nbd->recv_workq); in nbd_disconnect_and_put()
2268 nbd->task_setup = NULL; in nbd_disconnect_and_put()
2269 clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags); in nbd_disconnect_and_put()
2270 mutex_unlock(&nbd->config_lock); in nbd_disconnect_and_put()
2273 &nbd->config->runtime_flags)) in nbd_disconnect_and_put()
2283 return -EPERM; in nbd_genl_disconnect()
2287 return -EINVAL; in nbd_genl_disconnect()
2289 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); in nbd_genl_disconnect()
2295 return -EINVAL; in nbd_genl_disconnect()
2297 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_disconnect()
2300 return -EINVAL; in nbd_genl_disconnect()
2303 if (!refcount_inc_not_zero(&nbd->config_refs)) in nbd_genl_disconnect()
2315 struct nbd_config *config; in nbd_genl_reconfigure() local
2321 return -EPERM; in nbd_genl_reconfigure()
2325 return -EINVAL; in nbd_genl_reconfigure()
2327 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); in nbd_genl_reconfigure()
2333 return -EINVAL; in nbd_genl_reconfigure()
2335 if (nbd->backend) { in nbd_genl_reconfigure()
2336 if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) { in nbd_genl_reconfigure()
2337 if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER], in nbd_genl_reconfigure()
2338 nbd->backend)) { in nbd_genl_reconfigure()
2342 nbd->backend); in nbd_genl_reconfigure()
2343 return -EINVAL; in nbd_genl_reconfigure()
2348 return -EINVAL; in nbd_genl_reconfigure()
2351 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_reconfigure()
2354 return -EINVAL; in nbd_genl_reconfigure()
2358 config = nbd_get_config_unlocked(nbd); in nbd_genl_reconfigure()
2359 if (!config) { in nbd_genl_reconfigure()
2363 return -EINVAL; in nbd_genl_reconfigure()
2366 mutex_lock(&nbd->config_lock); in nbd_genl_reconfigure()
2367 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) || in nbd_genl_reconfigure()
2368 !nbd->pid) { in nbd_genl_reconfigure()
2371 ret = -EINVAL; in nbd_genl_reconfigure()
2379 if (info->attrs[NBD_ATTR_TIMEOUT]) in nbd_genl_reconfigure()
2381 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT])); in nbd_genl_reconfigure()
2382 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) { in nbd_genl_reconfigure()
2383 config->dead_conn_timeout = in nbd_genl_reconfigure()
2384 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]); in nbd_genl_reconfigure()
2385 config->dead_conn_timeout *= HZ; in nbd_genl_reconfigure()
2387 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) { in nbd_genl_reconfigure()
2388 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); in nbd_genl_reconfigure()
2391 &nbd->flags)) in nbd_genl_reconfigure()
2395 &nbd->flags)) in nbd_genl_reconfigure()
2396 refcount_inc(&nbd->refs); in nbd_genl_reconfigure()
2401 &config->runtime_flags); in nbd_genl_reconfigure()
2404 &config->runtime_flags); in nbd_genl_reconfigure()
2408 if (info->attrs[NBD_ATTR_SOCKETS]) { in nbd_genl_reconfigure()
2412 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS], in nbd_genl_reconfigure()
2418 ret = -EINVAL; in nbd_genl_reconfigure()
2424 info->extack); in nbd_genl_reconfigure()
2427 ret = -EINVAL; in nbd_genl_reconfigure()
2435 if (ret == -ENOSPC) in nbd_genl_reconfigure()
2443 mutex_unlock(&nbd->config_lock); in nbd_genl_reconfigure()
2502 * take a ref in the index == -1 case as we would need in populate_nbd_status()
2507 if (refcount_read(&nbd->config_refs)) in populate_nbd_status()
2511 return -EMSGSIZE; in populate_nbd_status()
2512 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index); in populate_nbd_status()
2514 return -EMSGSIZE; in populate_nbd_status()
2518 return -EMSGSIZE; in populate_nbd_status()
2535 int index = -1; in nbd_genl_status()
2536 int ret = -ENOMEM; in nbd_genl_status()
2538 if (info->attrs[NBD_ATTR_INDEX]) in nbd_genl_status()
2539 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); in nbd_genl_status()
2545 msg_size *= (index == -1) ? nbd_total_devices : 1; in nbd_genl_status()
2560 ret = -EMSGSIZE; in nbd_genl_status()
2564 if (index == -1) { in nbd_genl_status()
2641 nbd_mcast_index(args->index); in nbd_dead_link_work()
2653 return -EINVAL; in nbd_init()
2665 * Note that -1 is required because partition 0 is reserved in nbd_init()
2668 max_part = (1UL << part_shift) - 1; in nbd_init()
2672 return -EINVAL; in nbd_init()
2674 if (nbds_max > 1UL << (MINORBITS - part_shift)) in nbd_init()
2675 return -EINVAL; in nbd_init()
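A worked example of the minor-number budget (MINORBITS is 20 on Linux): with part_shift = 5,

	/*   max_part  = (1UL << 5) - 1   = 31 partitions per device
	 *   nbds_max <= 1UL << (20 - 5)  = 32768 devices
	 * and each disk then gets first_minor = index << 5, minors = 1 << 5
	 * (lines 1982-1983 above).
	 */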
2678 return -EIO; in nbd_init()
2680 nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0); in nbd_init()
2683 return -ENOMEM; in nbd_init()
2690 return -ENOMEM; in nbd_init()
2697 return -EINVAL; in nbd_init()
2712 if (refcount_read(&nbd->refs)) in nbd_exit_cb()
2713 list_add_tail(&nbd->list, list); in nbd_exit_cb()
2737 list_del_init(&nbd->list); in nbd_cleanup()
2738 if (refcount_read(&nbd->config_refs)) in nbd_cleanup()
2740 refcount_read(&nbd->config_refs)); in nbd_cleanup()
2741 if (refcount_read(&nbd->refs) != 1) in nbd_cleanup()