Lines Matching +full:static +full:- +full:config
// SPDX-License-Identifier: GPL-2.0-or-later
 * Network block device - make block devices work over TCP
 * deadlocks sometimes - you cannot swap over TCP in general.
 * Copyright 1997-2000, 2008 Pavel Machek <[email protected]>
#include <linux/blk-mq.h>
#include <linux/nbd-netlink.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
static int nbd_total_devices = 0;

static inline unsigned int nbd_blksize(struct nbd_config *config)
{
    return 1u << config->blksize_bits;
}
    struct nbd_config *config;
    pid_t pid; /* pid of nbd-client, if attached */
 * by cmd->lock.

static struct dentry *nbd_dbg_dir;

#define nbd_name(nbd) ((nbd)->disk->disk_name)

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
    return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
    lockdep_assert_held(&cmd->lock);
    __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
    if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
    u64 cookie = cmd->cmd_cookie;

static u32 nbd_handle_to_tag(u64 handle)

static u32 nbd_handle_to_cookie(u64 handle)

static const char *nbdcmd_to_ascii(int cmd)
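
/*
 * Reading of the helpers above (hedged sketch, not verbatim from this
 * file): the 64-bit handle sent on the wire appears to pack a reissue
 * cookie in the high 32 bits and the blk-mq tag in the low 32 bits,
 * so a stale reply to a requeued command can be detected. Assumed
 * layout:
 */
static u64 example_pack_handle(u32 cookie, u32 tag)
{
    return ((u64)cookie << 32) | tag;   /* cookie: bits 63..32, tag: 31..0 */
}

static u32 example_handle_to_tag(u64 handle)
{
    return (u32)handle;                 /* low 32 bits */
}

static u32 example_handle_to_cookie(u64 handle)
{
    return (u32)(handle >> 32);         /* high 32 bits */
}
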
static ssize_t pid_show(struct device *dev,
    struct nbd_device *nbd = disk->private_data;
    return sprintf(buf, "%d\n", nbd->pid);

static const struct device_attribute pid_attr = {

static ssize_t backend_show(struct device *dev,
    struct nbd_device *nbd = disk->private_data;
    return sprintf(buf, "%s\n", nbd->backend ?: "");

static const struct device_attribute backend_attr = {
static void nbd_dev_remove(struct nbd_device *nbd)
    struct gendisk *disk = nbd->disk;
    blk_mq_free_tag_set(&nbd->tag_set);
    idr_remove(&nbd_index_idr, nbd->index);
    destroy_workqueue(nbd->recv_workq);

static void nbd_dev_remove_work(struct work_struct *work)

static void nbd_put(struct nbd_device *nbd)
    if (!refcount_dec_and_test(&nbd->refs))
    if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
        queue_work(nbd_del_wq, &nbd->remove_work);
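
/*
 * Shape of the release path above, as a sketch: the final nbd_put()
 * either tears the device down directly or, for DESTROY_ON_DISCONNECT
 * devices, defers removal to the dedicated nbd_del_wq workqueue. The
 * direct-free branch below is an assumption for illustration.
 */
static void example_put(struct nbd_device *nbd)
{
    if (!refcount_dec_and_test(&nbd->refs))
        return;                         /* not the last reference */
    if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
        queue_work(nbd_del_wq, &nbd->remove_work);  /* async teardown */
    else
        nbd_dev_remove(nbd);            /* assumed synchronous path */
}
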
static int nbd_disconnected(struct nbd_config *config)
{
    return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
           test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}

static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
    if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
            INIT_WORK(&args->work, nbd_dead_link_work);
            args->index = nbd->index;
            queue_work(system_wq, &args->work);
    if (!nsock->dead) {
        kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
        if (atomic_dec_return(&nbd->config->live_connections) == 0) {
                           &nbd->config->runtime_flags)) {
                        &nbd->config->runtime_flags);
    nsock->dead = true;
    nsock->pending = NULL;
    nsock->sent = 0;

static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, loff_t blksize)
        return -EINVAL;
        return -EINVAL;
    nbd->config->bytesize = bytesize;
    nbd->config->blksize_bits = __ffs(blksize);
    if (!nbd->pid)
    lim = queue_limits_start_update(nbd->disk->queue);
    if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
    if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) {
    } else if (nbd->config->flags & NBD_FLAG_SEND_FUA) {
    if (nbd->config->flags & NBD_FLAG_ROTATIONAL)
    if (nbd->config->flags & NBD_FLAG_SEND_WRITE_ZEROES)
    error = queue_limits_commit_update_frozen(nbd->disk->queue, &lim);
    set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
    if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
        kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
static void nbd_complete_rq(struct request *req)
    dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
        cmd->status ? "failed" : "done");
    blk_mq_end_request(req, cmd->status);

static void sock_shutdown(struct nbd_device *nbd)
    struct nbd_config *config = nbd->config;
    if (config->num_connections == 0)
    if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
    for (i = 0; i < config->num_connections; i++) {
        struct nbd_sock *nsock = config->socks[i];
        mutex_lock(&nsock->tx_lock);
        mutex_unlock(&nsock->tx_lock);
    dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");

static u32 req_to_nbd_cmd_type(struct request *req)

static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
    if (refcount_inc_not_zero(&nbd->config_refs)) {
         * Add smp_mb__after_atomic() to order the read of
         * nbd->config_refs against the read of nbd->config. It pairs
         * with the barrier in nbd_alloc_and_init_config(), so the
         * refcount can never be observed as set before nbd->config is.
        return nbd->config;
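
/*
 * The comment above describes a publish/subscribe pairing with
 * nbd_alloc_and_init_config(): the writer must publish nbd->config
 * before config_refs becomes nonzero, and the lockless reader must
 * order its refcount check before loading the pointer. Sketch of both
 * halves (barrier choice mirrors the comments in this file):
 */
static void example_publish_config(struct nbd_device *nbd,
                                   struct nbd_config *config)
{
    nbd->config = config;           /* store the pointer first */
    smp_mb__before_atomic();        /* pairs with the reader below */
    refcount_set(&nbd->config_refs, 1);
}

static struct nbd_config *example_get_config(struct nbd_device *nbd)
{
    if (!refcount_inc_not_zero(&nbd->config_refs))
        return NULL;                /* no live config */
    smp_mb__after_atomic();         /* refcount read before pointer read */
    return nbd->config;             /* cannot be NULL here */
}
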
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
    struct nbd_device *nbd = cmd->nbd;
    struct nbd_config *config;
    if (!mutex_trylock(&cmd->lock))
    if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)) {
        mutex_unlock(&cmd->lock);
    if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
        mutex_unlock(&cmd->lock);
    config = nbd_get_config_unlocked(nbd);
    if (!config) {
        cmd->status = BLK_STS_TIMEOUT;
        __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
        mutex_unlock(&cmd->lock);
    if (config->num_connections > 1 ||
        (config->num_connections == 1 && nbd->tag_set.timeout)) {
            atomic_read(&config->live_connections),
            config->num_connections);
        if (config->socks) {
            if (cmd->index < config->num_connections) {
                    config->socks[cmd->index];
                mutex_lock(&nsock->tx_lock);
                if (cmd->cookie == nsock->cookie)
                mutex_unlock(&nsock->tx_lock);
        mutex_unlock(&cmd->lock);
    if (!nbd->tag_set.timeout) {
        struct nbd_sock *nsock = config->socks[cmd->index];
        cmd->retries++;
            blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
        mutex_lock(&nsock->tx_lock);
        if (cmd->cookie != nsock->cookie) {
            mutex_unlock(&nsock->tx_lock);
            mutex_unlock(&cmd->lock);
        mutex_unlock(&nsock->tx_lock);
        mutex_unlock(&cmd->lock);
    set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
    cmd->status = BLK_STS_IOERR;
    __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
    mutex_unlock(&cmd->lock);
static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
        dev_err_ratelimited(disk_to_dev(nbd->disk),
        return -EINVAL;
    sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
    sock->sk->sk_use_task_frag = false;
            result = -EPIPE; /* short read */

static int sock_xmit(struct nbd_device *nbd, int index, int send,
    struct nbd_config *config = nbd->config;
    struct socket *sock = config->socks[index]->sock;

 * Different settings for sk->sk_sndtimeo can result in different return values

static inline int was_interrupted(int result)
{
    return result == -ERESTARTSYS || result == -EINTR;
}
 * request may be re-dispatched with different tag, but our header has

static void nbd_sched_pending_work(struct nbd_device *nbd,
    WARN_ON_ONCE(test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags));
    nsock->pending = req;
    nsock->sent = sent;
    set_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
    refcount_inc(&nbd->config_refs);
    schedule_work(&nsock->work);

static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
    struct nbd_config *config = nbd->config;
    struct nbd_sock *nsock = config->socks[index];
    int sent = nsock->sent, skip = 0;
    lockdep_assert_held(&cmd->lock);
    lockdep_assert_held(&nsock->tx_lock);
        (config->flags & NBD_FLAG_READ_ONLY)) {
        dev_err_ratelimited(disk_to_dev(nbd->disk),
                    "Write on read-only\n");
    if (req->cmd_flags & REQ_FUA)
    if ((req->cmd_flags & REQ_NOUNMAP) && (type == NBD_CMD_WRITE_ZEROES))
        skip = sent - sizeof(request);
    cmd->cmd_cookie++;
    cmd->index = index;
    cmd->cookie = nsock->cookie;
    cmd->retries = 0;
    trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
            set_bit(NBD_CMD_REQUEUED, &cmd->flags);
        dev_err_ratelimited(disk_to_dev(nbd->disk),
    bio = req->bio;
        struct bio *next = bio->bi_next;
                skip -= iov_iter_count(&from);
                dev_err(disk_to_dev(nbd->disk),
                 * the iterator do it. This prevents use-after-free
    nsock->pending = NULL;
    nsock->sent = 0;
    __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
    if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags))
    dev_err_ratelimited(disk_to_dev(nbd->disk),
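
/*
 * Context for the send path above: an NBD request is a fixed 28-byte
 * big-endian header, optionally followed by write payload. Protocol-
 * level sketch (field names approximate, not this file's struct):
 */
struct example_nbd_request {
    __be32 magic;   /* NBD_REQUEST_MAGIC (0x25609513) */
    __be32 type;    /* NBD_CMD_READ, NBD_CMD_WRITE, NBD_CMD_DISC, ... */
    __be64 cookie;  /* packed cookie+tag handle, echoed in the reply */
    __be64 from;    /* byte offset into the export */
    __be32 len;     /* I/O length in bytes */
} __attribute__((packed));
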
static void nbd_pending_cmd_work(struct work_struct *work)
    struct request *req = nsock->pending;
    struct nbd_device *nbd = cmd->nbd;
    unsigned long deadline = READ_ONCE(req->deadline);
    mutex_lock(&cmd->lock);
    WARN_ON_ONCE(test_bit(NBD_CMD_REQUEUED, &cmd->flags));
    if (WARN_ON_ONCE(!test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)))
    mutex_lock(&nsock->tx_lock);
        nbd_send_cmd(nbd, cmd, cmd->index);
        if (!nsock->pending)
    cmd->status = BLK_STS_IOERR;
    mutex_unlock(&nsock->tx_lock);
    clear_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
    mutex_unlock(&cmd->lock);

static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
    reply->magic = 0;
        if (!nbd_disconnected(nbd->config))
            dev_err(disk_to_dev(nbd->disk),
    if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
        dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
            (unsigned long)ntohl(reply->magic));
        return -EPROTO;
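
/*
 * Matching sketch of the simple reply header validated above: 16 bytes,
 * big-endian; magic must be NBD_REPLY_MAGIC (0x67446698) and cookie
 * echoes the request handle. Field names approximate.
 */
struct example_nbd_reply {
    __be32 magic;   /* NBD_REPLY_MAGIC */
    __be32 error;   /* 0 on success, errno-style code otherwise */
    __be64 cookie;  /* handle copied from the matching request */
} __attribute__((packed));
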
static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
    handle = be64_to_cpu(reply->cookie);
    if (hwq < nbd->tag_set.nr_hw_queues)
        req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
        dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
        return ERR_PTR(-ENOENT);
    mutex_lock(&cmd->lock);
    if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
        dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
            tag, cmd->status, cmd->flags);
        ret = -ENOENT;
    if (cmd->index != index) {
        dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
            tag, index, cmd->index);
        ret = -ENOENT;
    if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
        dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
            req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
        ret = -ENOENT;
    if (cmd->status != BLK_STS_OK) {
        dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
        ret = -ENOENT;
    if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
        dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
        ret = -ENOENT;
    if (ntohl(reply->error)) {
        dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
            ntohl(reply->error));
        cmd->status = BLK_STS_IOERR;
        dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
        if (nbd_disconnected(nbd->config)) {
            cmd->status = BLK_STS_IOERR;
        ret = -EIO;
    mutex_unlock(&cmd->lock);
static void recv_work(struct work_struct *work)
    struct nbd_device *nbd = args->nbd;
    struct nbd_config *config = nbd->config;
    struct request_queue *q = nbd->disk->queue;
    struct nbd_sock *nsock = args->nsock;
        if (nbd_read_reply(nbd, nsock->sock, &reply))
         * request use-after-free is possible during nbd_handle_reply().
        if (!percpu_ref_tryget(&q->q_usage_counter)) {
            dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
        cmd = nbd_handle_reply(nbd, args->index, &reply);
            percpu_ref_put(&q->q_usage_counter);
        if (likely(!blk_should_fake_timeout(rq->q))) {
            mutex_lock(&cmd->lock);
                &cmd->flags);
            mutex_unlock(&cmd->lock);
        percpu_ref_put(&q->q_usage_counter);
    mutex_lock(&nsock->tx_lock);
    mutex_unlock(&nsock->tx_lock);
    atomic_dec(&config->recv_threads);
    wake_up(&config->recv_wq);

static bool nbd_clear_req(struct request *req, void *data)
    mutex_lock(&cmd->lock);
    if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
        mutex_unlock(&cmd->lock);
    cmd->status = BLK_STS_IOERR;
    mutex_unlock(&cmd->lock);

static void nbd_clear_que(struct nbd_device *nbd)
{
    blk_mq_quiesce_queue(nbd->disk->queue);
    blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
    blk_mq_unquiesce_queue(nbd->disk->queue);
    dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
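
/*
 * The abort-everything pattern above, in isolation: quiesce the queue
 * so no new ->queue_rq() races in, then walk every in-flight request
 * with blk_mq_tagset_busy_iter() and fail it. Sketch of a callback in
 * the same shape as nbd_clear_req():
 */
static bool example_cancel_rq(struct request *req, void *data)
{
    struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

    mutex_lock(&cmd->lock);
    if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
        mutex_unlock(&cmd->lock);
        return true;                /* already completed elsewhere */
    }
    cmd->status = BLK_STS_IOERR;
    mutex_unlock(&cmd->lock);
    blk_mq_complete_request(req);
    return true;                    /* keep iterating */
}
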
static int find_fallback(struct nbd_device *nbd, int index)
    struct nbd_config *config = nbd->config;
    int new_index = -1;
    struct nbd_sock *nsock = config->socks[index];
    int fallback = nsock->fallback_index;
    if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
    if (config->num_connections <= 1) {
        dev_err_ratelimited(disk_to_dev(nbd->disk),
    if (fallback >= 0 && fallback < config->num_connections &&
        !config->socks[fallback]->dead)
    if (nsock->fallback_index < 0 ||
        nsock->fallback_index >= config->num_connections ||
        config->socks[nsock->fallback_index]->dead) {
        for (i = 0; i < config->num_connections; i++) {
            if (!config->socks[i]->dead) {
        nsock->fallback_index = new_index;
        dev_err_ratelimited(disk_to_dev(nbd->disk),
    new_index = nsock->fallback_index;

static int wait_for_reconnect(struct nbd_device *nbd)
    struct nbd_config *config = nbd->config;
    if (!config->dead_conn_timeout)
    if (!wait_event_timeout(config->conn_wait,
                &config->runtime_flags) ||
                atomic_read(&config->live_connections) > 0,
                config->dead_conn_timeout))
    return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
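
/*
 * Reading of the wait above: wait_event_timeout() returns 0 only if the
 * timeout elapsed with the condition still false, so the reconnect wait
 * succeeds when either a disconnect was requested or a live connection
 * appeared within dead_conn_timeout (already converted to jiffies).
 * Equivalent standalone shape:
 */
static long example_wait_for_link(struct nbd_config *config)
{
    return wait_event_timeout(config->conn_wait,
            test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags) ||
            atomic_read(&config->live_connections) > 0,
            config->dead_conn_timeout);
}
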
static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
    struct nbd_device *nbd = cmd->nbd;
    struct nbd_config *config;
    lockdep_assert_held(&cmd->lock);
    config = nbd_get_config_unlocked(nbd);
    if (!config) {
        dev_err_ratelimited(disk_to_dev(nbd->disk),
    if (index >= config->num_connections) {
        dev_err_ratelimited(disk_to_dev(nbd->disk),
    cmd->status = BLK_STS_OK;
    nsock = config->socks[index];
    mutex_lock(&nsock->tx_lock);
    if (nsock->dead) {
        mutex_unlock(&nsock->tx_lock);
    if (unlikely(nsock->pending && nsock->pending != req)) {
    mutex_unlock(&nsock->tx_lock);

static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
    struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
    mutex_lock(&cmd->lock);
    clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
    ret = nbd_handle_cmd(cmd, hctx->queue_num);
    mutex_unlock(&cmd->lock);

static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
    if (sock->ops->shutdown == sock_no_shutdown) {
        dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
        *err = -EINVAL;

static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
    struct nbd_config *config = nbd->config;
        return -EINVAL;
     * reallocating the ->socks array.
    memflags = blk_mq_freeze_queue(nbd->disk->queue);
    if (!netlink && !nbd->task_setup &&
        !test_bit(NBD_RT_BOUND, &config->runtime_flags))
        nbd->task_setup = current;
        (nbd->task_setup != current ||
         test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
        dev_err(disk_to_dev(nbd->disk),
        err = -EBUSY;
        err = -ENOMEM;
    socks = krealloc(config->socks, (config->num_connections + 1) *
        err = -ENOMEM;
    config->socks = socks;
    nsock->fallback_index = -1;
    nsock->dead = false;
    mutex_init(&nsock->tx_lock);
    nsock->sock = sock;
    nsock->pending = NULL;
    nsock->sent = 0;
    nsock->cookie = 0;
    INIT_WORK(&nsock->work, nbd_pending_cmd_work);
    socks[config->num_connections++] = nsock;
    atomic_inc(&config->live_connections);
    blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
    blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
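
/*
 * Growth pattern used above, reduced to its core: the queue is frozen
 * so no I/O can observe a half-updated socks[] array while krealloc()
 * resizes it to num_connections + 1 slots. Hedged sketch (freeze/
 * unfreeze and error unwinding elided):
 */
static int example_grow_socks(struct nbd_config *config,
                              struct nbd_sock *nsock)
{
    struct nbd_sock **socks;

    socks = krealloc(config->socks,
                     (config->num_connections + 1) * sizeof(*socks),
                     GFP_KERNEL);
    if (!socks)
        return -ENOMEM;             /* old array stays valid on failure */
    config->socks = socks;
    socks[config->num_connections++] = nsock;
    return 0;
}
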
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
    struct nbd_config *config = nbd->config;
        return -ENOMEM;
    for (i = 0; i < config->num_connections; i++) {
        struct nbd_sock *nsock = config->socks[i];
        if (!nsock->dead)
        mutex_lock(&nsock->tx_lock);
        if (!nsock->dead) {
            mutex_unlock(&nsock->tx_lock);
        sk_set_memalloc(sock->sk);
        if (nbd->tag_set.timeout)
            sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
        atomic_inc(&config->recv_threads);
        refcount_inc(&nbd->config_refs);
        old = nsock->sock;
        nsock->fallback_index = -1;
        nsock->sock = sock;
        nsock->dead = false;
        INIT_WORK(&args->work, recv_work);
        args->index = i;
        args->nbd = nbd;
        args->nsock = nsock;
        nsock->cookie++;
        mutex_unlock(&nsock->tx_lock);
        clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
        queue_work(nbd->recv_workq, &args->work);
        atomic_inc(&config->live_connections);
        wake_up(&config->conn_wait);
    return -ENOSPC;

static void nbd_bdev_reset(struct nbd_device *nbd)
    if (disk_openers(nbd->disk) > 1)
    set_capacity(nbd->disk, 0);

static void nbd_parse_flags(struct nbd_device *nbd)
    if (nbd->config->flags & NBD_FLAG_READ_ONLY)
        set_disk_ro(nbd->disk, true);
        set_disk_ro(nbd->disk, false);

static void send_disconnects(struct nbd_device *nbd)
    struct nbd_config *config = nbd->config;
    for (i = 0; i < config->num_connections; i++) {
        struct nbd_sock *nsock = config->socks[i];
        mutex_lock(&nsock->tx_lock);
            dev_err(disk_to_dev(nbd->disk),
        mutex_unlock(&nsock->tx_lock);

static int nbd_disconnect(struct nbd_device *nbd)
    struct nbd_config *config = nbd->config;
    dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
    set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
    set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);

static void nbd_clear_sock(struct nbd_device *nbd)
    nbd->task_setup = NULL;
static void nbd_config_put(struct nbd_device *nbd)
    if (refcount_dec_and_mutex_lock(&nbd->config_refs,
                    &nbd->config_lock)) {
        struct nbd_config *config = nbd->config;
        invalidate_disk(nbd->disk);
        if (nbd->config->bytesize)
            kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
                &config->runtime_flags))
            device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
        nbd->pid = 0;
                &config->runtime_flags)) {
            device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
            kfree(nbd->backend);
            nbd->backend = NULL;
        if (config->num_connections) {
            for (i = 0; i < config->num_connections; i++) {
                sockfd_put(config->socks[i]->sock);
                kfree(config->socks[i]);
            kfree(config->socks);
        kfree(nbd->config);
        nbd->config = NULL;
        nbd->tag_set.timeout = 0;
        mutex_unlock(&nbd->config_lock);
static int nbd_start_device(struct nbd_device *nbd)
    struct nbd_config *config = nbd->config;
    int num_connections = config->num_connections;
    if (nbd->pid)
        return -EBUSY;
    if (!config->socks)
        return -EINVAL;
        !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
        dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
        return -EINVAL;
    blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
    nbd->pid = task_pid_nr(current);
    error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
        dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
    set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
        flush_workqueue(nbd->recv_workq);
        return -ENOMEM;
        sk_set_memalloc(config->socks[i]->sock->sk);
        if (nbd->tag_set.timeout)
            config->socks[i]->sock->sk->sk_sndtimeo =
                nbd->tag_set.timeout;
        atomic_inc(&config->recv_threads);
        refcount_inc(&nbd->config_refs);
        INIT_WORK(&args->work, recv_work);
        args->nbd = nbd;
        args->nsock = config->socks[i];
        args->index = i;
        queue_work(nbd->recv_workq, &args->work);
    return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
static int nbd_start_device_ioctl(struct nbd_device *nbd)
    struct nbd_config *config = nbd->config;
    set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
    mutex_unlock(&nbd->config_lock);
    ret = wait_event_interruptible(config->recv_wq,
                       atomic_read(&config->recv_threads) == 0);
    flush_workqueue(nbd->recv_workq);
    mutex_lock(&nbd->config_lock);
    if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
    if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
        ret = -ETIMEDOUT;

static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
    disk_force_media_change(nbd->disk);
        &nbd->config->runtime_flags))

static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
    nbd->tag_set.timeout = timeout * HZ;
        blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
        blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
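
/*
 * Unit note for the helper above: user space supplies the timeout in
 * seconds while blk-mq deadlines are in jiffies, hence "* HZ"; a zero
 * argument falls back to a 30-second default. Worked example, assuming
 * HZ == 250: timeout = 10 seconds -> 10 * 250 = 2500 jiffies. Sketch:
 */
static void example_set_timeout(struct nbd_device *nbd, u64 timeout)
{
    blk_queue_rq_timeout(nbd->disk->queue,
                         timeout ? timeout * HZ : 30 * HZ);
}
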
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
    struct nbd_config *config = nbd->config;
        return nbd_set_size(nbd, config->bytesize, arg);
        return nbd_set_size(nbd, arg, nbd_blksize(config));
        if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
            return -EINVAL;
        return nbd_set_size(nbd, bytesize, nbd_blksize(config));
        config->flags = arg;
    return -ENOTTY;

static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
    struct nbd_device *nbd = bdev->bd_disk->private_data;
    struct nbd_config *config = nbd->config;
    int error = -EINVAL;
        return -EPERM;
    /* The block layer will pass back some non-nbd ioctls in case we have
        return -EINVAL;
    mutex_lock(&nbd->config_lock);
    if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
    mutex_unlock(&nbd->config_lock);
static int nbd_alloc_and_init_config(struct nbd_device *nbd)
    struct nbd_config *config;
    if (WARN_ON(nbd->config))
        return -EINVAL;
        return -ENODEV;
    config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
    if (!config) {
        return -ENOMEM;
    atomic_set(&config->recv_threads, 0);
    init_waitqueue_head(&config->recv_wq);
    init_waitqueue_head(&config->conn_wait);
    config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
    atomic_set(&config->live_connections, 0);
    nbd->config = config;
    /*
     * Order refcount_set(&nbd->config_refs, 1) against the nbd->config
     * assignment, so nbd_get_config_unlocked() won't see nbd->config as
     * NULL after refcount_inc_not_zero() succeeds.
     */
    refcount_set(&nbd->config_refs, 1);
static int nbd_open(struct gendisk *disk, blk_mode_t mode)
    struct nbd_config *config;
    nbd = disk->private_data;
    ret = -ENXIO;
    if (!refcount_inc_not_zero(&nbd->refs)) {
        ret = -ENXIO;
    config = nbd_get_config_unlocked(nbd);
    if (!config) {
        mutex_lock(&nbd->config_lock);
        if (refcount_inc_not_zero(&nbd->config_refs)) {
            mutex_unlock(&nbd->config_lock);
            mutex_unlock(&nbd->config_lock);
        refcount_inc(&nbd->refs);
        mutex_unlock(&nbd->config_lock);
        set_bit(GD_NEED_PART_SCAN, &disk->state);
    } else if (nbd_disconnected(config)) {
        set_bit(GD_NEED_PART_SCAN, &disk->state);

static void nbd_release(struct gendisk *disk)
    struct nbd_device *nbd = disk->private_data;
    if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&

static void nbd_free_disk(struct gendisk *disk)
    struct nbd_device *nbd = disk->private_data;

static const struct block_device_operations nbd_fops =
static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
    struct nbd_device *nbd = s->private;
    if (nbd->pid)
        seq_printf(s, "recv: %d\n", nbd->pid);

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
    struct nbd_device *nbd = s->private;
    u32 flags = nbd->config->flags;

static int nbd_dev_dbg_init(struct nbd_device *nbd)
    struct nbd_config *config = nbd->config;
        return -EIO;
        return -EIO;
    config->dbg_dir = dir;
    debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
    debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
    debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
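
/*
 * The debugfs wiring above follows the usual one-directory-per-device
 * pattern: typed file helpers are bound to fields that outlive the
 * dentries, and the helpers are designed so callers can ignore their
 * results (they degrade gracefully when debugfs is unavailable).
 * Minimal sketch:
 */
static void example_dbg_init(struct nbd_device *nbd, struct dentry *root)
{
    struct dentry *dir = debugfs_create_dir(nbd_name(nbd), root);

    debugfs_create_u64("size_bytes", 0444, dir, &nbd->config->bytesize);
    debugfs_create_u32("blocksize_bits", 0444, dir,
                       &nbd->config->blksize_bits);
}
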
static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
    debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
        return -EIO;

static void nbd_dbg_close(void)

static int nbd_dev_dbg_init(struct nbd_device *nbd)

static void nbd_dev_dbg_close(struct nbd_device *nbd)

static int nbd_dbg_init(void)

static void nbd_dbg_close(void)

static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
    cmd->nbd = set->driver_data;
    cmd->flags = 0;
    mutex_init(&cmd->lock);

static const struct blk_mq_ops nbd_mq_ops = {
static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
    int err = -ENOMEM;
    nbd->tag_set.ops = &nbd_mq_ops;
    nbd->tag_set.nr_hw_queues = 1;
    nbd->tag_set.queue_depth = 128;
    nbd->tag_set.numa_node = NUMA_NO_NODE;
    nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
    nbd->tag_set.flags = BLK_MQ_F_BLOCKING;
    nbd->tag_set.driver_data = nbd;
    INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
    nbd->backend = NULL;
    err = blk_mq_alloc_tag_set(&nbd->tag_set);
        if (err == -ENOSPC)
            err = -EEXIST;
    nbd->index = index;
    disk = blk_mq_alloc_disk(&nbd->tag_set, &lim, NULL);
    nbd->disk = disk;
    nbd->recv_workq = alloc_workqueue("nbd%d-recv",
                      WQ_UNBOUND, 0, nbd->index);
    if (!nbd->recv_workq) {
        dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
        err = -ENOMEM;
    mutex_init(&nbd->config_lock);
    refcount_set(&nbd->config_refs, 0);
    refcount_set(&nbd->refs, 0);
    INIT_LIST_HEAD(&nbd->list);
    disk->major = NBD_MAJOR;
    disk->first_minor = index << part_shift;
    disk->minors = 1 << part_shift;
    disk->fops = &nbd_fops;
    disk->private_data = nbd;
    sprintf(disk->disk_name, "nbd%d", index);
    refcount_set(&nbd->refs, refs);
    destroy_workqueue(nbd->recv_workq);
    blk_mq_free_tag_set(&nbd->tag_set);
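
/*
 * Setup order shown above, condensed: populate the blk_mq_tag_set,
 * allocate it, then allocate the gendisk against it; teardown runs in
 * reverse. Hedged sketch (queue limits and full error unwinding
 * trimmed; a NULL limits pointer falls back to defaults):
 */
static int example_alloc_disk(struct nbd_device *nbd)
{
    struct gendisk *disk;
    int err;

    nbd->tag_set.ops = &nbd_mq_ops;
    nbd->tag_set.nr_hw_queues = 1;      /* grown later, per connection */
    nbd->tag_set.queue_depth = 128;
    nbd->tag_set.numa_node = NUMA_NO_NODE;
    nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);  /* per-request PDU */
    nbd->tag_set.flags = BLK_MQ_F_BLOCKING;
    nbd->tag_set.driver_data = nbd;

    err = blk_mq_alloc_tag_set(&nbd->tag_set);
    if (err)
        return err;

    disk = blk_mq_alloc_disk(&nbd->tag_set, NULL, NULL);
    if (IS_ERR(disk)) {
        blk_mq_free_tag_set(&nbd->tag_set);
        return PTR_ERR(disk);
    }
    nbd->disk = disk;
    return 0;
}
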
static struct nbd_device *nbd_find_get_unused(void)
    if (refcount_read(&nbd->config_refs) ||
        test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
    if (refcount_inc_not_zero(&nbd->refs))

static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {

static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {

static const struct nla_policy __attribute__((unused))
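
/*
 * The policies above drive generic-netlink attribute validation: each
 * attribute ID maps to an expected type that the netlink core checks
 * before any op runs, so handlers can trust nla_get_*(). Reduced
 * example of the same idiom, using attributes seen in this file:
 */
static const struct nla_policy example_attr_policy[NBD_ATTR_MAX + 1] = {
    [NBD_ATTR_INDEX]            = { .type = NLA_U32 },
    [NBD_ATTR_SIZE_BYTES]       = { .type = NLA_U64 },
    [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
    [NBD_ATTR_SOCKETS]          = { .type = NLA_NESTED },
};
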
static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
    struct nbd_config *config = nbd->config;
    u64 bsize = nbd_blksize(config);
    u64 bytes = config->bytesize;
    if (info->attrs[NBD_ATTR_SIZE_BYTES])
        bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
    if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
        bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
    if (bytes != config->bytesize || bsize != nbd_blksize(config))
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
    struct nbd_config *config;
    int index = -1;
        return -EPERM;
    if (info->attrs[NBD_ATTR_INDEX]) {
        index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
            return -EINVAL;
            return -EINVAL;
            return -EINVAL;
    if (index == -1) {
        if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
             test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
            !refcount_inc_not_zero(&nbd->refs)) {
            return -EINVAL;
    mutex_lock(&nbd->config_lock);
    if (refcount_read(&nbd->config_refs)) {
        mutex_unlock(&nbd->config_lock);
        if (index == -1)
        return -EBUSY;
        mutex_unlock(&nbd->config_lock);
        pr_err("couldn't allocate config\n");
    config = nbd->config;
    set_bit(NBD_RT_BOUND, &config->runtime_flags);
    if (info->attrs[NBD_ATTR_TIMEOUT])
            nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
    if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
        config->dead_conn_timeout =
            nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
        config->dead_conn_timeout *= HZ;
    if (info->attrs[NBD_ATTR_SERVER_FLAGS])
        config->flags =
            nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
    if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
        u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
         * inherited by the config. If we already have
                &nbd->flags))
                &nbd->flags))
                refcount_inc(&nbd->refs);
                &config->runtime_flags);
    if (info->attrs[NBD_ATTR_SOCKETS]) {
        nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
                ret = -EINVAL;
                          info->extack);
                ret = -EINVAL;
    if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
        nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
        if (!nbd->backend) {
            ret = -ENOMEM;
        ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
            dev_err(disk_to_dev(nbd->disk),
        set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
    mutex_unlock(&nbd->config_lock);
        set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
        refcount_inc(&nbd->config_refs);
        nbd_connect_reply(info, nbd->index);
static void nbd_disconnect_and_put(struct nbd_device *nbd)
    mutex_lock(&nbd->config_lock);
    wake_up(&nbd->config->conn_wait);
    flush_workqueue(nbd->recv_workq);
    nbd->task_setup = NULL;
    clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
    mutex_unlock(&nbd->config_lock);
            &nbd->config->runtime_flags))

static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
        return -EPERM;
        return -EINVAL;
    index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
        return -EINVAL;
    if (!refcount_inc_not_zero(&nbd->refs)) {
        return -EINVAL;
    if (!refcount_inc_not_zero(&nbd->config_refs))
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
    struct nbd_config *config;
        return -EPERM;
        return -EINVAL;
    index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
        return -EINVAL;
    if (nbd->backend) {
        if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
            if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
                       nbd->backend)) {
                    nbd->backend);
                return -EINVAL;
            return -EINVAL;
    if (!refcount_inc_not_zero(&nbd->refs)) {
        return -EINVAL;
    config = nbd_get_config_unlocked(nbd);
    if (!config) {
        return -EINVAL;
    mutex_lock(&nbd->config_lock);
    if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
        !nbd->pid) {
        ret = -EINVAL;
    if (info->attrs[NBD_ATTR_TIMEOUT])
            nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
    if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
        config->dead_conn_timeout =
            nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
        config->dead_conn_timeout *= HZ;
    if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
        u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
                &nbd->flags))
                &nbd->flags))
                refcount_inc(&nbd->refs);
                &config->runtime_flags);
                &config->runtime_flags);
    if (info->attrs[NBD_ATTR_SOCKETS]) {
        nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
                ret = -EINVAL;
                          info->extack);
                ret = -EINVAL;
            if (ret == -ENOSPC)
    mutex_unlock(&nbd->config_lock);
static const struct genl_small_ops nbd_connect_genl_ops[] = {

static const struct genl_multicast_group nbd_mcast_grps[] = {

static struct genl_family nbd_genl_family __ro_after_init = {

static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
     * take a ref in the index == -1 case as we would need
    if (refcount_read(&nbd->config_refs))
        return -EMSGSIZE;
    ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
        return -EMSGSIZE;
        return -EMSGSIZE;

static int status_cb(int id, void *ptr, void *data)

static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
    int index = -1;
    int ret = -ENOMEM;
    if (info->attrs[NBD_ATTR_INDEX])
        index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
    msg_size *= (index == -1) ? nbd_total_devices : 1;
        ret = -EMSGSIZE;
    if (index == -1) {

static void nbd_connect_reply(struct genl_info *info, int index)

static void nbd_mcast_index(int index)

static void nbd_dead_link_work(struct work_struct *work)
    nbd_mcast_index(args->index);
static int __init nbd_init(void)
        return -EINVAL;
     * Note that -1 is required because partition 0 is reserved
    max_part = (1UL << part_shift) - 1;
        return -EINVAL;
    if (nbds_max > 1UL << (MINORBITS - part_shift))
        return -EINVAL;
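
/*
 * Worked example of the minor-number math above, assuming the default
 * max_part = 16: part_shift = fls(16) = 5, so each device spans
 * 1 << 5 = 32 minors; max_part becomes (1 << 5) - 1 = 31 because
 * partition 0 is reserved for the whole device; and with MINORBITS = 20
 * at most 1UL << (20 - 5) = 32768 nbd devices fit in the minor space.
 */
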
        return -EIO;
    nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
        return -ENOMEM;
        return -EINVAL;

static int nbd_exit_cb(int id, void *ptr, void *data)
    if (refcount_read(&nbd->refs))
        list_add_tail(&nbd->list, list);

static void __exit nbd_cleanup(void)
    list_del_init(&nbd->list);
    if (refcount_read(&nbd->config_refs))
        refcount_read(&nbd->config_refs));
    if (refcount_read(&nbd->refs) != 1)