1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Network block device - make block devices work over TCP
4 *
5 * Note that you can not swap over this thing, yet. Seems to work but
6 * deadlocks sometimes - you can not swap over TCP in general.
7 *
8 * Copyright 1997-2000, 2008 Pavel Machek <[email protected]>
9 * Parts copyright 2001 Steven Whitehouse <[email protected]>
10 *
11 * (part of code stolen from loop.c)
12 */
13
14 #define pr_fmt(fmt) "nbd: " fmt
15
16 #include <linux/major.h>
17
18 #include <linux/blkdev.h>
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/sched.h>
22 #include <linux/sched/mm.h>
23 #include <linux/fs.h>
24 #include <linux/bio.h>
25 #include <linux/stat.h>
26 #include <linux/errno.h>
27 #include <linux/file.h>
28 #include <linux/ioctl.h>
29 #include <linux/mutex.h>
30 #include <linux/compiler.h>
31 #include <linux/completion.h>
32 #include <linux/err.h>
33 #include <linux/kernel.h>
34 #include <linux/slab.h>
35 #include <net/sock.h>
36 #include <linux/net.h>
37 #include <linux/kthread.h>
38 #include <linux/types.h>
39 #include <linux/debugfs.h>
40 #include <linux/blk-mq.h>
41
42 #include <linux/uaccess.h>
43 #include <asm/types.h>
44
45 #include <linux/nbd.h>
46 #include <linux/nbd-netlink.h>
47 #include <net/genetlink.h>
48
49 #define CREATE_TRACE_POINTS
50 #include <trace/events/nbd.h>
51
52 static DEFINE_IDR(nbd_index_idr);
53 static DEFINE_MUTEX(nbd_index_mutex);
54 static struct workqueue_struct *nbd_del_wq;
55 static int nbd_total_devices = 0;
56
57 struct nbd_sock {
58 struct socket *sock;
59 struct mutex tx_lock;
60 struct request *pending;
61 int sent;
62 bool dead;
63 int fallback_index;
64 int cookie;
65 struct work_struct work;
66 };
67
68 struct recv_thread_args {
69 struct work_struct work;
70 struct nbd_device *nbd;
71 struct nbd_sock *nsock;
72 int index;
73 };
74
75 struct link_dead_args {
76 struct work_struct work;
77 int index;
78 };
79
80 #define NBD_RT_TIMEDOUT 0
81 #define NBD_RT_DISCONNECT_REQUESTED 1
82 #define NBD_RT_DISCONNECTED 2
83 #define NBD_RT_HAS_PID_FILE 3
84 #define NBD_RT_HAS_CONFIG_REF 4
85 #define NBD_RT_BOUND 5
86 #define NBD_RT_DISCONNECT_ON_CLOSE 6
87 #define NBD_RT_HAS_BACKEND_FILE 7
88
89 #define NBD_DESTROY_ON_DISCONNECT 0
90 #define NBD_DISCONNECT_REQUESTED 1
91
92 struct nbd_config {
93 u32 flags;
94 unsigned long runtime_flags;
95 u64 dead_conn_timeout;
96
97 struct nbd_sock **socks;
98 int num_connections;
99 atomic_t live_connections;
100 wait_queue_head_t conn_wait;
101
102 atomic_t recv_threads;
103 wait_queue_head_t recv_wq;
104 unsigned int blksize_bits;
105 loff_t bytesize;
106 #if IS_ENABLED(CONFIG_DEBUG_FS)
107 struct dentry *dbg_dir;
108 #endif
109 };
110
111 static inline unsigned int nbd_blksize(struct nbd_config *config)
112 {
113 return 1u << config->blksize_bits;
114 }
115
116 struct nbd_device {
117 struct blk_mq_tag_set tag_set;
118
119 int index;
120 refcount_t config_refs;
121 refcount_t refs;
122 struct nbd_config *config;
123 struct mutex config_lock;
124 struct gendisk *disk;
125 struct workqueue_struct *recv_workq;
126 struct work_struct remove_work;
127
128 struct list_head list;
129 struct task_struct *task_setup;
130
131 unsigned long flags;
132 pid_t pid; /* pid of nbd-client, if attached */
133
134 char *backend;
135 };
136
137 #define NBD_CMD_REQUEUED 1
138 /*
139 * This flag will be set if nbd_queue_rq() succeeds, and will be checked and
140 * cleared in completion. Both setting and clearing of the flag are protected
141 * by cmd->lock.
142 */
143 #define NBD_CMD_INFLIGHT 2
144
145 /* Only part of the request header or data payload has been sent successfully */
146 #define NBD_CMD_PARTIAL_SEND 3
147
148 struct nbd_cmd {
149 struct nbd_device *nbd;
150 struct mutex lock;
151 int index;
152 int cookie;
153 int retries;
154 blk_status_t status;
155 unsigned long flags;
156 u32 cmd_cookie;
157 };
158
159 #if IS_ENABLED(CONFIG_DEBUG_FS)
160 static struct dentry *nbd_dbg_dir;
161 #endif
162
163 #define nbd_name(nbd) ((nbd)->disk->disk_name)
164
165 #define NBD_DEF_BLKSIZE_BITS 10
166
167 static unsigned int nbds_max = 16;
168 static int max_part = 16;
169 static int part_shift;
170
171 static int nbd_dev_dbg_init(struct nbd_device *nbd);
172 static void nbd_dev_dbg_close(struct nbd_device *nbd);
173 static void nbd_config_put(struct nbd_device *nbd);
174 static void nbd_connect_reply(struct genl_info *info, int index);
175 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
176 static void nbd_dead_link_work(struct work_struct *work);
177 static void nbd_disconnect_and_put(struct nbd_device *nbd);
178
179 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
180 {
181 return disk_to_dev(nbd->disk);
182 }
183
184 static void nbd_requeue_cmd(struct nbd_cmd *cmd)
185 {
186 struct request *req = blk_mq_rq_from_pdu(cmd);
187
188 lockdep_assert_held(&cmd->lock);
189
190 /*
191 * Clear INFLIGHT flag so that this cmd won't be completed in
192 * normal completion path
193 *
194 * INFLIGHT flag will be set when the cmd is queued to nbd next
195 * time.
196 */
197 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
198
199 if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
200 blk_mq_requeue_request(req, true);
201 }
202
203 #define NBD_COOKIE_BITS 32
204
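/*
 * Replies are matched to requests by a 64-bit handle: the low 32 bits
 * carry the blk-mq unique tag, the high 32 bits carry a per-command
 * cookie that is bumped each time the command is freshly sent, so a
 * stale reply arriving for a reused tag is rejected in nbd_handle_reply().
 */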
205 static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
206 {
207 struct request *req = blk_mq_rq_from_pdu(cmd);
208 u32 tag = blk_mq_unique_tag(req);
209 u64 cookie = cmd->cmd_cookie;
210
211 return (cookie << NBD_COOKIE_BITS) | tag;
212 }
213
214 static u32 nbd_handle_to_tag(u64 handle)
215 {
216 return (u32)handle;
217 }
218
219 static u32 nbd_handle_to_cookie(u64 handle)
220 {
221 return (u32)(handle >> NBD_COOKIE_BITS);
222 }
223
224 static const char *nbdcmd_to_ascii(int cmd)
225 {
226 switch (cmd) {
227 case NBD_CMD_READ: return "read";
228 case NBD_CMD_WRITE: return "write";
229 case NBD_CMD_DISC: return "disconnect";
230 case NBD_CMD_FLUSH: return "flush";
231 case NBD_CMD_TRIM: return "trim/discard";
232 }
233 return "invalid";
234 }
235
236 static ssize_t pid_show(struct device *dev,
237 struct device_attribute *attr, char *buf)
238 {
239 struct gendisk *disk = dev_to_disk(dev);
240 struct nbd_device *nbd = disk->private_data;
241
242 return sprintf(buf, "%d\n", nbd->pid);
243 }
244
245 static const struct device_attribute pid_attr = {
246 .attr = { .name = "pid", .mode = 0444},
247 .show = pid_show,
248 };
249
250 static ssize_t backend_show(struct device *dev,
251 struct device_attribute *attr, char *buf)
252 {
253 struct gendisk *disk = dev_to_disk(dev);
254 struct nbd_device *nbd = disk->private_data;
255
256 return sprintf(buf, "%s\n", nbd->backend ?: "");
257 }
258
259 static const struct device_attribute backend_attr = {
260 .attr = { .name = "backend", .mode = 0444},
261 .show = backend_show,
262 };
263
264 static void nbd_dev_remove(struct nbd_device *nbd)
265 {
266 struct gendisk *disk = nbd->disk;
267
268 del_gendisk(disk);
269 blk_mq_free_tag_set(&nbd->tag_set);
270
271 /*
272 * Remove from idr after del_gendisk() completes, so if the same ID is
273 * reused, the following add_disk() will succeed.
274 */
275 mutex_lock(&nbd_index_mutex);
276 idr_remove(&nbd_index_idr, nbd->index);
277 mutex_unlock(&nbd_index_mutex);
278 destroy_workqueue(nbd->recv_workq);
279 put_disk(disk);
280 }
281
282 static void nbd_dev_remove_work(struct work_struct *work)
283 {
284 nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
285 }
286
287 static void nbd_put(struct nbd_device *nbd)
288 {
289 if (!refcount_dec_and_test(&nbd->refs))
290 return;
291
292 /* Call del_gendisk() asynchronously to prevent deadlock */
293 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
294 queue_work(nbd_del_wq, &nbd->remove_work);
295 else
296 nbd_dev_remove(nbd);
297 }
298
299 static int nbd_disconnected(struct nbd_config *config)
300 {
301 return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
302 test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
303 }
304
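/*
 * Mark a connection dead: shut its socket down, drop it from the live
 * connection count and clear any partially sent request.  If @notify is
 * set and no disconnect was requested, queue the dead-link work so the
 * failure can be reported to userspace.
 */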
305 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
306 int notify)
307 {
308 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
309 struct link_dead_args *args;
310 args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
311 if (args) {
312 INIT_WORK(&args->work, nbd_dead_link_work);
313 args->index = nbd->index;
314 queue_work(system_wq, &args->work);
315 }
316 }
317 if (!nsock->dead) {
318 kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
319 if (atomic_dec_return(&nbd->config->live_connections) == 0) {
320 if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
321 &nbd->config->runtime_flags)) {
322 set_bit(NBD_RT_DISCONNECTED,
323 &nbd->config->runtime_flags);
324 dev_info(nbd_to_dev(nbd),
325 "Disconnected due to user request.\n");
326 }
327 }
328 }
329 nsock->dead = true;
330 nsock->pending = NULL;
331 nsock->sent = 0;
332 }
333
334 static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, loff_t blksize)
335 {
336 struct queue_limits lim;
337 int error;
338
339 if (!blksize)
340 blksize = 1u << NBD_DEF_BLKSIZE_BITS;
341
342 if (blk_validate_block_size(blksize))
343 return -EINVAL;
344
345 if (bytesize < 0)
346 return -EINVAL;
347
348 nbd->config->bytesize = bytesize;
349 nbd->config->blksize_bits = __ffs(blksize);
350
351 if (!nbd->pid)
352 return 0;
353
354 lim = queue_limits_start_update(nbd->disk->queue);
355 if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
356 lim.max_hw_discard_sectors = UINT_MAX >> SECTOR_SHIFT;
357 else
358 lim.max_hw_discard_sectors = 0;
359 if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) {
360 lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
361 } else if (nbd->config->flags & NBD_FLAG_SEND_FUA) {
362 lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
363 } else {
364 lim.features |= BLK_FEAT_WRITE_CACHE;
365 lim.features &= ~BLK_FEAT_FUA;
366 }
367 if (nbd->config->flags & NBD_FLAG_ROTATIONAL)
368 lim.features |= BLK_FEAT_ROTATIONAL;
369 if (nbd->config->flags & NBD_FLAG_SEND_WRITE_ZEROES)
370 lim.max_write_zeroes_sectors = UINT_MAX >> SECTOR_SHIFT;
371
372 lim.logical_block_size = blksize;
373 lim.physical_block_size = blksize;
374 error = queue_limits_commit_update_frozen(nbd->disk->queue, &lim);
375 if (error)
376 return error;
377
378 if (max_part)
379 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
380 if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
381 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
382 return 0;
383 }
384
385 static void nbd_complete_rq(struct request *req)
386 {
387 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
388
389 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
390 cmd->status ? "failed" : "done");
391
392 blk_mq_end_request(req, cmd->status);
393 }
394
395 /*
396 * Forcibly shut down the sockets, causing all listeners to error out
397 */
398 static void sock_shutdown(struct nbd_device *nbd)
399 {
400 struct nbd_config *config = nbd->config;
401 int i;
402
403 if (config->num_connections == 0)
404 return;
405 if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
406 return;
407
408 for (i = 0; i < config->num_connections; i++) {
409 struct nbd_sock *nsock = config->socks[i];
410 mutex_lock(&nsock->tx_lock);
411 nbd_mark_nsock_dead(nbd, nsock, 0);
412 mutex_unlock(&nsock->tx_lock);
413 }
414 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
415 }
416
417 static u32 req_to_nbd_cmd_type(struct request *req)
418 {
419 switch (req_op(req)) {
420 case REQ_OP_DISCARD:
421 return NBD_CMD_TRIM;
422 case REQ_OP_FLUSH:
423 return NBD_CMD_FLUSH;
424 case REQ_OP_WRITE:
425 return NBD_CMD_WRITE;
426 case REQ_OP_READ:
427 return NBD_CMD_READ;
428 case REQ_OP_WRITE_ZEROES:
429 return NBD_CMD_WRITE_ZEROES;
430 default:
431 return U32_MAX;
432 }
433 }
434
435 static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
436 {
437 if (refcount_inc_not_zero(&nbd->config_refs)) {
438 /*
439 * Add smp_mb__after_atomic to ensure that reading nbd->config_refs
440 * and reading nbd->config is ordered. Its pair is the barrier in
441 * nbd_alloc_and_init_config(), which prevents nbd->config_refs from
442 * being observed as set before nbd->config is assigned.
443 */
444 smp_mb__after_atomic();
445 return nbd->config;
446 }
447
448 return NULL;
449 }
450
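/*
 * blk-mq timeout handler.  With more than one connection (or a
 * user-supplied timeout on a single connection) the request is requeued
 * onto another socket; with timeout=0 the timer is simply reset; otherwise
 * the request is failed and all sockets are shut down.
 */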
451 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
452 {
453 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
454 struct nbd_device *nbd = cmd->nbd;
455 struct nbd_config *config;
456
457 if (!mutex_trylock(&cmd->lock))
458 return BLK_EH_RESET_TIMER;
459
460 /* partial send is handled in nbd_sock's work function */
461 if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)) {
462 mutex_unlock(&cmd->lock);
463 return BLK_EH_RESET_TIMER;
464 }
465
466 if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
467 mutex_unlock(&cmd->lock);
468 return BLK_EH_DONE;
469 }
470
471 config = nbd_get_config_unlocked(nbd);
472 if (!config) {
473 cmd->status = BLK_STS_TIMEOUT;
474 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
475 mutex_unlock(&cmd->lock);
476 goto done;
477 }
478
479 if (config->num_connections > 1 ||
480 (config->num_connections == 1 && nbd->tag_set.timeout)) {
481 dev_err_ratelimited(nbd_to_dev(nbd),
482 "Connection timed out, retrying (%d/%d alive)\n",
483 atomic_read(&config->live_connections),
484 config->num_connections);
485 /*
486 * Hooray we have more connections, requeue this IO, the submit
487 * path will put it on a real connection. Or if only one
488 * connection is configured, the submit path will wait until
489 * a new connection is reconfigured or until the dead connection timeout expires.
490 */
491 if (config->socks) {
492 if (cmd->index < config->num_connections) {
493 struct nbd_sock *nsock =
494 config->socks[cmd->index];
495 mutex_lock(&nsock->tx_lock);
496 /* We can have multiple outstanding requests, so
497 * we don't want to mark the nsock dead if we've
498 * already reconnected with a new socket, so
499 * only mark it dead if it's the same socket we
500 * were sent out on.
501 */
502 if (cmd->cookie == nsock->cookie)
503 nbd_mark_nsock_dead(nbd, nsock, 1);
504 mutex_unlock(&nsock->tx_lock);
505 }
506 nbd_requeue_cmd(cmd);
507 mutex_unlock(&cmd->lock);
508 nbd_config_put(nbd);
509 return BLK_EH_DONE;
510 }
511 }
512
513 if (!nbd->tag_set.timeout) {
514 /*
515 * Userspace sets timeout=0 to disable socket disconnection,
516 * so just warn and reset the timer.
517 */
518 struct nbd_sock *nsock = config->socks[cmd->index];
519 cmd->retries++;
520 dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
521 req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
522 (unsigned long long)blk_rq_pos(req) << 9,
523 blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
524
525 mutex_lock(&nsock->tx_lock);
526 if (cmd->cookie != nsock->cookie) {
527 nbd_requeue_cmd(cmd);
528 mutex_unlock(&nsock->tx_lock);
529 mutex_unlock(&cmd->lock);
530 nbd_config_put(nbd);
531 return BLK_EH_DONE;
532 }
533 mutex_unlock(&nsock->tx_lock);
534 mutex_unlock(&cmd->lock);
535 nbd_config_put(nbd);
536 return BLK_EH_RESET_TIMER;
537 }
538
539 dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
540 set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
541 cmd->status = BLK_STS_IOERR;
542 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
543 mutex_unlock(&cmd->lock);
544 sock_shutdown(nbd);
545 nbd_config_put(nbd);
546 done:
547 blk_mq_complete_request(req);
548 return BLK_EH_DONE;
549 }
550
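/*
 * Transmit or receive an iov on @sock.  Runs under memalloc_noreclaim and
 * marks the socket for GFP_NOIO | __GFP_MEMALLOC allocations so that
 * memory pressure on the socket cannot recurse back into block I/O.
 */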
551 static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
552 struct iov_iter *iter, int msg_flags, int *sent)
553 {
554 int result;
555 struct msghdr msg = {};
556 unsigned int noreclaim_flag;
557
558 if (unlikely(!sock)) {
559 dev_err_ratelimited(disk_to_dev(nbd->disk),
560 "Attempted %s on closed socket in sock_xmit\n",
561 (send ? "send" : "recv"));
562 return -EINVAL;
563 }
564
565 msg.msg_iter = *iter;
566
567 noreclaim_flag = memalloc_noreclaim_save();
568 do {
569 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
570 sock->sk->sk_use_task_frag = false;
571 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
572
573 if (send)
574 result = sock_sendmsg(sock, &msg);
575 else
576 result = sock_recvmsg(sock, &msg, msg.msg_flags);
577
578 if (result <= 0) {
579 if (result == 0)
580 result = -EPIPE; /* short read */
581 break;
582 }
583 if (sent)
584 *sent += result;
585 } while (msg_data_left(&msg));
586
587 memalloc_noreclaim_restore(noreclaim_flag);
588
589 return result;
590 }
591
592 /*
593 * Send or receive packet. Returns a positive value on success and a
594 * negative value on failure, and never returns 0.
595 */
596 static int sock_xmit(struct nbd_device *nbd, int index, int send,
597 struct iov_iter *iter, int msg_flags, int *sent)
598 {
599 struct nbd_config *config = nbd->config;
600 struct socket *sock = config->socks[index]->sock;
601
602 return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
603 }
604
605 /*
606 * Different settings for sk->sk_sndtimeo can result in different return values
607 * if there is a signal pending when we enter sendmsg, because reasons?
608 */
609 static inline int was_interrupted(int result)
610 {
611 return result == -ERESTARTSYS || result == -EINTR;
612 }
613
614 /*
615 * We've already sent the header or part of the data payload, so we have no
616 * choice but to set pending and schedule the rest from the work function.
617 *
618 * We also have to return BLK_STS_OK to the block core, otherwise this same
619 * request may be re-dispatched with a different tag while our header has
620 * already gone out with the old tag, which would confuse reply handling.
621 */
622 static void nbd_sched_pending_work(struct nbd_device *nbd,
623 struct nbd_sock *nsock,
624 struct nbd_cmd *cmd, int sent)
625 {
626 struct request *req = blk_mq_rq_from_pdu(cmd);
627
628 /* pending work should be scheduled only once */
629 WARN_ON_ONCE(test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags));
630
631 nsock->pending = req;
632 nsock->sent = sent;
633 set_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
634 refcount_inc(&nbd->config_refs);
635 schedule_work(&nsock->work);
636 }
637
638 /*
639 * Returns BLK_STS_RESOURCE if the caller should retry after a delay.
640 * Returns BLK_STS_IOERR if sending failed.
641 */
642 static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
643 int index)
644 {
645 struct request *req = blk_mq_rq_from_pdu(cmd);
646 struct nbd_config *config = nbd->config;
647 struct nbd_sock *nsock = config->socks[index];
648 int result;
649 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
650 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
651 struct iov_iter from;
652 struct bio *bio;
653 u64 handle;
654 u32 type;
655 u32 nbd_cmd_flags = 0;
656 int sent = nsock->sent, skip = 0;
657
658 lockdep_assert_held(&cmd->lock);
659 lockdep_assert_held(&nsock->tx_lock);
660
661 iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
662
663 type = req_to_nbd_cmd_type(req);
664 if (type == U32_MAX)
665 return BLK_STS_IOERR;
666
667 if (rq_data_dir(req) == WRITE &&
668 (config->flags & NBD_FLAG_READ_ONLY)) {
669 dev_err_ratelimited(disk_to_dev(nbd->disk),
670 "Write on read-only\n");
671 return BLK_STS_IOERR;
672 }
673
674 if (req->cmd_flags & REQ_FUA)
675 nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
676 if ((req->cmd_flags & REQ_NOUNMAP) && (type == NBD_CMD_WRITE_ZEROES))
677 nbd_cmd_flags |= NBD_CMD_FLAG_NO_HOLE;
678
679 /* We did a partial send previously, and we at least sent the whole
680 * request struct, so just go and send the rest of the pages in the
681 * request.
682 */
683 if (sent) {
684 if (sent >= sizeof(request)) {
685 skip = sent - sizeof(request);
686
687 /* initialize handle for tracing purposes */
688 handle = nbd_cmd_handle(cmd);
689
690 goto send_pages;
691 }
692 iov_iter_advance(&from, sent);
693 } else {
694 cmd->cmd_cookie++;
695 }
696 cmd->index = index;
697 cmd->cookie = nsock->cookie;
698 cmd->retries = 0;
699 request.type = htonl(type | nbd_cmd_flags);
700 if (type != NBD_CMD_FLUSH) {
701 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
702 request.len = htonl(blk_rq_bytes(req));
703 }
704 handle = nbd_cmd_handle(cmd);
705 request.cookie = cpu_to_be64(handle);
706
707 trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
708
709 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
710 req, nbdcmd_to_ascii(type),
711 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
712 result = sock_xmit(nbd, index, 1, &from,
713 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
714 trace_nbd_header_sent(req, handle);
715 if (result < 0) {
716 if (was_interrupted(result)) {
717 /* If we haven't sent anything we can just return BUSY,
718 * however if we have sent something we need to make
719 * sure we only allow this req to be sent until we are
720 * completely done.
721 */
722 if (sent) {
723 nbd_sched_pending_work(nbd, nsock, cmd, sent);
724 return BLK_STS_OK;
725 }
726 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
727 return BLK_STS_RESOURCE;
728 }
729 dev_err_ratelimited(disk_to_dev(nbd->disk),
730 "Send control failed (result %d)\n", result);
731 goto requeue;
732 }
733 send_pages:
734 if (type != NBD_CMD_WRITE)
735 goto out;
736
737 bio = req->bio;
738 while (bio) {
739 struct bio *next = bio->bi_next;
740 struct bvec_iter iter;
741 struct bio_vec bvec;
742
743 bio_for_each_segment(bvec, bio, iter) {
744 bool is_last = !next && bio_iter_last(bvec, iter);
745 int flags = is_last ? 0 : MSG_MORE;
746
747 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
748 req, bvec.bv_len);
749 iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len);
750 if (skip) {
751 if (skip >= iov_iter_count(&from)) {
752 skip -= iov_iter_count(&from);
753 continue;
754 }
755 iov_iter_advance(&from, skip);
756 skip = 0;
757 }
758 result = sock_xmit(nbd, index, 1, &from, flags, &sent);
759 if (result < 0) {
760 if (was_interrupted(result)) {
761 nbd_sched_pending_work(nbd, nsock, cmd, sent);
762 return BLK_STS_OK;
763 }
764 dev_err(disk_to_dev(nbd->disk),
765 "Send data failed (result %d)\n",
766 result);
767 goto requeue;
768 }
769 /*
770 * The completion might already have come in,
771 * so break for the last one instead of letting
772 * the iterator do it. This prevents use-after-free
773 * of the bio.
774 */
775 if (is_last)
776 break;
777 }
778 bio = next;
779 }
780 out:
781 trace_nbd_payload_sent(req, handle);
782 nsock->pending = NULL;
783 nsock->sent = 0;
784 __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
785 return BLK_STS_OK;
786
787 requeue:
788 /*
789 * Can't requeue when we are dealing with a partial send;
790 * the remainder must be finished from the pending work
791 * function.
792 */
793 if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags))
794 return BLK_STS_OK;
795
796 /* retry on a different socket */
797 dev_err_ratelimited(disk_to_dev(nbd->disk),
798 "Request send failed, requeueing\n");
799 nbd_mark_nsock_dead(nbd, nsock, 1);
800 nbd_requeue_cmd(cmd);
801 return BLK_STS_OK;
802 }
803
804 /* handle partial sending */
805 static void nbd_pending_cmd_work(struct work_struct *work)
806 {
807 struct nbd_sock *nsock = container_of(work, struct nbd_sock, work);
808 struct request *req = nsock->pending;
809 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
810 struct nbd_device *nbd = cmd->nbd;
811 unsigned long deadline = READ_ONCE(req->deadline);
812 unsigned int wait_ms = 2;
813
814 mutex_lock(&cmd->lock);
815
816 WARN_ON_ONCE(test_bit(NBD_CMD_REQUEUED, &cmd->flags));
817 if (WARN_ON_ONCE(!test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)))
818 goto out;
819
820 mutex_lock(&nsock->tx_lock);
821 while (true) {
822 nbd_send_cmd(nbd, cmd, cmd->index);
823 if (!nsock->pending)
824 break;
825
826 /* don't bother the timeout handler with partial sends */
827 if (READ_ONCE(jiffies) + msecs_to_jiffies(wait_ms) >= deadline) {
828 cmd->status = BLK_STS_IOERR;
829 blk_mq_complete_request(req);
830 break;
831 }
832 msleep(wait_ms);
833 wait_ms *= 2;
834 }
835 mutex_unlock(&nsock->tx_lock);
836 clear_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
837 out:
838 mutex_unlock(&cmd->lock);
839 nbd_config_put(nbd);
840 }
841
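/*
 * Read one reply header from the socket and validate its magic.  Returns
 * 0 on success and a negative errno on socket error or protocol mismatch.
 */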
842 static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
843 struct nbd_reply *reply)
844 {
845 struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
846 struct iov_iter to;
847 int result;
848
849 reply->magic = 0;
850 iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
851 result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
852 if (result < 0) {
853 if (!nbd_disconnected(nbd->config))
854 dev_err(disk_to_dev(nbd->disk),
855 "Receive control failed (result %d)\n", result);
856 return result;
857 }
858
859 if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
860 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
861 (unsigned long)ntohl(reply->magic));
862 return -EPROTO;
863 }
864
865 return 0;
866 }
867
868 /* ERR_PTR returned = something went wrong, inform userspace */
869 static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
870 struct nbd_reply *reply)
871 {
872 int result;
873 struct nbd_cmd *cmd;
874 struct request *req = NULL;
875 u64 handle;
876 u16 hwq;
877 u32 tag;
878 int ret = 0;
879
880 handle = be64_to_cpu(reply->cookie);
881 tag = nbd_handle_to_tag(handle);
882 hwq = blk_mq_unique_tag_to_hwq(tag);
883 if (hwq < nbd->tag_set.nr_hw_queues)
884 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
885 blk_mq_unique_tag_to_tag(tag));
886 if (!req || !blk_mq_request_started(req)) {
887 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
888 tag, req);
889 return ERR_PTR(-ENOENT);
890 }
891 trace_nbd_header_received(req, handle);
892 cmd = blk_mq_rq_to_pdu(req);
893
894 mutex_lock(&cmd->lock);
895 if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
896 dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
897 tag, cmd->status, cmd->flags);
898 ret = -ENOENT;
899 goto out;
900 }
901 if (cmd->index != index) {
902 dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
903 tag, index, cmd->index);
904 ret = -ENOENT;
905 goto out;
906 }
907 if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
908 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
909 req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
910 ret = -ENOENT;
911 goto out;
912 }
913 if (cmd->status != BLK_STS_OK) {
914 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
915 req);
916 ret = -ENOENT;
917 goto out;
918 }
919 if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
920 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
921 req);
922 ret = -ENOENT;
923 goto out;
924 }
925 if (ntohl(reply->error)) {
926 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
927 ntohl(reply->error));
928 cmd->status = BLK_STS_IOERR;
929 goto out;
930 }
931
932 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
933 if (rq_data_dir(req) != WRITE) {
934 struct req_iterator iter;
935 struct bio_vec bvec;
936 struct iov_iter to;
937
938 rq_for_each_segment(bvec, req, iter) {
939 iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len);
940 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
941 if (result < 0) {
942 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
943 result);
944 /*
945 * If we've disconnected, we need to make sure we
946 * complete this request, otherwise error out
947 * and let the timeout stuff handle resubmitting
948 * this request onto another connection.
949 */
950 if (nbd_disconnected(nbd->config)) {
951 cmd->status = BLK_STS_IOERR;
952 goto out;
953 }
954 ret = -EIO;
955 goto out;
956 }
957 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
958 req, bvec.bv_len);
959 }
960 }
961 out:
962 trace_nbd_payload_received(req, handle);
963 mutex_unlock(&cmd->lock);
964 return ret ? ERR_PTR(ret) : cmd;
965 }
966
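/*
 * Per-connection receive worker: keeps reading replies and completing the
 * matching requests until the connection fails, then marks the socket
 * dead, wakes up any waiters and drops its config reference.
 */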
967 static void recv_work(struct work_struct *work)
968 {
969 struct recv_thread_args *args = container_of(work,
970 struct recv_thread_args,
971 work);
972 struct nbd_device *nbd = args->nbd;
973 struct nbd_config *config = nbd->config;
974 struct request_queue *q = nbd->disk->queue;
975 struct nbd_sock *nsock = args->nsock;
976 struct nbd_cmd *cmd;
977 struct request *rq;
978
979 while (1) {
980 struct nbd_reply reply;
981
982 if (nbd_read_reply(nbd, nsock->sock, &reply))
983 break;
984
985 /*
986 * Grab .q_usage_counter so request pool won't go away, then no
987 * request use-after-free is possible during nbd_handle_reply().
988 * If the queue is frozen, there won't be any inflight requests, so we
989 * need not handle the incoming garbage message.
990 */
991 if (!percpu_ref_tryget(&q->q_usage_counter)) {
992 dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
993 __func__);
994 break;
995 }
996
997 cmd = nbd_handle_reply(nbd, args->index, &reply);
998 if (IS_ERR(cmd)) {
999 percpu_ref_put(&q->q_usage_counter);
1000 break;
1001 }
1002
1003 rq = blk_mq_rq_from_pdu(cmd);
1004 if (likely(!blk_should_fake_timeout(rq->q))) {
1005 bool complete;
1006
1007 mutex_lock(&cmd->lock);
1008 complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
1009 &cmd->flags);
1010 mutex_unlock(&cmd->lock);
1011 if (complete)
1012 blk_mq_complete_request(rq);
1013 }
1014 percpu_ref_put(&q->q_usage_counter);
1015 }
1016
1017 mutex_lock(&nsock->tx_lock);
1018 nbd_mark_nsock_dead(nbd, nsock, 1);
1019 mutex_unlock(&nsock->tx_lock);
1020
1021 nbd_config_put(nbd);
1022 atomic_dec(&config->recv_threads);
1023 wake_up(&config->recv_wq);
1024 kfree(args);
1025 }
1026
1027 static bool nbd_clear_req(struct request *req, void *data)
1028 {
1029 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
1030
1031 /* don't abort an already completed request */
1032 if (blk_mq_request_completed(req))
1033 return true;
1034
1035 mutex_lock(&cmd->lock);
1036 if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
1037 mutex_unlock(&cmd->lock);
1038 return true;
1039 }
1040 cmd->status = BLK_STS_IOERR;
1041 mutex_unlock(&cmd->lock);
1042
1043 blk_mq_complete_request(req);
1044 return true;
1045 }
1046
1047 static void nbd_clear_que(struct nbd_device *nbd)
1048 {
1049 blk_mq_quiesce_queue(nbd->disk->queue);
1050 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
1051 blk_mq_unquiesce_queue(nbd->disk->queue);
1052 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
1053 }
1054
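/*
 * Find another live connection to retry on after the socket at @index has
 * died.  Returns the new index, or -1 if no live connection is available.
 */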
1055 static int find_fallback(struct nbd_device *nbd, int index)
1056 {
1057 struct nbd_config *config = nbd->config;
1058 int new_index = -1;
1059 struct nbd_sock *nsock = config->socks[index];
1060 int fallback = nsock->fallback_index;
1061
1062 if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
1063 return new_index;
1064
1065 if (config->num_connections <= 1) {
1066 dev_err_ratelimited(disk_to_dev(nbd->disk),
1067 "Dead connection, failed to find a fallback\n");
1068 return new_index;
1069 }
1070
1071 if (fallback >= 0 && fallback < config->num_connections &&
1072 !config->socks[fallback]->dead)
1073 return fallback;
1074
1075 if (nsock->fallback_index < 0 ||
1076 nsock->fallback_index >= config->num_connections ||
1077 config->socks[nsock->fallback_index]->dead) {
1078 int i;
1079 for (i = 0; i < config->num_connections; i++) {
1080 if (i == index)
1081 continue;
1082 if (!config->socks[i]->dead) {
1083 new_index = i;
1084 break;
1085 }
1086 }
1087 nsock->fallback_index = new_index;
1088 if (new_index < 0) {
1089 dev_err_ratelimited(disk_to_dev(nbd->disk),
1090 "Dead connection, failed to find a fallback\n");
1091 return new_index;
1092 }
1093 }
1094 new_index = nsock->fallback_index;
1095 return new_index;
1096 }
1097
1098 static int wait_for_reconnect(struct nbd_device *nbd)
1099 {
1100 struct nbd_config *config = nbd->config;
1101 if (!config->dead_conn_timeout)
1102 return 0;
1103
1104 if (!wait_event_timeout(config->conn_wait,
1105 test_bit(NBD_RT_DISCONNECTED,
1106 &config->runtime_flags) ||
1107 atomic_read(&config->live_connections) > 0,
1108 config->dead_conn_timeout))
1109 return 0;
1110
1111 return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
1112 }
1113
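/*
 * Pick a live socket for @cmd, falling back to another connection (or
 * waiting for a reconnect) if the chosen one is dead, and hand the command
 * to nbd_send_cmd() under the socket's tx_lock.
 */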
1114 static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1115 {
1116 struct request *req = blk_mq_rq_from_pdu(cmd);
1117 struct nbd_device *nbd = cmd->nbd;
1118 struct nbd_config *config;
1119 struct nbd_sock *nsock;
1120 blk_status_t ret;
1121
1122 lockdep_assert_held(&cmd->lock);
1123
1124 config = nbd_get_config_unlocked(nbd);
1125 if (!config) {
1126 dev_err_ratelimited(disk_to_dev(nbd->disk),
1127 "Socks array is empty\n");
1128 return BLK_STS_IOERR;
1129 }
1130
1131 if (index >= config->num_connections) {
1132 dev_err_ratelimited(disk_to_dev(nbd->disk),
1133 "Attempted send on invalid socket\n");
1134 nbd_config_put(nbd);
1135 return BLK_STS_IOERR;
1136 }
1137 cmd->status = BLK_STS_OK;
1138 again:
1139 nsock = config->socks[index];
1140 mutex_lock(&nsock->tx_lock);
1141 if (nsock->dead) {
1142 int old_index = index;
1143 index = find_fallback(nbd, index);
1144 mutex_unlock(&nsock->tx_lock);
1145 if (index < 0) {
1146 if (wait_for_reconnect(nbd)) {
1147 index = old_index;
1148 goto again;
1149 }
1150 /* All the sockets should already be down at this point,
1151 * we just want to make sure that DISCONNECTED is set so
1152 * any requests that come in that were queued waiting
1153 * for the reconnect timer don't trigger the timer again
1154 * and instead just error out.
1155 */
1156 sock_shutdown(nbd);
1157 nbd_config_put(nbd);
1158 return BLK_STS_IOERR;
1159 }
1160 goto again;
1161 }
1162
1163 /* Handle the case that we have a pending request that was partially
1164 * transmitted that _has_ to be serviced first. We need to call requeue
1165 * here so that it gets put _after_ the request that is already on the
1166 * dispatch list.
1167 */
1168 blk_mq_start_request(req);
1169 if (unlikely(nsock->pending && nsock->pending != req)) {
1170 nbd_requeue_cmd(cmd);
1171 ret = BLK_STS_OK;
1172 goto out;
1173 }
1174 ret = nbd_send_cmd(nbd, cmd, index);
1175 out:
1176 mutex_unlock(&nsock->tx_lock);
1177 nbd_config_put(nbd);
1178 return ret;
1179 }
1180
1181 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
1182 const struct blk_mq_queue_data *bd)
1183 {
1184 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
1185 blk_status_t ret;
1186
1187 /*
1188 * Since we look at the bio's to send the request over the network we
1189 * need to make sure the completion work doesn't mark this request done
1190 * before we are done doing our send. This keeps us from dereferencing
1191 * freed data if we have particularly fast completions (ie we get the
1192 * completion before we exit sock_xmit on the last bvec) or in the case
1193 * that the server is misbehaving (or there was an error) before we're
1194 * done sending everything over the wire.
1195 */
1196 mutex_lock(&cmd->lock);
1197 clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
1198
1199 /* We can be called directly from the user space process, which means we
1200 * could possibly have signals pending so our sendmsg will fail. In
1201 * this case we need to return that we are busy, otherwise error out as
1202 * appropriate.
1203 */
1204 ret = nbd_handle_cmd(cmd, hctx->queue_num);
1205 mutex_unlock(&cmd->lock);
1206
1207 return ret;
1208 }
1209
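/*
 * Look up the socket behind @fd and reject sockets without a real
 * shutdown callout, since the driver relies on kernel_sock_shutdown()
 * to tear connections down.
 */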
1210 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
1211 int *err)
1212 {
1213 struct socket *sock;
1214
1215 *err = 0;
1216 sock = sockfd_lookup(fd, err);
1217 if (!sock)
1218 return NULL;
1219
1220 if (sock->ops->shutdown == sock_no_shutdown) {
1221 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
1222 *err = -EINVAL;
1223 sockfd_put(sock);
1224 return NULL;
1225 }
1226
1227 return sock;
1228 }
1229
1230 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
1231 bool netlink)
1232 {
1233 struct nbd_config *config = nbd->config;
1234 struct socket *sock;
1235 struct nbd_sock **socks;
1236 struct nbd_sock *nsock;
1237 unsigned int memflags;
1238 int err;
1239
1240 /* Arg will be cast to int, check it to avoid overflow */
1241 if (arg > INT_MAX)
1242 return -EINVAL;
1243 sock = nbd_get_socket(nbd, arg, &err);
1244 if (!sock)
1245 return err;
1246
1247 /*
1248 * We need to make sure we don't get any errant requests while we're
1249 * reallocating the ->socks array.
1250 */
1251 memflags = blk_mq_freeze_queue(nbd->disk->queue);
1252
1253 if (!netlink && !nbd->task_setup &&
1254 !test_bit(NBD_RT_BOUND, &config->runtime_flags))
1255 nbd->task_setup = current;
1256
1257 if (!netlink &&
1258 (nbd->task_setup != current ||
1259 test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
1260 dev_err(disk_to_dev(nbd->disk),
1261 "Device being setup by another task");
1262 err = -EBUSY;
1263 goto put_socket;
1264 }
1265
1266 nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
1267 if (!nsock) {
1268 err = -ENOMEM;
1269 goto put_socket;
1270 }
1271
1272 socks = krealloc(config->socks, (config->num_connections + 1) *
1273 sizeof(struct nbd_sock *), GFP_KERNEL);
1274 if (!socks) {
1275 kfree(nsock);
1276 err = -ENOMEM;
1277 goto put_socket;
1278 }
1279
1280 config->socks = socks;
1281
1282 nsock->fallback_index = -1;
1283 nsock->dead = false;
1284 mutex_init(&nsock->tx_lock);
1285 nsock->sock = sock;
1286 nsock->pending = NULL;
1287 nsock->sent = 0;
1288 nsock->cookie = 0;
1289 INIT_WORK(&nsock->work, nbd_pending_cmd_work);
1290 socks[config->num_connections++] = nsock;
1291 atomic_inc(&config->live_connections);
1292 blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
1293
1294 return 0;
1295
1296 put_socket:
1297 blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
1298 sockfd_put(sock);
1299 return err;
1300 }
1301
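/*
 * Swap a freshly supplied socket into the first dead connection and start
 * a new receive worker for it.  Returns -ENOSPC if every connection is
 * still alive.
 */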
1302 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
1303 {
1304 struct nbd_config *config = nbd->config;
1305 struct socket *sock, *old;
1306 struct recv_thread_args *args;
1307 int i;
1308 int err;
1309
1310 sock = nbd_get_socket(nbd, arg, &err);
1311 if (!sock)
1312 return err;
1313
1314 args = kzalloc(sizeof(*args), GFP_KERNEL);
1315 if (!args) {
1316 sockfd_put(sock);
1317 return -ENOMEM;
1318 }
1319
1320 for (i = 0; i < config->num_connections; i++) {
1321 struct nbd_sock *nsock = config->socks[i];
1322
1323 if (!nsock->dead)
1324 continue;
1325
1326 mutex_lock(&nsock->tx_lock);
1327 if (!nsock->dead) {
1328 mutex_unlock(&nsock->tx_lock);
1329 continue;
1330 }
1331 sk_set_memalloc(sock->sk);
1332 if (nbd->tag_set.timeout)
1333 sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
1334 atomic_inc(&config->recv_threads);
1335 refcount_inc(&nbd->config_refs);
1336 old = nsock->sock;
1337 nsock->fallback_index = -1;
1338 nsock->sock = sock;
1339 nsock->dead = false;
1340 INIT_WORK(&args->work, recv_work);
1341 args->index = i;
1342 args->nbd = nbd;
1343 args->nsock = nsock;
1344 nsock->cookie++;
1345 mutex_unlock(&nsock->tx_lock);
1346 sockfd_put(old);
1347
1348 clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
1349
1350 /* We take the tx_mutex in an error path in the recv_work, so we
1351 * need to queue_work outside of the tx_mutex.
1352 */
1353 queue_work(nbd->recv_workq, &args->work);
1354
1355 atomic_inc(&config->live_connections);
1356 wake_up(&config->conn_wait);
1357 return 0;
1358 }
1359 sockfd_put(sock);
1360 kfree(args);
1361 return -ENOSPC;
1362 }
1363
1364 static void nbd_bdev_reset(struct nbd_device *nbd)
1365 {
1366 if (disk_openers(nbd->disk) > 1)
1367 return;
1368 set_capacity(nbd->disk, 0);
1369 }
1370
1371 static void nbd_parse_flags(struct nbd_device *nbd)
1372 {
1373 if (nbd->config->flags & NBD_FLAG_READ_ONLY)
1374 set_disk_ro(nbd->disk, true);
1375 else
1376 set_disk_ro(nbd->disk, false);
1377 }
1378
1379 static void send_disconnects(struct nbd_device *nbd)
1380 {
1381 struct nbd_config *config = nbd->config;
1382 struct nbd_request request = {
1383 .magic = htonl(NBD_REQUEST_MAGIC),
1384 .type = htonl(NBD_CMD_DISC),
1385 };
1386 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
1387 struct iov_iter from;
1388 int i, ret;
1389
1390 for (i = 0; i < config->num_connections; i++) {
1391 struct nbd_sock *nsock = config->socks[i];
1392
1393 iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
1394 mutex_lock(&nsock->tx_lock);
1395 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
1396 if (ret < 0)
1397 dev_err(disk_to_dev(nbd->disk),
1398 "Send disconnect failed %d\n", ret);
1399 mutex_unlock(&nsock->tx_lock);
1400 }
1401 }
1402
1403 static int nbd_disconnect(struct nbd_device *nbd)
1404 {
1405 struct nbd_config *config = nbd->config;
1406
1407 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
1408 set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
1409 set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
1410 send_disconnects(nbd);
1411 return 0;
1412 }
1413
1414 static void nbd_clear_sock(struct nbd_device *nbd)
1415 {
1416 sock_shutdown(nbd);
1417 nbd_clear_que(nbd);
1418 nbd->task_setup = NULL;
1419 }
1420
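/*
 * Drop a reference on the current configuration.  The final put, taken
 * under config_lock, tears down the sockets, sysfs attributes, debugfs
 * entries and the config itself.
 */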
1421 static void nbd_config_put(struct nbd_device *nbd)
1422 {
1423 if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1424 &nbd->config_lock)) {
1425 struct nbd_config *config = nbd->config;
1426 nbd_dev_dbg_close(nbd);
1427 invalidate_disk(nbd->disk);
1428 if (nbd->config->bytesize)
1429 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
1430 if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
1431 &config->runtime_flags))
1432 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1433 nbd->pid = 0;
1434 if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
1435 &config->runtime_flags)) {
1436 device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
1437 kfree(nbd->backend);
1438 nbd->backend = NULL;
1439 }
1440 nbd_clear_sock(nbd);
1441 if (config->num_connections) {
1442 int i;
1443 for (i = 0; i < config->num_connections; i++) {
1444 sockfd_put(config->socks[i]->sock);
1445 kfree(config->socks[i]);
1446 }
1447 kfree(config->socks);
1448 }
1449 kfree(nbd->config);
1450 nbd->config = NULL;
1451
1452 nbd->tag_set.timeout = 0;
1453
1454 mutex_unlock(&nbd->config_lock);
1455 nbd_put(nbd);
1456 module_put(THIS_MODULE);
1457 }
1458 }
1459
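/*
 * Record the controlling process, create the pid sysfs attribute, start
 * one receive worker per connection and apply the configured device size.
 */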
1460 static int nbd_start_device(struct nbd_device *nbd)
1461 {
1462 struct nbd_config *config = nbd->config;
1463 int num_connections = config->num_connections;
1464 int error = 0, i;
1465
1466 if (nbd->pid)
1467 return -EBUSY;
1468 if (!config->socks)
1469 return -EINVAL;
1470 if (num_connections > 1 &&
1471 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
1472 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
1473 return -EINVAL;
1474 }
1475
1476 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
1477 nbd->pid = task_pid_nr(current);
1478
1479 nbd_parse_flags(nbd);
1480
1481 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1482 if (error) {
1483 dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
1484 return error;
1485 }
1486 set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
1487
1488 nbd_dev_dbg_init(nbd);
1489 for (i = 0; i < num_connections; i++) {
1490 struct recv_thread_args *args;
1491
1492 args = kzalloc(sizeof(*args), GFP_KERNEL);
1493 if (!args) {
1494 sock_shutdown(nbd);
1495 /*
1496 * If num_connections is m (m > 2) and the first n
1497 * (1 < n < m) kzallocs succeed but allocation n + 1
1498 * fails, we still have n recv threads running.
1499 * Add flush_workqueue here to prevent those recv threads
1500 * from dropping the last config_refs and trying to destroy
1501 * the workqueue from inside the workqueue.
1502 */
1503 if (i)
1504 flush_workqueue(nbd->recv_workq);
1505 return -ENOMEM;
1506 }
1507 sk_set_memalloc(config->socks[i]->sock->sk);
1508 if (nbd->tag_set.timeout)
1509 config->socks[i]->sock->sk->sk_sndtimeo =
1510 nbd->tag_set.timeout;
1511 atomic_inc(&config->recv_threads);
1512 refcount_inc(&nbd->config_refs);
1513 INIT_WORK(&args->work, recv_work);
1514 args->nbd = nbd;
1515 args->nsock = config->socks[i];
1516 args->index = i;
1517 queue_work(nbd->recv_workq, &args->work);
1518 }
1519 return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
1520 }
1521
1522 static int nbd_start_device_ioctl(struct nbd_device *nbd)
1523 {
1524 struct nbd_config *config = nbd->config;
1525 int ret;
1526
1527 ret = nbd_start_device(nbd);
1528 if (ret)
1529 return ret;
1530
1531 if (max_part)
1532 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
1533 mutex_unlock(&nbd->config_lock);
1534 ret = wait_event_interruptible(config->recv_wq,
1535 atomic_read(&config->recv_threads) == 0);
1536 if (ret) {
1537 sock_shutdown(nbd);
1538 nbd_clear_que(nbd);
1539 }
1540
1541 flush_workqueue(nbd->recv_workq);
1542 mutex_lock(&nbd->config_lock);
1543 nbd_bdev_reset(nbd);
1544 /* user requested, ignore socket errors */
1545 if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
1546 ret = 0;
1547 if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
1548 ret = -ETIMEDOUT;
1549 return ret;
1550 }
1551
1552 static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
1553 {
1554 nbd_clear_sock(nbd);
1555 disk_force_media_change(nbd->disk);
1556 nbd_bdev_reset(nbd);
1557 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
1558 &nbd->config->runtime_flags))
1559 nbd_config_put(nbd);
1560 }
1561
1562 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
1563 {
1564 nbd->tag_set.timeout = timeout * HZ;
1565 if (timeout)
1566 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1567 else
1568 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
1569 }
1570
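/*
 * A rough sketch of how a userspace client typically drives this legacy
 * ioctl interface (the exact sequence is up to the client, e.g. nbd-client):
 *
 *	fd = open("/dev/nbd0", O_RDWR);
 *	ioctl(fd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(fd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(fd, NBD_SET_FLAGS, handshake_flags);
 *	ioctl(fd, NBD_SET_SOCK, sockfd);       (connected socket to the server)
 *	ioctl(fd, NBD_DO_IT);                  (blocks until disconnect)
 */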
1571 /* Must be called with config_lock held */
1572 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1573 unsigned int cmd, unsigned long arg)
1574 {
1575 struct nbd_config *config = nbd->config;
1576 loff_t bytesize;
1577
1578 switch (cmd) {
1579 case NBD_DISCONNECT:
1580 return nbd_disconnect(nbd);
1581 case NBD_CLEAR_SOCK:
1582 nbd_clear_sock_ioctl(nbd);
1583 return 0;
1584 case NBD_SET_SOCK:
1585 return nbd_add_socket(nbd, arg, false);
1586 case NBD_SET_BLKSIZE:
1587 return nbd_set_size(nbd, config->bytesize, arg);
1588 case NBD_SET_SIZE:
1589 return nbd_set_size(nbd, arg, nbd_blksize(config));
1590 case NBD_SET_SIZE_BLOCKS:
1591 if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
1592 return -EINVAL;
1593 return nbd_set_size(nbd, bytesize, nbd_blksize(config));
1594 case NBD_SET_TIMEOUT:
1595 nbd_set_cmd_timeout(nbd, arg);
1596 return 0;
1597
1598 case NBD_SET_FLAGS:
1599 config->flags = arg;
1600 return 0;
1601 case NBD_DO_IT:
1602 return nbd_start_device_ioctl(nbd);
1603 case NBD_CLEAR_QUE:
1604 /*
1605 * This is for compatibility only. The queue is always cleared
1606 * by NBD_DO_IT or NBD_CLEAR_SOCK.
1607 */
1608 return 0;
1609 case NBD_PRINT_DEBUG:
1610 /*
1611 * For compatibility only, we no longer keep a list of
1612 * outstanding requests.
1613 */
1614 return 0;
1615 }
1616 return -ENOTTY;
1617 }
1618
1619 static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
1620 unsigned int cmd, unsigned long arg)
1621 {
1622 struct nbd_device *nbd = bdev->bd_disk->private_data;
1623 struct nbd_config *config = nbd->config;
1624 int error = -EINVAL;
1625
1626 if (!capable(CAP_SYS_ADMIN))
1627 return -EPERM;
1628
1629 /* The block layer will pass back some non-nbd ioctls in case we have
1630 * special handling for them, but we don't so just return an error.
1631 */
1632 if (_IOC_TYPE(cmd) != 0xab)
1633 return -EINVAL;
1634
1635 mutex_lock(&nbd->config_lock);
1636
1637 /* Don't allow ioctl operations on a nbd device that was created with
1638 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1639 */
1640 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
1641 (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1642 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1643 else
1644 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
1645 mutex_unlock(&nbd->config_lock);
1646 return error;
1647 }
1648
1649 static int nbd_alloc_and_init_config(struct nbd_device *nbd)
1650 {
1651 struct nbd_config *config;
1652
1653 if (WARN_ON(nbd->config))
1654 return -EINVAL;
1655
1656 if (!try_module_get(THIS_MODULE))
1657 return -ENODEV;
1658
1659 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1660 if (!config) {
1661 module_put(THIS_MODULE);
1662 return -ENOMEM;
1663 }
1664
1665 atomic_set(&config->recv_threads, 0);
1666 init_waitqueue_head(&config->recv_wq);
1667 init_waitqueue_head(&config->conn_wait);
1668 config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
1669 atomic_set(&config->live_connections, 0);
1670
1671 nbd->config = config;
1672 /*
1673 * Order refcount_set(&nbd->config_refs, 1) against the nbd->config
1674 * assignment; its pair is the barrier in nbd_get_config_unlocked(),
1675 * so nbd_get_config_unlocked() won't see nbd->config as NULL after
1676 * refcount_inc_not_zero() succeeds.
1677 */
1678 smp_mb__before_atomic();
1679 refcount_set(&nbd->config_refs, 1);
1680
1681 return 0;
1682 }
1683
1684 static int nbd_open(struct gendisk *disk, blk_mode_t mode)
1685 {
1686 struct nbd_device *nbd;
1687 struct nbd_config *config;
1688 int ret = 0;
1689
1690 mutex_lock(&nbd_index_mutex);
1691 nbd = disk->private_data;
1692 if (!nbd) {
1693 ret = -ENXIO;
1694 goto out;
1695 }
1696 if (!refcount_inc_not_zero(&nbd->refs)) {
1697 ret = -ENXIO;
1698 goto out;
1699 }
1700
1701 config = nbd_get_config_unlocked(nbd);
1702 if (!config) {
1703 mutex_lock(&nbd->config_lock);
1704 if (refcount_inc_not_zero(&nbd->config_refs)) {
1705 mutex_unlock(&nbd->config_lock);
1706 goto out;
1707 }
1708 ret = nbd_alloc_and_init_config(nbd);
1709 if (ret) {
1710 mutex_unlock(&nbd->config_lock);
1711 goto out;
1712 }
1713
1714 refcount_inc(&nbd->refs);
1715 mutex_unlock(&nbd->config_lock);
1716 if (max_part)
1717 set_bit(GD_NEED_PART_SCAN, &disk->state);
1718 } else if (nbd_disconnected(config)) {
1719 if (max_part)
1720 set_bit(GD_NEED_PART_SCAN, &disk->state);
1721 }
1722 out:
1723 mutex_unlock(&nbd_index_mutex);
1724 return ret;
1725 }
1726
1727 static void nbd_release(struct gendisk *disk)
1728 {
1729 struct nbd_device *nbd = disk->private_data;
1730
1731 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1732 disk_openers(disk) == 0)
1733 nbd_disconnect_and_put(nbd);
1734
1735 nbd_config_put(nbd);
1736 nbd_put(nbd);
1737 }
1738
1739 static void nbd_free_disk(struct gendisk *disk)
1740 {
1741 struct nbd_device *nbd = disk->private_data;
1742
1743 kfree(nbd);
1744 }
1745
1746 static const struct block_device_operations nbd_fops =
1747 {
1748 .owner = THIS_MODULE,
1749 .open = nbd_open,
1750 .release = nbd_release,
1751 .ioctl = nbd_ioctl,
1752 .compat_ioctl = nbd_ioctl,
1753 .free_disk = nbd_free_disk,
1754 };
1755
1756 #if IS_ENABLED(CONFIG_DEBUG_FS)
1757
1758 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1759 {
1760 struct nbd_device *nbd = s->private;
1761
1762 if (nbd->pid)
1763 seq_printf(s, "recv: %d\n", nbd->pid);
1764
1765 return 0;
1766 }
1767
1768 DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);
1769
1770 static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1771 {
1772 struct nbd_device *nbd = s->private;
1773 u32 flags = nbd->config->flags;
1774
1775 seq_printf(s, "Hex: 0x%08x\n\n", flags);
1776
1777 seq_puts(s, "Known flags:\n");
1778
1779 if (flags & NBD_FLAG_HAS_FLAGS)
1780 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1781 if (flags & NBD_FLAG_READ_ONLY)
1782 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1783 if (flags & NBD_FLAG_SEND_FLUSH)
1784 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
1785 if (flags & NBD_FLAG_SEND_FUA)
1786 seq_puts(s, "NBD_FLAG_SEND_FUA\n");
1787 if (flags & NBD_FLAG_SEND_TRIM)
1788 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1789 if (flags & NBD_FLAG_SEND_WRITE_ZEROES)
1790 seq_puts(s, "NBD_FLAG_SEND_WRITE_ZEROES\n");
1791 if (flags & NBD_FLAG_ROTATIONAL)
1792 seq_puts(s, "NBD_FLAG_ROTATIONAL\n");
1793
1794 return 0;
1795 }
1796
1797 DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);
1798
1799 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1800 {
1801 struct dentry *dir;
1802 struct nbd_config *config = nbd->config;
1803
1804 if (!nbd_dbg_dir)
1805 return -EIO;
1806
1807 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
1808 if (IS_ERR(dir)) {
1809 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1810 nbd_name(nbd));
1811 return -EIO;
1812 }
1813 config->dbg_dir = dir;
1814
1815 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
1816 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
1817 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1818 debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
1819 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
1820
1821 return 0;
1822 }
1823
1824 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1825 {
1826 debugfs_remove_recursive(nbd->config->dbg_dir);
1827 }
1828
1829 static int nbd_dbg_init(void)
1830 {
1831 struct dentry *dbg_dir;
1832
1833 dbg_dir = debugfs_create_dir("nbd", NULL);
1834 if (IS_ERR(dbg_dir))
1835 return -EIO;
1836
1837 nbd_dbg_dir = dbg_dir;
1838
1839 return 0;
1840 }
1841
1842 static void nbd_dbg_close(void)
1843 {
1844 debugfs_remove_recursive(nbd_dbg_dir);
1845 }
1846
1847 #else /* IS_ENABLED(CONFIG_DEBUG_FS) */
1848
1849 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1850 {
1851 return 0;
1852 }
1853
1854 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1855 {
1856 }
1857
1858 static int nbd_dbg_init(void)
1859 {
1860 return 0;
1861 }
1862
1863 static void nbd_dbg_close(void)
1864 {
1865 }
1866
1867 #endif
1868
1869 static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1870 unsigned int hctx_idx, unsigned int numa_node)
1871 {
1872 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1873 cmd->nbd = set->driver_data;
1874 cmd->flags = 0;
1875 mutex_init(&cmd->lock);
1876 return 0;
1877 }
1878
1879 static const struct blk_mq_ops nbd_mq_ops = {
1880 .queue_rq = nbd_queue_rq,
1881 .complete = nbd_complete_rq,
1882 .init_request = nbd_init_request,
1883 .timeout = nbd_xmit_timeout,
1884 };
1885
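/*
 * Allocate and register a new nbd device. Sets up a single blocking
 * blk-mq hardware queue (depth 128), reserves an index in nbd_index_idr
 * (either the requested one or the first free slot), allocates the
 * per-device recv workqueue and gendisk, and finally publishes the
 * device with @refs references. Returns the device or an ERR_PTR().
 */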
1886 static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
1887 {
1888 struct queue_limits lim = {
1889 .max_hw_sectors = 65536,
1890 .io_opt = 256 << SECTOR_SHIFT,
1891 .max_segments = USHRT_MAX,
1892 .max_segment_size = UINT_MAX,
1893 };
1894 struct nbd_device *nbd;
1895 struct gendisk *disk;
1896 int err = -ENOMEM;
1897
1898 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1899 if (!nbd)
1900 goto out;
1901
1902 nbd->tag_set.ops = &nbd_mq_ops;
1903 nbd->tag_set.nr_hw_queues = 1;
1904 nbd->tag_set.queue_depth = 128;
1905 nbd->tag_set.numa_node = NUMA_NO_NODE;
1906 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1907 nbd->tag_set.flags = BLK_MQ_F_BLOCKING;
1908 nbd->tag_set.driver_data = nbd;
1909 INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
1910 nbd->backend = NULL;
1911
1912 err = blk_mq_alloc_tag_set(&nbd->tag_set);
1913 if (err)
1914 goto out_free_nbd;
1915
1916 mutex_lock(&nbd_index_mutex);
1917 if (index >= 0) {
1918 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1919 GFP_KERNEL);
1920 if (err == -ENOSPC)
1921 err = -EEXIST;
1922 } else {
1923 err = idr_alloc(&nbd_index_idr, nbd, 0,
1924 (MINORMASK >> part_shift) + 1, GFP_KERNEL);
1925 if (err >= 0)
1926 index = err;
1927 }
1928 nbd->index = index;
1929 mutex_unlock(&nbd_index_mutex);
1930 if (err < 0)
1931 goto out_free_tags;
1932
1933 disk = blk_mq_alloc_disk(&nbd->tag_set, &lim, NULL);
1934 if (IS_ERR(disk)) {
1935 err = PTR_ERR(disk);
1936 goto out_free_idr;
1937 }
1938 nbd->disk = disk;
1939
1940 nbd->recv_workq = alloc_workqueue("nbd%d-recv",
1941 WQ_MEM_RECLAIM | WQ_HIGHPRI |
1942 WQ_UNBOUND, 0, nbd->index);
1943 if (!nbd->recv_workq) {
1944 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
1945 err = -ENOMEM;
1946 goto out_err_disk;
1947 }
1948
1949 mutex_init(&nbd->config_lock);
1950 refcount_set(&nbd->config_refs, 0);
1951 /*
1952 	 * Start out with zero references to keep other threads from using
1953 * this device until it is fully initialized.
1954 */
1955 refcount_set(&nbd->refs, 0);
1956 INIT_LIST_HEAD(&nbd->list);
1957 disk->major = NBD_MAJOR;
1958 disk->first_minor = index << part_shift;
1959 disk->minors = 1 << part_shift;
1960 disk->fops = &nbd_fops;
1961 disk->private_data = nbd;
1962 sprintf(disk->disk_name, "nbd%d", index);
1963 err = add_disk(disk);
1964 if (err)
1965 goto out_free_work;
1966
1967 /*
1968 * Now publish the device.
1969 */
1970 refcount_set(&nbd->refs, refs);
1971 nbd_total_devices++;
1972 return nbd;
1973
1974 out_free_work:
1975 destroy_workqueue(nbd->recv_workq);
1976 out_err_disk:
1977 put_disk(disk);
1978 out_free_idr:
1979 mutex_lock(&nbd_index_mutex);
1980 idr_remove(&nbd_index_idr, index);
1981 mutex_unlock(&nbd_index_mutex);
1982 out_free_tags:
1983 blk_mq_free_tag_set(&nbd->tag_set);
1984 out_free_nbd:
1985 kfree(nbd);
1986 out:
1987 return ERR_PTR(err);
1988 }
1989
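/*
 * Find a device that has no active config and is not marked for
 * destroy-on-disconnect, and take a reference on it. The caller must
 * hold nbd_index_mutex.
 */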
1990 static struct nbd_device *nbd_find_get_unused(void)
1991 {
1992 struct nbd_device *nbd;
1993 int id;
1994
1995 lockdep_assert_held(&nbd_index_mutex);
1996
1997 idr_for_each_entry(&nbd_index_idr, nbd, id) {
1998 if (refcount_read(&nbd->config_refs) ||
1999 test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
2000 continue;
2001 if (refcount_inc_not_zero(&nbd->refs))
2002 return nbd;
2003 }
2004
2005 return NULL;
2006 }
2007
2008 /* Netlink interface. */
2009 static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
2010 [NBD_ATTR_INDEX] = { .type = NLA_U32 },
2011 [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
2012 [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
2013 [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
2014 [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
2015 [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
2016 [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
2017 [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
2018 [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
2019 [NBD_ATTR_BACKEND_IDENTIFIER] = { .type = NLA_STRING},
2020 };
2021
2022 static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
2023 [NBD_SOCK_FD] = { .type = NLA_U32 },
2024 };
2025
2026 /* We don't use this right now since we don't parse the incoming list, but we
2027 * still want it here so userspace knows what to expect.
2028 */
2029 static const struct nla_policy __attribute__((unused))
2030 nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
2031 [NBD_DEVICE_INDEX] = { .type = NLA_U32 },
2032 [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
2033 };
2034
2035 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
2036 {
2037 struct nbd_config *config = nbd->config;
2038 u64 bsize = nbd_blksize(config);
2039 u64 bytes = config->bytesize;
2040
2041 if (info->attrs[NBD_ATTR_SIZE_BYTES])
2042 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
2043
2044 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
2045 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
2046
2047 if (bytes != config->bytesize || bsize != nbd_blksize(config))
2048 return nbd_set_size(nbd, bytes, bsize);
2049 return 0;
2050 }
2051
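/*
 * NBD_CMD_CONNECT handler: find (or create) the requested device, set up
 * its config from the supplied attributes (size, timeouts, server and
 * client flags), attach the given socket fds, start the device and reply
 * to userspace with the index that was used.
 */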
2052 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
2053 {
2054 struct nbd_device *nbd;
2055 struct nbd_config *config;
2056 int index = -1;
2057 int ret;
2058 bool put_dev = false;
2059
2060 if (!netlink_capable(skb, CAP_SYS_ADMIN))
2061 return -EPERM;
2062
2063 if (info->attrs[NBD_ATTR_INDEX]) {
2064 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2065
2066 /*
2067 		 * A too-large first_minor can cause duplicate creation of
2068 		 * sysfs files/links, since index << part_shift might overflow, and
2069 		 * MKDEV() expects first_minor to fit in 20 bits.
2070 */
2071 if (index < 0 || index > MINORMASK >> part_shift) {
2072 pr_err("illegal input index %d\n", index);
2073 return -EINVAL;
2074 }
2075 }
2076 if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SOCKETS)) {
2077 pr_err("must specify at least one socket\n");
2078 return -EINVAL;
2079 }
2080 if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SIZE_BYTES)) {
2081 pr_err("must specify a size in bytes for the device\n");
2082 return -EINVAL;
2083 }
2084 again:
2085 mutex_lock(&nbd_index_mutex);
2086 if (index == -1) {
2087 nbd = nbd_find_get_unused();
2088 } else {
2089 nbd = idr_find(&nbd_index_idr, index);
2090 if (nbd) {
2091 if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
2092 test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
2093 !refcount_inc_not_zero(&nbd->refs)) {
2094 mutex_unlock(&nbd_index_mutex);
2095 pr_err("device at index %d is going down\n",
2096 index);
2097 return -EINVAL;
2098 }
2099 }
2100 }
2101 mutex_unlock(&nbd_index_mutex);
2102
2103 if (!nbd) {
2104 nbd = nbd_dev_add(index, 2);
2105 if (IS_ERR(nbd)) {
2106 pr_err("failed to add new device\n");
2107 return PTR_ERR(nbd);
2108 }
2109 }
2110
2111 mutex_lock(&nbd->config_lock);
2112 if (refcount_read(&nbd->config_refs)) {
2113 mutex_unlock(&nbd->config_lock);
2114 nbd_put(nbd);
2115 if (index == -1)
2116 goto again;
2117 pr_err("nbd%d already in use\n", index);
2118 return -EBUSY;
2119 }
2120
2121 ret = nbd_alloc_and_init_config(nbd);
2122 if (ret) {
2123 mutex_unlock(&nbd->config_lock);
2124 nbd_put(nbd);
2125 pr_err("couldn't allocate config\n");
2126 return ret;
2127 }
2128
2129 config = nbd->config;
2130 set_bit(NBD_RT_BOUND, &config->runtime_flags);
2131 ret = nbd_genl_size_set(info, nbd);
2132 if (ret)
2133 goto out;
2134
2135 if (info->attrs[NBD_ATTR_TIMEOUT])
2136 nbd_set_cmd_timeout(nbd,
2137 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
2138 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2139 config->dead_conn_timeout =
2140 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2141 config->dead_conn_timeout *= HZ;
2142 }
2143 if (info->attrs[NBD_ATTR_SERVER_FLAGS])
2144 config->flags =
2145 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
2146 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2147 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2148 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2149 /*
2150 * We have 1 ref to keep the device around, and then 1
2151 * ref for our current operation here, which will be
2152 * inherited by the config. If we already have
2153 * DESTROY_ON_DISCONNECT set then we know we don't have
2154 * that extra ref already held so we don't need the
2155 * put_dev.
2156 */
2157 if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
2158 &nbd->flags))
2159 put_dev = true;
2160 } else {
2161 if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
2162 &nbd->flags))
2163 refcount_inc(&nbd->refs);
2164 }
2165 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2166 set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2167 &config->runtime_flags);
2168 }
2169 }
2170
2171 if (info->attrs[NBD_ATTR_SOCKETS]) {
2172 struct nlattr *attr;
2173 int rem, fd;
2174
2175 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2176 rem) {
2177 struct nlattr *socks[NBD_SOCK_MAX+1];
2178
2179 if (nla_type(attr) != NBD_SOCK_ITEM) {
2180 pr_err("socks must be embedded in a SOCK_ITEM attr\n");
2181 ret = -EINVAL;
2182 goto out;
2183 }
2184 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2185 attr,
2186 nbd_sock_policy,
2187 info->extack);
2188 if (ret != 0) {
2189 pr_err("error processing sock list\n");
2190 ret = -EINVAL;
2191 goto out;
2192 }
2193 if (!socks[NBD_SOCK_FD])
2194 continue;
2195 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2196 ret = nbd_add_socket(nbd, fd, true);
2197 if (ret)
2198 goto out;
2199 }
2200 }
2201 ret = nbd_start_device(nbd);
2202 if (ret)
2203 goto out;
2204 if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
2205 nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
2206 GFP_KERNEL);
2207 if (!nbd->backend) {
2208 ret = -ENOMEM;
2209 goto out;
2210 }
2211 }
2212 ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
2213 if (ret) {
2214 dev_err(disk_to_dev(nbd->disk),
2215 "device_create_file failed for backend!\n");
2216 goto out;
2217 }
2218 set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
2219 out:
2220 mutex_unlock(&nbd->config_lock);
2221 if (!ret) {
2222 set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
2223 refcount_inc(&nbd->config_refs);
2224 nbd_connect_reply(info, nbd->index);
2225 }
2226 nbd_config_put(nbd);
2227 if (put_dev)
2228 nbd_put(nbd);
2229 return ret;
2230 }
2231
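/*
 * Tear down a netlink-configured device: request a disconnect, shut down
 * the sockets, wait for the recv work to finish, cancel inflight I/O and
 * drop the config reference taken at connect time, if it is still held.
 */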
2232 static void nbd_disconnect_and_put(struct nbd_device *nbd)
2233 {
2234 mutex_lock(&nbd->config_lock);
2235 nbd_disconnect(nbd);
2236 sock_shutdown(nbd);
2237 wake_up(&nbd->config->conn_wait);
2238 /*
2239 	 * Make sure the recv thread has finished so that we can safely call
2240 	 * nbd_clear_que() to cancel the inflight I/Os.
2241 */
2242 flush_workqueue(nbd->recv_workq);
2243 nbd_clear_que(nbd);
2244 nbd->task_setup = NULL;
2245 clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
2246 mutex_unlock(&nbd->config_lock);
2247
2248 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
2249 &nbd->config->runtime_flags))
2250 nbd_config_put(nbd);
2251 }
2252
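/*
 * NBD_CMD_DISCONNECT handler: look up the device by index, grab device
 * and config references, and disconnect it.
 */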
2253 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
2254 {
2255 struct nbd_device *nbd;
2256 int index;
2257
2258 if (!netlink_capable(skb, CAP_SYS_ADMIN))
2259 return -EPERM;
2260
2261 if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
2262 pr_err("must specify an index to disconnect\n");
2263 return -EINVAL;
2264 }
2265 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2266 mutex_lock(&nbd_index_mutex);
2267 nbd = idr_find(&nbd_index_idr, index);
2268 if (!nbd) {
2269 mutex_unlock(&nbd_index_mutex);
2270 pr_err("couldn't find device at index %d\n", index);
2271 return -EINVAL;
2272 }
2273 if (!refcount_inc_not_zero(&nbd->refs)) {
2274 mutex_unlock(&nbd_index_mutex);
2275 pr_err("device at index %d is going down\n", index);
2276 return -EINVAL;
2277 }
2278 mutex_unlock(&nbd_index_mutex);
2279 if (!refcount_inc_not_zero(&nbd->config_refs))
2280 goto put_nbd;
2281 nbd_disconnect_and_put(nbd);
2282 nbd_config_put(nbd);
2283 put_nbd:
2284 nbd_put(nbd);
2285 return 0;
2286 }
2287
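/*
 * NBD_CMD_RECONFIGURE handler: update size, timeouts and client flags of
 * an already-running device (the backend identifier must match if one was
 * recorded at connect time) and plug any supplied socket fds into dead
 * connections via nbd_reconnect_socket().
 */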
2288 static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
2289 {
2290 struct nbd_device *nbd = NULL;
2291 struct nbd_config *config;
2292 int index;
2293 int ret = 0;
2294 bool put_dev = false;
2295
2296 if (!netlink_capable(skb, CAP_SYS_ADMIN))
2297 return -EPERM;
2298
2299 if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
2300 pr_err("must specify a device to reconfigure\n");
2301 return -EINVAL;
2302 }
2303 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2304 mutex_lock(&nbd_index_mutex);
2305 nbd = idr_find(&nbd_index_idr, index);
2306 if (!nbd) {
2307 mutex_unlock(&nbd_index_mutex);
2308 pr_err("couldn't find a device at index %d\n", index);
2309 return -EINVAL;
2310 }
2311 if (nbd->backend) {
2312 if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
2313 if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
2314 nbd->backend)) {
2315 mutex_unlock(&nbd_index_mutex);
2316 dev_err(nbd_to_dev(nbd),
2317 "backend image doesn't match with %s\n",
2318 nbd->backend);
2319 return -EINVAL;
2320 }
2321 } else {
2322 mutex_unlock(&nbd_index_mutex);
2323 dev_err(nbd_to_dev(nbd), "must specify backend\n");
2324 return -EINVAL;
2325 }
2326 }
2327 if (!refcount_inc_not_zero(&nbd->refs)) {
2328 mutex_unlock(&nbd_index_mutex);
2329 pr_err("device at index %d is going down\n", index);
2330 return -EINVAL;
2331 }
2332 mutex_unlock(&nbd_index_mutex);
2333
2334 config = nbd_get_config_unlocked(nbd);
2335 if (!config) {
2336 dev_err(nbd_to_dev(nbd),
2337 "not configured, cannot reconfigure\n");
2338 nbd_put(nbd);
2339 return -EINVAL;
2340 }
2341
2342 mutex_lock(&nbd->config_lock);
2343 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
2344 !nbd->pid) {
2345 dev_err(nbd_to_dev(nbd),
2346 "not configured, cannot reconfigure\n");
2347 ret = -EINVAL;
2348 goto out;
2349 }
2350
2351 ret = nbd_genl_size_set(info, nbd);
2352 if (ret)
2353 goto out;
2354
2355 if (info->attrs[NBD_ATTR_TIMEOUT])
2356 nbd_set_cmd_timeout(nbd,
2357 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
2358 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2359 config->dead_conn_timeout =
2360 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2361 config->dead_conn_timeout *= HZ;
2362 }
2363 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2364 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2365 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2366 if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
2367 &nbd->flags))
2368 put_dev = true;
2369 } else {
2370 if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
2371 &nbd->flags))
2372 refcount_inc(&nbd->refs);
2373 }
2374
2375 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2376 set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2377 &config->runtime_flags);
2378 } else {
2379 clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2380 &config->runtime_flags);
2381 }
2382 }
2383
2384 if (info->attrs[NBD_ATTR_SOCKETS]) {
2385 struct nlattr *attr;
2386 int rem, fd;
2387
2388 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2389 rem) {
2390 struct nlattr *socks[NBD_SOCK_MAX+1];
2391
2392 if (nla_type(attr) != NBD_SOCK_ITEM) {
2393 pr_err("socks must be embedded in a SOCK_ITEM attr\n");
2394 ret = -EINVAL;
2395 goto out;
2396 }
2397 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2398 attr,
2399 nbd_sock_policy,
2400 info->extack);
2401 if (ret != 0) {
2402 pr_err("error processing sock list\n");
2403 ret = -EINVAL;
2404 goto out;
2405 }
2406 if (!socks[NBD_SOCK_FD])
2407 continue;
2408 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2409 ret = nbd_reconnect_socket(nbd, fd);
2410 if (ret) {
2411 if (ret == -ENOSPC)
2412 ret = 0;
2413 goto out;
2414 }
2415 dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2416 }
2417 }
2418 out:
2419 mutex_unlock(&nbd->config_lock);
2420 nbd_config_put(nbd);
2421 nbd_put(nbd);
2422 if (put_dev)
2423 nbd_put(nbd);
2424 return ret;
2425 }
2426
2427 static const struct genl_small_ops nbd_connect_genl_ops[] = {
2428 {
2429 .cmd = NBD_CMD_CONNECT,
2430 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2431 .doit = nbd_genl_connect,
2432 },
2433 {
2434 .cmd = NBD_CMD_DISCONNECT,
2435 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2436 .doit = nbd_genl_disconnect,
2437 },
2438 {
2439 .cmd = NBD_CMD_RECONFIGURE,
2440 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2441 .doit = nbd_genl_reconfigure,
2442 },
2443 {
2444 .cmd = NBD_CMD_STATUS,
2445 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2446 .doit = nbd_genl_status,
2447 },
2448 };
2449
2450 static const struct genl_multicast_group nbd_mcast_grps[] = {
2451 { .name = NBD_GENL_MCAST_GROUP_NAME, },
2452 };
2453
2454 static struct genl_family nbd_genl_family __ro_after_init = {
2455 .hdrsize = 0,
2456 .name = NBD_GENL_FAMILY_NAME,
2457 .version = NBD_GENL_VERSION,
2458 .module = THIS_MODULE,
2459 .small_ops = nbd_connect_genl_ops,
2460 .n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
2461 .resv_start_op = NBD_CMD_STATUS + 1,
2462 .maxattr = NBD_ATTR_MAX,
2463 .netnsok = 1,
2464 .policy = nbd_attr_policy,
2465 .mcgrps = nbd_mcast_grps,
2466 .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
2467 };
2468 MODULE_ALIAS_GENL_FAMILY(NBD_GENL_FAMILY_NAME);
2469
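/*
 * Append one NBD_DEVICE_ITEM nest (index + connected state) to a status
 * reply. A device counts as connected if it has an active config.
 */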
2470 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2471 {
2472 struct nlattr *dev_opt;
2473 u8 connected = 0;
2474 int ret;
2475
2476 	/* This is a little racy, but for status it's ok. The
2477 	 * reason we don't take a ref here is that we can't take
2478 	 * one in the index == -1 case, as we would then need to
2479 	 * drop it under the nbd_index_mutex, which could
2480 	 * deadlock if we are configured to remove ourselves
2481 	 * once we're disconnected.
2482 */
2483 if (refcount_read(&nbd->config_refs))
2484 connected = 1;
2485 dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
2486 if (!dev_opt)
2487 return -EMSGSIZE;
2488 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2489 if (ret)
2490 return -EMSGSIZE;
2491 ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2492 connected);
2493 if (ret)
2494 return -EMSGSIZE;
2495 nla_nest_end(reply, dev_opt);
2496 return 0;
2497 }
2498
2499 static int status_cb(int id, void *ptr, void *data)
2500 {
2501 struct nbd_device *nbd = ptr;
2502 return populate_nbd_status(nbd, (struct sk_buff *)data);
2503 }
2504
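/*
 * NBD_CMD_STATUS handler: build a reply containing either the single
 * device named by NBD_ATTR_INDEX or, if no index is given, every device
 * currently in nbd_index_idr.
 */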
2505 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2506 {
2507 struct nlattr *dev_list;
2508 struct sk_buff *reply;
2509 void *reply_head;
2510 size_t msg_size;
2511 int index = -1;
2512 int ret = -ENOMEM;
2513
2514 if (info->attrs[NBD_ATTR_INDEX])
2515 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2516
2517 mutex_lock(&nbd_index_mutex);
2518
2519 msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2520 nla_attr_size(sizeof(u8)));
2521 msg_size *= (index == -1) ? nbd_total_devices : 1;
2522
2523 reply = genlmsg_new(msg_size, GFP_KERNEL);
2524 if (!reply)
2525 goto out;
2526 reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2527 NBD_CMD_STATUS);
2528 if (!reply_head) {
2529 nlmsg_free(reply);
2530 goto out;
2531 }
2532
2533 dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
2534 if (!dev_list) {
2535 nlmsg_free(reply);
2536 ret = -EMSGSIZE;
2537 goto out;
2538 }
2539
2540 if (index == -1) {
2541 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2542 if (ret) {
2543 nlmsg_free(reply);
2544 goto out;
2545 }
2546 } else {
2547 struct nbd_device *nbd;
2548 nbd = idr_find(&nbd_index_idr, index);
2549 if (nbd) {
2550 ret = populate_nbd_status(nbd, reply);
2551 if (ret) {
2552 nlmsg_free(reply);
2553 goto out;
2554 }
2555 }
2556 }
2557 nla_nest_end(reply, dev_list);
2558 genlmsg_end(reply, reply_head);
2559 ret = genlmsg_reply(reply, info);
2560 out:
2561 mutex_unlock(&nbd_index_mutex);
2562 return ret;
2563 }
2564
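/* Send a unicast NBD_CMD_CONNECT reply carrying the device index. */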
2565 static void nbd_connect_reply(struct genl_info *info, int index)
2566 {
2567 struct sk_buff *skb;
2568 void *msg_head;
2569 int ret;
2570
2571 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2572 if (!skb)
2573 return;
2574 msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2575 NBD_CMD_CONNECT);
2576 if (!msg_head) {
2577 nlmsg_free(skb);
2578 return;
2579 }
2580 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2581 if (ret) {
2582 nlmsg_free(skb);
2583 return;
2584 }
2585 genlmsg_end(skb, msg_head);
2586 genlmsg_reply(skb, info);
2587 }
2588
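/*
 * Multicast an NBD_CMD_LINK_DEAD notification with the device index so
 * that userspace listeners can react to the dead connection.
 */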
2589 static void nbd_mcast_index(int index)
2590 {
2591 struct sk_buff *skb;
2592 void *msg_head;
2593 int ret;
2594
2595 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2596 if (!skb)
2597 return;
2598 msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2599 NBD_CMD_LINK_DEAD);
2600 if (!msg_head) {
2601 nlmsg_free(skb);
2602 return;
2603 }
2604 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2605 if (ret) {
2606 nlmsg_free(skb);
2607 return;
2608 }
2609 genlmsg_end(skb, msg_head);
2610 genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2611 }
2612
2613 static void nbd_dead_link_work(struct work_struct *work)
2614 {
2615 struct link_dead_args *args = container_of(work, struct link_dead_args,
2616 work);
2617 nbd_mcast_index(args->index);
2618 kfree(args);
2619 }
2620
2621 static int __init nbd_init(void)
2622 {
2623 int i;
2624
2625 BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
2626
2627 if (max_part < 0) {
2628 pr_err("max_part must be >= 0\n");
2629 return -EINVAL;
2630 }
2631
2632 part_shift = 0;
2633 if (max_part > 0) {
2634 part_shift = fls(max_part);
2635
2636 /*
2637 		 * Adjust max_part according to part_shift, as it is exported
2638 		 * to user space so that users can know the maximum number of
2639 		 * partitions the kernel should be able to manage.
2640 *
2641 * Note that -1 is required because partition 0 is reserved
2642 * for the whole disk.
2643 */
2644 max_part = (1UL << part_shift) - 1;
2645 }
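	/*
	 * For example (illustrative value): loading with max_part=8 gives
	 * part_shift = fls(8) = 4, so max_part is adjusted to 15 and each
	 * device consumes 1 << 4 = 16 minor numbers.
	 */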
2646
2647 if ((1UL << part_shift) > DISK_MAX_PARTS)
2648 return -EINVAL;
2649
2650 if (nbds_max > 1UL << (MINORBITS - part_shift))
2651 return -EINVAL;
2652
2653 if (register_blkdev(NBD_MAJOR, "nbd"))
2654 return -EIO;
2655
2656 nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
2657 if (!nbd_del_wq) {
2658 unregister_blkdev(NBD_MAJOR, "nbd");
2659 return -ENOMEM;
2660 }
2661
2662 if (genl_register_family(&nbd_genl_family)) {
2663 destroy_workqueue(nbd_del_wq);
2664 unregister_blkdev(NBD_MAJOR, "nbd");
2665 return -EINVAL;
2666 }
2667 nbd_dbg_init();
2668
2669 for (i = 0; i < nbds_max; i++)
2670 nbd_dev_add(i, 1);
2671 return 0;
2672 }
2673
2674 static int nbd_exit_cb(int id, void *ptr, void *data)
2675 {
2676 struct list_head *list = (struct list_head *)data;
2677 struct nbd_device *nbd = ptr;
2678
2679 /* Skip nbd that is being removed asynchronously */
2680 if (refcount_read(&nbd->refs))
2681 list_add_tail(&nbd->list, list);
2682
2683 return 0;
2684 }
2685
2686 static void __exit nbd_cleanup(void)
2687 {
2688 struct nbd_device *nbd;
2689 LIST_HEAD(del_list);
2690
2691 /*
2692 * Unregister netlink interface prior to waiting
2693 * for the completion of netlink commands.
2694 */
2695 genl_unregister_family(&nbd_genl_family);
2696
2697 nbd_dbg_close();
2698
2699 mutex_lock(&nbd_index_mutex);
2700 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2701 mutex_unlock(&nbd_index_mutex);
2702
2703 while (!list_empty(&del_list)) {
2704 nbd = list_first_entry(&del_list, struct nbd_device, list);
2705 list_del_init(&nbd->list);
2706 if (refcount_read(&nbd->config_refs))
2707 pr_err("possibly leaking nbd_config (ref %d)\n",
2708 refcount_read(&nbd->config_refs));
2709 if (refcount_read(&nbd->refs) != 1)
2710 pr_err("possibly leaking a device\n");
2711 nbd_put(nbd);
2712 }
2713
2714 	/* Also wait for nbd_dev_remove_work() to complete */
2715 destroy_workqueue(nbd_del_wq);
2716
2717 idr_destroy(&nbd_index_idr);
2718 unregister_blkdev(NBD_MAJOR, "nbd");
2719 }
2720
2721 module_init(nbd_init);
2722 module_exit(nbd_cleanup);
2723
2724 MODULE_DESCRIPTION("Network Block Device");
2725 MODULE_LICENSE("GPL");
2726
2727 module_param(nbds_max, int, 0444);
2728 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2729 module_param(max_part, int, 0444);
2730 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
2731