// SPDX-License-Identifier: GPL-2.0-only
/*
 * Add configfs and memory store: Kyungchan Koh <[email protected]> and
 * Shaohua Li <[email protected]>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
static DECLARE_FAULT_ATTR(null_init_hctx_attr);
#endif

static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
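
/*
 * Worked example (illustrative, not part of the original source): with
 * TICKS_PER_SEC == 50, TIMER_INTERVAL is 20 ms. Because of integer
 * division, mb_per_tick(100) = (1 << 20) / 50 * 100 = 20971 * 100 =
 * 2097100 bytes, i.e. just under 2 MiB of bandwidth budget refilled into
 * nullb->cur_bytes on each 20 ms tick for a 100 MiB/s limit.
 */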

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page is freed
 * and should be skipped when flushing to storage. See
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)
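
/*
 * Illustrative layout, assuming 4 KiB pages and 512 B sectors (so
 * PAGE_SIZE >> SECTOR_SHIFT == 8 and MAP_SZ == 10):
 *
 *	bits 0..7  - one "sector has data" flag per 512 B sector in the page
 *	bit  8     - NULLB_PAGE_FREE (MAP_SZ - 2)
 *	bit  9     - NULLB_PAGE_LOCK (MAP_SZ - 1)
 *
 * null_page_empty() below scans only the low MAP_SZ - 2 bits, so the two
 * control bits never make a page look non-empty.
 */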

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

static bool g_virt_boundary;
module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_poll_queues = 1;
module_param_named(poll_queues, g_poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
/*
 * For more details about fault injection, please refer to
 * Documentation/fault-injection/fault-injection.rst.
 */
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>");

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>");

static char g_init_hctx_str[80];
module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
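
/*
 * Example (a sketch using the standard fault-injection syntax from
 * Documentation/fault-injection/fault-injection.rst; the values are
 * illustrative): to make every request time out, with no cap on the
 * number of injected faults:
 *
 *	modprobe null_blk timeout="1,100,0,-1"
 *
 * i.e. interval=1, probability=100, space=0, times=-1 (unlimited).
 */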
#endif

/*
 * Historic queue modes.
 *
 * These days nothing but NULL_Q_MQ is actually supported, but we keep the
 * enum for error reporting.
 */
enum {
	NULL_Q_BIO	= 0,
	NULL_Q_RQ	= 1,
	NULL_Q_MQ	= 2,
};

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int g_max_sectors;
module_param_named(max_sectors, g_max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");

static unsigned int nr_devices = 1;
module_param(nr_devices, uint, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool g_shared_tags;
module_param_named(shared_tags, g_shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static bool g_shared_tag_bitmap;
module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_memory_backed;
module_param_named(memory_backed, g_memory_backed, bool, 0444);
MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");

static bool g_discard;
module_param_named(discard, g_discard, bool, 0444);
MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");

static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");

static bool g_fua = true;
module_param_named(fua, g_fua, bool, 0444);
MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true");

static unsigned int g_mbps;
module_param_named(mbps, g_mbps, uint, 0444);
MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, 0444);
MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, 0444);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be a power of two. Default: 256");

static unsigned long g_zone_capacity;
module_param_named(zone_capacity, g_zone_capacity, ulong, 0444);
MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size");

static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");

static unsigned int g_zone_max_open;
module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");

static unsigned int g_zone_max_active;
module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");

static int g_zone_append_max_sectors = INT_MAX;
module_param_named(zone_append_max_sectors, g_zone_append_max_sectors, int, 0444);
MODULE_PARM_DESC(zone_append_max_sectors,
		 "Maximum size of a zone append command (in 512B sectors). Specify 0 for zone append emulation");

static bool g_zone_full;
module_param_named(zone_full, g_zone_full, bool, 0444);
MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false");

static bool g_rotational;
module_param_named(rotational, g_rotational, bool, 0444);
MODULE_PARM_DESC(rotational, "Set the rotational feature for the device. Default: false");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static struct nullb *null_find_dev_by_name(const char *name);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(to_config_group(item), struct nullb_device, group) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY)				\
static ssize_t								\
nullb_device_##NAME##_show(struct config_item *item, char *page)	\
{									\
	return nullb_device_##TYPE##_attr_show(				\
				to_nullb_device(item)->NAME, page);	\
}									\
static ssize_t								\
nullb_device_##NAME##_store(struct config_item *item, const char *page,\
			    size_t count)				\
{									\
	int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
	struct nullb_device *dev = to_nullb_device(item);		\
	TYPE new_value = 0;						\
	int ret;							\
									\
	ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
	if (ret < 0)							\
		return ret;						\
	if (apply_fn)							\
		ret = apply_fn(dev, new_value);				\
	else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags))	\
		ret = -EBUSY;						\
	if (ret < 0)							\
		return ret;						\
	dev->NAME = new_value;						\
	return count;							\
}									\
CONFIGFS_ATTR(nullb_device_, NAME);
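
/*
 * For illustration (hypothetical expansion, not part of the original
 * file): NULLB_DEVICE_ATTR(mbps, uint, NULL) generates
 * nullb_device_mbps_show()/nullb_device_mbps_store() on top of
 * nullb_device_uint_attr_show()/_store(). With a NULL apply function,
 * the store path returns -EBUSY once NULLB_DEV_FL_CONFIGURED is set, so
 * such attributes become effectively read-only after power-on.
 */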

static int nullb_update_nr_hw_queues(struct nullb_device *dev,
				     unsigned int submit_queues,
				     unsigned int poll_queues)
{
	struct blk_mq_tag_set *set;
	int ret, nr_hw_queues;

	if (!dev->nullb)
		return 0;

	/*
	 * Make sure at least one submit queue exists.
	 */
	if (!submit_queues)
		return -EINVAL;

	/*
	 * Make sure that null_init_hctx() does not access nullb->queues[] past
	 * the end of that array.
	 */
	if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues)
		return -EINVAL;

	/*
	 * Keep previous and new queue numbers in nullb_device for reference in
	 * the callback function null_map_queues().
	 */
	dev->prev_submit_queues = dev->submit_queues;
	dev->prev_poll_queues = dev->poll_queues;
	dev->submit_queues = submit_queues;
	dev->poll_queues = poll_queues;

	set = dev->nullb->tag_set;
	nr_hw_queues = submit_queues + poll_queues;
	blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	ret = set->nr_hw_queues == nr_hw_queues ? 0 : -ENOMEM;

	if (ret) {
		/* on error, revert the queue numbers */
		dev->submit_queues = dev->prev_submit_queues;
		dev->poll_queues = dev->prev_poll_queues;
	}

	return ret;
}

static int nullb_apply_submit_queues(struct nullb_device *dev,
				     unsigned int submit_queues)
{
	int ret;

	mutex_lock(&lock);
	ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
	mutex_unlock(&lock);

	return ret;
}

static int nullb_apply_poll_queues(struct nullb_device *dev,
				   unsigned int poll_queues)
{
	int ret;

	mutex_lock(&lock);
	ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
	mutex_unlock(&lock);

	return ret;
}

NULLB_DEVICE_ATTR(size, ulong, NULL);
NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
NULLB_DEVICE_ATTR(irqmode, uint, NULL);
NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
NULLB_DEVICE_ATTR(index, uint, NULL);
NULLB_DEVICE_ATTR(blocking, bool, NULL);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
NULLB_DEVICE_ATTR(discard, bool, NULL);
NULLB_DEVICE_ATTR(mbps, uint, NULL);
NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
NULLB_DEVICE_ATTR(zoned, bool, NULL);
NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
NULLB_DEVICE_ATTR(zone_append_max_sectors, uint, NULL);
NULLB_DEVICE_ATTR(zone_full, bool, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
NULLB_DEVICE_ATTR(fua, bool, NULL);
NULLB_DEVICE_ATTR(rotational, bool, NULL);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	ret = count;
	mutex_lock(&lock);
	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			goto out;

		ret = null_add_dev(dev);
		if (ret) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			goto out;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
		ret = count;
	} else if (dev->power && !newp) {
		if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
			dev->power = newp;
			null_del_dev(dev->nullb);
		}
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

out:
	mutex_unlock(&lock);
	return ret;
}

CONFIGFS_ATTR(nullb_device_, power);
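
/*
 * Typical configfs sequence (a sketch; it assumes configfs is mounted at
 * the conventional /sys/kernel/config, and the device name "nullb0" is an
 * example):
 *
 *	mkdir /sys/kernel/config/nullb/nullb0
 *	echo 4096 > /sys/kernel/config/nullb/nullb0/blocksize
 *	echo 1 > /sys/kernel/config/nullb/nullb0/power    # brings up /dev/nullb0
 *	echo 0 > /sys/kernel/config/nullb/nullb0/power    # tears it down
 */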

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+') {
		if (badblocks_set(&t_dev->badblocks, start,
				  end - start + 1, 1))
			ret = count;
	} else if (badblocks_clear(&t_dev->badblocks, start,
				   end - start + 1)) {
		ret = count;
	}
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);
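
/*
 * Illustrative usage (the "+<start>-<end>" / "-<start>-<end>" syntax
 * follows from the parser above; the path is an example):
 *
 *	echo "+0-7" > /sys/kernel/config/nullb/nullb0/badblocks  # sectors 0-7 now fail with BLK_STS_IOERR
 *	echo "-0-7" > /sys/kernel/config/nullb/nullb0/badblocks  # clear them again
 */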

static ssize_t nullb_device_zone_readonly_store(struct config_item *item,
						const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);

	return zone_cond_store(dev, page, count, BLK_ZONE_COND_READONLY);
}
CONFIGFS_ATTR_WO(nullb_device_, zone_readonly);

static ssize_t nullb_device_zone_offline_store(struct config_item *item,
					       const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);

	return zone_cond_store(dev, page, count, BLK_ZONE_COND_OFFLINE);
}
CONFIGFS_ATTR_WO(nullb_device_, zone_offline);

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_poll_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_max_sectors,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	&nullb_device_attr_zone_capacity,
	&nullb_device_attr_zone_nr_conv,
	&nullb_device_attr_zone_max_open,
	&nullb_device_attr_zone_max_active,
	&nullb_device_attr_zone_append_max_sectors,
	&nullb_device_attr_zone_readonly,
	&nullb_device_attr_zone_offline,
	&nullb_device_attr_zone_full,
	&nullb_device_attr_virt_boundary,
	&nullb_device_attr_no_sched,
	&nullb_device_attr_shared_tags,
	&nullb_device_attr_shared_tag_bitmap,
	&nullb_device_attr_fua,
	&nullb_device_attr_rotational,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION

static void nullb_add_fault_config(struct nullb_device *dev)
{
	fault_config_init(&dev->timeout_config, "timeout_inject");
	fault_config_init(&dev->requeue_config, "requeue_inject");
	fault_config_init(&dev->init_hctx_fault_config, "init_hctx_fault_inject");

	configfs_add_default_group(&dev->timeout_config.group, &dev->group);
	configfs_add_default_group(&dev->requeue_config.group, &dev->group);
	configfs_add_default_group(&dev->init_hctx_fault_config.group, &dev->group);
}

#else

static void nullb_add_fault_config(struct nullb_device *dev)
{
}

#endif

static struct
config_group *nullb_group_make_group(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	if (null_find_dev_by_name(name))
		return ERR_PTR(-EEXIST);

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_group_init_type_name(&dev->group, name, &nullb_device_type);
	nullb_add_fault_config(dev);

	return &dev->group;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE,
			"badblocks,blocking,blocksize,cache_size,fua,"
			"completion_nsec,discard,home_node,hw_queue_depth,"
			"irqmode,max_sectors,mbps,memory_backed,no_sched,"
			"poll_queues,power,queue_mode,shared_tag_bitmap,"
			"shared_tags,size,submit_queues,use_per_node_hctx,"
			"virt_boundary,zoned,zone_capacity,zone_max_active,"
			"zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
			"zone_size,zone_append_max_sectors,zone_full,"
			"rotational\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_group	= nullb_group_make_group,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	dev->timeout_config.attr = null_timeout_attr;
	dev->requeue_config.attr = null_requeue_attr;
	dev->init_hctx_fault_config.attr = null_init_hctx_attr;
#endif

	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->prev_submit_queues = g_submit_queues;
	dev->poll_queues = g_poll_queues;
	dev->prev_poll_queues = g_poll_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->max_sectors = g_max_sectors;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->memory_backed = g_memory_backed;
	dev->discard = g_discard;
	dev->cache_size = g_cache_size;
	dev->mbps = g_mbps;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	dev->zone_capacity = g_zone_capacity;
	dev->zone_nr_conv = g_zone_nr_conv;
	dev->zone_max_open = g_zone_max_open;
	dev->zone_max_active = g_zone_max_active;
	dev->zone_append_max_sectors = g_zone_append_max_sectors;
	dev->zone_full = g_zone_full;
	dev->virt_boundary = g_virt_boundary;
	dev->no_sched = g_no_sched;
	dev->shared_tags = g_shared_tags;
	dev->shared_tag_bitmap = g_shared_tag_bitmap;
	dev->fua = g_fua;
	dev->rotational = g_rotational;

	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_free_zoned_dev(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct nullb_cmd *cmd = container_of(timer, struct nullb_cmd, timer);

	blk_mq_end_request(blk_mq_rq_from_pdu(cmd), cmd->error);
	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_complete_rq(struct request *rq)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);

	blk_mq_end_request(rq, cmd->error);
}

static struct nullb_page *null_alloc_page(void)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO);
	if (!t_page)
		return NULL;

	t_page->page = alloc_pages(GFP_NOIO, 0);
	if (!t_page->page) {
		kfree(t_page);
		return NULL;
	}

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->private != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->private;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->private != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page();
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->private = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->private;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_local_page(c_page->page);
	dst = kmap_local_page(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_local(dst);
	kunmap_local(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() could unlock before using the c_pages. To
	 * avoid a race, we don't allow page free.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->private;
		/*
		 * We found a page that is being flushed to disk by another
		 * thread.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}

static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		memcpy_page(t_page->page, offset, source, off + count, temp);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		if (t_page)
			memcpy_page(dest, off + count, t_page->page, offset,
				    temp);
		else
			zero_user(dest, off + count, temp);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
			       unsigned int len, unsigned int off)
{
	memset_page(page, off, 0xff, len);
}

blk_status_t null_handle_discard(struct nullb_device *dev,
				 sector_t sector, sector_t nr_sectors)
{
	struct nullb *nullb = dev->nullb;
	size_t n = nr_sectors << SECTOR_SHIFT;
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);

	return BLK_STS_OK;
}

static blk_status_t null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return BLK_STS_OK;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return errno_to_blk_status(err);
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	struct nullb_device *dev = nullb->dev;
	unsigned int valid_len = len;
	int err = 0;

	if (!is_write) {
		if (dev->zoned)
			valid_len = null_zone_valid_read_len(nullb,
				sector, len);

		if (valid_len) {
			err = copy_from_nullb(nullb, page, off,
				sector, valid_len);
			off += valid_len;
			len -= valid_len;
		}

		if (len)
			nullb_fill_pattern(nullb, page, len, off);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static blk_status_t null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err = 0;
	unsigned int len;
	sector_t sector = blk_rq_pos(rq);
	struct req_iterator iter;
	struct bio_vec bvec;

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     rq->cmd_flags & REQ_FUA);
		if (err)
			break;
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return errno_to_blk_status(err);
}

static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	blk_status_t sts = BLK_STS_OK;
	struct request *rq = blk_mq_rq_from_pdu(cmd);

	if (!hrtimer_active(&nullb->bw_timer))
		hrtimer_restart(&nullb->bw_timer);

	if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
		blk_mq_stop_hw_queues(nullb->q);
		/* race with timer */
		if (atomic_long_read(&nullb->cur_bytes) > 0)
			blk_mq_start_stopped_hw_queues(nullb->q, true);
		/* requeue request */
		sts = BLK_STS_DEV_RESOURCE;
	}
	return sts;
}

static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
						 sector_t sector,
						 sector_t nr_sectors)
{
	struct badblocks *bb = &cmd->nq->dev->badblocks;
	sector_t first_bad, bad_sectors;

	if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
		return BLK_STS_IOERR;

	return BLK_STS_OK;
}

static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
						     enum req_op op,
						     sector_t sector,
						     sector_t nr_sectors)
{
	struct nullb_device *dev = cmd->nq->dev;

	if (op == REQ_OP_DISCARD)
		return null_handle_discard(dev, sector, nr_sectors);

	return null_handle_rq(cmd);
}

static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	struct nullb_device *dev = cmd->nq->dev;
	struct bio *bio;

	if (!dev->memory_backed && req_op(rq) == REQ_OP_READ) {
		__rq_for_each_bio(bio, rq)
			zero_fill_bio(bio);
	}
}

static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);

	/*
	 * Since root privileges are required to configure the null_blk
	 * driver, it is fine that this driver does not initialize the
	 * data buffers of read commands. Zero-initialize these buffers
	 * anyway if KMSAN is enabled, to prevent KMSAN from complaining
	 * about null_blk not initializing read data buffers.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		nullb_zero_read_cmd_buffer(cmd);

	/* Complete IO by inline, softirq or timer */
	switch (cmd->nq->dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		blk_mq_complete_request(rq);
		break;
	case NULL_IRQ_NONE:
		blk_mq_end_request(rq, cmd->error);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
			      sector_t sector, unsigned int nr_sectors)
{
	struct nullb_device *dev = cmd->nq->dev;
	blk_status_t ret;

	if (dev->badblocks.shift != -1) {
		ret = null_handle_badblocks(cmd, sector, nr_sectors);
		if (ret != BLK_STS_OK)
			return ret;
	}

	if (dev->memory_backed)
		return null_handle_memory_backed(cmd, op, sector, nr_sectors);

	return BLK_STS_OK;
}

static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
			    sector_t nr_sectors, enum req_op op)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	blk_status_t sts;

	if (op == REQ_OP_FLUSH) {
		cmd->error = null_handle_flush(nullb);
		goto out;
	}

	if (dev->zoned)
		sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
	else
		sts = null_process_cmd(cmd, op, sector, nr_sectors);

	/* Do not overwrite errors (e.g. timeout errors) */
	if (cmd->error == BLK_STS_OK)
		cmd->error = sts;

out:
	nullb_complete_cmd(cmd);
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	blk_mq_start_stopped_hw_queues(nullb->q, true);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION

static bool should_timeout_request(struct request *rq)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
	struct nullb_device *dev = cmd->nq->dev;

	return should_fail(&dev->timeout_config.attr, 1);
}

static bool should_requeue_request(struct request *rq)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
	struct nullb_device *dev = cmd->nq->dev;

	return should_fail(&dev->requeue_config.attr, 1);
}

static bool should_init_hctx_fail(struct nullb_device *dev)
{
	return should_fail(&dev->init_hctx_fault_config.attr, 1);
}

#else

static bool should_timeout_request(struct request *rq)
{
	return false;
}

static bool should_requeue_request(struct request *rq)
{
	return false;
}

static bool should_init_hctx_fail(struct nullb_device *dev)
{
	return false;
}

#endif

static void null_map_queues(struct blk_mq_tag_set *set)
{
	struct nullb *nullb = set->driver_data;
	int i, qoff;
	unsigned int submit_queues = g_submit_queues;
	unsigned int poll_queues = g_poll_queues;

	if (nullb) {
		struct nullb_device *dev = nullb->dev;

		/*
		 * Refer to nr_hw_queues of the tag set to check if the
		 * expected number of hardware queues are prepared. If the
		 * block layer failed to prepare them, use the previous numbers
		 * of submit queues and poll queues to map queues.
		 */
		if (set->nr_hw_queues ==
		    dev->submit_queues + dev->poll_queues) {
			submit_queues = dev->submit_queues;
			poll_queues = dev->poll_queues;
		} else if (set->nr_hw_queues ==
			   dev->prev_submit_queues + dev->prev_poll_queues) {
			submit_queues = dev->prev_submit_queues;
			poll_queues = dev->prev_poll_queues;
		} else {
			pr_warn("tag set has unexpected nr_hw_queues: %d\n",
				set->nr_hw_queues);
			WARN_ON_ONCE(true);
			submit_queues = 1;
			poll_queues = 0;
		}
	}

	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		switch (i) {
		case HCTX_TYPE_DEFAULT:
			map->nr_queues = submit_queues;
			break;
		case HCTX_TYPE_READ:
			map->nr_queues = 0;
			continue;
		case HCTX_TYPE_POLL:
			map->nr_queues = poll_queues;
			break;
		}
		map->queue_offset = qoff;
		qoff += map->nr_queues;
		blk_mq_map_queues(map);
	}
}

static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nullb_queue *nq = hctx->driver_data;
	LIST_HEAD(list);
	int nr = 0;
	struct request *rq;

	spin_lock(&nq->poll_lock);
	list_splice_init(&nq->poll_list, &list);
	list_for_each_entry(rq, &list, queuelist)
		blk_mq_set_request_complete(rq);
	spin_unlock(&nq->poll_lock);

	while (!list_empty(&list)) {
		struct nullb_cmd *cmd;
		struct request *req;

		req = list_first_entry(&list, struct request, queuelist);
		list_del_init(&req->queuelist);
		cmd = blk_mq_rq_to_pdu(req);
		cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
						blk_rq_sectors(req));
		if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK,
					blk_mq_end_request_batch))
			blk_mq_end_request(req, cmd->error);
		nr++;
	}

	return nr;
}

static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (hctx->type == HCTX_TYPE_POLL) {
		struct nullb_queue *nq = hctx->driver_data;

		spin_lock(&nq->poll_lock);
		/* The request may have completed meanwhile. */
		if (blk_mq_request_completed(rq)) {
			spin_unlock(&nq->poll_lock);
			return BLK_EH_DONE;
		}
		list_del_init(&rq->queuelist);
		spin_unlock(&nq->poll_lock);
	}

	pr_info("rq %p timed out\n", rq);

	/*
	 * If the device is marked as blocking (i.e. memory backed or zoned
	 * device), the submission path may be blocked waiting for resources
	 * and cause real timeouts. For these real timeouts, the submission
	 * path will complete the request using blk_mq_complete_request().
	 * Only fake timeouts need to execute blk_mq_complete_request() here.
	 */
	cmd->error = BLK_STS_TIMEOUT;
	if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
		blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
	struct nullb_queue *nq = hctx->driver_data;
	sector_t nr_sectors = blk_rq_sectors(rq);
	sector_t sector = blk_rq_pos(rq);
	const bool is_poll = hctx->type == HCTX_TYPE_POLL;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->error = BLK_STS_OK;
	cmd->nq = nq;
	cmd->fake_timeout = should_timeout_request(rq) ||
		blk_should_fake_timeout(rq->q);

	if (should_requeue_request(rq)) {
		/*
		 * Alternate between hitting the core BUSY path and the
		 * driver-driven requeue path.
		 */
		nq->requeue_selection++;
		if (nq->requeue_selection & 1)
			return BLK_STS_RESOURCE;
		blk_mq_requeue_request(rq, true);
		return BLK_STS_OK;
	}

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) {
		blk_status_t sts = null_handle_throttled(cmd);

		if (sts != BLK_STS_OK)
			return sts;
	}

	blk_mq_start_request(rq);

	if (is_poll) {
		spin_lock(&nq->poll_lock);
		list_add_tail(&rq->queuelist, &nq->poll_list);
		spin_unlock(&nq->poll_lock);
		return BLK_STS_OK;
	}
	if (cmd->fake_timeout)
		return BLK_STS_OK;

	null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
	return BLK_STS_OK;
}

static void null_queue_rqs(struct rq_list *rqlist)
{
	struct rq_list requeue_list = {};
	struct blk_mq_queue_data bd = { };
	blk_status_t ret;

	do {
		struct request *rq = rq_list_pop(rqlist);

		bd.rq = rq;
		ret = null_queue_rq(rq->mq_hctx, &bd);
		if (ret != BLK_STS_OK)
			rq_list_add_tail(&requeue_list, rq);
	} while (!rq_list_empty(rqlist));

	*rqlist = requeue_list;
}

null_init_queue(struct nullb * nullb,struct nullb_queue * nq)1666 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1667 {
1668 nq->dev = nullb->dev;
1669 INIT_LIST_HEAD(&nq->poll_list);
1670 spin_lock_init(&nq->poll_lock);
1671 }
1672
null_init_hctx(struct blk_mq_hw_ctx * hctx,void * driver_data,unsigned int hctx_idx)1673 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
1674 unsigned int hctx_idx)
1675 {
1676 struct nullb *nullb = hctx->queue->queuedata;
1677 struct nullb_queue *nq;
1678
1679 if (should_init_hctx_fail(nullb->dev))
1680 return -EFAULT;
1681
1682 nq = &nullb->queues[hctx_idx];
1683 hctx->driver_data = nq;
1684 null_init_queue(nullb, nq);
1685
1686 return 0;
1687 }
1688
1689 static const struct blk_mq_ops null_mq_ops = {
1690 .queue_rq = null_queue_rq,
1691 .queue_rqs = null_queue_rqs,
1692 .complete = null_complete_rq,
1693 .timeout = null_timeout_rq,
1694 .poll = null_poll,
1695 .map_queues = null_map_queues,
1696 .init_hctx = null_init_hctx,
1697 };
1698
null_del_dev(struct nullb * nullb)1699 static void null_del_dev(struct nullb *nullb)
1700 {
1701 struct nullb_device *dev;
1702
1703 if (!nullb)
1704 return;
1705
1706 dev = nullb->dev;
1707
1708 ida_free(&nullb_indexes, nullb->index);
1709
1710 list_del_init(&nullb->list);
1711
1712 del_gendisk(nullb->disk);
1713
1714 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1715 hrtimer_cancel(&nullb->bw_timer);
1716 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1717 blk_mq_start_stopped_hw_queues(nullb->q, true);
1718 }
1719
1720 put_disk(nullb->disk);
1721 if (nullb->tag_set == &nullb->__tag_set)
1722 blk_mq_free_tag_set(nullb->tag_set);
1723 kfree(nullb->queues);
1724 if (null_cache_active(nullb))
1725 null_free_device_storage(nullb->dev, true);
1726 kfree(nullb);
1727 dev->nullb = NULL;
1728 }
1729
null_config_discard(struct nullb * nullb,struct queue_limits * lim)1730 static void null_config_discard(struct nullb *nullb, struct queue_limits *lim)
1731 {
1732 if (nullb->dev->discard == false)
1733 return;
1734
1735 if (!nullb->dev->memory_backed) {
1736 nullb->dev->discard = false;
1737 pr_info("discard option is ignored without memory backing\n");
1738 return;
1739 }
1740
1741 if (nullb->dev->zoned) {
1742 nullb->dev->discard = false;
1743 pr_info("discard option is ignored in zoned mode\n");
1744 return;
1745 }
1746
1747 lim->max_hw_discard_sectors = UINT_MAX >> 9;
1748 }
1749
static const struct block_device_operations null_ops = {
	.owner		= THIS_MODULE,
	.report_zones	= null_report_zones,
};

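/*
 * Allocate the per-queue state up front for the worst case: one submission
 * queue per possible CPU plus any configured poll queues. The number of
 * hardware queues actually used is decided later by the tag set.
 */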
static int setup_queues(struct nullb *nullb)
{
	int nqueues = nr_cpu_ids;

	if (g_poll_queues)
		nqueues += g_poll_queues;

	nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue),
				GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	return 0;
}

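/*
 * Common tag set initialization. nr_maps stays 1 (the default map) unless
 * poll queues are configured, in which case two more maps (read and poll)
 * are added and the poll queues are appended to the hardware queue count.
 */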
static int null_init_tag_set(struct blk_mq_tag_set *set, int poll_queues)
{
	set->ops = &null_mq_ops;
	set->cmd_size = sizeof(struct nullb_cmd);
	set->timeout = 5 * HZ;
	set->nr_maps = 1;
	if (poll_queues) {
		set->nr_hw_queues += poll_queues;
		set->nr_maps += 2;
	}
	return blk_mq_alloc_tag_set(set);
}

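/*
 * Lazily initialize the module-wide tag set shared by all devices created
 * with shared_tags, using the module parameters. tag_set.ops doubles as the
 * "already initialized" flag, so it is cleared again on failure.
 */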
static int null_init_global_tag_set(void)
{
	int error;

	if (tag_set.ops)
		return 0;

	tag_set.nr_hw_queues = g_submit_queues;
	tag_set.queue_depth = g_hw_queue_depth;
	tag_set.numa_node = g_home_node;
	if (g_no_sched)
		tag_set.flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
	if (g_shared_tag_bitmap)
		tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
	if (g_blocking)
		tag_set.flags |= BLK_MQ_F_BLOCKING;

	error = null_init_tag_set(&tag_set, g_poll_queues);
	if (error)
		tag_set.ops = NULL;
	return error;
}

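/*
 * Point the device at either the shared global tag set or its own embedded
 * one, configured from the per-device attributes.
 */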
static int null_setup_tagset(struct nullb *nullb)
{
	if (nullb->dev->shared_tags) {
		nullb->tag_set = &tag_set;
		return null_init_global_tag_set();
	}

	nullb->tag_set = &nullb->__tag_set;
	nullb->tag_set->driver_data = nullb;
	nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
	nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
	nullb->tag_set->numa_node = nullb->dev->home_node;
	if (nullb->dev->no_sched)
		nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
	if (nullb->dev->shared_tag_bitmap)
		nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
	if (nullb->dev->blocking)
		nullb->tag_set->flags |= BLK_MQ_F_BLOCKING;
	return null_init_tag_set(nullb->tag_set, nullb->dev->poll_queues);
}

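/*
 * Sanity-check and clamp a device configuration before it is instantiated:
 * reject removed queue modes, bound the queue counts by the online nodes or
 * CPUs, force blocking mode for memory-backed devices, and require a
 * power-of-two zone size for zoned devices.
 */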
static int null_validate_conf(struct nullb_device *dev)
{
	if (dev->queue_mode == NULL_Q_RQ) {
		pr_err("legacy IO path is no longer available\n");
		return -EINVAL;
	}
	if (dev->queue_mode == NULL_Q_BIO) {
		pr_err("BIO-based IO path is no longer available, using blk-mq instead.\n");
		dev->queue_mode = NULL_Q_MQ;
	}

	if (dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids) {
		dev->submit_queues = nr_cpu_ids;
	} else if (dev->submit_queues == 0) {
		dev->submit_queues = 1;
	}
	dev->prev_submit_queues = dev->submit_queues;

	if (dev->poll_queues > g_poll_queues)
		dev->poll_queues = g_poll_queues;
	dev->prev_poll_queues = dev->poll_queues;
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Memory allocation can sleep, so force blocking mode. */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless without memory backing */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
				dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);

	if (dev->zoned &&
	    (!dev->zone_size || !is_power_of_2(dev->zone_size))) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
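/*
 * Parse one "<interval>,<probability>,<space>,<times>" fault injection
 * string; an empty string is accepted and leaves the attribute disabled.
 * Verbose reporting is turned off so the hot path does not flood the log.
 */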
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	if (!str[0])
		return true;

	if (!setup_fault_attr(attr, str))
		return false;

	attr->verbose = 0;
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
	if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str))
		return false;
#endif
	return true;
}

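/*
 * Instantiate one nullb device from its configuration: validate and clamp
 * the config, set up queues and tags, fill in the queue limits, allocate the
 * gendisk and finally add it. The disk is named after the configfs item when
 * one exists (e.g. mkdir /sys/kernel/config/nullb/foo yields a disk named
 * "foo"), otherwise it falls back to nullb%d. Errors unwind in strict
 * reverse order of setup.
 */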
static int null_add_dev(struct nullb_device *dev)
{
	struct queue_limits lim = {
		.logical_block_size	= dev->blocksize,
		.physical_block_size	= dev->blocksize,
		.max_hw_sectors		= dev->max_sectors,
	};
	struct nullb *nullb;
	int rv;

	rv = null_validate_conf(dev);
	if (rv)
		return rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	rv = null_setup_tagset(nullb);
	if (rv)
		goto out_cleanup_queues;

	if (dev->virt_boundary)
		lim.virt_boundary_mask = PAGE_SIZE - 1;
	null_config_discard(nullb, &lim);
	if (dev->zoned) {
		rv = null_init_zoned_dev(dev, &lim);
		if (rv)
			goto out_cleanup_tags;
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		lim.features |= BLK_FEAT_WRITE_CACHE;
		if (dev->fua)
			lim.features |= BLK_FEAT_FUA;
	}

	if (dev->rotational)
		lim.features |= BLK_FEAT_ROTATIONAL;

	nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
	if (IS_ERR(nullb->disk)) {
		rv = PTR_ERR(nullb->disk);
		goto out_cleanup_zone;
	}
	nullb->q = nullb->disk->queue;

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	nullb->q->queuedata = nullb;

	rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
	if (rv < 0)
		goto out_cleanup_disk;

	nullb->index = rv;
	dev->index = rv;

	if (config_item_name(&dev->group.cg_item)) {
		/* Use configfs dir name as the device name */
		snprintf(nullb->disk_name, sizeof(nullb->disk_name),
			 "%s", config_item_name(&dev->group.cg_item));
	} else {
		sprintf(nullb->disk_name, "nullb%d", nullb->index);
	}

	set_capacity(nullb->disk,
		((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT);
	nullb->disk->major = null_major;
	nullb->disk->first_minor = nullb->index;
	nullb->disk->minors = 1;
	nullb->disk->fops = &null_ops;
	nullb->disk->private_data = nullb;
	strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	if (nullb->dev->zoned) {
		rv = null_register_zoned_dev(nullb);
		if (rv)
			goto out_ida_free;
	}

	rv = add_disk(nullb->disk);
	if (rv)
		goto out_ida_free;

	list_add_tail(&nullb->list, &nullb_list);

	pr_info("disk %s created\n", nullb->disk_name);

	return 0;

out_ida_free:
	ida_free(&nullb_indexes, nullb->index);
out_cleanup_disk:
	put_disk(nullb->disk);
out_cleanup_zone:
	null_free_zoned_dev(dev);
out_cleanup_tags:
	if (nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	kfree(nullb->queues);
out_free_nullb:
	kfree(nullb);
	dev->nullb = NULL;
out:
	return rv;
}

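/*
 * Look up a live device by disk name under the global lock. Returns NULL if
 * no device with that name currently exists.
 */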
static struct nullb *null_find_dev_by_name(const char *name)
{
	struct nullb *nullb = NULL, *nb;

	mutex_lock(&lock);
	list_for_each_entry(nb, &nullb_list, list) {
		if (strcmp(nb->disk_name, name) == 0) {
			nullb = nb;
			break;
		}
	}
	mutex_unlock(&lock);

	return nullb;
}

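/*
 * Allocate and register one device with the module-parameter defaults, as
 * done for each of nr_devices at load time.
 */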
static int null_create_dev(void)
{
	struct nullb_device *dev;
	int ret;

	dev = null_alloc_dev();
	if (!dev)
		return -ENOMEM;

	mutex_lock(&lock);
	ret = null_add_dev(dev);
	mutex_unlock(&lock);
	if (ret) {
		null_free_dev(dev);
		return ret;
	}

	return 0;
}

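/*
 * Full teardown counterpart of null_create_dev(): remove the device, then
 * free its backing storage and the configuration itself.
 */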
static void null_destroy_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	null_del_dev(nullb);
	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

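/*
 * Module init: validate the global parameters, register the configfs
 * subsystem and the block major, then create nr_devices default devices.
 * Any failure unwinds the devices created so far.
 */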
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;

	if (g_bs > PAGE_SIZE) {
		pr_warn("invalid block size\n");
		pr_warn("defaulting block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
		pr_err("invalid home_node value\n");
		g_home_node = NUMA_NO_NODE;
	}

	if (!null_setup_fault())
		return -EINVAL;

	if (g_queue_mode == NULL_Q_RQ) {
		pr_err("legacy IO path is no longer available\n");
		return -EINVAL;
	}

	if (g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("submit_queues param is set to %u.\n",
				nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids) {
		g_submit_queues = nr_cpu_ids;
	} else if (g_submit_queues <= 0) {
		g_submit_queues = 1;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		return ret;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	for (i = 0; i < nr_devices; i++) {
		ret = null_create_dev();
		if (ret)
			goto err_dev;
	}

	pr_info("module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_destroy_dev(nullb);
	}
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
	return ret;
}

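/*
 * Module exit: unregister configfs first so no new devices can be created,
 * then destroy every remaining device and release the shared tag set.
 */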
static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_destroy_dev(nullb);
	}
	mutex_unlock(&lock);

	if (tag_set.ops)
		blk_mq_free_tag_set(&tag_set);

	mutex_destroy(&lock);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <[email protected]>");
MODULE_DESCRIPTION("multi queue aware block test driver");
MODULE_LICENSE("GPL");