Lines matching full:conf (identifier cross-reference hits from the Linux MD RAID10 driver, drivers/md/raid10.c; each entry gives the source line number, the matching line, and a context tag from the cross-referencer: "in function()", "argument", or "local")

69 static void allow_barrier(struct r10conf *conf);
70 static void lower_barrier(struct r10conf *conf);
71 static int _enough(struct r10conf *conf, int previous, int ignore);
72 static int enough(struct r10conf *conf, int ignore);
77 static void end_reshape(struct r10conf *conf);
82 #define cmd_before(conf, cmd) \ argument
84 write_sequnlock_irq(&(conf)->resync_lock); \
87 #define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock) argument
89 #define wait_event_barrier_cmd(conf, cond, cmd) \ argument
90 wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \
91 cmd_after(conf))
93 #define wait_event_barrier(conf, cond) \ argument
94 wait_event_barrier_cmd(conf, cond, NULL_CMD)
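The fragments from source lines 82-94 compose a single wait primitive: callers hold conf->resync_lock for writing, and the helpers drop it (optionally running a command such as flushing pending writes) before sleeping on conf->wait_barrier, then retake it after every wakeup. A reconstructed sketch, for orientation only (the do/while wrapper around cmd_before is an assumption, since the listing shows only the lines that contain "conf"):

    /* Simplified restatement, not the verbatim kernel text. */
    #define cmd_before(conf, cmd) do {                                  \
            write_sequnlock_irq(&(conf)->resync_lock);                  \
            cmd;                                                        \
    } while (0)
    #define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock)

    #define wait_event_barrier_cmd(conf, cond, cmd)                     \
            wait_event_cmd((conf)->wait_barrier, cond,                  \
                           cmd_before(conf, cmd), cmd_after(conf))

    /* Common case: nothing extra to run while the lock is dropped. */
    #define wait_event_barrier(conf, cond)                              \
            wait_event_barrier_cmd(conf, cond, NULL_CMD)

raise_barrier(), freeze_array() and unfreeze_array() below all follow the same discipline: take the seqlock, wait_event_barrier(), release the seqlock.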
107 struct r10conf *conf = data; in r10bio_pool_alloc() local
108 int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]); in r10bio_pool_alloc()
132 struct r10conf *conf = data; in r10buf_pool_alloc() local
139 r10_bio = r10bio_pool_alloc(gfp_flags, conf); in r10buf_pool_alloc()
143 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
144 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
145 nalloc = conf->copies; /* resync */ in r10buf_pool_alloc()
150 if (!conf->have_replacement) in r10buf_pool_alloc()
167 if (!conf->have_replacement) in r10buf_pool_alloc()
190 &conf->mddev->recovery)) { in r10buf_pool_alloc()
224 rbio_pool_free(r10_bio, conf); in r10buf_pool_alloc()
230 struct r10conf *conf = data; in r10buf_pool_free() local
235 for (j = conf->copies; j--; ) { in r10buf_pool_free()
255 rbio_pool_free(r10bio, conf); in r10buf_pool_free()
258 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) in put_all_bios() argument
262 for (i = 0; i < conf->geo.raid_disks; i++) { in put_all_bios()
276 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio() local
278 put_all_bios(conf, r10_bio); in free_r10bio()
279 mempool_free(r10_bio, &conf->r10bio_pool); in free_r10bio()
284 struct r10conf *conf = r10_bio->mddev->private; in put_buf() local
286 mempool_free(r10_bio, &conf->r10buf_pool); in put_buf()
288 lower_barrier(conf); in put_buf()
291 static void wake_up_barrier(struct r10conf *conf) in wake_up_barrier() argument
293 if (wq_has_sleeper(&conf->wait_barrier)) in wake_up_barrier()
294 wake_up(&conf->wait_barrier); in wake_up_barrier()
301 struct r10conf *conf = mddev->private; in reschedule_retry() local
303 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
304 list_add(&r10_bio->retry_list, &conf->retry_list); in reschedule_retry()
305 conf->nr_queued ++; in reschedule_retry()
306 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
309 wake_up(&conf->wait_barrier); in reschedule_retry()
322 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io() local
332 allow_barrier(conf); in raid_end_bio_io()
342 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos() local
344 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
351 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, in find_bio_disk() argument
357 for (slot = 0; slot < conf->geo.raid_disks; slot++) { in find_bio_disk()
381 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request() local
407 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), in raid10_end_read_request()
413 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
419 mdname(conf->mddev), in raid10_end_read_request()
454 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request() local
462 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
465 rdev = conf->mirrors[dev].replacement; in raid10_end_write_request()
469 rdev = conf->mirrors[dev].rdev; in raid10_end_write_request()
547 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
640 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) in raid10_find_phys() argument
642 struct geom *geo = &conf->geo; in raid10_find_phys()
644 if (conf->reshape_progress != MaxSector && in raid10_find_phys()
645 ((r10bio->sector >= conf->reshape_progress) != in raid10_find_phys()
646 conf->mddev->reshape_backwards)) { in raid10_find_phys()
648 geo = &conf->prev; in raid10_find_phys()
655 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) in raid10_find_virt() argument
658 /* Never use conf->prev as this is only called during resync in raid10_find_virt()
661 struct geom *geo = &conf->geo; in raid10_find_virt()
719 static struct md_rdev *read_balance(struct r10conf *conf, in read_balance() argument
733 struct geom *geo = &conf->geo; in read_balance()
735 raid10_find_phys(conf, r10_bio); in read_balance()
745 if (raid1_should_read_first(conf->mddev, this_sector, sectors)) in read_balance()
748 for (slot = 0; slot < conf->copies ; slot++) { in read_balance()
758 rdev = conf->mirrors[disk].replacement; in read_balance()
762 rdev = conf->mirrors[disk].rdev; in read_balance()
829 conf->mirrors[disk].head_position); in read_balance()
837 if (slot >= conf->copies) { in read_balance()
857 static void flush_pending_writes(struct r10conf *conf) in flush_pending_writes() argument
862 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
864 if (conf->pending_bio_list.head) { in flush_pending_writes()
868 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
869 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
883 raid1_prepare_flush_writes(conf->mddev); in flush_pending_writes()
884 wake_up(&conf->wait_barrier); in flush_pending_writes()
895 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
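flush_pending_writes() (source lines 857-895) drains the write bios queued on conf->pending_bio_list: it detaches the whole list under device_lock, wakes barrier waiters, and only then submits the bios outside the lock. A minimal sketch of that drain pattern, with a generic submit call standing in for whatever helper the driver actually uses (the listing does not show it):

    struct bio *bio;

    spin_lock_irq(&conf->device_lock);
    bio = bio_list_get(&conf->pending_bio_list);   /* detach the whole list */
    spin_unlock_irq(&conf->device_lock);

    wake_up(&conf->wait_barrier);
    while (bio) {
            struct bio *next = bio->bi_next;

            bio->bi_next = NULL;
            submit_bio_noacct(bio);   /* assumed stand-in for the real submit path */
            bio = next;
    }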
920 static void raise_barrier(struct r10conf *conf, int force) in raise_barrier() argument
922 write_seqlock_irq(&conf->resync_lock); in raise_barrier()
924 if (WARN_ON_ONCE(force && !conf->barrier)) in raise_barrier()
928 wait_event_barrier(conf, force || !conf->nr_waiting); in raise_barrier()
931 WRITE_ONCE(conf->barrier, conf->barrier + 1); in raise_barrier()
934 wait_event_barrier(conf, !atomic_read(&conf->nr_pending) && in raise_barrier()
935 conf->barrier < RESYNC_DEPTH); in raise_barrier()
937 write_sequnlock_irq(&conf->resync_lock); in raise_barrier()
940 static void lower_barrier(struct r10conf *conf) in lower_barrier() argument
944 write_seqlock_irqsave(&conf->resync_lock, flags); in lower_barrier()
945 WRITE_ONCE(conf->barrier, conf->barrier - 1); in lower_barrier()
946 write_sequnlock_irqrestore(&conf->resync_lock, flags); in lower_barrier()
947 wake_up(&conf->wait_barrier); in lower_barrier()
950 static bool stop_waiting_barrier(struct r10conf *conf) in stop_waiting_barrier() argument
956 if (!conf->barrier) in stop_waiting_barrier()
965 if (atomic_read(&conf->nr_pending) && bio_list && in stop_waiting_barrier()
970 thread = rcu_dereference_protected(conf->mddev->thread, true); in stop_waiting_barrier()
977 WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0); in stop_waiting_barrier()
984 static bool wait_barrier_nolock(struct r10conf *conf) in wait_barrier_nolock() argument
986 unsigned int seq = read_seqbegin(&conf->resync_lock); in wait_barrier_nolock()
988 if (READ_ONCE(conf->barrier)) in wait_barrier_nolock()
991 atomic_inc(&conf->nr_pending); in wait_barrier_nolock()
992 if (!read_seqretry(&conf->resync_lock, seq)) in wait_barrier_nolock()
995 if (atomic_dec_and_test(&conf->nr_pending)) in wait_barrier_nolock()
996 wake_up_barrier(conf); in wait_barrier_nolock()
1001 static bool wait_barrier(struct r10conf *conf, bool nowait) in wait_barrier() argument
1005 if (wait_barrier_nolock(conf)) in wait_barrier()
1008 write_seqlock_irq(&conf->resync_lock); in wait_barrier()
1009 if (conf->barrier) { in wait_barrier()
1014 conf->nr_waiting++; in wait_barrier()
1015 mddev_add_trace_msg(conf->mddev, "raid10 wait barrier"); in wait_barrier()
1016 wait_event_barrier(conf, stop_waiting_barrier(conf)); in wait_barrier()
1017 conf->nr_waiting--; in wait_barrier()
1019 if (!conf->nr_waiting) in wait_barrier()
1020 wake_up(&conf->wait_barrier); in wait_barrier()
1024 atomic_inc(&conf->nr_pending); in wait_barrier()
1025 write_sequnlock_irq(&conf->resync_lock); in wait_barrier()
1029 static void allow_barrier(struct r10conf *conf) in allow_barrier() argument
1031 if ((atomic_dec_and_test(&conf->nr_pending)) || in allow_barrier()
1032 (conf->array_freeze_pending)) in allow_barrier()
1033 wake_up_barrier(conf); in allow_barrier()
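Taken together, raise_barrier()/lower_barrier() and wait_barrier()/allow_barrier() implement the usual MD barrier pairing: resync and reshape bracket their window with a raised barrier, while regular requests take a reference in nr_pending that a raised barrier waits out. A usage sketch matching the call sites later in this listing (error handling trimmed):

    /* Resync / reshape side, e.g. raid10_sync_request() or reshape_request(): */
    raise_barrier(conf, 0);
    /* ... issue the synchronisation I/O for this window ... */
    lower_barrier(conf);   /* in practice done from put_buf() when the r10buf is freed */

    /* Regular I/O side, e.g. regular_request_wait(): */
    if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT))
            return false;  /* REQ_NOWAIT: fail the bio rather than sleep */
    /* ... the request is built and submitted; the matching allow_barrier(conf)
     * runs when it completes (raid_end_bio_io) or whenever the submitter has to
     * drop the reference temporarily, e.g. around bio splits. */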
1036 static void freeze_array(struct r10conf *conf, int extra) in freeze_array() argument
1050 write_seqlock_irq(&conf->resync_lock); in freeze_array()
1051 conf->array_freeze_pending++; in freeze_array()
1052 WRITE_ONCE(conf->barrier, conf->barrier + 1); in freeze_array()
1053 conf->nr_waiting++; in freeze_array()
1054 wait_event_barrier_cmd(conf, atomic_read(&conf->nr_pending) == in freeze_array()
1055 conf->nr_queued + extra, flush_pending_writes(conf)); in freeze_array()
1056 conf->array_freeze_pending--; in freeze_array()
1057 write_sequnlock_irq(&conf->resync_lock); in freeze_array()
1060 static void unfreeze_array(struct r10conf *conf) in unfreeze_array() argument
1063 write_seqlock_irq(&conf->resync_lock); in unfreeze_array()
1064 WRITE_ONCE(conf->barrier, conf->barrier - 1); in unfreeze_array()
1065 conf->nr_waiting--; in unfreeze_array()
1066 wake_up(&conf->wait_barrier); in unfreeze_array()
1067 write_sequnlock_irq(&conf->resync_lock); in unfreeze_array()
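freeze_array(conf, extra) goes one step further than a plain barrier: it raises the barrier and waits until nr_pending drains down to nr_queued + extra, flushing queued writes while it waits, so that only the caller's own `extra` requests remain in flight. The read-error path further down uses it exactly that way:

    /* From handle_read_error() (source lines 2864-2866): freeze with the
     * failed read still counted as in flight, repair, then thaw. */
    freeze_array(conf, 1);
    fix_read_error(conf, mddev, r10_bio);
    unfreeze_array(conf);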
1084 struct r10conf *conf = mddev->private; in raid10_unplug() local
1088 spin_lock_irq(&conf->device_lock); in raid10_unplug()
1089 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid10_unplug()
1090 spin_unlock_irq(&conf->device_lock); in raid10_unplug()
1091 wake_up_barrier(conf); in raid10_unplug()
1100 wake_up_barrier(conf); in raid10_unplug()
1118 static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf, in regular_request_wait() argument
1122 if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) { in regular_request_wait()
1127 bio->bi_iter.bi_sector < conf->reshape_progress && in regular_request_wait()
1128 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in regular_request_wait()
1129 allow_barrier(conf); in regular_request_wait()
1134 mddev_add_trace_msg(conf->mddev, "raid10 wait reshape"); in regular_request_wait()
1135 wait_event(conf->wait_barrier, in regular_request_wait()
1136 conf->reshape_progress <= bio->bi_iter.bi_sector || in regular_request_wait()
1137 conf->reshape_progress >= bio->bi_iter.bi_sector + in regular_request_wait()
1139 wait_barrier(conf, false); in regular_request_wait()
1147 struct r10conf *conf = mddev->private; in raid10_read_request() local
1161 * we must use the one in conf. in raid10_read_request()
1173 err_rdev = conf->mirrors[disk].rdev; in raid10_read_request()
1183 if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) in raid10_read_request()
1185 rdev = read_balance(conf, r10_bio, &max_sectors); in raid10_read_request()
1202 gfp, &conf->bio_split); in raid10_read_request()
1208 allow_barrier(conf); in raid10_read_request()
1210 wait_barrier(conf, false); in raid10_read_request()
1248 struct r10conf *conf = mddev->private; in raid10_write_one_disk() local
1253 rdev = replacement ? conf->mirrors[devnum].replacement : in raid10_write_one_disk()
1254 conf->mirrors[devnum].rdev; in raid10_write_one_disk()
1266 &conf->mirrors[devnum].rdev->flags) in raid10_write_one_disk()
1267 && enough(conf, devnum)) in raid10_write_one_disk()
1276 if (!raid1_add_bio_to_plug(mddev, mbio, raid10_unplug, conf->copies)) { in raid10_write_one_disk()
1277 spin_lock_irqsave(&conf->device_lock, flags); in raid10_write_one_disk()
1278 bio_list_add(&conf->pending_bio_list, mbio); in raid10_write_one_disk()
1279 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_write_one_disk()
1286 struct r10conf *conf = mddev->private; in wait_blocked_dev() local
1292 for (i = 0; i < conf->copies; i++) { in wait_blocked_dev()
1295 rdev = conf->mirrors[i].rdev; in wait_blocked_dev()
1320 rrdev = conf->mirrors[i].replacement; in wait_blocked_dev()
1330 allow_barrier(conf); in wait_blocked_dev()
1331 mddev_add_trace_msg(conf->mddev, in wait_blocked_dev()
1335 wait_barrier(conf, false); in wait_blocked_dev()
1343 struct r10conf *conf = mddev->private; in raid10_write_request() local
1360 prepare_to_wait(&conf->wait_barrier, in raid10_write_request()
1367 finish_wait(&conf->wait_barrier, &w); in raid10_write_request()
1371 if (!regular_request_wait(mddev, conf, bio, sectors)) in raid10_write_request()
1375 ? (bio->bi_iter.bi_sector < conf->reshape_safe && in raid10_write_request()
1376 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in raid10_write_request()
1377 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in raid10_write_request()
1378 bio->bi_iter.bi_sector < conf->reshape_progress))) { in raid10_write_request()
1380 mddev->reshape_position = conf->reshape_progress; in raid10_write_request()
1385 allow_barrier(conf); in raid10_write_request()
1389 mddev_add_trace_msg(conf->mddev, in raid10_write_request()
1394 conf->reshape_safe = mddev->reshape_position; in raid10_write_request()
1408 raid10_find_phys(conf, r10_bio); in raid10_write_request()
1414 for (i = 0; i < conf->copies; i++) { in raid10_write_request()
1418 rdev = conf->mirrors[d].rdev; in raid10_write_request()
1419 rrdev = conf->mirrors[d].replacement; in raid10_write_request()
1483 GFP_NOIO, &conf->bio_split); in raid10_write_request()
1489 allow_barrier(conf); in raid10_write_request()
1491 wait_barrier(conf, false); in raid10_write_request()
1500 for (i = 0; i < conf->copies; i++) { in raid10_write_request()
1511 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid10_write_request()
1512 struct md_rdev *rrdev = conf->mirrors[d].replacement; in raid10_write_request()
1531 struct r10conf *conf = mddev->private; in __make_request() local
1534 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); in __make_request()
1544 conf->geo.raid_disks); in __make_request()
1554 struct r10conf *conf = r10bio->mddev->private; in raid_end_discard_bio() local
1559 allow_barrier(conf); in raid_end_discard_bio()
1577 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_discard_request() local
1588 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_discard_request()
1589 rdev = repl ? conf->mirrors[dev].replacement : in raid10_end_discard_request()
1590 conf->mirrors[dev].rdev; in raid10_end_discard_request()
1593 rdev_dec_pending(rdev, conf->mddev); in raid10_end_discard_request()
1604 struct r10conf *conf = mddev->private; in raid10_handle_discard() local
1605 struct geom *geo = &conf->geo; in raid10_handle_discard()
1626 if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) { in raid10_handle_discard()
1666 split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split); in raid10_handle_discard()
1673 allow_barrier(conf); in raid10_handle_discard()
1676 wait_barrier(conf, false); in raid10_handle_discard()
1681 split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split); in raid10_handle_discard()
1688 allow_barrier(conf); in raid10_handle_discard()
1692 wait_barrier(conf, false); in raid10_handle_discard()
1722 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); in raid10_handle_discard()
1753 rdev = conf->mirrors[disk].rdev; in raid10_handle_discard()
1754 rrdev = conf->mirrors[disk].replacement; in raid10_handle_discard()
1813 struct md_rdev *rdev = conf->mirrors[disk].rdev; in raid10_handle_discard()
1827 struct md_rdev *rrdev = conf->mirrors[disk].replacement; in raid10_handle_discard()
1849 wait_barrier(conf, false); in raid10_handle_discard()
1857 allow_barrier(conf); in raid10_handle_discard()
1863 struct r10conf *conf = mddev->private; in raid10_make_request() local
1864 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); in raid10_make_request()
1884 && (conf->geo.near_copies < conf->geo.raid_disks in raid10_make_request()
1885 || conf->prev.near_copies < in raid10_make_request()
1886 conf->prev.raid_disks))) in raid10_make_request()
1893 wake_up_barrier(conf); in raid10_make_request()
1899 struct r10conf *conf = mddev->private; in raid10_status() local
1904 if (conf->geo.near_copies < conf->geo.raid_disks) in raid10_status()
1906 if (conf->geo.near_copies > 1) in raid10_status()
1907 seq_printf(seq, " %d near-copies", conf->geo.near_copies); in raid10_status()
1908 if (conf->geo.far_copies > 1) { in raid10_status()
1909 if (conf->geo.far_offset) in raid10_status()
1910 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); in raid10_status()
1912 seq_printf(seq, " %d far-copies", conf->geo.far_copies); in raid10_status()
1913 if (conf->geo.far_set_size != conf->geo.raid_disks) in raid10_status()
1914 seq_printf(seq, " %d devices per set", conf->geo.far_set_size); in raid10_status()
1916 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, in raid10_status()
1917 conf->geo.raid_disks - mddev->degraded); in raid10_status()
1918 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_status()
1919 struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev); in raid10_status()
1931 static int _enough(struct r10conf *conf, int previous, int ignore) in _enough() argument
1937 disks = conf->prev.raid_disks; in _enough()
1938 ncopies = conf->prev.near_copies; in _enough()
1940 disks = conf->geo.raid_disks; in _enough()
1941 ncopies = conf->geo.near_copies; in _enough()
1945 int n = conf->copies; in _enough()
1951 (rdev = conf->mirrors[this].rdev) && in _enough()
1965 static int enough(struct r10conf *conf, int ignore) in enough() argument
1972 return _enough(conf, 0, ignore) && in enough()
1973 _enough(conf, 1, ignore); in enough()
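_enough() and enough() decide whether data is still fully readable: the loop around source lines 1945-1951 scans each set of conf->copies consecutive devices and requires at least one in-sync member, optionally pretending that device `ignore` has already failed (passing -1 ignores nothing). A rough reconstruction, not the verbatim kernel code:

    static int enough_sketch(struct r10conf *conf, int ignore)
    {
            int disks   = conf->geo.raid_disks;
            int ncopies = conf->geo.near_copies;
            int first   = 0;

            do {
                    int n = conf->copies, this = first, cnt = 0;

                    while (n--) {
                            struct md_rdev *rdev = conf->mirrors[this].rdev;

                            if (this != ignore && rdev &&
                                test_bit(In_sync, &rdev->flags))
                                    cnt++;
                            this = (this + 1) % disks;
                    }
                    if (!cnt)
                            return 0;   /* an entire copy-set has no working member */
                    first = (first + ncopies) % disks;
            } while (first != 0);

            return 1;
    }

The real enough() repeats the check against the pre-reshape geometry as well (source lines 1972-1973). raid10_error(), raid10_remove_disk() and raid10_run() below call enough(conf, rdev->raid_disk) or enough(conf, -1) to decide whether losing one more device would make some blocks unreachable.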
1993 struct r10conf *conf = mddev->private; in raid10_error() local
1996 spin_lock_irqsave(&conf->device_lock, flags); in raid10_error()
1998 if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) { in raid10_error()
2002 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_error()
2014 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_error()
2018 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in raid10_error()
2021 static void print_conf(struct r10conf *conf) in print_conf() argument
2026 pr_debug("RAID10 conf printout:\n"); in print_conf()
2027 if (!conf) { in print_conf()
2028 pr_debug("(!conf)\n"); in print_conf()
2031 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
2032 conf->geo.raid_disks); in print_conf()
2034 lockdep_assert_held(&conf->mddev->reconfig_mutex); in print_conf()
2035 for (i = 0; i < conf->geo.raid_disks; i++) { in print_conf()
2036 rdev = conf->mirrors[i].rdev; in print_conf()
2045 static void close_sync(struct r10conf *conf) in close_sync() argument
2047 wait_barrier(conf, false); in close_sync()
2048 allow_barrier(conf); in close_sync()
2050 mempool_exit(&conf->r10buf_pool); in close_sync()
2056 struct r10conf *conf = mddev->private; in raid10_spare_active() local
2065 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_spare_active()
2066 tmp = conf->mirrors + i; in raid10_spare_active()
2093 spin_lock_irqsave(&conf->device_lock, flags); in raid10_spare_active()
2095 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_spare_active()
2097 print_conf(conf); in raid10_spare_active()
2103 struct r10conf *conf = mddev->private; in raid10_add_disk() local
2107 int last = conf->geo.raid_disks - 1; in raid10_add_disk()
2115 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) in raid10_add_disk()
2122 rdev->saved_raid_disk < conf->geo.raid_disks && in raid10_add_disk()
2123 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid10_add_disk()
2128 p = &conf->mirrors[mirror]; in raid10_add_disk()
2146 conf->fullsync = 1; in raid10_add_disk()
2152 p = &conf->mirrors[repl_slot]; in raid10_add_disk()
2159 conf->fullsync = 1; in raid10_add_disk()
2163 print_conf(conf); in raid10_add_disk()
2169 struct r10conf *conf = mddev->private; in raid10_remove_disk() local
2175 print_conf(conf); in raid10_remove_disk()
2178 p = conf->mirrors + number; in raid10_remove_disk()
2197 number < conf->geo.raid_disks && in raid10_remove_disk()
2198 enough(conf, -1)) { in raid10_remove_disk()
2215 print_conf(conf); in raid10_remove_disk()
2221 struct r10conf *conf = r10_bio->mddev->private; in __end_sync_read() local
2230 &conf->mirrors[d].rdev->corrected_errors); in __end_sync_read()
2235 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in __end_sync_read()
2248 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read() local
2249 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
2293 struct r10conf *conf = mddev->private; in end_sync_write() local
2299 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
2301 rdev = conf->mirrors[d].replacement; in end_sync_write()
2303 rdev = conf->mirrors[d].rdev; in end_sync_write()
2343 struct r10conf *conf = mddev->private; in sync_request_write() local
2352 for (i=0; i<conf->copies; i++) in sync_request_write()
2356 if (i == conf->copies) in sync_request_write()
2367 for (i=0 ; i < conf->copies ; i++) { in sync_request_write()
2381 rdev = conf->mirrors[d].rdev; in sync_request_write()
2415 bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE); in sync_request_write()
2426 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request_write()
2428 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); in sync_request_write()
2430 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) in sync_request_write()
2432 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; in sync_request_write()
2439 for (i = 0; i < conf->copies; i++) { in sync_request_write()
2450 md_sync_acct(conf->mirrors[d].replacement->bdev, in sync_request_write()
2482 struct r10conf *conf = mddev->private; in fix_recovery_read_error() local
2500 rdev = conf->mirrors[dr].rdev; in fix_recovery_read_error()
2508 rdev = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2530 if (rdev != conf->mirrors[dw].rdev) { in fix_recovery_read_error()
2532 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2540 conf->mirrors[dw].recovery_disabled in fix_recovery_read_error()
2557 struct r10conf *conf = mddev->private; in recovery_request_write() local
2584 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in recovery_request_write()
2585 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); in recovery_request_write()
2589 atomic_inc(&conf->mirrors[d].replacement->nr_pending); in recovery_request_write()
2590 md_sync_acct(conf->mirrors[d].replacement->bdev, in recovery_request_write()
2625 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2635 rdev = conf->mirrors[d].rdev; in fix_read_error()
2658 rdev = conf->mirrors[d].rdev; in fix_read_error()
2670 conf->tmppage, in fix_read_error()
2677 if (sl == conf->copies) in fix_read_error()
2687 rdev = conf->mirrors[dn].rdev; in fix_read_error()
2705 sl = conf->copies; in fix_read_error()
2708 rdev = conf->mirrors[d].rdev; in fix_read_error()
2718 s, conf->tmppage, REQ_OP_WRITE) in fix_read_error()
2737 sl = conf->copies; in fix_read_error()
2740 rdev = conf->mirrors[d].rdev; in fix_read_error()
2750 s, conf->tmppage, REQ_OP_READ)) { in fix_read_error()
2785 struct r10conf *conf = mddev->private; in narrow_write_error() local
2786 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2846 struct r10conf *conf = mddev->private; in handle_read_error() local
2864 freeze_array(conf, 1); in handle_read_error()
2865 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2866 unfreeze_array(conf); in handle_read_error()
2877 allow_barrier(conf); in handle_read_error()
2880 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) in handle_write_completed() argument
2893 for (m = 0; m < conf->copies; m++) { in handle_write_completed()
2895 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
2909 md_error(conf->mddev, rdev); in handle_write_completed()
2911 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
2926 md_error(conf->mddev, rdev); in handle_write_completed()
2932 for (m = 0; m < conf->copies; m++) { in handle_write_completed()
2935 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
2941 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2945 md_error(conf->mddev, rdev); in handle_write_completed()
2946 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2949 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
2955 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2959 spin_lock_irq(&conf->device_lock); in handle_write_completed()
2960 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); in handle_write_completed()
2961 conf->nr_queued++; in handle_write_completed()
2962 spin_unlock_irq(&conf->device_lock); in handle_write_completed()
2967 wake_up(&conf->wait_barrier); in handle_write_completed()
2968 md_wakeup_thread(conf->mddev->thread); in handle_write_completed()
2983 struct r10conf *conf = mddev->private; in raid10d() local
2984 struct list_head *head = &conf->retry_list; in raid10d()
2989 if (!list_empty_careful(&conf->bio_end_io_list) && in raid10d()
2992 spin_lock_irqsave(&conf->device_lock, flags); in raid10d()
2994 while (!list_empty(&conf->bio_end_io_list)) { in raid10d()
2995 list_move(conf->bio_end_io_list.prev, &tmp); in raid10d()
2996 conf->nr_queued--; in raid10d()
2999 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
3015 flush_pending_writes(conf); in raid10d()
3017 spin_lock_irqsave(&conf->device_lock, flags); in raid10d()
3019 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
3024 conf->nr_queued--; in raid10d()
3025 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
3028 conf = mddev->private; in raid10d()
3031 handle_write_completed(conf, r10_bio); in raid10d()
3050 static int init_resync(struct r10conf *conf) in init_resync() argument
3055 BUG_ON(mempool_initialized(&conf->r10buf_pool)); in init_resync()
3056 conf->have_replacement = 0; in init_resync()
3057 for (i = 0; i < conf->geo.raid_disks; i++) in init_resync()
3058 if (conf->mirrors[i].replacement) in init_resync()
3059 conf->have_replacement = 1; in init_resync()
3060 ret = mempool_init(&conf->r10buf_pool, buffs, in init_resync()
3061 r10buf_pool_alloc, r10buf_pool_free, conf); in init_resync()
3064 conf->next_resync = 0; in init_resync()
3068 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf) in raid10_alloc_init_r10buf() argument
3070 struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO); in raid10_alloc_init_r10buf()
3076 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in raid10_alloc_init_r10buf()
3077 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in raid10_alloc_init_r10buf()
3078 nalloc = conf->copies; /* resync */ in raid10_alloc_init_r10buf()
3101 static void raid10_set_cluster_sync_high(struct r10conf *conf) in raid10_set_cluster_sync_high() argument
3118 chunks = conf->geo.raid_disks / conf->geo.near_copies; in raid10_set_cluster_sync_high()
3119 if (conf->geo.raid_disks % conf->geo.near_copies == 0) in raid10_set_cluster_sync_high()
3123 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors; in raid10_set_cluster_sync_high()
3131 conf->cluster_sync_high = conf->cluster_sync_low + window_size; in raid10_set_cluster_sync_high()
3169 struct r10conf *conf = mddev->private; in raid10_sync_request() local
3178 sector_t chunk_mask = conf->geo.chunk_mask; in raid10_sync_request()
3192 conf->fullsync == 0) { in raid10_sync_request()
3197 if (!mempool_initialized(&conf->r10buf_pool)) in raid10_sync_request()
3198 if (init_resync(conf)) in raid10_sync_request()
3203 conf->cluster_sync_low = 0; in raid10_sync_request()
3204 conf->cluster_sync_high = 0; in raid10_sync_request()
3216 end_reshape(conf); in raid10_sync_request()
3217 close_sync(conf); in raid10_sync_request()
3226 else for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
3228 raid10_find_virt(conf, mddev->curr_resync, i); in raid10_sync_request()
3235 if ((!mddev->bitmap || conf->fullsync) in raid10_sync_request()
3236 && conf->have_replacement in raid10_sync_request()
3241 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
3243 conf->mirrors[i].replacement; in raid10_sync_request()
3249 conf->fullsync = 0; in raid10_sync_request()
3252 close_sync(conf); in raid10_sync_request()
3260 if (chunks_skipped >= conf->geo.raid_disks) { in raid10_sync_request()
3269 conf->mirrors[error_disk].recovery_disabled = in raid10_sync_request()
3287 if (conf->geo.near_copies < conf->geo.raid_disks && in raid10_sync_request()
3295 if (conf->nr_waiting) in raid10_sync_request()
3319 for (i = 0 ; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
3325 struct raid10_info *mirror = &conf->mirrors[i]; in raid10_sync_request()
3343 sect = raid10_find_virt(conf, sector_nr, i); in raid10_sync_request()
3360 !conf->fullsync) { in raid10_sync_request()
3372 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3374 raise_barrier(conf, rb2 != NULL); in raid10_sync_request()
3384 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3389 for (j = 0; j < conf->geo.raid_disks; j++) { in raid10_sync_request()
3390 struct md_rdev *rdev = conf->mirrors[j].rdev; in raid10_sync_request()
3402 for (j=0; j<conf->copies;j++) { in raid10_sync_request()
3406 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid10_sync_request()
3442 for (k=0; k<conf->copies; k++) in raid10_sync_request()
3445 BUG_ON(k == conf->copies); in raid10_sync_request()
3485 if (j == conf->copies) { in raid10_sync_request()
3493 for (k = 0; k < conf->copies; k++) in raid10_sync_request()
3540 for (; j < conf->copies; j++) { in raid10_sync_request()
3542 if (conf->mirrors[d].rdev && in raid10_sync_request()
3544 &conf->mirrors[d].rdev->flags)) in raid10_sync_request()
3574 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid10_sync_request()
3579 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, in raid10_sync_request()
3587 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3592 raise_barrier(conf, 0); in raid10_sync_request()
3593 conf->next_resync = sector_nr; in raid10_sync_request()
3598 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3601 for (i = 0; i < conf->copies; i++) { in raid10_sync_request()
3612 rdev = conf->mirrors[d].rdev; in raid10_sync_request()
3640 rdev = conf->mirrors[d].replacement; in raid10_sync_request()
3663 for (i=0; i<conf->copies; i++) { in raid10_sync_request()
3666 rdev_dec_pending(conf->mirrors[d].rdev, in raid10_sync_request()
3671 conf->mirrors[d].replacement, in raid10_sync_request()
3707 if (conf->cluster_sync_high < sector_nr + nr_sectors) { in raid10_sync_request()
3708 conf->cluster_sync_low = mddev->curr_resync_completed; in raid10_sync_request()
3709 raid10_set_cluster_sync_high(conf); in raid10_sync_request()
3712 conf->cluster_sync_low, in raid10_sync_request()
3713 conf->cluster_sync_high); in raid10_sync_request()
3720 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
3726 sect_va1 = raid10_find_virt(conf, sector_nr, i); in raid10_sync_request()
3728 if (conf->cluster_sync_high < sect_va1 + nr_sectors) { in raid10_sync_request()
3734 sect_va2 = raid10_find_virt(conf, in raid10_sync_request()
3737 if (conf->cluster_sync_low == 0 || in raid10_sync_request()
3738 conf->cluster_sync_low > sect_va2) in raid10_sync_request()
3739 conf->cluster_sync_low = sect_va2; in raid10_sync_request()
3743 raid10_set_cluster_sync_high(conf); in raid10_sync_request()
3745 conf->cluster_sync_low, in raid10_sync_request()
3746 conf->cluster_sync_high); in raid10_sync_request()
3790 struct r10conf *conf = mddev->private; in raid10_size() local
3793 raid_disks = min(conf->geo.raid_disks, in raid10_size()
3794 conf->prev.raid_disks); in raid10_size()
3796 sectors = conf->dev_sectors; in raid10_size()
3798 size = sectors >> conf->geo.chunk_shift; in raid10_size()
3799 sector_div(size, conf->geo.far_copies); in raid10_size()
3801 sector_div(size, conf->geo.near_copies); in raid10_size()
3803 return size << conf->geo.chunk_shift; in raid10_size()
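raid10_size() converts per-device capacity into exported array capacity: chunks per device, divided by far_copies, multiplied by the number of devices, divided by near_copies, then shifted back into sectors. A worked example with assumed numbers (not taken from the source):

    /* Assume 4 devices with 1024 usable chunks each,
     * near_copies = 2, far_copies = 1 (a plain "n2" layout):
     *
     *     1024 / far_copies * 4 / near_copies = 2048 chunks exported,
     *
     * i.e. half of the raw space, as expected with two copies of every block.
     * calc_sectors() below takes a per-device size, rounds it to what will
     * actually be used, and sets conf->dev_sectors and the geometry stride
     * (per its comment at source lines 3809-3810). */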
3806 static void calc_sectors(struct r10conf *conf, sector_t size) in calc_sectors() argument
3809 * actually be used, and set conf->dev_sectors and in calc_sectors()
3810 * conf->stride in calc_sectors()
3813 size = size >> conf->geo.chunk_shift; in calc_sectors()
3814 sector_div(size, conf->geo.far_copies); in calc_sectors()
3815 size = size * conf->geo.raid_disks; in calc_sectors()
3816 sector_div(size, conf->geo.near_copies); in calc_sectors()
3819 size = size * conf->copies; in calc_sectors()
3824 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); in calc_sectors()
3826 conf->dev_sectors = size << conf->geo.chunk_shift; in calc_sectors()
3828 if (conf->geo.far_offset) in calc_sectors()
3829 conf->geo.stride = 1 << conf->geo.chunk_shift; in calc_sectors()
3831 sector_div(size, conf->geo.far_copies); in calc_sectors()
3832 conf->geo.stride = size << conf->geo.chunk_shift; in calc_sectors()
3893 static void raid10_free_conf(struct r10conf *conf) in raid10_free_conf() argument
3895 if (!conf) in raid10_free_conf()
3898 mempool_exit(&conf->r10bio_pool); in raid10_free_conf()
3899 kfree(conf->mirrors); in raid10_free_conf()
3900 kfree(conf->mirrors_old); in raid10_free_conf()
3901 kfree(conf->mirrors_new); in raid10_free_conf()
3902 safe_put_page(conf->tmppage); in raid10_free_conf()
3903 bioset_exit(&conf->bio_split); in raid10_free_conf()
3904 kfree(conf); in raid10_free_conf()
3909 struct r10conf *conf = NULL; in setup_conf() local
3929 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); in setup_conf()
3930 if (!conf) in setup_conf()
3934 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks), in setup_conf()
3937 if (!conf->mirrors) in setup_conf()
3940 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
3941 if (!conf->tmppage) in setup_conf()
3944 conf->geo = geo; in setup_conf()
3945 conf->copies = copies; in setup_conf()
3946 err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc, in setup_conf()
3947 rbio_pool_free, conf); in setup_conf()
3951 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
3955 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
3957 conf->prev = conf->geo; in setup_conf()
3958 conf->reshape_progress = MaxSector; in setup_conf()
3960 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { in setup_conf()
3964 conf->reshape_progress = mddev->reshape_position; in setup_conf()
3965 if (conf->prev.far_offset) in setup_conf()
3966 conf->prev.stride = 1 << conf->prev.chunk_shift; in setup_conf()
3969 conf->prev.stride = conf->dev_sectors; in setup_conf()
3971 conf->reshape_safe = conf->reshape_progress; in setup_conf()
3972 spin_lock_init(&conf->device_lock); in setup_conf()
3973 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
3974 INIT_LIST_HEAD(&conf->bio_end_io_list); in setup_conf()
3976 seqlock_init(&conf->resync_lock); in setup_conf()
3977 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
3978 atomic_set(&conf->nr_pending, 0); in setup_conf()
3981 rcu_assign_pointer(conf->thread, in setup_conf()
3983 if (!conf->thread) in setup_conf()
3986 conf->mddev = mddev; in setup_conf()
3987 return conf; in setup_conf()
3990 raid10_free_conf(conf); in setup_conf()
3994 static unsigned int raid10_nr_stripes(struct r10conf *conf) in raid10_nr_stripes() argument
3996 unsigned int raid_disks = conf->geo.raid_disks; in raid10_nr_stripes()
3998 if (conf->geo.raid_disks % conf->geo.near_copies) in raid10_nr_stripes()
4000 return raid_disks / conf->geo.near_copies; in raid10_nr_stripes()
4005 struct r10conf *conf = mddev->private; in raid10_set_queue_limits() local
4012 lim.io_opt = lim.io_min * raid10_nr_stripes(conf); in raid10_set_queue_limits()
4022 struct r10conf *conf; in raid10_run() local
4032 conf = setup_conf(mddev); in raid10_run()
4033 if (IS_ERR(conf)) in raid10_run()
4034 return PTR_ERR(conf); in raid10_run()
4035 mddev->private = conf; in raid10_run()
4037 conf = mddev->private; in raid10_run()
4038 if (!conf) in raid10_run()
4041 rcu_assign_pointer(mddev->thread, conf->thread); in raid10_run()
4042 rcu_assign_pointer(conf->thread, NULL); in raid10_run()
4044 if (mddev_is_clustered(conf->mddev)) { in raid10_run()
4062 if (disk_idx >= conf->geo.raid_disks && in raid10_run()
4063 disk_idx >= conf->prev.raid_disks) in raid10_run()
4065 disk = conf->mirrors + disk_idx; in raid10_run()
4088 if (!mddev_is_dm(conf->mddev)) { in raid10_run()
4098 if (!enough(conf, -1)) { in raid10_run()
4104 if (conf->reshape_progress != MaxSector) { in raid10_run()
4106 if (conf->geo.far_copies != 1 && in raid10_run()
4107 conf->geo.far_offset == 0) in raid10_run()
4109 if (conf->prev.far_copies != 1 && in raid10_run()
4110 conf->prev.far_offset == 0) in raid10_run()
4116 i < conf->geo.raid_disks in raid10_run()
4117 || i < conf->prev.raid_disks; in raid10_run()
4120 disk = conf->mirrors + i; in raid10_run()
4135 conf->fullsync = 1; in raid10_run()
4141 conf->fullsync = 1; in raid10_run()
4151 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in raid10_run()
4152 conf->geo.raid_disks); in raid10_run()
4156 mddev->dev_sectors = conf->dev_sectors; in raid10_run()
4165 if (conf->reshape_progress != MaxSector) { in raid10_run()
4168 before_length = ((1 << conf->prev.chunk_shift) * in raid10_run()
4169 conf->prev.far_copies); in raid10_run()
4170 after_length = ((1 << conf->geo.chunk_shift) * in raid10_run()
4171 conf->geo.far_copies); in raid10_run()
4178 conf->offset_diff = min_offset_diff; in raid10_run()
4190 raid10_free_conf(conf); in raid10_run()
4203 struct r10conf *conf = mddev->private; in raid10_quiesce() local
4206 raise_barrier(conf, 0); in raid10_quiesce()
4208 lower_barrier(conf); in raid10_quiesce()
4225 struct r10conf *conf = mddev->private; in raid10_resize() local
4232 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) in raid10_resize()
4251 calc_sectors(conf, sectors); in raid10_resize()
4252 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
4260 struct r10conf *conf; in raid10_takeover_raid0() local
4280 conf = setup_conf(mddev); in raid10_takeover_raid0()
4281 if (!IS_ERR(conf)) { in raid10_takeover_raid0()
4289 return conf; in raid10_takeover_raid0()
4330 struct r10conf *conf = mddev->private; in raid10_check_reshape() local
4333 if (conf->geo.far_copies != 1 && !conf->geo.far_offset) in raid10_check_reshape()
4336 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
4347 if (!enough(conf, -1)) in raid10_check_reshape()
4350 kfree(conf->mirrors_new); in raid10_check_reshape()
4351 conf->mirrors_new = NULL; in raid10_check_reshape()
4354 conf->mirrors_new = in raid10_check_reshape()
4358 if (!conf->mirrors_new) in raid10_check_reshape()
4377 static int calc_degraded(struct r10conf *conf) in calc_degraded() argument
4384 for (i = 0; i < conf->prev.raid_disks; i++) { in calc_degraded()
4385 struct md_rdev *rdev = conf->mirrors[i].rdev; in calc_degraded()
4396 if (conf->geo.raid_disks == conf->prev.raid_disks) in calc_degraded()
4399 for (i = 0; i < conf->geo.raid_disks; i++) { in calc_degraded()
4400 struct md_rdev *rdev = conf->mirrors[i].rdev; in calc_degraded()
4410 if (conf->geo.raid_disks <= conf->prev.raid_disks) in calc_degraded()
4435 struct r10conf *conf = mddev->private; in raid10_start_reshape() local
4443 if (setup_geo(&new, mddev, geo_start) != conf->copies) in raid10_start_reshape()
4446 before_length = ((1 << conf->prev.chunk_shift) * in raid10_start_reshape()
4447 conf->prev.far_copies); in raid10_start_reshape()
4448 after_length = ((1 << conf->geo.chunk_shift) * in raid10_start_reshape()
4449 conf->geo.far_copies); in raid10_start_reshape()
4474 conf->offset_diff = min_offset_diff; in raid10_start_reshape()
4475 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4476 if (conf->mirrors_new) { in raid10_start_reshape()
4477 memcpy(conf->mirrors_new, conf->mirrors, in raid10_start_reshape()
4478 sizeof(struct raid10_info)*conf->prev.raid_disks); in raid10_start_reshape()
4480 kfree(conf->mirrors_old); in raid10_start_reshape()
4481 conf->mirrors_old = conf->mirrors; in raid10_start_reshape()
4482 conf->mirrors = conf->mirrors_new; in raid10_start_reshape()
4483 conf->mirrors_new = NULL; in raid10_start_reshape()
4485 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4490 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4496 conf->reshape_progress = size; in raid10_start_reshape()
4498 conf->reshape_progress = 0; in raid10_start_reshape()
4499 conf->reshape_safe = conf->reshape_progress; in raid10_start_reshape()
4500 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4507 newsize = raid10_size(mddev, 0, conf->geo.raid_disks); in raid10_start_reshape()
4549 conf->prev.raid_disks) in raid10_start_reshape()
4557 } else if (rdev->raid_disk >= conf->prev.raid_disks in raid10_start_reshape()
4567 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4568 mddev->degraded = calc_degraded(conf); in raid10_start_reshape()
4569 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4570 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4571 mddev->reshape_position = conf->reshape_progress; in raid10_start_reshape()
4579 conf->reshape_checkpoint = jiffies; in raid10_start_reshape()
4585 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4586 conf->geo = conf->prev; in raid10_start_reshape()
4587 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4591 conf->reshape_progress = MaxSector; in raid10_start_reshape()
4592 conf->reshape_safe = MaxSector; in raid10_start_reshape()
4594 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4659 * (conf->offset_diff - always positive) allows a bit of slack, in reshape_request()
4669 struct r10conf *conf = mddev->private; in reshape_request() local
4685 conf->reshape_progress < raid10_size(mddev, 0, 0)) { in reshape_request()
4687 - conf->reshape_progress); in reshape_request()
4689 conf->reshape_progress > 0) in reshape_request()
4690 sector_nr = conf->reshape_progress; in reshape_request()
4707 next = first_dev_address(conf->reshape_progress - 1, in reshape_request()
4708 &conf->geo); in reshape_request()
4713 safe = last_dev_address(conf->reshape_safe - 1, in reshape_request()
4714 &conf->prev); in reshape_request()
4716 if (next + conf->offset_diff < safe) in reshape_request()
4719 last = conf->reshape_progress - 1; in reshape_request()
4720 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask in reshape_request()
4721 & conf->prev.chunk_mask); in reshape_request()
4728 next = last_dev_address(conf->reshape_progress, &conf->geo); in reshape_request()
4733 safe = first_dev_address(conf->reshape_safe, &conf->prev); in reshape_request()
4738 if (next > safe + conf->offset_diff) in reshape_request()
4741 sector_nr = conf->reshape_progress; in reshape_request()
4742 last = sector_nr | (conf->geo.chunk_mask in reshape_request()
4743 & conf->prev.chunk_mask); in reshape_request()
4750 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
4752 wait_barrier(conf, false); in reshape_request()
4753 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4756 - conf->reshape_progress; in reshape_request()
4758 mddev->curr_resync_completed = conf->reshape_progress; in reshape_request()
4759 conf->reshape_checkpoint = jiffies; in reshape_request()
4765 allow_barrier(conf); in reshape_request()
4768 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4769 allow_barrier(conf); in reshape_request()
4772 raise_barrier(conf, 0); in reshape_request()
4775 r10_bio = raid10_alloc_init_r10buf(conf); in reshape_request()
4777 raise_barrier(conf, 1); in reshape_request()
4783 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4791 mempool_free(r10_bio, &conf->r10buf_pool); in reshape_request()
4809 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) { in reshape_request()
4813 conf->cluster_sync_low = sector_nr; in reshape_request()
4814 conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS; in reshape_request()
4823 if (sb_reshape_pos < conf->cluster_sync_low) in reshape_request()
4824 conf->cluster_sync_low = sb_reshape_pos; in reshape_request()
4827 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low, in reshape_request()
4828 conf->cluster_sync_high); in reshape_request()
4832 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4837 for (s = 0; s < conf->copies*2; s++) { in reshape_request()
4842 rdev2 = conf->mirrors[d].replacement; in reshape_request()
4845 rdev2 = conf->mirrors[d].rdev; in reshape_request()
4890 lower_barrier(conf); in reshape_request()
4896 conf->reshape_progress -= sectors_done; in reshape_request()
4898 conf->reshape_progress += sectors_done; in reshape_request()
4913 struct r10conf *conf = mddev->private; in reshape_request_write() local
4927 for (s = 0; s < conf->copies*2; s++) { in reshape_request_write()
4932 rdev = conf->mirrors[d].replacement; in reshape_request_write()
4935 rdev = conf->mirrors[d].rdev; in reshape_request_write()
4950 static void end_reshape(struct r10conf *conf) in end_reshape() argument
4952 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) in end_reshape()
4955 spin_lock_irq(&conf->device_lock); in end_reshape()
4956 conf->prev = conf->geo; in end_reshape()
4957 md_finish_reshape(conf->mddev); in end_reshape()
4959 conf->reshape_progress = MaxSector; in end_reshape()
4960 conf->reshape_safe = MaxSector; in end_reshape()
4961 spin_unlock_irq(&conf->device_lock); in end_reshape()
4963 mddev_update_io_opt(conf->mddev, raid10_nr_stripes(conf)); in end_reshape()
4964 conf->fullsync = 0; in end_reshape()
4969 struct r10conf *conf = mddev->private; in raid10_update_reshape_pos() local
4975 conf->reshape_progress = mddev->reshape_position; in raid10_update_reshape_pos()
4985 struct r10conf *conf = mddev->private; in handle_reshape_read_error() local
4991 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO); in handle_reshape_read_error()
5001 __raid10_find_phys(&conf->prev, r10b); in handle_reshape_read_error()
5013 struct md_rdev *rdev = conf->mirrors[d].rdev; in handle_reshape_read_error()
5032 if (slot >= conf->copies) in handle_reshape_read_error()
5055 struct r10conf *conf = mddev->private; in end_reshape_write() local
5061 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
5062 rdev = repl ? conf->mirrors[d].replacement : in end_reshape_write()
5063 conf->mirrors[d].rdev; in end_reshape_write()
5085 struct r10conf *conf = mddev->private; in raid10_finish_reshape() local
5098 for (d = conf->geo.raid_disks ; in raid10_finish_reshape()
5099 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
5101 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid10_finish_reshape()
5104 rdev = conf->mirrors[d].replacement; in raid10_finish_reshape()
5110 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()