Lines matching full:conf in drivers/md/raid1.c
46 static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
47 static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
240 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) in put_all_bios() argument
244 for (i = 0; i < conf->raid_disks * 2; i++) { in put_all_bios()
254 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio() local
256 put_all_bios(conf, r1_bio); in free_r1bio()
257 mempool_free(r1_bio, &conf->r1bio_pool); in free_r1bio()
262 struct r1conf *conf = r1_bio->mddev->private; in put_buf() local
266 for (i = 0; i < conf->raid_disks * 2; i++) { in put_buf()
269 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
272 mempool_free(r1_bio, &conf->r1buf_pool); in put_buf()
274 lower_barrier(conf, sect); in put_buf()
281 struct r1conf *conf = mddev->private; in reschedule_retry() local
285 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
286 list_add(&r1_bio->retry_list, &conf->retry_list); in reschedule_retry()
287 atomic_inc(&conf->nr_queued[idx]); in reschedule_retry()
288 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
290 wake_up(&conf->wait_barrier); in reschedule_retry()
312 struct r1conf *conf = r1_bio->mddev->private; in raid_end_bio_io() local
330 allow_barrier(conf, sector); in raid_end_bio_io()
338 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos() local
340 conf->mirrors[disk].head_position = in update_head_pos()
350 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk() local
351 int raid_disks = conf->raid_disks; in find_bio_disk()
367 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request() local
368 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; in raid1_end_read_request()
388 spin_lock_irqsave(&conf->device_lock, flags); in raid1_end_read_request()
389 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
390 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
393 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_end_read_request()
398 rdev_dec_pending(rdev, conf->mddev); in raid1_end_read_request()
404 mdname(conf->mddev), in raid1_end_read_request()
449 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request() local
452 struct md_rdev *rdev = conf->mirrors[mirror].rdev; in raid1_end_write_request()
466 conf->mddev->recovery); in raid1_end_write_request()
547 rdev_dec_pending(rdev, conf->mddev); in raid1_end_write_request()
578 static void update_read_sectors(struct r1conf *conf, int disk, in update_read_sectors() argument
581 struct raid1_info *info = &conf->mirrors[disk]; in update_read_sectors()
589 static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio, in choose_first_rdev() argument
596 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in choose_first_rdev()
603 rdev = conf->mirrors[disk].rdev; in choose_first_rdev()
610 update_read_sectors(conf, disk, this_sector, read_len); in choose_first_rdev()
625 static int choose_bb_rdev(struct r1conf *conf, struct r1bio *r1_bio, in choose_bb_rdev() argument
633 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in choose_bb_rdev()
641 rdev = conf->mirrors[disk].rdev; in choose_bb_rdev()
658 update_read_sectors(conf, best_disk, this_sector, best_len); in choose_bb_rdev()
664 static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio, in choose_slow_rdev() argument
672 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in choose_slow_rdev()
680 rdev = conf->mirrors[disk].rdev; in choose_slow_rdev()
691 update_read_sectors(conf, disk, this_sector, read_len); in choose_slow_rdev()
707 update_read_sectors(conf, bb_disk, this_sector, bb_read_len); in choose_slow_rdev()
713 static bool is_sequential(struct r1conf *conf, int disk, struct r1bio *r1_bio) in is_sequential() argument
716 return conf->mirrors[disk].next_seq_sect == r1_bio->sector || in is_sequential()
717 conf->mirrors[disk].head_position == r1_bio->sector; in is_sequential()
724 static bool should_choose_next(struct r1conf *conf, int disk) in should_choose_next() argument
726 struct raid1_info *mirror = &conf->mirrors[disk]; in should_choose_next()
766 static int choose_best_rdev(struct r1conf *conf, struct r1bio *r1_bio) in choose_best_rdev() argument
777 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in choose_best_rdev()
785 rdev = conf->mirrors[disk].rdev; in choose_best_rdev()
794 dist = abs(r1_bio->sector - conf->mirrors[disk].head_position); in choose_best_rdev()
797 if (is_sequential(conf, disk, r1_bio)) { in choose_best_rdev()
798 if (!should_choose_next(conf, disk)) in choose_best_rdev()
838 (READ_ONCE(conf->nonrot_disks) || ctl.min_pending == 0)) in choose_best_rdev()
862 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, in read_balance() argument
869 if (raid1_should_read_first(conf->mddev, r1_bio->sector, in read_balance()
871 return choose_first_rdev(conf, r1_bio, max_sectors); in read_balance()
873 disk = choose_best_rdev(conf, r1_bio); in read_balance()
876 update_read_sectors(conf, disk, r1_bio->sector, in read_balance()
886 disk = choose_bb_rdev(conf, r1_bio, max_sectors); in read_balance()
890 return choose_slow_rdev(conf, r1_bio, max_sectors); in read_balance()
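
Taken together, the matches above trace read_balance()'s selection cascade. The following is a condensed reconstruction of that flow from the lines shown here, not a verbatim copy of the function; details that do not match 'conf' are not visible in this listing and are elided:

static int read_balance(struct r1conf *conf, struct r1bio *r1_bio,
			int *max_sectors)
{
	int disk;

	/* Arrays that must read carefully (e.g. resync in progress)
	 * take the first usable disk instead of balancing. */
	if (raid1_should_read_first(conf->mddev, r1_bio->sector,
				    r1_bio->sectors))
		return choose_first_rdev(conf, r1_bio, max_sectors);

	disk = choose_best_rdev(conf, r1_bio);
	if (disk >= 0) {
		*max_sectors = r1_bio->sectors;
		update_read_sectors(conf, disk, r1_bio->sector,
				    *max_sectors);
		return disk;
	}

	/* No fully usable disk: prefer one whose bad blocks still allow a
	 * partial read, then fall back to write-mostly (slow) disks. */
	disk = choose_bb_rdev(conf, r1_bio, max_sectors);
	if (disk >= 0)
		return disk;

	return choose_slow_rdev(conf, r1_bio, max_sectors);
}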
893 static void wake_up_barrier(struct r1conf *conf) in wake_up_barrier() argument
895 if (wq_has_sleeper(&conf->wait_barrier)) in wake_up_barrier()
896 wake_up(&conf->wait_barrier); in wake_up_barrier()
899 static void flush_bio_list(struct r1conf *conf, struct bio *bio) in flush_bio_list() argument
902 raid1_prepare_flush_writes(conf->mddev); in flush_bio_list()
903 wake_up_barrier(conf); in flush_bio_list()
914 static void flush_pending_writes(struct r1conf *conf) in flush_pending_writes() argument
919 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
921 if (conf->pending_bio_list.head) { in flush_pending_writes()
925 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
926 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
939 flush_bio_list(conf, bio); in flush_pending_writes()
942 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
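
flush_pending_writes() follows the usual drain pattern: detach the whole bio list while holding device_lock, drop the lock, and only then submit, so the spinlock is never held across I/O submission. A schematic of that shape, with the block-plug setup and the work done inside flush_bio_list() elided:

	spin_lock_irq(&conf->device_lock);
	if (conf->pending_bio_list.head) {
		/* bio_list_get() empties the list atomically under the lock. */
		struct bio *bio = bio_list_get(&conf->pending_bio_list);

		spin_unlock_irq(&conf->device_lock);
		flush_bio_list(conf, bio);	/* submits; may sleep */
	} else {
		spin_unlock_irq(&conf->device_lock);
	}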
969 static int raise_barrier(struct r1conf *conf, sector_t sector_nr) in raise_barrier() argument
973 spin_lock_irq(&conf->resync_lock); in raise_barrier()
976 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
977 !atomic_read(&conf->nr_waiting[idx]), in raise_barrier()
978 conf->resync_lock); in raise_barrier()
981 atomic_inc(&conf->barrier[idx]); in raise_barrier()
983 * In raise_barrier() we first increase conf->barrier[idx], then in raise_barrier()
984 * check conf->nr_pending[idx]. In _wait_barrier() we first in raise_barrier()
985 * increase conf->nr_pending[idx], then check conf->barrier[idx]. in raise_barrier()
986 * A memory barrier is needed here to make sure conf->nr_pending[idx] won't in raise_barrier()
987 * be fetched before conf->barrier[idx] is increased. Otherwise in raise_barrier()
994 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O in raise_barrier()
996 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning reaches in raise_barrier()
999 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
1000 (!conf->array_frozen && in raise_barrier()
1001 !atomic_read(&conf->nr_pending[idx]) && in raise_barrier()
1002 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) || in raise_barrier()
1003 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), in raise_barrier()
1004 conf->resync_lock); in raise_barrier()
1006 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in raise_barrier()
1007 atomic_dec(&conf->barrier[idx]); in raise_barrier()
1008 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
1009 wake_up(&conf->wait_barrier); in raise_barrier()
1013 atomic_inc(&conf->nr_sync_pending); in raise_barrier()
1014 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
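
The comment block above describes a store-buffering handshake: each side publishes its own counter before checking the other's, and the memory barrier forbids the one interleaving where both sides miss each other's increment. A minimal userspace model in C11, where seq_cst atomics stand in for the kernel's explicit barrier (illustration only, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int barrier;	/* models conf->barrier[idx]    */
static atomic_int nr_pending;	/* models conf->nr_pending[idx] */

/* Resync side, as in raise_barrier(): raise first, then look for I/O. */
static bool resync_try_enter(void)
{
	atomic_fetch_add(&barrier, 1);
	if (atomic_load(&nr_pending) != 0) {
		atomic_fetch_sub(&barrier, 1);
		return false;		/* caller sleeps and retries */
	}
	return true;
}

/* I/O side, as in _wait_barrier(): publish first, then look for a barrier. */
static bool io_try_enter(void)
{
	atomic_fetch_add(&nr_pending, 1);
	if (atomic_load(&barrier) != 0) {
		atomic_fetch_sub(&nr_pending, 1);
		return false;		/* caller sleeps and retries */
	}
	return true;
}

With seq_cst ordering at least one side is guaranteed to observe the other's increment, so resync and regular I/O can never both proceed; without the barrier, both loads could be satisfied before either store became visible.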
1019 static void lower_barrier(struct r1conf *conf, sector_t sector_nr) in lower_barrier() argument
1023 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0); in lower_barrier()
1025 atomic_dec(&conf->barrier[idx]); in lower_barrier()
1026 atomic_dec(&conf->nr_sync_pending); in lower_barrier()
1027 wake_up(&conf->wait_barrier); in lower_barrier()
1030 static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait) in _wait_barrier() argument
1035 * We need to increase conf->nr_pending[idx] very early here, in _wait_barrier()
1037 * conf->nr_pending[idx] to be 0. Then we can avoid holding in _wait_barrier()
1038 * conf->resync_lock when there is no barrier raised in same in _wait_barrier()
1042 atomic_inc(&conf->nr_pending[idx]); in _wait_barrier()
1044 * In _wait_barrier() we first increase conf->nr_pending[idx], then in _wait_barrier()
1045 * check conf->barrier[idx]. In raise_barrier() we first increase in _wait_barrier()
1046 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory in _wait_barrier()
1047 * barrier is necessary here to make sure conf->barrier[idx] won't be in _wait_barrier()
1048 * fetched before conf->nr_pending[idx] is increased. Otherwise there in _wait_barrier()
1055 * here. If, while we check conf->barrier[idx], the array is in _wait_barrier()
1056 * frozen (conf->array_frozen is 1), and conf->barrier[idx] is in _wait_barrier()
1062 if (!READ_ONCE(conf->array_frozen) && in _wait_barrier()
1063 !atomic_read(&conf->barrier[idx])) in _wait_barrier()
1067 * After holding conf->resync_lock, conf->nr_pending[idx] in _wait_barrier()
1070 * raise_barrier() might be waiting for conf->nr_pending[idx] in _wait_barrier()
1073 spin_lock_irq(&conf->resync_lock); in _wait_barrier()
1074 atomic_inc(&conf->nr_waiting[idx]); in _wait_barrier()
1075 atomic_dec(&conf->nr_pending[idx]); in _wait_barrier()
1080 wake_up_barrier(conf); in _wait_barrier()
1087 wait_event_lock_irq(conf->wait_barrier, in _wait_barrier()
1088 !conf->array_frozen && in _wait_barrier()
1089 !atomic_read(&conf->barrier[idx]), in _wait_barrier()
1090 conf->resync_lock); in _wait_barrier()
1091 atomic_inc(&conf->nr_pending[idx]); in _wait_barrier()
1094 atomic_dec(&conf->nr_waiting[idx]); in _wait_barrier()
1095 spin_unlock_irq(&conf->resync_lock); in _wait_barrier()
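
Stitched back together, the matches above give _wait_barrier() the following shape: a lockless fast path, and a slow path that converts the request from "pending" to "waiting" so raise_barrier() and freeze_array() can make progress while it sleeps. A condensed reconstruction (the nowait early-return branches are elided):

static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
{
	/* Fast path: publish the I/O, then peek at the barrier. */
	atomic_inc(&conf->nr_pending[idx]);
	smp_mb__after_atomic();	/* pairs with the barrier in raise_barrier() */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return true;

	/* Slow path: demote pending -> waiting, wake the other side,
	 * and sleep until the barrier for this bucket is lowered. */
	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	wake_up_barrier(conf);
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    !atomic_read(&conf->barrier[idx]),
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
	return true;
}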
1099 static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait) in wait_read_barrier() argument
1109 * conf->barrier[idx] here, a memory barrier is unnecessary as well. in wait_read_barrier()
1111 atomic_inc(&conf->nr_pending[idx]); in wait_read_barrier()
1113 if (!READ_ONCE(conf->array_frozen)) in wait_read_barrier()
1116 spin_lock_irq(&conf->resync_lock); in wait_read_barrier()
1117 atomic_inc(&conf->nr_waiting[idx]); in wait_read_barrier()
1118 atomic_dec(&conf->nr_pending[idx]); in wait_read_barrier()
1123 wake_up_barrier(conf); in wait_read_barrier()
1131 wait_event_lock_irq(conf->wait_barrier, in wait_read_barrier()
1132 !conf->array_frozen, in wait_read_barrier()
1133 conf->resync_lock); in wait_read_barrier()
1134 atomic_inc(&conf->nr_pending[idx]); in wait_read_barrier()
1137 atomic_dec(&conf->nr_waiting[idx]); in wait_read_barrier()
1138 spin_unlock_irq(&conf->resync_lock); in wait_read_barrier()
1142 static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait) in wait_barrier() argument
1146 return _wait_barrier(conf, idx, nowait); in wait_barrier()
1149 static void _allow_barrier(struct r1conf *conf, int idx) in _allow_barrier() argument
1151 atomic_dec(&conf->nr_pending[idx]); in _allow_barrier()
1152 wake_up_barrier(conf); in _allow_barrier()
1155 static void allow_barrier(struct r1conf *conf, sector_t sector_nr) in allow_barrier() argument
1159 _allow_barrier(conf, idx); in allow_barrier()
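
wait_barrier() and allow_barrier() first map the sector to one of BARRIER_BUCKETS_NR per-bucket counters; in the kernel that mapping is sector_to_idx() in raid1.h. A userspace approximation of the idea (the unit and bucket constants are assumptions taken from raid1.h, not from this listing):

#include <stdint.h>

#define BARRIER_UNIT_SECTOR_BITS 17	/* 2^17 sectors * 512B = 64MiB */
#define BARRIER_BUCKETS_NR_BITS  10
#define BARRIER_BUCKETS_NR       (1 << BARRIER_BUCKETS_NR_BITS)

static int sector_to_idx(uint64_t sector)
{
	/* The kernel uses hash_long(); any decent integer hash works for
	 * the illustration. This is a golden-ratio multiplicative hash. */
	uint64_t unit = sector >> BARRIER_UNIT_SECTOR_BITS;

	return (int)((unit * 0x9E3779B97F4A7C15ULL) >>
		     (64 - BARRIER_BUCKETS_NR_BITS));
}

Hashing the 64MiB barrier unit rather than the raw sector means all I/O within one resync window shares a bucket, while unrelated regions rarely contend on the same counters.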
1162 /* conf->resync_lock should be held */
1163 static int get_unqueued_pending(struct r1conf *conf) in get_unqueued_pending() argument
1167 ret = atomic_read(&conf->nr_sync_pending); in get_unqueued_pending()
1169 ret += atomic_read(&conf->nr_pending[idx]) - in get_unqueued_pending()
1170 atomic_read(&conf->nr_queued[idx]); in get_unqueued_pending()
1175 static void freeze_array(struct r1conf *conf, int extra) in freeze_array() argument
1188 * Every in-flight I/O contributes to a conf->nr_pending[idx], where idx is the in freeze_array()
1190 * normal I/O are queued, sum of all conf->nr_pending[] will match sum in freeze_array()
1191 * of all conf->nr_queued[]. But normal I/O failure is an exception, in freeze_array()
1197 * get_unqueued_pending(conf) becomes equal to extra. For in freeze_array()
1200 spin_lock_irq(&conf->resync_lock); in freeze_array()
1201 conf->array_frozen = 1; in freeze_array()
1202 mddev_add_trace_msg(conf->mddev, "raid1 wait freeze"); in freeze_array()
1204 conf->wait_barrier, in freeze_array()
1205 get_unqueued_pending(conf) == extra, in freeze_array()
1206 conf->resync_lock, in freeze_array()
1207 flush_pending_writes(conf)); in freeze_array()
1208 spin_unlock_irq(&conf->resync_lock); in freeze_array()
1210 static void unfreeze_array(struct r1conf *conf) in unfreeze_array() argument
1213 spin_lock_irq(&conf->resync_lock); in unfreeze_array()
1214 conf->array_frozen = 0; in unfreeze_array()
1215 spin_unlock_irq(&conf->resync_lock); in unfreeze_array()
1216 wake_up(&conf->wait_barrier); in unfreeze_array()
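
Reading freeze_array() together with get_unqueued_pending() makes the wait condition concrete: the array is quiescent once every in-flight request is either queued for the daemon thread or is one of the 'extra' requests the freezer's caller itself still holds. A small illustrative helper (array_quiesced() is a hypothetical name, not a kernel function):

static bool array_quiesced(struct r1conf *conf, int extra)
{
	/* nr_sync_pending + sum(nr_pending[i] - nr_queued[i]) == extra */
	return get_unqueued_pending(conf) == extra;
}

For example, handle_read_error() calls freeze_array(conf, 1): the failed read it still owns is counted in nr_pending[] but was never added to nr_queued[], so quiescence is reached when that read is the only unqueued request left.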
1272 struct r1conf *conf = mddev->private; in raid1_unplug() local
1276 spin_lock_irq(&conf->device_lock); in raid1_unplug()
1277 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid1_unplug()
1278 spin_unlock_irq(&conf->device_lock); in raid1_unplug()
1279 wake_up_barrier(conf); in raid1_unplug()
1287 flush_bio_list(conf, bio); in raid1_unplug()
1303 struct r1conf *conf = mddev->private; in alloc_r1bio() local
1306 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); in alloc_r1bio()
1308 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); in alloc_r1bio()
1316 struct r1conf *conf = mddev->private; in raid1_read_request() local
1334 if (!wait_read_barrier(conf, bio->bi_iter.bi_sector, in raid1_read_request()
1350 rdisk = read_balance(conf, r1_bio, &max_sectors); in raid1_read_request()
1356 conf->mirrors[r1_bio->read_disk].rdev->bdev, in raid1_read_request()
1361 mirror = conf->mirrors + rdisk; in raid1_read_request()
1380 gfp, &conf->bio_split); in raid1_read_request()
1423 struct r1conf *conf = mddev->private; in wait_blocked_rdev() local
1424 int disks = conf->raid_disks * 2; in wait_blocked_rdev()
1429 struct md_rdev *rdev = conf->mirrors[i].rdev; in wait_blocked_rdev()
1458 struct r1conf *conf = mddev->private; in raid1_write_request() local
1477 prepare_to_wait(&conf->wait_barrier, in raid1_write_request()
1485 finish_wait(&conf->wait_barrier, &w); in raid1_write_request()
1493 if (!wait_barrier(conf, bio->bi_iter.bi_sector, in raid1_write_request()
1518 disks = conf->raid_disks * 2; in raid1_write_request()
1521 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_write_request()
1588 GFP_NOIO, &conf->bio_split); in raid1_write_request()
1610 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_write_request()
1656 conf->raid_disks - mddev->degraded > 1) in raid1_write_request()
1665 spin_lock_irqsave(&conf->device_lock, flags); in raid1_write_request()
1666 bio_list_add(&conf->pending_bio_list, mbio); in raid1_write_request()
1667 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_write_request()
1675 wake_up_barrier(conf); in raid1_write_request()
1680 rdev_dec_pending(conf->mirrors[k].rdev, mddev); in raid1_write_request()
1719 struct r1conf *conf = mddev->private; in raid1_status() local
1724 seq_printf(seq, " [%d/%d] [", conf->raid_disks, in raid1_status()
1725 conf->raid_disks - mddev->degraded); in raid1_status()
1726 for (i = 0; i < conf->raid_disks; i++) { in raid1_status()
1727 struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev); in raid1_status()
1753 struct r1conf *conf = mddev->private; in raid1_error() local
1756 spin_lock_irqsave(&conf->device_lock, flags); in raid1_error()
1759 (conf->raid_disks - mddev->degraded) == 1) { in raid1_error()
1763 conf->recovery_disabled = mddev->recovery_disabled; in raid1_error()
1764 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_error()
1772 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_error()
1782 mdname(mddev), conf->raid_disks - mddev->degraded); in raid1_error()
1785 static void print_conf(struct r1conf *conf) in print_conf() argument
1789 pr_debug("RAID1 conf printout:\n"); in print_conf()
1790 if (!conf) { in print_conf()
1791 pr_debug("(!conf)\n"); in print_conf()
1794 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_conf()
1795 conf->raid_disks); in print_conf()
1797 lockdep_assert_held(&conf->mddev->reconfig_mutex); in print_conf()
1798 for (i = 0; i < conf->raid_disks; i++) { in print_conf()
1799 struct md_rdev *rdev = conf->mirrors[i].rdev; in print_conf()
1808 static void close_sync(struct r1conf *conf) in close_sync() argument
1813 _wait_barrier(conf, idx, false); in close_sync()
1814 _allow_barrier(conf, idx); in close_sync()
1817 mempool_exit(&conf->r1buf_pool); in close_sync()
1823 struct r1conf *conf = mddev->private; in raid1_spare_active() local
1834 spin_lock_irqsave(&conf->device_lock, flags); in raid1_spare_active()
1835 for (i = 0; i < conf->raid_disks; i++) { in raid1_spare_active()
1836 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_spare_active()
1837 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; in raid1_spare_active()
1866 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_spare_active()
1868 print_conf(conf); in raid1_spare_active()
1872 static bool raid1_add_conf(struct r1conf *conf, struct md_rdev *rdev, int disk, in raid1_add_conf() argument
1875 struct raid1_info *info = conf->mirrors + disk; in raid1_add_conf()
1878 info += conf->raid_disks; in raid1_add_conf()
1885 WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks + 1); in raid1_add_conf()
1896 static bool raid1_remove_conf(struct r1conf *conf, int disk) in raid1_remove_conf() argument
1898 struct raid1_info *info = conf->mirrors + disk; in raid1_remove_conf()
1907 rdev->mddev->recovery_disabled != conf->recovery_disabled && in raid1_remove_conf()
1908 rdev->mddev->degraded < conf->raid_disks) in raid1_remove_conf()
1912 WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks - 1); in raid1_remove_conf()
1920 struct r1conf *conf = mddev->private; in raid1_add_disk() local
1925 int last = conf->raid_disks - 1; in raid1_add_disk()
1927 if (mddev->recovery_disabled == conf->recovery_disabled) in raid1_add_disk()
1939 rdev->saved_raid_disk < conf->raid_disks && in raid1_add_disk()
1940 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid1_add_disk()
1944 p = conf->mirrors + mirror; in raid1_add_disk()
1950 raid1_add_conf(conf, rdev, mirror, false); in raid1_add_disk()
1955 conf->fullsync = 1; in raid1_add_disk()
1959 p[conf->raid_disks].rdev == NULL && repl_slot < 0) in raid1_add_disk()
1967 raid1_add_conf(conf, rdev, repl_slot, true); in raid1_add_disk()
1969 conf->fullsync = 1; in raid1_add_disk()
1972 print_conf(conf); in raid1_add_disk()
1978 struct r1conf *conf = mddev->private; in raid1_remove_disk() local
1981 struct raid1_info *p = conf->mirrors + number; in raid1_remove_disk()
1983 if (unlikely(number >= conf->raid_disks)) in raid1_remove_disk()
1987 number += conf->raid_disks; in raid1_remove_disk()
1988 p = conf->mirrors + number; in raid1_remove_disk()
1991 print_conf(conf); in raid1_remove_disk()
1993 if (!raid1_remove_conf(conf, number)) { in raid1_remove_disk()
1998 if (number < conf->raid_disks && in raid1_remove_disk()
1999 conf->mirrors[conf->raid_disks + number].rdev) { in raid1_remove_disk()
2005 conf->mirrors[conf->raid_disks + number].rdev; in raid1_remove_disk()
2006 freeze_array(conf, 0); in raid1_remove_disk()
2015 unfreeze_array(conf); in raid1_remove_disk()
2020 conf->mirrors[conf->raid_disks + number].rdev = NULL; in raid1_remove_disk()
2021 unfreeze_array(conf); in raid1_remove_disk()
2029 print_conf(conf); in raid1_remove_disk()
2086 struct r1conf *conf = mddev->private; in end_sync_write() local
2087 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; in end_sync_write()
2097 !rdev_has_badblock(conf->mirrors[r1_bio->read_disk].rdev, in end_sync_write()
2138 struct r1conf *conf = mddev->private; in fix_sync_read_error() local
2146 rdev = conf->mirrors[r1_bio->read_disk].rdev; in fix_sync_read_error()
2172 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2181 if (d == conf->raid_disks * 2) in fix_sync_read_error()
2195 for (d = 0; d < conf->raid_disks * 2; d++) { in fix_sync_read_error()
2196 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2203 conf->recovery_disabled = in fix_sync_read_error()
2221 d = conf->raid_disks * 2; in fix_sync_read_error()
2225 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2236 d = conf->raid_disks * 2; in fix_sync_read_error()
2240 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2265 struct r1conf *conf = mddev->private; in process_checks() local
2272 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2280 bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ); in process_checks()
2283 conf->mirrors[i].rdev->data_offset; in process_checks()
2291 for (primary = 0; primary < conf->raid_disks * 2; primary++) in process_checks()
2295 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); in process_checks()
2299 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2333 rdev_dec_pending(conf->mirrors[i].rdev, mddev); in process_checks()
2343 struct r1conf *conf = mddev->private; in sync_request_write() local
2345 int disks = conf->raid_disks * 2; in sync_request_write()
2367 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { in sync_request_write()
2373 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) in sync_request_write()
2378 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); in sync_request_write()
2394 static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio) in fix_read_error() argument
2399 struct mddev *mddev = conf->mddev; in fix_read_error()
2400 struct md_rdev *rdev = conf->mirrors[read_disk].rdev; in fix_read_error()
2417 rdev = conf->mirrors[d].rdev; in fix_read_error()
2425 conf->tmppage, REQ_OP_READ, false)) in fix_read_error()
2433 if (d == conf->raid_disks * 2) in fix_read_error()
2439 struct md_rdev *rdev = conf->mirrors[read_disk].rdev; in fix_read_error()
2448 d = conf->raid_disks * 2; in fix_read_error()
2450 rdev = conf->mirrors[d].rdev; in fix_read_error()
2455 conf->tmppage, REQ_OP_WRITE); in fix_read_error()
2462 d = conf->raid_disks * 2; in fix_read_error()
2464 rdev = conf->mirrors[d].rdev; in fix_read_error()
2469 conf->tmppage, REQ_OP_READ)) { in fix_read_error()
2488 struct r1conf *conf = mddev->private; in narrow_write_error() local
2489 struct md_rdev *rdev = conf->mirrors[i].rdev; in narrow_write_error()
2554 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_sync_write_finished() argument
2558 for (m = 0; m < conf->raid_disks * 2 ; m++) { in handle_sync_write_finished()
2559 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_sync_write_finished()
2570 md_error(conf->mddev, rdev); in handle_sync_write_finished()
2574 md_done_sync(conf->mddev, s, 1); in handle_sync_write_finished()
2577 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_write_finished() argument
2582 for (m = 0; m < conf->raid_disks * 2 ; m++) in handle_write_finished()
2584 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_write_finished()
2588 rdev_dec_pending(rdev, conf->mddev); in handle_write_finished()
2596 md_error(conf->mddev, in handle_write_finished()
2597 conf->mirrors[m].rdev); in handle_write_finished()
2599 rdev_dec_pending(conf->mirrors[m].rdev, in handle_write_finished()
2600 conf->mddev); in handle_write_finished()
2603 spin_lock_irq(&conf->device_lock); in handle_write_finished()
2604 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); in handle_write_finished()
2606 atomic_inc(&conf->nr_queued[idx]); in handle_write_finished()
2607 spin_unlock_irq(&conf->device_lock); in handle_write_finished()
2612 wake_up(&conf->wait_barrier); in handle_write_finished()
2613 md_wakeup_thread(conf->mddev->thread); in handle_write_finished()
2621 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) in handle_read_error() argument
2623 struct mddev *mddev = conf->mddev; in handle_read_error()
2642 rdev = conf->mirrors[r1_bio->read_disk].rdev; in handle_read_error()
2645 freeze_array(conf, 1); in handle_read_error()
2646 fix_read_error(conf, r1_bio); in handle_read_error()
2647 unfreeze_array(conf); in handle_read_error()
2654 rdev_dec_pending(rdev, conf->mddev); in handle_read_error()
2661 allow_barrier(conf, sector); in handle_read_error()
2669 struct r1conf *conf = mddev->private; in raid1d() local
2670 struct list_head *head = &conf->retry_list; in raid1d()
2676 if (!list_empty_careful(&conf->bio_end_io_list) && in raid1d()
2679 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2681 list_splice_init(&conf->bio_end_io_list, &tmp); in raid1d()
2682 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2688 atomic_dec(&conf->nr_queued[idx]); in raid1d()
2698 flush_pending_writes(conf); in raid1d()
2700 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2702 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2708 atomic_dec(&conf->nr_queued[idx]); in raid1d()
2709 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2712 conf = mddev->private; in raid1d()
2716 handle_sync_write_finished(conf, r1_bio); in raid1d()
2721 handle_write_finished(conf, r1_bio); in raid1d()
2723 handle_read_error(conf, r1_bio); in raid1d()
2734 static int init_resync(struct r1conf *conf) in init_resync() argument
2739 BUG_ON(mempool_initialized(&conf->r1buf_pool)); in init_resync()
2741 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc, in init_resync()
2742 r1buf_pool_free, conf->poolinfo); in init_resync()
2745 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) in raid1_alloc_init_r1buf() argument
2747 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO); in raid1_alloc_init_r1buf()
2752 for (i = conf->poolinfo->raid_disks; i--; ) { in raid1_alloc_init_r1buf()
2775 struct r1conf *conf = mddev->private; in raid1_sync_request() local
2790 if (!mempool_initialized(&conf->r1buf_pool)) in raid1_sync_request()
2791 if (init_resync(conf)) in raid1_sync_request()
2804 conf->fullsync = 0; in raid1_sync_request()
2807 close_sync(conf); in raid1_sync_request()
2810 conf->cluster_sync_low = 0; in raid1_sync_request()
2811 conf->cluster_sync_high = 0; in raid1_sync_request()
2819 conf->fullsync == 0) { in raid1_sync_request()
2827 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in raid1_sync_request()
2837 if (atomic_read(&conf->nr_waiting[idx])) in raid1_sync_request()
2846 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid1_sync_request()
2848 if (raise_barrier(conf, sector_nr)) in raid1_sync_request()
2851 r1_bio = raid1_alloc_init_r1buf(conf); in raid1_sync_request()
2869 for (i = 0; i < conf->raid_disks * 2; i++) { in raid1_sync_request()
2873 rdev = conf->mirrors[i].rdev; in raid1_sync_request()
2876 if (i < conf->raid_disks) in raid1_sync_request()
2940 for (i = 0 ; i < conf->raid_disks * 2 ; i++) in raid1_sync_request()
2942 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_sync_request()
2957 conf->recovery_disabled = mddev->recovery_disabled; in raid1_sync_request()
3003 !conf->fullsync && in raid1_sync_request()
3010 for (i = 0 ; i < conf->raid_disks * 2; i++) { in raid1_sync_request()
3033 conf->cluster_sync_high < sector_nr + nr_sectors) { in raid1_sync_request()
3034 conf->cluster_sync_low = mddev->curr_resync_completed; in raid1_sync_request()
3035 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS; in raid1_sync_request()
3038 conf->cluster_sync_low, in raid1_sync_request()
3039 conf->cluster_sync_high); in raid1_sync_request()
3047 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { in raid1_sync_request()
3078 struct r1conf *conf; in setup_conf() local
3084 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); in setup_conf()
3085 if (!conf) in setup_conf()
3088 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
3090 if (!conf->nr_pending) in setup_conf()
3093 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
3095 if (!conf->nr_waiting) in setup_conf()
3098 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
3100 if (!conf->nr_queued) in setup_conf()
3103 conf->barrier = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
3105 if (!conf->barrier) in setup_conf()
3108 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info), in setup_conf()
3111 if (!conf->mirrors) in setup_conf()
3114 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
3115 if (!conf->tmppage) in setup_conf()
3118 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); in setup_conf()
3119 if (!conf->poolinfo) in setup_conf()
3121 conf->poolinfo->raid_disks = mddev->raid_disks * 2; in setup_conf()
3122 err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc, in setup_conf()
3123 rbio_pool_free, conf->poolinfo); in setup_conf()
3127 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
3131 conf->poolinfo->mddev = mddev; in setup_conf()
3134 spin_lock_init(&conf->device_lock); in setup_conf()
3135 conf->raid_disks = mddev->raid_disks; in setup_conf()
3139 if (disk_idx >= conf->raid_disks || disk_idx < 0) in setup_conf()
3142 if (!raid1_add_conf(conf, rdev, disk_idx, in setup_conf()
3146 conf->mddev = mddev; in setup_conf()
3147 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
3148 INIT_LIST_HEAD(&conf->bio_end_io_list); in setup_conf()
3150 spin_lock_init(&conf->resync_lock); in setup_conf()
3151 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
3153 bio_list_init(&conf->pending_bio_list); in setup_conf()
3154 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
3157 for (i = 0; i < conf->raid_disks * 2; i++) { in setup_conf()
3159 disk = conf->mirrors + i; in setup_conf()
3161 if (i < conf->raid_disks && in setup_conf()
3162 disk[conf->raid_disks].rdev) { in setup_conf()
3169 disk[conf->raid_disks].rdev; in setup_conf()
3170 disk[conf->raid_disks].rdev = NULL; in setup_conf()
3181 conf->fullsync = 1; in setup_conf()
3186 rcu_assign_pointer(conf->thread, in setup_conf()
3188 if (!conf->thread) in setup_conf()
3191 return conf; in setup_conf()
3194 if (conf) { in setup_conf()
3195 mempool_exit(&conf->r1bio_pool); in setup_conf()
3196 kfree(conf->mirrors); in setup_conf()
3197 safe_put_page(conf->tmppage); in setup_conf()
3198 kfree(conf->poolinfo); in setup_conf()
3199 kfree(conf->nr_pending); in setup_conf()
3200 kfree(conf->nr_waiting); in setup_conf()
3201 kfree(conf->nr_queued); in setup_conf()
3202 kfree(conf->barrier); in setup_conf()
3203 bioset_exit(&conf->bio_split); in setup_conf()
3204 kfree(conf); in setup_conf()
3225 struct r1conf *conf; in raid1_run() local
3246 conf = setup_conf(mddev); in raid1_run()
3248 conf = mddev->private; in raid1_run()
3250 if (IS_ERR(conf)) in raid1_run()
3251 return PTR_ERR(conf); in raid1_run()
3257 raid1_free(mddev, conf); in raid1_run()
3263 for (i = 0; i < conf->raid_disks; i++) in raid1_run()
3264 if (conf->mirrors[i].rdev == NULL || in raid1_run()
3265 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || in raid1_run()
3266 test_bit(Faulty, &conf->mirrors[i].rdev->flags)) in raid1_run()
3271 if (conf->raid_disks - mddev->degraded < 1) { in raid1_run()
3272 md_unregister_thread(mddev, &conf->thread); in raid1_run()
3274 raid1_free(mddev, conf); in raid1_run()
3278 if (conf->raid_disks - mddev->degraded == 1) in raid1_run()
3291 rcu_assign_pointer(mddev->thread, conf->thread); in raid1_run()
3292 rcu_assign_pointer(conf->thread, NULL); in raid1_run()
3293 mddev->private = conf; in raid1_run()
3306 struct r1conf *conf = priv; in raid1_free() local
3308 mempool_exit(&conf->r1bio_pool); in raid1_free()
3309 kfree(conf->mirrors); in raid1_free()
3310 safe_put_page(conf->tmppage); in raid1_free()
3311 kfree(conf->poolinfo); in raid1_free()
3312 kfree(conf->nr_pending); in raid1_free()
3313 kfree(conf->nr_waiting); in raid1_free()
3314 kfree(conf->nr_queued); in raid1_free()
3315 kfree(conf->barrier); in raid1_free()
3316 bioset_exit(&conf->bio_split); in raid1_free()
3317 kfree(conf); in raid1_free()
3355 * 2/ resize conf->mirrors in raid1_reshape()
3359 * Then resize conf->mirrors and swap in the new r1bio pool. in raid1_reshape()
3367 struct r1conf *conf = mddev->private; in raid1_reshape() local
3391 if (raid_disks < conf->raid_disks) { in raid1_reshape()
3393 for (d = 0; d < conf->raid_disks; d++) in raid1_reshape()
3394 if (conf->mirrors[d].rdev) in raid1_reshape()
3421 freeze_array(conf, 0); in raid1_reshape()
3424 oldpool = conf->r1bio_pool; in raid1_reshape()
3425 conf->r1bio_pool = newpool; in raid1_reshape()
3427 for (d = d2 = 0; d < conf->raid_disks; d++) { in raid1_reshape()
3428 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid1_reshape()
3440 kfree(conf->mirrors); in raid1_reshape()
3441 conf->mirrors = newmirrors; in raid1_reshape()
3442 kfree(conf->poolinfo); in raid1_reshape()
3443 conf->poolinfo = newpoolinfo; in raid1_reshape()
3445 spin_lock_irqsave(&conf->device_lock, flags); in raid1_reshape()
3446 mddev->degraded += (raid_disks - conf->raid_disks); in raid1_reshape()
3447 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_reshape()
3448 conf->raid_disks = mddev->raid_disks = raid_disks; in raid1_reshape()
3451 unfreeze_array(conf); in raid1_reshape()
3463 struct r1conf *conf = mddev->private; in raid1_quiesce() local
3466 freeze_array(conf, 0); in raid1_quiesce()
3468 unfreeze_array(conf); in raid1_quiesce()
3477 struct r1conf *conf; in raid1_takeover() local
3481 conf = setup_conf(mddev); in raid1_takeover()
3482 if (!IS_ERR(conf)) { in raid1_takeover()
3484 conf->array_frozen = 1; in raid1_takeover()
3488 return conf; in raid1_takeover()