Lines Matching defs:r5conf

struct r5conf {
	struct hlist_head	*stripe_hashtbl;
	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];
	struct mddev		*mddev;
	int			chunk_sectors;
	int			level, algorithm, rmw_level;
	int			max_degraded;
	int			raid_disks;
	int			max_nr_stripes;
	int			min_nr_stripes;
	unsigned long		stripe_size;
	unsigned int		stripe_shift;
	unsigned long		stripe_sectors;

	sector_t		reshape_progress;
	sector_t		reshape_safe;
	int			previous_raid_disks;
	int			prev_chunk_sectors;
	int			prev_algo;
	short			generation;	/* increments with every reshape */
	seqcount_spinlock_t	gen_lock;	/* lock against generation changes */
	unsigned long		reshape_checkpoint; /* time we last updated metadata */
	long long		min_offset_diff; /* minimum difference between
						  * data_offset and new_data_offset
						  * across all devices; may be
						  * negative, but is closest to zero */

	struct list_head	handle_list;	/* stripes needing handling */
	struct list_head	loprio_list;	/* low priority stripes */
	struct list_head	hold_list;	/* preread ready stripes */
	struct list_head	delayed_list;	/* stripes that have plugged requests */
	struct list_head	bitmap_list;	/* stripes delaying awaiting bitmap update */
	struct bio		*retry_read_aligned; /* currently retrying aligned bios */
	unsigned int		retry_read_offset; /* sector offset into retry_read_aligned */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
	atomic_t		preread_active_stripes; /* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes; /* full write backlog */
	int			bypass_count;	/* bypassed prereads */
	int			bypass_threshold; /* preread nice */
	int			skip_copy;	/* don't copy data from bio to stripe cache */
	struct list_head	*last_hold;	/* detect hold_list promotions */

	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	int			active_name;
	char			cache_name[2][48];
	struct kmem_cache	*slab_cache;	/* for allocating stripes */
	struct mutex		cache_size_mutex; /* protect changes to cache size */

	int			seq_flush, seq_write;
	int			quiesce;
	int			fullsync;	/* set to 1 if a full sync is needed
						 * (fresh device added); cleared
						 * when a sync completes */
	int			recovery_disabled;

	struct raid5_percpu __percpu *percpu;
	int			scribble_disks;
	int			scribble_sectors;
	struct hlist_node	node;

	atomic_t		active_stripes;
	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];

	atomic_t		r5c_cached_full_stripes;
	struct list_head	r5c_full_stripe_list;
	atomic_t		r5c_cached_partial_stripes;
	struct list_head	r5c_partial_stripe_list;
	atomic_t		r5c_flushing_full_stripes;
	atomic_t		r5c_flushing_partial_stripes;

	atomic_t		empty_inactive_list_nr;
	struct llist_head	released_stripes;
	wait_queue_head_t	wait_for_quiescent;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_reshape;
	unsigned long		cache_state;
	struct shrinker		*shrinker;
	int			pool_size;	/* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;
	struct bio_set		bio_split;

	struct md_thread __rcu	*thread;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	struct r5worker_group	*worker_groups;
	int			group_cnt;
	int			worker_cnt_per_group;
	struct r5l_log		*log;
	void			*log_private;

	spinlock_t		pending_bios_lock;
	bool			batch_bio_dispatch;
	struct r5pending_data	*pending_data;
	struct list_head	free_list;
	struct list_head	pending_list;
};
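
The stripe cache above is sharded rather than globally locked: stripe_hashtbl holds the hash buckets, each of the NR_STRIPE_HASH_LOCKS spinlocks in hash_locks guards a slice of those buckets, and inactive_list is split the same way. A minimal lookup sketch follows; NR_HASH (assumed a power of two) and find_stripe_sketch() are illustrative names not defined in this header, and the real kernel code differs in detail:

/* Minimal sketch, not the kernel's exact lookup.  The low bits of the
 * bucket index select the guarding lock, so lookups that land in
 * different slices of the table do not contend on one global lock.
 */
static struct stripe_head *find_stripe_sketch(struct r5conf *conf, sector_t sect)
{
	unsigned int bucket = (sect >> conf->stripe_shift) & (NR_HASH - 1);
	spinlock_t *lock = conf->hash_locks + (bucket & (NR_STRIPE_HASH_LOCKS - 1));
	struct stripe_head *sh;

	spin_lock_irq(lock);
	hlist_for_each_entry(sh, &conf->stripe_hashtbl[bucket], hash)
		if (sh->sector == sect) {
			spin_unlock_irq(lock);
			return sh;
		}
	spin_unlock_irq(lock);
	return NULL;
}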
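
released_stripes gives the hot release path a lock-free hand-off: a finished stripe can be pushed onto this llist without taking device_lock, and the array's thread later drains the whole list with a single llist_del_all() before finishing the accounting under device_lock. A sketch of the push side, assuming the hypothetical helper name release_stripe_deferred() and a stripe_head carrying a release_list llist_node as in the kernel's stripe_head:

/* Sketch of the lock-free release side; release_stripe_deferred() is
 * an illustrative name, not the kernel's.  No spinlock is taken here:
 * llist_add() is an atomic push, and the per-array thread picks the
 * queued stripes up later.
 */
static void release_stripe_deferred(struct r5conf *conf, struct stripe_head *sh)
{
	llist_add(&sh->release_list, &conf->released_stripes);
	md_wakeup_thread(conf->mddev->thread);	/* have raid5d drain the list */
}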
static inline struct bio *r5_next_bio(struct r5conf *conf, struct bio *bio,
				      sector_t sector)
{
	/* Advance along a stripe's bi_next chain: return the next bio while
	 * the current one still ends inside the stripe unit that starts at
	 * 'sector', otherwise NULL.
	 */
	if (bio_end_sector(bio) < sector + RAID5_STRIPE_SECTORS(conf))
		return bio->bi_next;
	else
		return NULL;
}
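
Callers use r5_next_bio() to walk the chain of bios, linked through bi_next, that overlap one stripe unit. The walk below is an illustrative pattern modeled on loops in raid5.c; first_bio and handle_one_bio() are hypothetical stand-ins:

/* Illustrative walk, not verbatim kernel code: visit each queued bio
 * that overlaps the stripe unit starting at 'sector'.  handle_one_bio()
 * is a placeholder for the per-bio work.
 */
struct bio *bio;

for (bio = first_bio; bio; bio = r5_next_bio(conf, bio, sector))
	handle_one_bio(bio);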