1 // SPDX-License-Identifier: GPL-2.0-only
3 * fs/fs-writeback.c
14 * Additions for address_space-based writeback
28 #include <linux/backing-dev.h>
37 #define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))
70 static inline struct inode *wb_inode(struct list_head *head) in wb_inode() argument
72 return list_entry(head, struct inode, i_io_list); in wb_inode()
90 set_bit(WB_has_dirty_io, &wb->state); in wb_io_lists_populated()
91 WARN_ON_ONCE(!wb->avg_write_bandwidth); in wb_io_lists_populated()
92 atomic_long_add(wb->avg_write_bandwidth, in wb_io_lists_populated()
93 &wb->bdi->tot_write_bandwidth); in wb_io_lists_populated()
100 if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) && in wb_io_lists_depopulated()
101 list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) { in wb_io_lists_depopulated()
102 clear_bit(WB_has_dirty_io, &wb->state); in wb_io_lists_depopulated()
103 WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth, in wb_io_lists_depopulated()
104 &wb->bdi->tot_write_bandwidth) < 0); in wb_io_lists_depopulated()
109 * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
112 * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
114 * Move @inode->i_io_list to @list of @wb and set %WB_has_dirty_io.
120 struct list_head *head) in inode_io_list_move_locked() argument
122 assert_spin_locked(&wb->list_lock); in inode_io_list_move_locked()
123 assert_spin_locked(&inode->i_lock); in inode_io_list_move_locked()
124 WARN_ON_ONCE(inode->i_state & I_FREEING); in inode_io_list_move_locked()
126 list_move(&inode->i_io_list, head); in inode_io_list_move_locked()
128 /* dirty_time doesn't count as dirty_io until expiration */ in inode_io_list_move_locked()
129 if (head != &wb->b_dirty_time) in inode_io_list_move_locked()
138 spin_lock_irq(&wb->work_lock); in wb_wakeup()
139 if (test_bit(WB_registered, &wb->state)) in wb_wakeup()
140 mod_delayed_work(bdi_wq, &wb->dwork, 0); in wb_wakeup()
141 spin_unlock_irq(&wb->work_lock); in wb_wakeup()
146 * wakes up the corresponding bdi thread, which should then take care of the
147 * periodic background write-out of dirty inodes. Since the write-out would
151 * Note, we wouldn't bother setting up the timer, but this function is on the
152 * fast-path (used by '__mark_inode_dirty()'), so we save a few context switches
153 * by delaying the wake-up.
163 spin_lock_irq(&wb->work_lock); in wb_wakeup_delayed()
164 if (test_bit(WB_registered, &wb->state)) in wb_wakeup_delayed()
165 queue_delayed_work(bdi_wq, &wb->dwork, timeout); in wb_wakeup_delayed()
166 spin_unlock_irq(&wb->work_lock); in wb_wakeup_delayed()
171 struct wb_completion *done = work->done; in finish_writeback_work()
173 if (work->auto_free) in finish_writeback_work()
176 wait_queue_head_t *waitq = done->waitq; in finish_writeback_work()
178 /* @done can't be accessed after the following dec */ in finish_writeback_work()
179 if (atomic_dec_and_test(&done->cnt)) in finish_writeback_work()
189 if (work->done) in wb_queue_work()
190 atomic_inc(&work->done->cnt); in wb_queue_work()
192 spin_lock_irq(&wb->work_lock); in wb_queue_work()
194 if (test_bit(WB_registered, &wb->state)) { in wb_queue_work()
195 list_add_tail(&work->list, &wb->work_list); in wb_queue_work()
196 mod_delayed_work(bdi_wq, &wb->dwork, 0); in wb_queue_work()
200 spin_unlock_irq(&wb->work_lock); in wb_queue_work()
204 * wb_wait_for_completion - wait for completion of bdi_writeback_works
207 * Wait for one or more work items issued to @bdi with their ->done field
210 * are completed. Work items which are waited upon aren't freed
215 atomic_dec(&done->cnt); /* put down the initial count */ in wb_wait_for_completion()
216 wait_event(*done->waitq, !atomic_read(&done->cnt)); in wb_wait_for_completion()
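The completion counting used above (each queued work takes a reference on done->cnt, finish_writeback_work() drops it and wakes the waitqueue, and wb_wait_for_completion() puts down the initial count and sleeps until the count reaches zero) can be illustrated with a small standalone userspace sketch; pthreads and all names below are illustrative stand-ins, not kernel code:

#include <pthread.h>
#include <stdio.h>

struct completion_ctr {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int cnt;			/* starts at 1: the waiter's own count */
};

static void ctr_get(struct completion_ctr *c)
{
	pthread_mutex_lock(&c->lock);
	c->cnt++;
	pthread_mutex_unlock(&c->lock);
}

static void ctr_put(struct completion_ctr *c)
{
	pthread_mutex_lock(&c->lock);
	if (--c->cnt == 0)
		pthread_cond_broadcast(&c->wait);
	pthread_mutex_unlock(&c->lock);
}

static void *worker(void *arg)
{
	/* ... perform one writeback work item ... */
	ctr_put(arg);			/* mirrors finish_writeback_work() */
	return NULL;
}

int main(void)
{
	struct completion_ctr done = { .cnt = 1 };
	pthread_t t[3];

	pthread_mutex_init(&done.lock, NULL);
	pthread_cond_init(&done.wait, NULL);

	for (int i = 0; i < 3; i++) {	/* mirrors wb_queue_work() */
		ctr_get(&done);
		pthread_create(&t[i], NULL, worker, &done);
	}

	ctr_put(&done);			/* put down the initial count */
	pthread_mutex_lock(&done.lock);
	while (done.cnt)		/* mirrors wait_event(..., !cnt) */
		pthread_cond_wait(&done.wait, &done.lock);
	pthread_mutex_unlock(&done.lock);

	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	puts("all work items completed");
	return 0;
}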
231 * of operation which isn't well supported. As such, the goal is not
233 * avoiding too aggressive flip-flops from occasional foreign writes.
245 #define WB_FRN_HIST_SLOTS 16 /* inode->i_wb_frn_history is 16bit */
252 #define WB_FRN_MAX_IN_FLIGHT 1024 /* don't queue too many concurrently */
258 #define WB_MAX_INODES_PER_ISW ((1024UL - sizeof(struct inode_switch_wbs_context)) \
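As a rough illustration of the WB_MAX_INODES_PER_ISW sizing above, the following standalone snippet performs the same arithmetic using a hypothetical stand-in for struct inode_switch_wbs_context (the real layout differs):

#include <stddef.h>
#include <stdio.h>

/* hypothetical stand-in for struct inode_switch_wbs_context */
struct isw_ctx_stub {
	void *work_storage[4];
	void *new_wb;
	void *inodes[];		/* NULL-terminated array of inode pointers */
};

int main(void)
{
	size_t max = (1024UL - sizeof(struct isw_ctx_stub)) / sizeof(void *);

	/* with a 40-byte header and 8-byte pointers this prints 123 */
	printf("inode pointers per 1024-byte switch batch: %zu\n", max);
	return 0;
}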
284 wb = &bdi->wb; in __inode_attach_wb()
290 if (unlikely(cmpxchg(&inode->i_wb, NULL, wb))) in __inode_attach_wb()
295 * inode_cgwb_move_to_attached - put the inode onto wb->b_attached list
305 assert_spin_locked(&wb->list_lock); in inode_cgwb_move_to_attached()
306 assert_spin_locked(&inode->i_lock); in inode_cgwb_move_to_attached()
307 WARN_ON_ONCE(inode->i_state & I_FREEING); in inode_cgwb_move_to_attached()
309 inode->i_state &= ~I_SYNC_QUEUED; in inode_cgwb_move_to_attached()
310 if (wb != &wb->bdi->wb) in inode_cgwb_move_to_attached()
311 list_move(&inode->i_io_list, &wb->b_attached); in inode_cgwb_move_to_attached()
313 list_del_init(&inode->i_io_list); in inode_cgwb_move_to_attached()
318 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
321 * Returns @inode's wb with its list_lock held. @inode->i_lock must be
327 __releases(&inode->i_lock) in locked_inode_to_wb_and_lock_list()
328 __acquires(&wb->list_lock) in locked_inode_to_wb_and_lock_list()
335 * @inode->i_lock and @wb->list_lock but list_lock nests in locked_inode_to_wb_and_lock_list()
337 * association hasn't changed after acquiring list_lock. in locked_inode_to_wb_and_lock_list()
340 spin_unlock(&inode->i_lock); in locked_inode_to_wb_and_lock_list()
341 spin_lock(&wb->list_lock); in locked_inode_to_wb_and_lock_list()
343 /* i_wb may have changed in between, can't use inode_to_wb() */ in locked_inode_to_wb_and_lock_list()
344 if (likely(wb == inode->i_wb)) { in locked_inode_to_wb_and_lock_list()
349 spin_unlock(&wb->list_lock); in locked_inode_to_wb_and_lock_list()
352 spin_lock(&inode->i_lock); in locked_inode_to_wb_and_lock_list()
357 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
360 * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
364 __acquires(&wb->list_lock) in inode_to_wb_and_lock_list()
366 spin_lock(&inode->i_lock); in inode_to_wb_and_lock_list()
377 * the first part, all inode pointers are placed into a NULL-terminated
379 * an inode could be left in an inconsistent state.
387 down_write(&bdi->wb_switch_rwsem); in bdi_down_write_wb_switch_rwsem()
392 up_write(&bdi->wb_switch_rwsem); in bdi_up_write_wb_switch_rwsem()
399 struct address_space *mapping = inode->i_mapping; in inode_do_switch_wbs()
400 XA_STATE(xas, &mapping->i_pages, 0); in inode_do_switch_wbs()
404 spin_lock(&inode->i_lock); in inode_do_switch_wbs()
405 xa_lock_irq(&mapping->i_pages); in inode_do_switch_wbs()
409 * path owns the inode and we shouldn't modify ->i_io_list. in inode_do_switch_wbs()
411 if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE))) in inode_do_switch_wbs()
424 wb_stat_mod(old_wb, WB_RECLAIMABLE, -nr); in inode_do_switch_wbs()
433 wb_stat_mod(old_wb, WB_WRITEBACK, -nr); in inode_do_switch_wbs()
438 atomic_dec(&old_wb->writeback_inodes); in inode_do_switch_wbs()
439 atomic_inc(&new_wb->writeback_inodes); in inode_do_switch_wbs()
447 * ->b_dirty which is always correct including from ->b_dirty_time. in inode_do_switch_wbs()
448 * The transfer preserves @inode->dirtied_when ordering. If the @inode in inode_do_switch_wbs()
452 if (!list_empty(&inode->i_io_list)) { in inode_do_switch_wbs()
453 inode->i_wb = new_wb; in inode_do_switch_wbs()
455 if (inode->i_state & I_DIRTY_ALL) { in inode_do_switch_wbs()
458 list_for_each_entry(pos, &new_wb->b_dirty, i_io_list) in inode_do_switch_wbs()
459 if (time_after_eq(inode->dirtied_when, in inode_do_switch_wbs()
460 pos->dirtied_when)) in inode_do_switch_wbs()
463 pos->i_io_list.prev); in inode_do_switch_wbs()
468 inode->i_wb = new_wb; in inode_do_switch_wbs()
471 /* ->i_wb_frn updates may race wbc_detach_inode() but doesn't matter */ in inode_do_switch_wbs()
472 inode->i_wb_frn_winner = 0; in inode_do_switch_wbs()
473 inode->i_wb_frn_avg_time = 0; in inode_do_switch_wbs()
474 inode->i_wb_frn_history = 0; in inode_do_switch_wbs()
481 smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH); in inode_do_switch_wbs()
483 xa_unlock_irq(&mapping->i_pages); in inode_do_switch_wbs()
484 spin_unlock(&inode->i_lock); in inode_do_switch_wbs()
493 struct backing_dev_info *bdi = inode_to_bdi(isw->inodes[0]); in inode_switch_wbs_work_fn()
494 struct bdi_writeback *old_wb = isw->inodes[0]->i_wb; in inode_switch_wbs_work_fn()
495 struct bdi_writeback *new_wb = isw->new_wb; in inode_switch_wbs_work_fn()
503 down_read(&bdi->wb_switch_rwsem); in inode_switch_wbs_work_fn()
511 * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock in inode_switch_wbs_work_fn()
516 spin_lock(&old_wb->list_lock); in inode_switch_wbs_work_fn()
517 spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING); in inode_switch_wbs_work_fn()
519 spin_lock(&new_wb->list_lock); in inode_switch_wbs_work_fn()
520 spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING); in inode_switch_wbs_work_fn()
523 for (inodep = isw->inodes; *inodep; inodep++) { in inode_switch_wbs_work_fn()
524 WARN_ON_ONCE((*inodep)->i_wb != old_wb); in inode_switch_wbs_work_fn()
529 spin_unlock(&new_wb->list_lock); in inode_switch_wbs_work_fn()
530 spin_unlock(&old_wb->list_lock); in inode_switch_wbs_work_fn()
532 up_read(&bdi->wb_switch_rwsem); in inode_switch_wbs_work_fn()
539 for (inodep = isw->inodes; *inodep; inodep++) in inode_switch_wbs_work_fn()
561 spin_lock(&inode->i_lock); in inode_prepare_wbs_switch()
562 if (!(inode->i_sb->s_flags & SB_ACTIVE) || in inode_prepare_wbs_switch()
563 inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) || in inode_prepare_wbs_switch()
565 spin_unlock(&inode->i_lock); in inode_prepare_wbs_switch()
568 inode->i_state |= I_WB_SWITCH; in inode_prepare_wbs_switch()
570 spin_unlock(&inode->i_lock); in inode_prepare_wbs_switch()
576 * inode_switch_wbs - change the wb association of an inode
590 if (inode->i_state & I_WB_SWITCH) in inode_switch_wbs()
612 isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); in inode_switch_wbs()
614 if (!isw->new_wb) in inode_switch_wbs()
617 if (!inode_prepare_wbs_switch(inode, isw->new_wb)) in inode_switch_wbs()
620 isw->inodes[0] = inode; in inode_switch_wbs()
628 INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn); in inode_switch_wbs()
629 queue_rcu_work(isw_wq, &isw->work); in inode_switch_wbs()
634 if (isw->new_wb) in inode_switch_wbs()
635 wb_put(isw->new_wb); in inode_switch_wbs()
645 if (!inode_prepare_wbs_switch(inode, isw->new_wb)) in isw_prepare_wbs_switch()
648 isw->inodes[*nr] = inode; in isw_prepare_wbs_switch()
651 if (*nr >= WB_MAX_INODES_PER_ISW - 1) in isw_prepare_wbs_switch()
658 * cleanup_offline_cgwb - detach associated inodes
679 for (memcg_css = wb->memcg_css->parent; memcg_css; in cleanup_offline_cgwb()
680 memcg_css = memcg_css->parent) { in cleanup_offline_cgwb()
681 isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL); in cleanup_offline_cgwb()
682 if (isw->new_wb) in cleanup_offline_cgwb()
685 if (unlikely(!isw->new_wb)) in cleanup_offline_cgwb()
686 isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */ in cleanup_offline_cgwb()
689 spin_lock(&wb->list_lock); in cleanup_offline_cgwb()
693 * inodes won't be written back for a long time when lazytime is in cleanup_offline_cgwb()
694 * enabled, and thus pinning the dying cgwbs. It won't break the in cleanup_offline_cgwb()
698 restart = isw_prepare_wbs_switch(isw, &wb->b_attached, &nr); in cleanup_offline_cgwb()
700 restart = isw_prepare_wbs_switch(isw, &wb->b_dirty_time, &nr); in cleanup_offline_cgwb()
701 spin_unlock(&wb->list_lock); in cleanup_offline_cgwb()
706 wb_put(isw->new_wb); in cleanup_offline_cgwb()
717 INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn); in cleanup_offline_cgwb()
718 queue_rcu_work(isw_wq, &isw->work); in cleanup_offline_cgwb()
724 * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
735 __releases(&inode->i_lock) in wbc_attach_and_unlock_inode()
738 spin_unlock(&inode->i_lock); in wbc_attach_and_unlock_inode()
742 wbc->wb = inode_to_wb(inode); in wbc_attach_and_unlock_inode()
743 wbc->inode = inode; in wbc_attach_and_unlock_inode()
745 wbc->wb_id = wbc->wb->memcg_css->id; in wbc_attach_and_unlock_inode()
746 wbc->wb_lcand_id = inode->i_wb_frn_winner; in wbc_attach_and_unlock_inode()
747 wbc->wb_tcand_id = 0; in wbc_attach_and_unlock_inode()
748 wbc->wb_bytes = 0; in wbc_attach_and_unlock_inode()
749 wbc->wb_lcand_bytes = 0; in wbc_attach_and_unlock_inode()
750 wbc->wb_tcand_bytes = 0; in wbc_attach_and_unlock_inode()
752 wb_get(wbc->wb); in wbc_attach_and_unlock_inode()
753 spin_unlock(&inode->i_lock); in wbc_attach_and_unlock_inode()
762 if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css))) in wbc_attach_and_unlock_inode()
763 inode_switch_wbs(inode, wbc->wb_id); in wbc_attach_and_unlock_inode()
767 * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
778 spin_lock(&inode->i_lock); in wbc_attach_fdatawrite_inode()
785 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
792 * memcg only tracks page ownership on first-use basis severely confining
794 * per-inode. While the support for concurrent write sharing of an inode
797 * charging only by first-use can too readily lead to grossly incorrect
808 * using Boyer-Moore majority vote algorithm. In addition to the byte
813 * to semi-reliably detect the most active writer even when it's not the
818 * inode->i_wb_frn_history. If the amount of recorded foreign IO time is
823 struct bdi_writeback *wb = wbc->wb; in wbc_detach_inode()
824 struct inode *inode = wbc->inode; in wbc_detach_inode()
832 history = inode->i_wb_frn_history; in wbc_detach_inode()
833 avg_time = inode->i_wb_frn_avg_time; in wbc_detach_inode()
836 if (wbc->wb_bytes >= wbc->wb_lcand_bytes && in wbc_detach_inode()
837 wbc->wb_bytes >= wbc->wb_tcand_bytes) { in wbc_detach_inode()
838 max_id = wbc->wb_id; in wbc_detach_inode()
839 max_bytes = wbc->wb_bytes; in wbc_detach_inode()
840 } else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) { in wbc_detach_inode()
841 max_id = wbc->wb_lcand_id; in wbc_detach_inode()
842 max_bytes = wbc->wb_lcand_bytes; in wbc_detach_inode()
844 max_id = wbc->wb_tcand_id; in wbc_detach_inode()
845 max_bytes = wbc->wb_tcand_bytes; in wbc_detach_inode()
852 * deciding whether to switch or not. This is to prevent one-off in wbc_detach_inode()
856 wb->avg_write_bandwidth); in wbc_detach_inode()
858 avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) - in wbc_detach_inode()
877 if (wbc->wb_id != max_id) in wbc_detach_inode()
878 history |= (1U << slots) - 1; in wbc_detach_inode()
884 * Switch if the current wb isn't the consistent winner. in wbc_detach_inode()
896 * following fields but we don't mind occasional inaccuracies. in wbc_detach_inode()
898 inode->i_wb_frn_winner = max_id; in wbc_detach_inode()
899 inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX); in wbc_detach_inode()
900 inode->i_wb_frn_history = history; in wbc_detach_inode()
902 wb_put(wbc->wb); in wbc_detach_inode()
903 wbc->wb = NULL; in wbc_detach_inode()
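The history bookkeeping above (shift inode->i_wb_frn_history left by the number of elapsed time slots, fill the new slots with 1s whenever a foreign cgroup won the round, and switch once enough recent slots are foreign) can be sketched in standalone C; the slot width and threshold below are illustrative, not the kernel's exact WB_FRN_* constants:

#include <stdint.h>
#include <stdio.h>

#define HIST_SLOTS	16			/* width of the history bitmap */
#define HIST_THR_SLOTS	(HIST_SLOTS / 2)	/* illustrative threshold */

static int update_history(uint16_t *history, unsigned int slots, int foreign_won)
{
	if (slots > HIST_SLOTS)
		slots = HIST_SLOTS;

	*history <<= slots;
	if (foreign_won)			/* current wb lost this round */
		*history |= (uint16_t)((1U << slots) - 1);

	/* switch once foreign writers dominate the recent history */
	return __builtin_popcount(*history) > HIST_THR_SLOTS;
}

int main(void)
{
	uint16_t history = 0;

	for (int round = 0; round < 6; round++) {
		int do_switch = update_history(&history, 2, round >= 1);

		printf("round %d: history=%#06x switch=%d\n",
		       round, (unsigned int)history, do_switch);
	}
	return 0;
}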
908 * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
924 * pageout() path doesn't attach @wbc to the inode being written in wbc_account_cgroup_owner()
925 * out. This is intentional as we don't want the function to block in wbc_account_cgroup_owner()
929 if (!wbc->wb || wbc->no_cgroup_owner) in wbc_account_cgroup_owner()
933 /* dead cgroups shouldn't contribute to inode ownership arbitration */ in wbc_account_cgroup_owner()
934 if (!(css->flags & CSS_ONLINE)) in wbc_account_cgroup_owner()
937 id = css->id; in wbc_account_cgroup_owner()
939 if (id == wbc->wb_id) { in wbc_account_cgroup_owner()
940 wbc->wb_bytes += bytes; in wbc_account_cgroup_owner()
944 if (id == wbc->wb_lcand_id) in wbc_account_cgroup_owner()
945 wbc->wb_lcand_bytes += bytes; in wbc_account_cgroup_owner()
947 /* Boyer-Moore majority vote algorithm */ in wbc_account_cgroup_owner()
948 if (!wbc->wb_tcand_bytes) in wbc_account_cgroup_owner()
949 wbc->wb_tcand_id = id; in wbc_account_cgroup_owner()
950 if (id == wbc->wb_tcand_id) in wbc_account_cgroup_owner()
951 wbc->wb_tcand_bytes += bytes; in wbc_account_cgroup_owner()
953 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); in wbc_account_cgroup_owner()
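The candidate/counter update above is the classic Boyer-Moore majority vote, weighted by bytes written. A minimal standalone sketch of the same idea over an array of (owner id, bytes) samples, with made-up data:

#include <stddef.h>
#include <stdio.h>

static int majority_candidate(const int *ids, const size_t *bytes, size_t n)
{
	int cand = 0;
	size_t cand_bytes = 0;

	for (size_t i = 0; i < n; i++) {
		if (!cand_bytes)		/* counter drained: new candidate */
			cand = ids[i];
		if (ids[i] == cand)
			cand_bytes += bytes[i];
		else
			cand_bytes -= (bytes[i] < cand_bytes) ? bytes[i] : cand_bytes;
	}
	return cand;	/* the majority owner, if one wrote >50% of the bytes */
}

int main(void)
{
	int ids[]      = { 7, 3, 7, 7, 5, 7 };
	size_t bytes[] = { 4096, 4096, 8192, 4096, 4096, 4096 };

	printf("candidate: %d\n", majority_candidate(ids, bytes, 6));	/* 7 */
	return 0;
}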
958 * wb_split_bdi_pages - split nr_pages to write according to bandwidth
964 * @wb->bdi.
968 unsigned long this_bw = wb->avg_write_bandwidth; in wb_split_bdi_pages()
969 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); in wb_split_bdi_pages()
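A standalone sketch of the proportional split computed above, with made-up bandwidth numbers and a hypothetical split_pages() helper standing in for wb_split_bdi_pages():

#include <limits.h>
#include <stdio.h>

static long split_pages(long nr_pages, unsigned long this_bw,
			unsigned long tot_bw)
{
	if (nr_pages == LONG_MAX)	/* "write everything" request */
		return LONG_MAX;
	if (!tot_bw || this_bw >= tot_bw)
		return nr_pages;	/* err on the side of writing more */
	/* round up so slow wbs still make some progress */
	return (long)(((unsigned long long)nr_pages * this_bw + tot_bw - 1) /
		      tot_bw);
}

int main(void)
{
	/* hypothetical: this wb averages 30 MB/s of a 120 MB/s bdi total */
	printf("%ld of 1024 pages\n", split_pages(1024, 30, 120));	/* 256 */
	return 0;
}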
986 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
992 * have dirty inodes. If @base_work->nr_page isn't %LONG_MAX, it's
1001 struct bdi_writeback *wb = list_entry(&bdi->wb_list, in bdi_split_work_to_wbs()
1007 list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) { in bdi_split_work_to_wbs()
1020 (base_work->sync_mode == WB_SYNC_NONE || in bdi_split_work_to_wbs()
1021 list_empty(&wb->b_dirty_time))) in bdi_split_work_to_wbs()
1026 nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages); in bdi_split_work_to_wbs()
1031 work->nr_pages = nr_pages; in bdi_split_work_to_wbs()
1032 work->auto_free = 1; in bdi_split_work_to_wbs()
1040 * Pin @wb so that it stays on @bdi->wb_list. This allows in bdi_split_work_to_wbs()
1047 /* alloc failed, execute synchronously using on-stack fallback */ in bdi_split_work_to_wbs()
1050 work->nr_pages = nr_pages; in bdi_split_work_to_wbs()
1051 work->auto_free = 0; in bdi_split_work_to_wbs()
1052 work->done = &fallback_work_done; in bdi_split_work_to_wbs()
1068 * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
1090 return -ENOENT; in cgroup_writeback_by_id()
1098 ret = -ENOENT; in cgroup_writeback_by_id()
1103 * And find the associated wb. If the wb isn't there already, in cgroup_writeback_by_id()
1104 * there's nothing to flush; don't create one. in cgroup_writeback_by_id()
1108 ret = -ENOENT; in cgroup_writeback_by_id()
1119 * BTW the memcg stats are flushed periodically and this is best-effort in cgroup_writeback_by_id()
1128 work->nr_pages = dirty; in cgroup_writeback_by_id()
1129 work->sync_mode = WB_SYNC_NONE; in cgroup_writeback_by_id()
1130 work->range_cyclic = 1; in cgroup_writeback_by_id()
1131 work->reason = reason; in cgroup_writeback_by_id()
1132 work->done = done; in cgroup_writeback_by_id()
1133 work->auto_free = 1; in cgroup_writeback_by_id()
1137 ret = -ENOMEM; in cgroup_writeback_by_id()
1149 * cgroup_writeback_umount - flush inode wb switches for umount
1153 * flushes in-flight inode wb switches. An inode wb switch goes through
1162 if (!(sb->s_bdi->capabilities & BDI_CAP_WRITEBACK)) in cgroup_writeback_umount()
1174 * ensure that all in-flight wb switches are in the workqueue. in cgroup_writeback_umount()
1185 return -ENOMEM; in cgroup_writeback_init()
1198 assert_spin_locked(&wb->list_lock); in inode_cgwb_move_to_attached()
1199 assert_spin_locked(&inode->i_lock); in inode_cgwb_move_to_attached()
1200 WARN_ON_ONCE(inode->i_state & I_FREEING); in inode_cgwb_move_to_attached()
1202 inode->i_state &= ~I_SYNC_QUEUED; in inode_cgwb_move_to_attached()
1203 list_del_init(&inode->i_io_list); in inode_cgwb_move_to_attached()
1209 __releases(&inode->i_lock) in locked_inode_to_wb_and_lock_list()
1210 __acquires(&wb->list_lock) in locked_inode_to_wb_and_lock_list()
1214 spin_unlock(&inode->i_lock); in locked_inode_to_wb_and_lock_list()
1215 spin_lock(&wb->list_lock); in locked_inode_to_wb_and_lock_list()
1220 __acquires(&wb->list_lock) in inode_to_wb_and_lock_list()
1224 spin_lock(&wb->list_lock); in inode_to_wb_and_lock_list()
1239 if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) { in bdi_split_work_to_wbs()
1240 base_work->auto_free = 0; in bdi_split_work_to_wbs()
1241 wb_queue_work(&bdi->wb, base_work); in bdi_split_work_to_wbs()
1247 __releases(&inode->i_lock) in wbc_attach_and_unlock_inode()
1249 spin_unlock(&inode->i_lock); in wbc_attach_and_unlock_inode()
1277 if (test_bit(WB_start_all, &wb->state) || in wb_start_writeback()
1278 test_and_set_bit(WB_start_all, &wb->state)) in wb_start_writeback()
1281 wb->start_all_reason = reason; in wb_start_writeback()
1286 * wb_start_background_writeback - start background writeback
1313 spin_lock(&inode->i_lock); in inode_io_list_del()
1315 inode->i_state &= ~I_SYNC_QUEUED; in inode_io_list_del()
1316 list_del_init(&inode->i_io_list); in inode_io_list_del()
1319 spin_unlock(&inode->i_lock); in inode_io_list_del()
1320 spin_unlock(&wb->list_lock); in inode_io_list_del()
1329 struct super_block *sb = inode->i_sb; in sb_mark_inode_writeback()
1332 if (list_empty(&inode->i_wb_list)) { in sb_mark_inode_writeback()
1333 spin_lock_irqsave(&sb->s_inode_wblist_lock, flags); in sb_mark_inode_writeback()
1334 if (list_empty(&inode->i_wb_list)) { in sb_mark_inode_writeback()
1335 list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb); in sb_mark_inode_writeback()
1338 spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags); in sb_mark_inode_writeback()
1347 struct super_block *sb = inode->i_sb; in sb_clear_inode_writeback()
1350 if (!list_empty(&inode->i_wb_list)) { in sb_clear_inode_writeback()
1351 spin_lock_irqsave(&sb->s_inode_wblist_lock, flags); in sb_clear_inode_writeback()
1352 if (!list_empty(&inode->i_wb_list)) { in sb_clear_inode_writeback()
1353 list_del_init(&inode->i_wb_list); in sb_clear_inode_writeback()
1356 spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags); in sb_clear_inode_writeback()
1361 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
1362 * furthest end of its superblock's dirty-inode list.
1364 * Before stamping the inode's ->dirtied_when, we check to see whether it is
1365 * already the most-recently-dirtied inode on the b_dirty list. If that is
1367 * out and we don't reset its dirtied_when.
1371 assert_spin_locked(&inode->i_lock); in redirty_tail_locked()
1373 inode->i_state &= ~I_SYNC_QUEUED; in redirty_tail_locked()
1375 * When the inode is being freed just don't bother with dirty list in redirty_tail_locked()
1379 if (inode->i_state & I_FREEING) { in redirty_tail_locked()
1380 list_del_init(&inode->i_io_list); in redirty_tail_locked()
1384 if (!list_empty(&wb->b_dirty)) { in redirty_tail_locked()
1387 tail = wb_inode(wb->b_dirty.next); in redirty_tail_locked()
1388 if (time_before(inode->dirtied_when, tail->dirtied_when)) in redirty_tail_locked()
1389 inode->dirtied_when = jiffies; in redirty_tail_locked()
1391 inode_io_list_move_locked(inode, wb, &wb->b_dirty); in redirty_tail_locked()
1396 spin_lock(&inode->i_lock); in redirty_tail()
1398 spin_unlock(&inode->i_lock); in redirty_tail()
1402 * requeue inode for re-scanning after bdi->b_io list is exhausted.
1406 inode_io_list_move_locked(inode, wb, &wb->b_more_io); in requeue_io()
1411 assert_spin_locked(&inode->i_lock); in inode_sync_complete()
1413 inode->i_state &= ~I_SYNC; in inode_sync_complete()
1416 /* Called with inode->i_lock which ensures memory ordering. */ in inode_sync_complete()
1420 static bool inode_dirtied_after(struct inode *inode, unsigned long t) in inode_dirtied_after() argument
1422 bool ret = time_after(inode->dirtied_when, t); in inode_dirtied_after()
1427 * This test is necessary to prevent such wrapped-around relative times in inode_dirtied_after()
1430 ret = ret && time_before_eq(inode->dirtied_when, jiffies); in inode_dirtied_after()
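The wraparound-safe comparison this check relies on can be sketched in plain C: time_after()/time_before_eq() below restate the usual signed-subtraction jiffies comparison, and the values in main() are illustrative:

#include <stdio.h>

typedef unsigned long jiffies_t;

static int time_after(jiffies_t a, jiffies_t b)		/* a strictly later than b */
{
	return (long)(b - a) < 0;
}

static int time_before_eq(jiffies_t a, jiffies_t b)	/* a not later than b */
{
	return (long)(a - b) <= 0;
}

static int dirtied_after(jiffies_t dirtied_when, jiffies_t t, jiffies_t now)
{
	/* treat a "future" timestamp (possible after wraparound) as expired */
	return time_after(dirtied_when, t) && time_before_eq(dirtied_when, now);
}

int main(void)
{
	jiffies_t now = 5;			/* jiffies recently wrapped past 0 */

	printf("%d\n", dirtied_after(3, ~0UL - 10, now));	/* 1: after the wrap */
	printf("%d\n", dirtied_after(1000, ~0UL - 10, now));	/* 0: looks future, expire */
	return 0;
}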
1451 inode = wb_inode(delaying_queue->prev); in move_expired_inodes()
1454 spin_lock(&inode->i_lock); in move_expired_inodes()
1455 list_move(&inode->i_io_list, &tmp); in move_expired_inodes()
1457 inode->i_state |= I_SYNC_QUEUED; in move_expired_inodes()
1458 spin_unlock(&inode->i_lock); in move_expired_inodes()
1459 if (sb_is_blkdev_sb(inode->i_sb)) in move_expired_inodes()
1461 if (sb && sb != inode->i_sb) in move_expired_inodes()
1463 sb = inode->i_sb; in move_expired_inodes()
1474 * we don't take inode->i_lock here because it is just pointless overhead. in move_expired_inodes()
1479 sb = wb_inode(tmp.prev)->i_sb; in move_expired_inodes()
1482 if (inode->i_sb == sb) in move_expired_inodes()
1483 list_move(&inode->i_io_list, dispatch_queue); in move_expired_inodes()
1499 * +--> dequeue for IO
1507 assert_spin_locked(&wb->list_lock); in queue_io()
1508 list_splice_init(&wb->b_more_io, &wb->b_io); in queue_io()
1509 moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before); in queue_io()
1510 if (!work->for_sync) in queue_io()
1511 time_expire_jif = jiffies - dirtytime_expire_interval * HZ; in queue_io()
1512 moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, in queue_io()
1523 if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) { in write_inode()
1525 ret = inode->i_sb->s_op->write_inode(inode, wbc); in write_inode()
1541 assert_spin_locked(&inode->i_lock); in inode_wait_for_writeback()
1543 if (!(inode->i_state & I_SYNC)) in inode_wait_for_writeback()
1549 /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */ in inode_wait_for_writeback()
1550 if (!(inode->i_state & I_SYNC)) in inode_wait_for_writeback()
1552 spin_unlock(&inode->i_lock); in inode_wait_for_writeback()
1554 spin_lock(&inode->i_lock); in inode_wait_for_writeback()
1565 __releases(inode->i_lock) in inode_sleep_on_writeback()
1571 assert_spin_locked(&inode->i_lock); in inode_sleep_on_writeback()
1575 /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */ in inode_sleep_on_writeback()
1576 sleep = !!(inode->i_state & I_SYNC); in inode_sleep_on_writeback()
1577 spin_unlock(&inode->i_lock); in inode_sleep_on_writeback()
1587 * inodes. This function can be called only by the flusher thread - no one else in requeue_inode()
1595 if (inode->i_state & I_FREEING) in requeue_inode()
1603 if ((inode->i_state & I_DIRTY) && in requeue_inode()
1604 (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)) in requeue_inode()
1605 inode->dirtied_when = jiffies; in requeue_inode()
1607 if (wbc->pages_skipped) { in requeue_inode()
1614 if (inode->i_state & I_DIRTY_ALL) in requeue_inode()
1621 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) { in requeue_inode()
1623 * We didn't write back all the pages. nfs_writepages() in requeue_inode()
1626 if (wbc->nr_to_write <= 0 && in requeue_inode()
1640 } else if (inode->i_state & I_DIRTY) { in requeue_inode()
1647 } else if (inode->i_state & I_DIRTY_TIME) { in requeue_inode()
1648 inode->dirtied_when = jiffies; in requeue_inode()
1649 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); in requeue_inode()
1650 inode->i_state &= ~I_SYNC_QUEUED; in requeue_inode()
1659 * on @wbc->nr_to_write), and clear the relevant dirty flags from i_state.
1661 * This doesn't remove the inode from the writeback list it is on, except
1671 struct address_space *mapping = inode->i_mapping; in __writeback_single_inode()
1672 long nr_to_write = wbc->nr_to_write; in __writeback_single_inode()
1676 WARN_ON(!(inode->i_state & I_SYNC)); in __writeback_single_inode()
1685 * I/O completion. We don't do it for sync(2) writeback because it has a in __writeback_single_inode()
1686 * separate, external IO completion path and ->sync_fs for guaranteeing in __writeback_single_inode()
1689 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) { in __writeback_single_inode()
1700 if ((inode->i_state & I_DIRTY_TIME) && in __writeback_single_inode()
1701 (wbc->sync_mode == WB_SYNC_ALL || in __writeback_single_inode()
1702 time_after(jiffies, inode->dirtied_time_when + in __writeback_single_inode()
1714 spin_lock(&inode->i_lock); in __writeback_single_inode()
1715 dirty = inode->i_state & I_DIRTY; in __writeback_single_inode()
1716 inode->i_state &= ~dirty; in __writeback_single_inode()
1720 * __mark_inode_dirty() to test i_state without grabbing i_lock - in __writeback_single_inode()
1732 inode->i_state |= I_DIRTY_PAGES; in __writeback_single_inode()
1733 else if (unlikely(inode->i_state & I_PINNING_NETFS_WB)) { in __writeback_single_inode()
1734 if (!(inode->i_state & I_DIRTY_PAGES)) { in __writeback_single_inode()
1735 inode->i_state &= ~I_PINNING_NETFS_WB; in __writeback_single_inode()
1736 wbc->unpinned_netfs_wb = true; in __writeback_single_inode()
1741 spin_unlock(&inode->i_lock); in __writeback_single_inode()
1743 /* Don't write the inode if only I_DIRTY_PAGES was set */ in __writeback_single_inode()
1749 wbc->unpinned_netfs_wb = false; in __writeback_single_inode()
1755 * Write out an inode's dirty data and metadata on-demand, i.e. separately from
1758 * whether it is a data-integrity sync (%WB_SYNC_ALL) or not (%WB_SYNC_NONE).
1769 spin_lock(&inode->i_lock); in writeback_single_inode()
1770 if (!atomic_read(&inode->i_count)) in writeback_single_inode()
1771 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); in writeback_single_inode()
1773 WARN_ON(inode->i_state & I_WILL_FREE); in writeback_single_inode()
1775 if (inode->i_state & I_SYNC) { in writeback_single_inode()
1782 if (wbc->sync_mode != WB_SYNC_ALL) in writeback_single_inode()
1786 WARN_ON(inode->i_state & I_SYNC); in writeback_single_inode()
1790 * For data-integrity syncs we also need to check whether any pages are in writeback_single_inode()
1794 if (!(inode->i_state & I_DIRTY_ALL) && in writeback_single_inode()
1795 (wbc->sync_mode != WB_SYNC_ALL || in writeback_single_inode()
1796 !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) in writeback_single_inode()
1798 inode->i_state |= I_SYNC; in writeback_single_inode()
1806 spin_lock(&inode->i_lock); in writeback_single_inode()
1808 * If the inode is freeing, its i_io_list shouldn't be updated in writeback_single_inode()
1811 if (!(inode->i_state & I_FREEING)) { in writeback_single_inode()
1817 if (!(inode->i_state & I_DIRTY_ALL)) in writeback_single_inode()
1819 else if (!(inode->i_state & I_SYNC_QUEUED)) { in writeback_single_inode()
1820 if ((inode->i_state & I_DIRTY)) in writeback_single_inode()
1822 else if (inode->i_state & I_DIRTY_TIME) { in writeback_single_inode()
1823 inode->dirtied_when = jiffies; in writeback_single_inode()
1826 &wb->b_dirty_time); in writeback_single_inode()
1831 spin_unlock(&wb->list_lock); in writeback_single_inode()
1834 spin_unlock(&inode->i_lock); in writeback_single_inode()
1856 if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages) in writeback_chunk_size()
1859 pages = min(wb->avg_write_bandwidth / 2, in writeback_chunk_size()
1861 pages = min(pages, work->nr_pages); in writeback_chunk_size()
1874 * NOTE! This is called with wb->list_lock held, and will
1883 .sync_mode = work->sync_mode, in writeback_sb_inodes()
1884 .tagged_writepages = work->tagged_writepages, in writeback_sb_inodes()
1885 .for_kupdate = work->for_kupdate, in writeback_sb_inodes()
1886 .for_background = work->for_background, in writeback_sb_inodes()
1887 .for_sync = work->for_sync, in writeback_sb_inodes()
1888 .range_cyclic = work->range_cyclic, in writeback_sb_inodes()
1897 if (work->for_kupdate) in writeback_sb_inodes()
1898 dirtied_before = jiffies - in writeback_sb_inodes()
1901 while (!list_empty(&wb->b_io)) { in writeback_sb_inodes()
1902 struct inode *inode = wb_inode(wb->b_io.prev); in writeback_sb_inodes()
1906 if (inode->i_sb != sb) { in writeback_sb_inodes()
1907 if (work->sb) { in writeback_sb_inodes()
1926 * Don't bother with new inodes or inodes being freed, first in writeback_sb_inodes()
1930 spin_lock(&inode->i_lock); in writeback_sb_inodes()
1931 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { in writeback_sb_inodes()
1933 spin_unlock(&inode->i_lock); in writeback_sb_inodes()
1936 if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { in writeback_sb_inodes()
1939 * doing writeback-for-data-integrity, move it to in writeback_sb_inodes()
1947 spin_unlock(&inode->i_lock); in writeback_sb_inodes()
1951 spin_unlock(&wb->list_lock); in writeback_sb_inodes()
1958 if (inode->i_state & I_SYNC) { in writeback_sb_inodes()
1962 spin_lock(&wb->list_lock); in writeback_sb_inodes()
1965 inode->i_state |= I_SYNC; in writeback_sb_inodes()
1979 work->nr_pages -= write_chunk - wbc.nr_to_write; in writeback_sb_inodes()
1980 wrote = write_chunk - wbc.nr_to_write - wbc.pages_skipped; in writeback_sb_inodes()
1989 * in balance_dirty_pages(). cond_resched() doesn't in writeback_sb_inodes()
1993 blk_flush_plug(current->plug, false); in writeback_sb_inodes()
2002 spin_lock(&inode->i_lock); in writeback_sb_inodes()
2003 if (!(inode->i_state & I_DIRTY_ALL)) in writeback_sb_inodes()
2007 spin_unlock(&inode->i_lock); in writeback_sb_inodes()
2010 spin_unlock(&tmp_wb->list_lock); in writeback_sb_inodes()
2011 spin_lock(&wb->list_lock); in writeback_sb_inodes()
2021 if (work->nr_pages <= 0) in writeback_sb_inodes()
2034 while (!list_empty(&wb->b_io)) { in __writeback_inodes_wb()
2035 struct inode *inode = wb_inode(wb->b_io.prev); in __writeback_inodes_wb()
2036 struct super_block *sb = inode->i_sb; in __writeback_inodes_wb()
2041 * s_umount being grabbed by someone else. Don't use in __writeback_inodes_wb()
2048 up_read(&sb->s_umount); in __writeback_inodes_wb()
2054 if (work->nr_pages <= 0) in __writeback_inodes_wb()
2074 spin_lock(&wb->list_lock); in writeback_inodes_wb()
2075 if (list_empty(&wb->b_io)) in writeback_inodes_wb()
2078 spin_unlock(&wb->list_lock); in writeback_inodes_wb()
2081 return nr_pages - work.nr_pages; in writeback_inodes_wb()
2088 * dirtying-time in the inode's address_space. So this periodic writeback code
2094 * one-second gap.
2102 long nr_pages = work->nr_pages; in wb_writeback()
2114 if (work->nr_pages <= 0) in wb_writeback()
2118 * Background writeout and kupdate-style writeback may in wb_writeback()
2123 if ((work->for_background || work->for_kupdate) && in wb_writeback()
2124 !list_empty(&wb->work_list)) in wb_writeback()
2131 if (work->for_background && !wb_over_bg_thresh(wb)) in wb_writeback()
2135 spin_lock(&wb->list_lock); in wb_writeback()
2138 if (list_empty(&wb->b_io)) { in wb_writeback()
2145 if (work->for_kupdate) { in wb_writeback()
2146 dirtied_before = jiffies - in wb_writeback()
2149 } else if (work->for_background) in wb_writeback()
2155 if (work->sb) in wb_writeback()
2156 progress = writeback_sb_inodes(work->sb, wb, work); in wb_writeback()
2170 spin_unlock(&wb->list_lock); in wb_writeback()
2177 if (list_empty(&wb->b_more_io)) { in wb_writeback()
2178 spin_unlock(&wb->list_lock); in wb_writeback()
2188 inode = wb_inode(wb->b_more_io.prev); in wb_writeback()
2189 spin_lock(&inode->i_lock); in wb_writeback()
2190 spin_unlock(&wb->list_lock); in wb_writeback()
2196 return nr_pages - work->nr_pages; in wb_writeback()
2200 * Return the next wb_writeback_work struct that hasn't been processed yet.
2206 spin_lock_irq(&wb->work_lock); in get_next_work_item()
2207 if (!list_empty(&wb->work_list)) { in get_next_work_item()
2208 work = list_entry(wb->work_list.next, in get_next_work_item()
2210 list_del_init(&work->list); in get_next_work_item()
2212 spin_unlock_irq(&wb->work_lock); in get_next_work_item()
2245 expired = wb->last_old_flush + in wb_check_old_data_flush()
2250 wb->last_old_flush = jiffies; in wb_check_old_data_flush()
2272 if (!test_bit(WB_start_all, &wb->state)) in wb_check_start_all()
2281 .reason = wb->start_all_reason, in wb_check_start_all()
2287 clear_bit(WB_start_all, &wb->state); in wb_check_start_all()
2300 set_bit(WB_writeback_running, &wb->state); in wb_do_writeback()
2308 * Check for a flush-everything request in wb_do_writeback()
2317 clear_bit(WB_writeback_running, &wb->state); in wb_do_writeback()
2332 set_worker_desc("flush-%s", bdi_dev_name(wb->bdi)); in wb_workfn()
2335 !test_bit(WB_registered, &wb->state))) { in wb_workfn()
2345 } while (!list_empty(&wb->work_list)); in wb_workfn()
2348 * bdi_wq can't get enough workers and we're running off in wb_workfn()
2349 * the emergency worker. Don't hog it. Hopefully, 1024 is in wb_workfn()
2357 if (!list_empty(&wb->work_list)) in wb_workfn()
2374 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) in __wakeup_flusher_threads_bdi()
2396 blk_flush_plug(current->plug, true); in wakeup_flusher_threads()
2414 * this function won't be necessary. But if the only thing that has
2430 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) in wakeup_dirtytime_writeback()
2431 if (!list_empty(&wb->b_dirty_time)) in wakeup_dirtytime_writeback()
2457 * __mark_inode_dirty - internal function to mark an inode dirty
2461 * multiple I_DIRTY_* flags, except that I_DIRTY_TIME can't be combined
2476 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
2477 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
2478 * the kernel-internal blockdev inode represents the dirtying time of the
2480 * page->mapping->host, so the page-dirtying time is recorded in the internal
2485 struct super_block *sb = inode->i_sb; in __mark_inode_dirty()
2494 * We tell ->dirty_inode callback that timestamps need to in __mark_inode_dirty()
2497 if (inode->i_state & I_DIRTY_TIME) { in __mark_inode_dirty()
2498 spin_lock(&inode->i_lock); in __mark_inode_dirty()
2499 if (inode->i_state & I_DIRTY_TIME) { in __mark_inode_dirty()
2500 inode->i_state &= ~I_DIRTY_TIME; in __mark_inode_dirty()
2503 spin_unlock(&inode->i_lock); in __mark_inode_dirty()
2508 * (if needed) it can update on-disk fields and journal the in __mark_inode_dirty()
2514 if (sb->s_op->dirty_inode) in __mark_inode_dirty()
2515 sb->s_op->dirty_inode(inode, in __mark_inode_dirty()
2524 * (We don't support setting both I_DIRTY_PAGES and I_DIRTY_TIME in __mark_inode_dirty()
2537 if ((inode->i_state & flags) == flags) in __mark_inode_dirty()
2540 spin_lock(&inode->i_lock); in __mark_inode_dirty()
2541 if ((inode->i_state & flags) != flags) { in __mark_inode_dirty()
2542 const int was_dirty = inode->i_state & I_DIRTY; in __mark_inode_dirty()
2546 inode->i_state |= flags; in __mark_inode_dirty()
2551 * list handling so that we don't move inodes under flush worker's in __mark_inode_dirty()
2556 spin_lock(&inode->i_lock); in __mark_inode_dirty()
2565 if (inode->i_state & I_SYNC_QUEUED) in __mark_inode_dirty()
2572 if (!S_ISBLK(inode->i_mode)) { in __mark_inode_dirty()
2576 if (inode->i_state & I_FREEING) in __mark_inode_dirty()
2580 * If the inode was already on b_dirty/b_io/b_more_io, don't in __mark_inode_dirty()
2581 * reposition it (that would break b_dirty time-ordering). in __mark_inode_dirty()
2587 inode->dirtied_when = jiffies; in __mark_inode_dirty()
2589 inode->dirtied_time_when = jiffies; in __mark_inode_dirty()
2591 if (inode->i_state & I_DIRTY) in __mark_inode_dirty()
2592 dirty_list = &wb->b_dirty; in __mark_inode_dirty()
2594 dirty_list = &wb->b_dirty_time; in __mark_inode_dirty()
2599 spin_unlock(&wb->list_lock); in __mark_inode_dirty()
2600 spin_unlock(&inode->i_lock); in __mark_inode_dirty()
2605 * we have to wake up the corresponding bdi thread in __mark_inode_dirty()
2606 * to make sure background write-back happens in __mark_inode_dirty()
2610 (wb->bdi->capabilities & BDI_CAP_WRITEBACK)) in __mark_inode_dirty()
2617 spin_unlock(&wb->list_lock); in __mark_inode_dirty()
2618 spin_unlock(&inode->i_lock); in __mark_inode_dirty()
2639 WARN_ON(!rwsem_is_locked(&sb->s_umount)); in wait_sb_inodes()
2641 mutex_lock(&sb->s_sync_lock); in wait_sb_inodes()
2648 * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as in wait_sb_inodes()
2653 spin_lock_irq(&sb->s_inode_wblist_lock); in wait_sb_inodes()
2654 list_splice_init(&sb->s_inodes_wb, &sync_list); in wait_sb_inodes()
2666 struct address_space *mapping = inode->i_mapping; in wait_sb_inodes()
2674 list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb); in wait_sb_inodes()
2677 * The mapping can appear untagged while still on-list since we in wait_sb_inodes()
2684 spin_unlock_irq(&sb->s_inode_wblist_lock); in wait_sb_inodes()
2686 spin_lock(&inode->i_lock); in wait_sb_inodes()
2687 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { in wait_sb_inodes()
2688 spin_unlock(&inode->i_lock); in wait_sb_inodes()
2690 spin_lock_irq(&sb->s_inode_wblist_lock); in wait_sb_inodes()
2694 spin_unlock(&inode->i_lock); in wait_sb_inodes()
2709 spin_lock_irq(&sb->s_inode_wblist_lock); in wait_sb_inodes()
2711 spin_unlock_irq(&sb->s_inode_wblist_lock); in wait_sb_inodes()
2713 mutex_unlock(&sb->s_sync_lock); in wait_sb_inodes()
2719 struct backing_dev_info *bdi = sb->s_bdi; in __writeback_inodes_sb_nr()
2732 WARN_ON(!rwsem_is_locked(&sb->s_umount)); in __writeback_inodes_sb_nr()
2734 bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy); in __writeback_inodes_sb_nr()
2739 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
2757 * writeback_inodes_sb - writeback dirty inodes from given super_block
2772 * try_to_writeback_inodes_sb - try to start writeback if none underway
2780 if (!down_read_trylock(&sb->s_umount)) in try_to_writeback_inodes_sb()
2784 up_read(&sb->s_umount); in try_to_writeback_inodes_sb()
2789 * sync_inodes_sb - sync sb inode pages
2797 struct backing_dev_info *bdi = sb->s_bdi; in sync_inodes_sb()
2810 * Can't skip on !bdi_has_dirty() because we should wait for !dirty in sync_inodes_sb()
2816 WARN_ON(!rwsem_is_locked(&sb->s_umount)); in sync_inodes_sb()
2829 * write_inode_now - write an inode to disk
2847 if (!mapping_can_writeback(inode->i_mapping)) in write_inode_now()
2856 * sync_inode_metadata - write an inode to disk
2868 .nr_to_write = 0, /* metadata-only */ in sync_inode_metadata()