Lines matching full:bc in drivers/md/dm-bufio.c (the struct dm_buffer_cache argument)
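
All of the helpers below operate on a struct dm_buffer_cache. The type definitions themselves do not match the query, so they are absent from this listing; a plausible reconstruction, inferred purely from the accesses below (bc->trees[i].u.lock / u.spinlock / root, bc->lru[...], bc->num_locks, bc->no_sleep), looks like this:

/*
 * Reconstructed sketch, not copied from the listing: field layout is
 * inferred from how the helpers below use it.
 */
struct buffer_tree {
	union {
		struct rw_semaphore lock;	/* normal, sleepable clients */
		rwlock_t spinlock;		/* no_sleep clients */
	} u;
	struct rb_root root;
};

struct dm_buffer_cache {
	struct lru lru[LIST_SIZE];		/* LIST_CLEAN and LIST_DIRTY */
	unsigned int num_locks;
	bool no_sleep;
	struct buffer_tree trees[];		/* one rbtree + lock per stripe */
};

The union is safe because a given cache uses exactly one of the two lock flavours for its whole lifetime, chosen once in cache_init().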
static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}
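
Every helper indexes bc->trees through cache_index(block, bc->num_locks), which does not itself match the query. Whatever the exact hash, the property the lock/unlock pairs above depend on is that the same block always maps to the same stripe, so a lock taken on a block is released on the same rwlock or rw_semaphore. A minimal hypothetical stand-in, assuming num_locks is a power of two:

/*
 * Hypothetical stand-in for cache_index(), not taken from the listing;
 * the real kernel hash may mix the bits differently. It only has to be
 * deterministic per block and spread blocks across the stripes.
 */
static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{
	return (unsigned int)block & (num_locks - 1);	/* assumes power-of-2 num_locks */
}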
static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
{
	unsigned int i;

	bc->num_locks = num_locks;
	bc->no_sleep = no_sleep;

	for (i = 0; i < bc->num_locks; i++) {
		if (no_sleep)
			rwlock_init(&bc->trees[i].u.spinlock);
		else
			init_rwsem(&bc->trees[i].u.lock);
		bc->trees[i].root = RB_ROOT;
	}

	lru_init(&bc->lru[LIST_CLEAN]);
	lru_init(&bc->lru[LIST_DIRTY]);
}

static void cache_destroy(struct dm_buffer_cache *bc)
{
	unsigned int i;

	for (i = 0; i < bc->num_locks; i++)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));

	lru_destroy(&bc->lru[LIST_CLEAN]);
	lru_destroy(&bc->lru[LIST_DIRTY]);
}
static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
{
	return bc->lru[list_mode].count;
}

static inline unsigned long cache_total(struct dm_buffer_cache *bc)
{
	return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
}
static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
{
	struct dm_buffer *b;

	cache_read_lock(bc, block);
	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
	if (b) {
		/* non-matching lines elided: mark b referenced and take a hold on it */
	}
	cache_read_unlock(bc, block);

	return b;
}
static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_read_lock(bc, b->block);
	r = atomic_dec_and_test(&b->hold_count);	/* elided sanity check on the old count */
	cache_read_unlock(bc, b->block);

	return r;
}
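
Taken together, cache_get()/cache_put() form the usual lookup-with-refcount pattern over the striped read lock. A minimal caller sketch (hypothetical function, not from dm-bufio; error handling omitted):

/*
 * Hypothetical caller sketch: look a block up, use it under the hold,
 * then drop the hold.
 */
static void example_touch_block(struct dm_buffer_cache *bc, sector_t block)
{
	struct dm_buffer *b = cache_get(bc, block);	/* NULL on a miss */

	if (!b)
		return;

	/* ... read or modify the buffer's payload while holding it ... */

	if (cache_put(bc, b)) {
		/* last hold dropped; the buffer is now a candidate for eviction */
	}
}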
static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
				       b_predicate pred, void *context,
				       struct lock_history *lh)
{
	struct evict_wrapper w = { .lh = lh, .pred = pred, .context = context };
	struct lru_entry *le;
	struct dm_buffer *b;

	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
	if (!le)
		return NULL;

	b = le_to_buffer(le);
	/* __evict_pred has already locked the stripe this block hashes to */
	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);

	return b;
}

static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
				     b_predicate pred, void *context)
{
	struct lock_history lh;
	struct dm_buffer *b;

	lh_init(&lh, bc, true);
	b = __cache_evict(bc, list_mode, pred, context, &lh);
	lh_exit(&lh);

	return b;
}
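
cache_evict() takes one buffer off the chosen LRU, provided the caller's predicate agrees, and unlinks it from its stripe's rbtree. A sketch of a trivial predicate and call, assuming b_predicate returns the enum evict_result values (ER_EVICT / ER_DONT_CARE) that lru_evict() consumes:

/*
 * Illustrative predicate, not from the listing: accept any candidate
 * the LRU offers.
 */
static enum evict_result evict_any(struct dm_buffer *b, void *context)
{
	return ER_EVICT;
}

/* usage: struct dm_buffer *b = cache_evict(bc, LIST_CLEAN, evict_any, NULL); */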
static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
{
	cache_write_lock(bc, b->block);
	if (list_mode != b->list_mode) {
		lru_remove(&bc->lru[b->list_mode], &b->lru);
		b->list_mode = list_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
	cache_write_unlock(bc, b->block);
}
static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			      b_predicate pred, void *context, struct lock_history *lh)
{
	struct evict_wrapper w = { .lh = lh, .pred = pred, .context = context };
	struct lru_entry *le;
	struct dm_buffer *b;

	while (true) {
		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
		if (!le)
			break;

		b = le_to_buffer(le);
		b->list_mode = new_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
}

static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			    b_predicate pred, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, true);
	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
	lh_exit(&lh);
}
static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			    iter_fn fn, void *context, struct lock_history *lh)
{
	struct lru *lru = &bc->lru[list_mode];

	/*
	 * Non-matching lines elided: walks every entry on lru, taking the
	 * right stripe lock via lh before calling fn on each buffer, and
	 * stops early if fn asks to.
	 */
}

static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			  iter_fn fn, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, false);	/* false: read locks suffice for iteration */
	__cache_iterate(bc, list_mode, fn, context, &lh);
	lh_exit(&lh);
}
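
Note that cache_iterate() passes false to lh_init() where the evict and mark paths pass true, so iteration only takes the read side of each stripe lock. A sketch of an iterator callback, assuming iter_fn returns IT_NEXT to continue and IT_COMPLETE to stop early (both the enum names and the counting context are assumptions):

/*
 * Illustrative iterator, not from the listing: counts buffers on a list
 * and bails out once a limit is reached.
 */
struct count_ctx {
	unsigned long seen;
	unsigned long limit;
};

static enum it_action count_some(struct dm_buffer *b, void *context)
{
	struct count_ctx *ctx = context;

	return (++ctx->seen >= ctx->limit) ? IT_COMPLETE : IT_NEXT;
}

/* usage: struct count_ctx c = { 0, 128 }; cache_iterate(bc, LIST_DIRTY, count_some, &c); */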
static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_write_lock(bc, b->block);
	/* elided: sanity checks on b */
	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
	if (r)
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	cache_write_unlock(bc, b->block);

	return r;
}
static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_write_lock(bc, b->block);
	if (atomic_read(&b->hold_count) != 1) {
		r = false;	/* still held elsewhere; refuse removal */
	} else {
		r = true;
		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
		lru_remove(&bc->lru[b->list_mode], &b->lru);
	}
	cache_write_unlock(bc, b->block);

	return r;
}
static void __remove_range(struct dm_buffer_cache *bc,
			   struct rb_root *root,
			   sector_t begin, sector_t end,
			   b_predicate pred, b_release release)
{
	/*
	 * Non-matching lines elided: every unheld buffer b in [begin, end)
	 * that pred approves is erased from root, removed from its LRU via
	 * lru_remove(&bc->lru[b->list_mode], &b->lru), and handed to release().
	 */
}

static void cache_remove_range(struct dm_buffer_cache *bc,
			       sector_t begin, sector_t end,
			       b_predicate pred, b_release release)
{
	unsigned int i;

	BUG_ON(bc->no_sleep);	/* takes the rw_semaphore side unconditionally */
	for (i = 0; i < bc->num_locks; i++) {
		down_write(&bc->trees[i].u.lock);
		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
		up_write(&bc->trees[i].u.lock);
	}
}
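
Unlike the per-block helpers, cache_remove_range() must visit every stripe, since a block range hashes across all of them, and it grabs each stripe's rw_semaphore directly, which is why it BUG()s on a no_sleep cache. A hedged usage sketch (hypothetical predicate and release callback; the b_release signature of void (*)(struct dm_buffer *) is an assumption):

/*
 * Hypothetical teardown sketch: drop every cached buffer in a range.
 * forget_buffer() stands in for whatever the real caller does to free
 * a buffer once it has been unlinked.
 */
static enum evict_result always_evict(struct dm_buffer *b, void *context)
{
	return ER_EVICT;
}

static void forget_buffer(struct dm_buffer *b)
{
	/* free b's payload and the dm_buffer itself */
}

/* usage: cache_remove_range(bc, begin, end, always_evict, forget_buffer); */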