Lines matching the search in dm-cache-policy-smq.c

1 // SPDX-License-Identifier: GPL-2.0-only
8 #include "dm-cache-background-tracker.h"
9 #include "dm-cache-policy-internal.h"
10 #include "dm-cache-policy.h"
20 #define DM_MSG_PREFIX "cache-policy-smq"
22 /*----------------------------------------------------------------*/
37 /*----------------------------------------------------------------*/
52 /*----------------------------------------------------------------*/
54 #define INDEXER_NULL ((1u << 28u) - 1u)
69 es->begin = es->end = NULL; in space_init()
73 es->begin = vzalloc(array_size(nr_entries, sizeof(struct entry))); in space_init()
74 if (!es->begin) in space_init()
75 return -ENOMEM; in space_init()
77 es->end = es->begin + nr_entries; in space_init()
83 vfree(es->begin); in space_exit()
88 struct entry *e; in __get_entry() local
90 e = es->begin + block; in __get_entry()
91 BUG_ON(e >= es->end); in __get_entry()
93 return e; in __get_entry()
96 static unsigned int to_index(struct entry_space *es, struct entry *e) in to_index() argument
98 BUG_ON(e < es->begin || e >= es->end); in to_index()
99 return e - es->begin; in to_index()
110 /*----------------------------------------------------------------*/
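/*
 * Standalone userspace sketch (not part of the driver) of the entry_space
 * idea above: all entries live in one contiguous array and are referred to
 * by a 28-bit index, with INDEXER_NULL standing in for a NULL pointer.  The
 * demo_* names and the array size are invented for illustration.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_INDEXER_NULL ((1u << 28u) - 1u)

struct demo_entry {
	unsigned next;			/* index of another entry, or DEMO_INDEXER_NULL */
	unsigned prev;
};

struct demo_space {
	struct demo_entry *begin;
	struct demo_entry *end;
};

static int demo_space_init(struct demo_space *es, unsigned nr_entries)
{
	es->begin = calloc(nr_entries, sizeof(*es->begin));
	if (!es->begin)
		return -1;
	es->end = es->begin + nr_entries;
	return 0;
}

static struct demo_entry *demo_to_entry(struct demo_space *es, unsigned block)
{
	if (block == DEMO_INDEXER_NULL)
		return NULL;		/* the reserved index plays the role of NULL */
	assert(es->begin + block < es->end);
	return es->begin + block;
}

static unsigned demo_to_index(struct demo_space *es, struct demo_entry *e)
{
	assert(e >= es->begin && e < es->end);
	return (unsigned)(e - es->begin);
}

int main(void)
{
	struct demo_space es;

	if (demo_space_init(&es, 16))
		return 1;
	printf("entry 5 round-trips to index %u\n",
	       demo_to_index(&es, demo_to_entry(&es, 5)));
	free(es.begin);
	return 0;
}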
119 l->nr_elts = 0; in l_init()
120 l->head = l->tail = INDEXER_NULL; in l_init()
125 return to_entry(es, l->head); in l_head()
130 return to_entry(es, l->tail); in l_tail()
133 static struct entry *l_next(struct entry_space *es, struct entry *e) in l_next() argument
135 return to_entry(es, e->next); in l_next()
138 static struct entry *l_prev(struct entry_space *es, struct entry *e) in l_prev() argument
140 return to_entry(es, e->prev); in l_prev()
145 return l->head == INDEXER_NULL; in l_empty()
148 static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e) in l_add_head() argument
152 e->next = l->head; in l_add_head()
153 e->prev = INDEXER_NULL; in l_add_head()
156 head->prev = l->head = to_index(es, e); in l_add_head()
158 l->head = l->tail = to_index(es, e); in l_add_head()
160 if (!e->sentinel) in l_add_head()
161 l->nr_elts++; in l_add_head()
164 static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e) in l_add_tail() argument
168 e->next = INDEXER_NULL; in l_add_tail()
169 e->prev = l->tail; in l_add_tail()
172 tail->next = l->tail = to_index(es, e); in l_add_tail()
174 l->head = l->tail = to_index(es, e); in l_add_tail()
176 if (!e->sentinel) in l_add_tail()
177 l->nr_elts++; in l_add_tail()
181 struct entry *old, struct entry *e) in l_add_before() argument
186 l_add_head(es, l, e); in l_add_before()
189 e->prev = old->prev; in l_add_before()
190 e->next = to_index(es, old); in l_add_before()
191 prev->next = old->prev = to_index(es, e); in l_add_before()
193 if (!e->sentinel) in l_add_before()
194 l->nr_elts++; in l_add_before()
198 static void l_del(struct entry_space *es, struct ilist *l, struct entry *e) in l_del() argument
200 struct entry *prev = l_prev(es, e); in l_del()
201 struct entry *next = l_next(es, e); in l_del()
204 prev->next = e->next; in l_del()
206 l->head = e->next; in l_del()
209 next->prev = e->prev; in l_del()
211 l->tail = e->prev; in l_del()
213 if (!e->sentinel) in l_del()
214 l->nr_elts--; in l_del()
219 struct entry *e; in l_pop_head() local
221 for (e = l_head(es, l); e; e = l_next(es, e)) in l_pop_head()
222 if (!e->sentinel) { in l_pop_head()
223 l_del(es, l, e); in l_pop_head()
224 return e; in l_pop_head()
232 struct entry *e; in l_pop_tail() local
234 for (e = l_tail(es, l); e; e = l_prev(es, e)) in l_pop_tail()
235 if (!e->sentinel) { in l_pop_tail()
236 l_del(es, l, e); in l_pop_tail()
237 return e; in l_pop_tail()
243 /*----------------------------------------------------------------*/
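/*
 * Standalone sketch (not the driver code) of the ilist operations above: a
 * doubly linked list whose links are array indices, with sentinel entries
 * excluded from the nr_elts count exactly as in l_add_tail()/l_del().  The
 * demo_* names and the pool size are invented for illustration.
 */
#include <stdio.h>

#define DEMO_NR 8u
#define DEMO_NIL ((unsigned)-1)		/* plays the role of INDEXER_NULL */

struct demo_entry {
	unsigned next, prev;
	int sentinel;
};

struct demo_list {
	unsigned head, tail;
	unsigned nr_elts;
};

static struct demo_entry pool[DEMO_NR];

static void demo_list_init(struct demo_list *l)
{
	l->head = l->tail = DEMO_NIL;
	l->nr_elts = 0;
}

static void demo_add_tail(struct demo_list *l, unsigned i)
{
	pool[i].next = DEMO_NIL;
	pool[i].prev = l->tail;

	if (l->tail != DEMO_NIL)
		pool[l->tail].next = i;
	else
		l->head = i;
	l->tail = i;

	if (!pool[i].sentinel)
		l->nr_elts++;
}

static void demo_del(struct demo_list *l, unsigned i)
{
	unsigned p = pool[i].prev, n = pool[i].next;

	if (p != DEMO_NIL)
		pool[p].next = n;
	else
		l->head = n;

	if (n != DEMO_NIL)
		pool[n].prev = p;
	else
		l->tail = p;

	if (!pool[i].sentinel)
		l->nr_elts--;
}

int main(void)
{
	struct demo_list l;
	unsigned i;

	demo_list_init(&l);
	for (i = 0; i < DEMO_NR; i++)
		demo_add_tail(&l, i);
	demo_del(&l, 3);

	for (i = l.head; i != DEMO_NIL; i = pool[i].next)
		printf("%u ", i);
	printf("(nr_elts=%u)\n", l.nr_elts);
	return 0;
}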
246 * The stochastic-multi-queue is a set of lru lists stacked into levels.
274 q->es = es; in q_init()
275 q->nr_elts = 0; in q_init()
276 q->nr_levels = nr_levels; in q_init()
278 for (i = 0; i < q->nr_levels; i++) { in q_init()
279 l_init(q->qs + i); in q_init()
280 q->target_count[i] = 0u; in q_init()
283 q->last_target_nr_elts = 0u; in q_init()
284 q->nr_top_levels = 0u; in q_init()
285 q->nr_in_top_levels = 0u; in q_init()
290 return q->nr_elts; in q_size()
296 static void q_push(struct queue *q, struct entry *e) in q_push() argument
298 BUG_ON(e->pending_work); in q_push()
300 if (!e->sentinel) in q_push()
301 q->nr_elts++; in q_push()
303 l_add_tail(q->es, q->qs + e->level, e); in q_push()
306 static void q_push_front(struct queue *q, struct entry *e) in q_push_front() argument
308 BUG_ON(e->pending_work); in q_push_front()
310 if (!e->sentinel) in q_push_front()
311 q->nr_elts++; in q_push_front()
313 l_add_head(q->es, q->qs + e->level, e); in q_push_front()
316 static void q_push_before(struct queue *q, struct entry *old, struct entry *e) in q_push_before() argument
318 BUG_ON(e->pending_work); in q_push_before()
320 if (!e->sentinel) in q_push_before()
321 q->nr_elts++; in q_push_before()
323 l_add_before(q->es, q->qs + e->level, old, e); in q_push_before()
326 static void q_del(struct queue *q, struct entry *e) in q_del() argument
328 l_del(q->es, q->qs + e->level, e); in q_del()
329 if (!e->sentinel) in q_del()
330 q->nr_elts--; in q_del()
339 struct entry *e; in q_peek() local
341 max_level = min(max_level, q->nr_levels); in q_peek()
344 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) { in q_peek()
345 if (e->sentinel) { in q_peek()
352 return e; in q_peek()
360 struct entry *e = q_peek(q, q->nr_levels, true); in q_pop() local
362 if (e) in q_pop()
363 q_del(q, e); in q_pop()
365 return e; in q_pop()
369  * This function assumes there is a non-sentinel entry to pop. It's only
370  * used by redistribute, so we know this is true. It also doesn't adjust
371  * the q->nr_elts count.
375 struct entry *e; in __redist_pop_from() local
377 for (; level < q->nr_levels; level++) in __redist_pop_from()
378 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) in __redist_pop_from()
379 if (!e->sentinel) { in __redist_pop_from()
380 l_del(q->es, q->qs + e->level, e); in __redist_pop_from()
381 return e; in __redist_pop_from()
393 BUG_ON(lend > q->nr_levels); in q_set_targets_subrange_()
394 nr_levels = lend - lbegin; in q_set_targets_subrange_()
399 q->target_count[level] = in q_set_targets_subrange_()
409 if (q->last_target_nr_elts == q->nr_elts) in q_set_targets()
412 q->last_target_nr_elts = q->nr_elts; in q_set_targets()
414 if (q->nr_top_levels > q->nr_levels) in q_set_targets()
415 q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels); in q_set_targets()
418 q_set_targets_subrange_(q, q->nr_in_top_levels, in q_set_targets()
419 q->nr_levels - q->nr_top_levels, q->nr_levels); in q_set_targets()
421 if (q->nr_in_top_levels < q->nr_elts) in q_set_targets()
422 q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels, in q_set_targets()
423 0, q->nr_levels - q->nr_top_levels); in q_set_targets()
425 q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels); in q_set_targets()
433 struct entry *e; in q_redistribute() local
437 for (level = 0u; level < q->nr_levels - 1u; level++) { in q_redistribute()
438 l = q->qs + level; in q_redistribute()
439 target = q->target_count[level]; in q_redistribute()
444 while (l->nr_elts < target) { in q_redistribute()
445 e = __redist_pop_from(q, level + 1u); in q_redistribute()
446 if (!e) { in q_redistribute()
451 e->level = level; in q_redistribute()
452 l_add_tail(q->es, l, e); in q_redistribute()
458 l_above = q->qs + level + 1u; in q_redistribute()
459 while (l->nr_elts > target) { in q_redistribute()
460 e = l_pop_tail(q->es, l); in q_redistribute()
462 if (!e) in q_redistribute()
466 e->level = level + 1u; in q_redistribute()
467 l_add_tail(q->es, l_above, e); in q_redistribute()
472 static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels, in q_requeue() argument
477 unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels); in q_requeue()
480 if (extra_levels && (e->level < q->nr_levels - 1u)) { in q_requeue()
481 for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de)) in q_requeue()
486 de->level = e->level; in q_requeue()
505 q_del(q, e); in q_requeue()
506 e->level = new_level; in q_requeue()
507 q_push(q, e); in q_requeue()
510 /*----------------------------------------------------------------*/
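/*
 * Sketch (not the driver code) of the level arithmetic used by q_requeue()
 * above: a hit moves an entry up by extra_levels, clamped to the top level.
 * The 64-level figure is an assumption for the example; the real level count
 * is defined elsewhere in the file.
 */
#include <stdio.h>

#define DEMO_NR_LEVELS 64u

static unsigned demo_bump_level(unsigned level, unsigned extra_levels)
{
	unsigned new_level = level + extra_levels;

	return new_level < DEMO_NR_LEVELS - 1u ? new_level : DEMO_NR_LEVELS - 1u;
}

int main(void)
{
	printf("%u\n", demo_bump_level(10u, 4u));	/* 14 */
	printf("%u\n", demo_bump_level(62u, 4u));	/* clamped to 63 */
	return 0;
}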
513 #define SIXTEENTH (1u << (FP_SHIFT - 4u))
514 #define EIGHTH (1u << (FP_SHIFT - 3u))
530 s->hit_threshold = (nr_levels * 3u) / 4u; in stats_init()
531 s->hits = 0u; in stats_init()
532 s->misses = 0u; in stats_init()
537 s->hits = s->misses = 0u; in stats_reset()
542 if (level >= s->hit_threshold) in stats_level_accessed()
543 s->hits++; in stats_level_accessed()
545 s->misses++; in stats_level_accessed()
550 s->misses++; in stats_miss()
561 unsigned int confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses); in stats_assess()
573 /*----------------------------------------------------------------*/
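/*
 * Sketch (not the driver code) of the fixed-point hit-ratio test used by
 * stats_assess() above: confidence = hits << FP_SHIFT / (hits + misses),
 * compared against the SIXTEENTH and EIGHTH thresholds.  FP_SHIFT is taken
 * as 8 here, which is an assumption; the enum names are invented.
 */
#include <stdio.h>

#define DEMO_FP_SHIFT 8u
#define DEMO_SIXTEENTH (1u << (DEMO_FP_SHIFT - 4u))
#define DEMO_EIGHTH (1u << (DEMO_FP_SHIFT - 3u))

enum demo_assessment { DEMO_POOR, DEMO_FAIR, DEMO_WELL };

static enum demo_assessment demo_assess(unsigned hits, unsigned misses)
{
	unsigned total = hits + misses;
	unsigned confidence = total ? (hits << DEMO_FP_SHIFT) / total : 0u;

	if (confidence < DEMO_SIXTEENTH)
		return DEMO_POOR;
	if (confidence < DEMO_EIGHTH)
		return DEMO_FAIR;
	return DEMO_WELL;
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_assess(1u, 100u),	/* poor */
	       demo_assess(10u, 100u),	/* fair */
	       demo_assess(50u, 100u));	/* well */
	return 0;
}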
589 ht->es = es; in h_init()
591 ht->hash_bits = __ffs(nr_buckets); in h_init()
593 ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets))); in h_init()
594 if (!ht->buckets) in h_init()
595 return -ENOMEM; in h_init()
598 ht->buckets[i] = INDEXER_NULL; in h_init()
605 vfree(ht->buckets); in h_exit()
610 return to_entry(ht->es, ht->buckets[bucket]); in h_head()
613 static struct entry *h_next(struct smq_hash_table *ht, struct entry *e) in h_next() argument
615 return to_entry(ht->es, e->hash_next); in h_next()
618 static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e) in __h_insert() argument
620 e->hash_next = ht->buckets[bucket]; in __h_insert()
621 ht->buckets[bucket] = to_index(ht->es, e); in __h_insert()
624 static void h_insert(struct smq_hash_table *ht, struct entry *e) in h_insert() argument
626 unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits); in h_insert()
628 __h_insert(ht, h, e); in h_insert()
634 struct entry *e; in __h_lookup() local
637 for (e = h_head(ht, h); e; e = h_next(ht, e)) { in __h_lookup()
638 if (e->oblock == oblock) in __h_lookup()
639 return e; in __h_lookup()
641 *prev = e; in __h_lookup()
648 struct entry *e, struct entry *prev) in __h_unlink() argument
651 prev->hash_next = e->hash_next; in __h_unlink()
653 ht->buckets[h] = e->hash_next; in __h_unlink()
661 struct entry *e, *prev; in h_lookup() local
662 unsigned int h = hash_64(from_oblock(oblock), ht->hash_bits); in h_lookup()
664 e = __h_lookup(ht, h, oblock, &prev); in h_lookup()
665 if (e && prev) { in h_lookup()
670 __h_unlink(ht, h, e, prev); in h_lookup()
671 __h_insert(ht, h, e); in h_lookup()
674 return e; in h_lookup()
677 static void h_remove(struct smq_hash_table *ht, struct entry *e) in h_remove() argument
679 unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits); in h_remove()
686 e = __h_lookup(ht, h, e->oblock, &prev); in h_remove()
687 if (e) in h_remove()
688 __h_unlink(ht, h, e, prev); in h_remove()
691 /*----------------------------------------------------------------*/
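/*
 * Sketch (not the driver code) of the hash table above: buckets hold the
 * index of the first entry in a chain, entries chain through an index field,
 * and a successful lookup relinks the found entry to the head of its bucket,
 * like h_lookup() does, so hot entries are found quickly next time.  The
 * demo_* names and sizes are invented for the example.
 */
#include <stdio.h>

#define DEMO_NIL ((unsigned)-1)
#define DEMO_NR_BUCKETS 4u		/* power of two, so we can mask */

struct demo_entry {
	unsigned key;
	unsigned hash_next;		/* index of next entry in the bucket chain */
};

static struct demo_entry pool[8];
static unsigned buckets[DEMO_NR_BUCKETS];

static unsigned demo_hash(unsigned key)
{
	return key & (DEMO_NR_BUCKETS - 1u);
}

static void demo_insert(unsigned i)
{
	unsigned b = demo_hash(pool[i].key);

	pool[i].hash_next = buckets[b];
	buckets[b] = i;
}

static unsigned demo_lookup(unsigned key)
{
	unsigned b = demo_hash(key);
	unsigned i, prev = DEMO_NIL;

	for (i = buckets[b]; i != DEMO_NIL; prev = i, i = pool[i].hash_next) {
		if (pool[i].key != key)
			continue;

		if (prev != DEMO_NIL) {
			/* move to front, as in the driver's h_lookup() */
			pool[prev].hash_next = pool[i].hash_next;
			pool[i].hash_next = buckets[b];
			buckets[b] = i;
		}
		return i;
	}
	return DEMO_NIL;
}

int main(void)
{
	unsigned i;

	for (i = 0; i < DEMO_NR_BUCKETS; i++)
		buckets[i] = DEMO_NIL;

	for (i = 0; i < 8; i++) {
		pool[i].key = 100u + i;
		demo_insert(i);
	}

	printf("lookup 104 -> entry %u\n", demo_lookup(104u));
	printf("lookup 999 -> %u (not found)\n", demo_lookup(999u));
	return 0;
}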
706 ea->es = es; in init_allocator()
707 ea->nr_allocated = 0u; in init_allocator()
708 ea->begin = begin; in init_allocator()
710 l_init(&ea->free); in init_allocator()
712 l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i)); in init_allocator()
715 static void init_entry(struct entry *e) in init_entry() argument
721 e->hash_next = INDEXER_NULL; in init_entry()
722 e->next = INDEXER_NULL; in init_entry()
723 e->prev = INDEXER_NULL; in init_entry()
724 e->level = 0u; in init_entry()
725 e->dirty = true; /* FIXME: audit */ in init_entry()
726 e->allocated = true; in init_entry()
727 e->sentinel = false; in init_entry()
728 e->pending_work = false; in init_entry()
733 struct entry *e; in alloc_entry() local
735 if (l_empty(&ea->free)) in alloc_entry()
738 e = l_pop_head(ea->es, &ea->free); in alloc_entry()
739 init_entry(e); in alloc_entry()
740 ea->nr_allocated++; in alloc_entry()
742 return e; in alloc_entry()
750 struct entry *e = __get_entry(ea->es, ea->begin + i); in alloc_particular_entry() local
752 BUG_ON(e->allocated); in alloc_particular_entry()
754 l_del(ea->es, &ea->free, e); in alloc_particular_entry()
755 init_entry(e); in alloc_particular_entry()
756 ea->nr_allocated++; in alloc_particular_entry()
758 return e; in alloc_particular_entry()
761 static void free_entry(struct entry_alloc *ea, struct entry *e) in free_entry() argument
763 BUG_ON(!ea->nr_allocated); in free_entry()
764 BUG_ON(!e->allocated); in free_entry()
766 ea->nr_allocated--; in free_entry()
767 e->allocated = false; in free_entry()
768 l_add_tail(ea->es, &ea->free, e); in free_entry()
773 return l_empty(&ea->free); in allocator_empty()
776 static unsigned int get_index(struct entry_alloc *ea, struct entry *e) in get_index() argument
778 return to_index(ea->es, e) - ea->begin; in get_index()
783 return __get_entry(ea->es, ea->begin + index); in get_entry()
786 /*----------------------------------------------------------------*/
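/*
 * Sketch (not the driver code) of the entry allocator above, simplified to a
 * stack of free indices rather than a free ilist; the nr_allocated
 * bookkeeping mirrors alloc_entry()/free_entry()/allocator_empty().  Names
 * and sizes are invented.
 */
#include <stdio.h>

#define DEMO_NR_ENTRIES 4u

static unsigned free_stack[DEMO_NR_ENTRIES];
static unsigned nr_free, nr_allocated;

static void demo_allocator_init(void)
{
	unsigned i;

	for (i = 0; i < DEMO_NR_ENTRIES; i++)
		free_stack[i] = i;
	nr_free = DEMO_NR_ENTRIES;
	nr_allocated = 0;
}

static int demo_alloc(unsigned *index)
{
	if (!nr_free)
		return -1;		/* allocator empty, caller must cope */

	*index = free_stack[--nr_free];
	nr_allocated++;
	return 0;
}

static void demo_free(unsigned index)
{
	free_stack[nr_free++] = index;
	nr_allocated--;
}

int main(void)
{
	unsigned idx;

	demo_allocator_init();
	while (!demo_alloc(&idx))
		printf("allocated entry %u\n", idx);
	printf("allocator empty (nr_allocated=%u)\n", nr_allocated);

	demo_free(2u);
	printf("after free: %s\n", demo_alloc(&idx) ? "still empty" : "got one back");
	return 0;
}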
869 /*----------------------------------------------------------------*/
878 return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels); in writeback_sentinel()
883 return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels); in demote_sentinel()
889 struct queue *q = &mq->dirty; in __update_writeback_sentinels()
892 for (level = 0; level < q->nr_levels; level++) { in __update_writeback_sentinels()
902 struct queue *q = &mq->clean; in __update_demote_sentinels()
905 for (level = 0; level < q->nr_levels; level++) { in __update_demote_sentinels()
914 if (time_after(jiffies, mq->next_writeback_period)) { in update_sentinels()
915 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD; in update_sentinels()
916 mq->current_writeback_sentinels = !mq->current_writeback_sentinels; in update_sentinels()
920 if (time_after(jiffies, mq->next_demote_period)) { in update_sentinels()
921 mq->next_demote_period = jiffies + DEMOTE_PERIOD; in update_sentinels()
922 mq->current_demote_sentinels = !mq->current_demote_sentinels; in update_sentinels()
934 sentinel->level = level; in __sentinels_init()
935 q_push(&mq->dirty, sentinel); in __sentinels_init()
938 sentinel->level = level; in __sentinels_init()
939 q_push(&mq->clean, sentinel); in __sentinels_init()
945 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD; in sentinels_init()
946 mq->next_demote_period = jiffies + DEMOTE_PERIOD; in sentinels_init()
948 mq->current_writeback_sentinels = false; in sentinels_init()
949 mq->current_demote_sentinels = false; in sentinels_init()
952 mq->current_writeback_sentinels = !mq->current_writeback_sentinels; in sentinels_init()
953 mq->current_demote_sentinels = !mq->current_demote_sentinels; in sentinels_init()
957 /*----------------------------------------------------------------*/
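/*
 * Sketch (not the driver code) of the sentinel rotation in update_sentinels()
 * above: two sets of sentinels alternate, and whenever the writeback/demote
 * period expires the "current" set flips.  Time is a plain counter here
 * instead of jiffies, and the period length is made up.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long now, next_period = 10, period = 10;
	bool current_set = false;

	for (now = 0; now <= 40; now++) {
		if (now >= next_period) {	/* stands in for time_after(jiffies, ...) */
			next_period = now + period;
			current_set = !current_set;
			printf("t=%lu: switch to sentinel set %d\n", now, (int)current_set);
		}
	}
	return 0;
}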
959 static void del_queue(struct smq_policy *mq, struct entry *e) in del_queue() argument
961 q_del(e->dirty ? &mq->dirty : &mq->clean, e); in del_queue()
964 static void push_queue(struct smq_policy *mq, struct entry *e) in push_queue() argument
966 if (e->dirty) in push_queue()
967 q_push(&mq->dirty, e); in push_queue()
969 q_push(&mq->clean, e); in push_queue()
972 // !h, !q, a -> h, q, a
973 static void push(struct smq_policy *mq, struct entry *e) in push() argument
975 h_insert(&mq->table, e); in push()
976 if (!e->pending_work) in push()
977 push_queue(mq, e); in push()
980 static void push_queue_front(struct smq_policy *mq, struct entry *e) in push_queue_front() argument
982 if (e->dirty) in push_queue_front()
983 q_push_front(&mq->dirty, e); in push_queue_front()
985 q_push_front(&mq->clean, e); in push_queue_front()
988 static void push_front(struct smq_policy *mq, struct entry *e) in push_front() argument
990 h_insert(&mq->table, e); in push_front()
991 if (!e->pending_work) in push_front()
992 push_queue_front(mq, e); in push_front()
995 static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e) in infer_cblock() argument
997 return to_cblock(get_index(&mq->cache_alloc, e)); in infer_cblock()
1000 static void requeue(struct smq_policy *mq, struct entry *e) in requeue() argument
1005 if (e->pending_work) in requeue()
1008 if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) { in requeue()
1009 if (!e->dirty) { in requeue()
1010 q_requeue(&mq->clean, e, 1u, NULL, NULL); in requeue()
1014 q_requeue(&mq->dirty, e, 1u, in requeue()
1015 get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels), in requeue()
1016 get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels)); in requeue()
1042 unsigned int hits = mq->cache_stats.hits; in default_promote_level()
1043 unsigned int misses = mq->cache_stats.misses; in default_promote_level()
1054 unsigned int threshold_level = allocator_empty(&mq->cache_alloc) ? in update_promote_levels()
1064 switch (stats_assess(&mq->hotspot_stats)) { in update_promote_levels()
1077 mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level; in update_promote_levels()
1078 mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level); in update_promote_levels()
1087 switch (stats_assess(&mq->hotspot_stats)) { in update_level_jump()
1089 mq->hotspot_level_jump = 4u; in update_level_jump()
1093 mq->hotspot_level_jump = 2u; in update_level_jump()
1097 mq->hotspot_level_jump = 1u; in update_level_jump()
1104 clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks); in end_hotspot_period()
1107 if (time_after(jiffies, mq->next_hotspot_period)) { in end_hotspot_period()
1109 q_redistribute(&mq->hotspot); in end_hotspot_period()
1110 stats_reset(&mq->hotspot_stats); in end_hotspot_period()
1111 mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD; in end_hotspot_period()
1117 if (time_after(jiffies, mq->next_cache_period)) { in end_cache_period()
1118 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); in end_cache_period()
1120 q_redistribute(&mq->dirty); in end_cache_period()
1121 q_redistribute(&mq->clean); in end_cache_period()
1122 stats_reset(&mq->cache_stats); in end_cache_period()
1124 mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD; in end_cache_period()
1128 /*----------------------------------------------------------------*/
1138 return from_cblock(mq->cache_size) * p / 100u; in percent_to_target()
1147 if (idle || mq->cleaner) { in clean_target_met()
1151 return q_size(&mq->dirty) == 0u; in clean_target_met()
1164 nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated; in free_target_met()
1165 return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >= in free_target_met()
1169 /*----------------------------------------------------------------*/
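/*
 * Sketch (not the driver code) of the target arithmetic above:
 * percent_to_target() converts a percentage of the cache size into a block
 * count, and free_target_met() counts queued demotions toward the free
 * target.  The cache size, allocation counts and the 1% figure are all
 * invented for the example.
 */
#include <stdio.h>

static unsigned demo_percent_to_target(unsigned cache_size, unsigned p)
{
	return cache_size * p / 100u;
}

int main(void)
{
	unsigned cache_size = 10000u, nr_allocated = 9920u, queued_demotions = 30u;
	unsigned nr_free = cache_size - nr_allocated;
	unsigned target = demo_percent_to_target(cache_size, 1u);

	printf("free target %u, free+queued %u, met: %d\n",
	       target, nr_free + queued_demotions,
	       (nr_free + queued_demotions) >= target);
	return 0;
}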
1171 static void mark_pending(struct smq_policy *mq, struct entry *e) in mark_pending() argument
1173 BUG_ON(e->sentinel); in mark_pending()
1174 BUG_ON(!e->allocated); in mark_pending()
1175 BUG_ON(e->pending_work); in mark_pending()
1176 e->pending_work = true; in mark_pending()
1179 static void clear_pending(struct smq_policy *mq, struct entry *e) in clear_pending() argument
1181 BUG_ON(!e->pending_work); in clear_pending()
1182 e->pending_work = false; in clear_pending()
1189 struct entry *e; in queue_writeback() local
1191 e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle); in queue_writeback()
1192 if (e) { in queue_writeback()
1193 mark_pending(mq, e); in queue_writeback()
1194 q_del(&mq->dirty, e); in queue_writeback()
1197 work.oblock = e->oblock; in queue_writeback()
1198 work.cblock = infer_cblock(mq, e); in queue_writeback()
1200 r = btracker_queue(mq->bg_work, &work, NULL); in queue_writeback()
1202 clear_pending(mq, e); in queue_writeback()
1203 q_push_front(&mq->dirty, e); in queue_writeback()
1212 struct entry *e; in queue_demotion() local
1214 if (WARN_ON_ONCE(!mq->migrations_allowed)) in queue_demotion()
1217 e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true); in queue_demotion()
1218 if (!e) { in queue_demotion()
1224 mark_pending(mq, e); in queue_demotion()
1225 q_del(&mq->clean, e); in queue_demotion()
1228 work.oblock = e->oblock; in queue_demotion()
1229 work.cblock = infer_cblock(mq, e); in queue_demotion()
1230 r = btracker_queue(mq->bg_work, &work, NULL); in queue_demotion()
1232 clear_pending(mq, e); in queue_demotion()
1233 q_push_front(&mq->clean, e); in queue_demotion()
1241 struct entry *e; in queue_promotion() local
1244 if (!mq->migrations_allowed) in queue_promotion()
1247 if (allocator_empty(&mq->cache_alloc)) { in queue_promotion()
1257 if (btracker_promotion_already_present(mq->bg_work, oblock)) in queue_promotion()
1264 e = alloc_entry(&mq->cache_alloc); in queue_promotion()
1265 BUG_ON(!e); in queue_promotion()
1266 e->pending_work = true; in queue_promotion()
1269 work.cblock = infer_cblock(mq, e); in queue_promotion()
1270 r = btracker_queue(mq->bg_work, &work, workp); in queue_promotion()
1272 free_entry(&mq->cache_alloc, e); in queue_promotion()
1275 /*----------------------------------------------------------------*/
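/*
 * Sketch (not the driver code) of the undo-on-failure pattern shared by
 * queue_writeback() and queue_demotion() above: mark the chosen entry as
 * pending and take it off its queue, try to hand the work to the background
 * tracker, and put the entry back untouched if that fails.  The tracker is
 * faked with a boolean; all demo_* names are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_entry {
	bool pending_work;
};

/* stands in for btracker_queue(); just reports whether there was room */
static int demo_btracker_queue(bool have_room)
{
	return have_room ? 0 : -1;
}

static void demo_queue_work(struct demo_entry *e, bool have_room)
{
	e->pending_work = true;		/* mark_pending() + q_del() in the driver */

	if (demo_btracker_queue(have_room)) {
		e->pending_work = false;	/* clear_pending() + q_push_front() */
		printf("tracker full: entry requeued at the front\n");
		return;
	}
	printf("background work queued\n");
}

int main(void)
{
	struct demo_entry e = { .pending_work = false };

	demo_queue_work(&e, true);
	demo_queue_work(&e, false);
	return 0;
}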
1295 if (!allocator_empty(&mq->cache_alloc) && fast_promote) in should_promote()
1298 return maybe_promote(hs_e->level >= mq->write_promote_level); in should_promote()
1300 return maybe_promote(hs_e->level >= mq->read_promote_level); in should_promote()
1306 (void) sector_div(r, mq->cache_blocks_per_hotspot_block); in to_hblock()
1314 struct entry *e = h_lookup(&mq->hotspot_table, hb); in update_hotspot_queue() local
1316 if (e) { in update_hotspot_queue()
1317 stats_level_accessed(&mq->hotspot_stats, e->level); in update_hotspot_queue()
1319 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1320 q_requeue(&mq->hotspot, e, in update_hotspot_queue()
1321 test_and_set_bit(hi, mq->hotspot_hit_bits) ? in update_hotspot_queue()
1322 0u : mq->hotspot_level_jump, in update_hotspot_queue()
1326 stats_miss(&mq->hotspot_stats); in update_hotspot_queue()
1328 e = alloc_entry(&mq->hotspot_alloc); in update_hotspot_queue()
1329 if (!e) { in update_hotspot_queue()
1330 e = q_pop(&mq->hotspot); in update_hotspot_queue()
1331 if (e) { in update_hotspot_queue()
1332 h_remove(&mq->hotspot_table, e); in update_hotspot_queue()
1333 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1334 clear_bit(hi, mq->hotspot_hit_bits); in update_hotspot_queue()
1339 if (e) { in update_hotspot_queue()
1340 e->oblock = hb; in update_hotspot_queue()
1341 q_push(&mq->hotspot, e); in update_hotspot_queue()
1342 h_insert(&mq->hotspot_table, e); in update_hotspot_queue()
1346 return e; in update_hotspot_queue()
1349 /*----------------------------------------------------------------*/
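/*
 * Sketch (not the driver code) of to_hblock() above: an origin block is
 * mapped to its hotspot block by dividing by cache_blocks_per_hotspot_block
 * (the kernel uses sector_div() for the 64-bit division).  The block counts
 * here are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cache_blocks_per_hotspot_block = 16;
	uint64_t oblock = 1000;

	printf("origin block %llu -> hotspot block %llu\n",
	       (unsigned long long)oblock,
	       (unsigned long long)(oblock / cache_blocks_per_hotspot_block));
	return 0;
}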
1352  * Public interface, via the policy struct. See dm-cache-policy.h for a
1353  * description of these.
1365 btracker_destroy(mq->bg_work); in smq_destroy()
1366 h_exit(&mq->hotspot_table); in smq_destroy()
1367 h_exit(&mq->table); in smq_destroy()
1368 free_bitset(mq->hotspot_hit_bits); in smq_destroy()
1369 free_bitset(mq->cache_hit_bits); in smq_destroy()
1370 space_exit(&mq->es); in smq_destroy()
1374 /*----------------------------------------------------------------*/
1380 struct entry *e, *hs_e; in __lookup() local
1385 e = h_lookup(&mq->table, oblock); in __lookup()
1386 if (e) { in __lookup()
1387 stats_level_accessed(&mq->cache_stats, e->level); in __lookup()
1389 requeue(mq, e); in __lookup()
1390 *cblock = infer_cblock(mq, e); in __lookup()
1394 stats_miss(&mq->cache_stats); in __lookup()
1407 return -ENOENT; in __lookup()
1419 spin_lock_irqsave(&mq->lock, flags); in smq_lookup()
1423 spin_unlock_irqrestore(&mq->lock, flags); in smq_lookup()
1438 spin_lock_irqsave(&mq->lock, flags); in smq_lookup_with_work()
1440 spin_unlock_irqrestore(&mq->lock, flags); in smq_lookup_with_work()
1452 spin_lock_irqsave(&mq->lock, flags); in smq_get_background_work()
1453 r = btracker_issue(mq->bg_work, result); in smq_get_background_work()
1454 if (r == -ENODATA) { in smq_get_background_work()
1457 r = btracker_issue(mq->bg_work, result); in smq_get_background_work()
1460 spin_unlock_irqrestore(&mq->lock, flags); in smq_get_background_work()
1473 struct entry *e = get_entry(&mq->cache_alloc, in __complete_background_work() local
1474 from_cblock(work->cblock)); in __complete_background_work()
1476 switch (work->op) { in __complete_background_work()
1479 clear_pending(mq, e); in __complete_background_work()
1481 e->oblock = work->oblock; in __complete_background_work()
1482 e->level = NR_CACHE_LEVELS - 1; in __complete_background_work()
1483 push(mq, e); in __complete_background_work()
1486 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1494 h_remove(&mq->table, e); in __complete_background_work()
1495 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1498 clear_pending(mq, e); in __complete_background_work()
1499 push_queue(mq, e); in __complete_background_work()
1506 clear_pending(mq, e); in __complete_background_work()
1507 push_queue(mq, e); in __complete_background_work()
1512 btracker_complete(mq->bg_work, work); in __complete_background_work()
1522 spin_lock_irqsave(&mq->lock, flags); in smq_complete_background_work()
1524 spin_unlock_irqrestore(&mq->lock, flags); in smq_complete_background_work()
1527 // in_hash(oblock) -> in_hash(oblock)
1530 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in __smq_set_clear_dirty() local
1532 if (e->pending_work) in __smq_set_clear_dirty()
1533 e->dirty = set; in __smq_set_clear_dirty()
1535 del_queue(mq, e); in __smq_set_clear_dirty()
1536 e->dirty = set; in __smq_set_clear_dirty()
1537 push_queue(mq, e); in __smq_set_clear_dirty()
1546 spin_lock_irqsave(&mq->lock, flags); in smq_set_dirty()
1548 spin_unlock_irqrestore(&mq->lock, flags); in smq_set_dirty()
1556 spin_lock_irqsave(&mq->lock, flags); in smq_clear_dirty()
1558 spin_unlock_irqrestore(&mq->lock, flags); in smq_clear_dirty()
1563 return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1); in random_level()
1571 struct entry *e; in smq_load_mapping() local
1573 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_load_mapping()
1574 e->oblock = oblock; in smq_load_mapping()
1575 e->dirty = dirty; in smq_load_mapping()
1576 e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock); in smq_load_mapping()
1577 e->pending_work = false; in smq_load_mapping()
1583 push_front(mq, e); in smq_load_mapping()
1591 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_invalidate_mapping() local
1593 if (!e->allocated) in smq_invalidate_mapping()
1594 return -ENODATA; in smq_invalidate_mapping()
1597 del_queue(mq, e); in smq_invalidate_mapping()
1598 h_remove(&mq->table, e); in smq_invalidate_mapping()
1599 free_entry(&mq->cache_alloc, e); in smq_invalidate_mapping()
1606 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_get_hint() local
1608 if (!e->allocated) in smq_get_hint()
1611 return e->level; in smq_get_hint()
1620 spin_lock_irqsave(&mq->lock, flags); in smq_residency()
1621 r = to_cblock(mq->cache_alloc.nr_allocated); in smq_residency()
1622 spin_unlock_irqrestore(&mq->lock, flags); in smq_residency()
1632 spin_lock_irqsave(&mq->lock, flags); in smq_tick()
1633 mq->tick++; in smq_tick()
1637 spin_unlock_irqrestore(&mq->lock, flags); in smq_tick()
1644 mq->migrations_allowed = allow; in smq_allow_migrations()
1658 return -EINVAL; in mq_set_config_value()
1669 return -EINVAL; in mq_set_config_value()
1690 mq->policy.destroy = smq_destroy; in init_policy_functions()
1691 mq->policy.lookup = smq_lookup; in init_policy_functions()
1692 mq->policy.lookup_with_work = smq_lookup_with_work; in init_policy_functions()
1693 mq->policy.get_background_work = smq_get_background_work; in init_policy_functions()
1694 mq->policy.complete_background_work = smq_complete_background_work; in init_policy_functions()
1695 mq->policy.set_dirty = smq_set_dirty; in init_policy_functions()
1696 mq->policy.clear_dirty = smq_clear_dirty; in init_policy_functions()
1697 mq->policy.load_mapping = smq_load_mapping; in init_policy_functions()
1698 mq->policy.invalidate_mapping = smq_invalidate_mapping; in init_policy_functions()
1699 mq->policy.get_hint = smq_get_hint; in init_policy_functions()
1700 mq->policy.residency = smq_residency; in init_policy_functions()
1701 mq->policy.tick = smq_tick; in init_policy_functions()
1702 mq->policy.allow_migrations = smq_allow_migrations; in init_policy_functions()
1705 mq->policy.set_config_value = mq_set_config_value; in init_policy_functions()
1706 mq->policy.emit_config_values = mq_emit_config_values; in init_policy_functions()
1744 mq->cache_size = cache_size; in __smq_create()
1745 mq->cache_block_size = cache_block_size; in __smq_create()
1748 &mq->hotspot_block_size, &mq->nr_hotspot_blocks); in __smq_create()
1750 mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size); in __smq_create()
1751 mq->hotspot_level_jump = 1u; in __smq_create()
1752 if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) { in __smq_create()
1757 init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue); in __smq_create()
1759 get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true; in __smq_create()
1761 init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels); in __smq_create()
1763 get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true; in __smq_create()
1765 init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels, in __smq_create()
1766 total_sentinels + mq->nr_hotspot_blocks); in __smq_create()
1768 init_allocator(&mq->cache_alloc, &mq->es, in __smq_create()
1769 total_sentinels + mq->nr_hotspot_blocks, in __smq_create()
1770 total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size)); in __smq_create()
1772 mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks); in __smq_create()
1773 if (!mq->hotspot_hit_bits) { in __smq_create()
1777 clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks); in __smq_create()
1780 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size)); in __smq_create()
1781 if (!mq->cache_hit_bits) { in __smq_create()
1785 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); in __smq_create()
1787 mq->cache_hit_bits = NULL; in __smq_create()
1789 mq->tick = 0; in __smq_create()
1790 spin_lock_init(&mq->lock); in __smq_create()
1792 q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS); in __smq_create()
1793 mq->hotspot.nr_top_levels = 8; in __smq_create()
1794 mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS, in __smq_create()
1795 from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block); in __smq_create()
1797 q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS); in __smq_create()
1798 q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS); in __smq_create()
1800 stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS); in __smq_create()
1801 stats_init(&mq->cache_stats, NR_CACHE_LEVELS); in __smq_create()
1803 if (h_init(&mq->table, &mq->es, from_cblock(cache_size))) in __smq_create()
1806 if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks)) in __smq_create()
1810 mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS; in __smq_create()
1812 mq->next_hotspot_period = jiffies; in __smq_create()
1813 mq->next_cache_period = jiffies; in __smq_create()
1815 mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */ in __smq_create()
1816 if (!mq->bg_work) in __smq_create()
1819 mq->migrations_allowed = migrations_allowed; in __smq_create()
1820 mq->cleaner = cleaner; in __smq_create()
1822 return &mq->policy; in __smq_create()
1825 h_exit(&mq->hotspot_table); in __smq_create()
1827 h_exit(&mq->table); in __smq_create()
1829 free_bitset(mq->cache_hit_bits); in __smq_create()
1831 free_bitset(mq->hotspot_hit_bits); in __smq_create()
1833 space_exit(&mq->es); in __smq_create()
1864 /*----------------------------------------------------------------*/
1906 return -ENOMEM; in smq_init()
1936 return -ENOMEM; in smq_init()
1950 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1954 MODULE_ALIAS("dm-cache-default");
1955 MODULE_ALIAS("dm-cache-mq");
1956 MODULE_ALIAS("dm-cache-cleaner");