// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5 #include "base/task/sequence_manager/atomic_flag_set.h"
6
7 #include <bit>
8 #include <utility>
9
10 #include "base/check_op.h"
11 #include "base/functional/callback.h"
12
13 namespace base {
14 namespace sequence_manager {
15 namespace internal {
16
AtomicFlagSet(scoped_refptr<const AssociatedThreadId> associated_thread)17 AtomicFlagSet::AtomicFlagSet(
18 scoped_refptr<const AssociatedThreadId> associated_thread)
19 : associated_thread_(std::move(associated_thread)) {}
20
~AtomicFlagSet()21 AtomicFlagSet::~AtomicFlagSet() {
22 DCHECK(!alloc_list_head_);
23 DCHECK(!partially_free_list_head_);
24 }
25
26 AtomicFlagSet::AtomicFlag::AtomicFlag() = default;
27
~AtomicFlag()28 AtomicFlagSet::AtomicFlag::~AtomicFlag() {
29 ReleaseAtomicFlag();
30 }
31
AtomicFlag(AtomicFlagSet * outer,Group * element,size_t flag_bit)32 AtomicFlagSet::AtomicFlag::AtomicFlag(AtomicFlagSet* outer,
33 Group* element,
34 size_t flag_bit)
35 : outer_(outer), group_(element), flag_bit_(flag_bit) {}
36
AtomicFlag(AtomicFlag && other)37 AtomicFlagSet::AtomicFlag::AtomicFlag(AtomicFlag&& other)
38 : outer_(other.outer_), group_(other.group_), flag_bit_(other.flag_bit_) {
39 other.outer_ = nullptr;
40 other.group_ = nullptr;
41 }
42
SetActive(bool active)43 void AtomicFlagSet::AtomicFlag::SetActive(bool active) {
44 DCHECK(group_);
45 if (active) {
46 // Release semantics are required to ensure that all memory accesses made on
47 // this thread happen-before any others done on the thread running the
48 // active callbacks.
49 group_->flags.fetch_or(flag_bit_, std::memory_order_release);
50 } else {
51 // No operation is being performed based on the bit *not* being set (i.e.
52 // state of other memory is irrelevant); hence no memory order is required
53 // when unsetting it.
54 group_->flags.fetch_and(~flag_bit_, std::memory_order_relaxed);
55 }
56 }
57
ReleaseAtomicFlag()58 void AtomicFlagSet::AtomicFlag::ReleaseAtomicFlag() {
59 if (!group_)
60 return;
61
62 DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
63 SetActive(false);
64
65 // If |group_| was full then add it on the partially free list.
66 if (group_->IsFull())
67 outer_->AddToPartiallyFreeList(group_);
68
69 int index = Group::IndexOfFirstFlagSet(flag_bit_);
70 DCHECK(!group_->flag_callbacks[index].is_null());
71 group_->flag_callbacks[index] = RepeatingClosure();
72 group_->allocated_flags &= ~flag_bit_;
73
74 // If |group_| has become empty delete it.
75 if (group_->IsEmpty()) {
76 auto ptr = group_.ExtractAsDangling();
77 outer_->RemoveFromPartiallyFreeList(ptr);
78 outer_->RemoveFromAllocList(ptr);
79 }
80
81 outer_ = nullptr;
82 group_ = nullptr;
83 }
84
AddFlag(RepeatingClosure callback)85 AtomicFlagSet::AtomicFlag AtomicFlagSet::AddFlag(RepeatingClosure callback) {
86 DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
87 // Allocate a new Group if needed.
88 if (!partially_free_list_head_) {
89 AddToAllocList(std::make_unique<Group>());
90 AddToPartiallyFreeList(alloc_list_head_.get());
91 }
92
93 DCHECK(partially_free_list_head_);
94 Group* group = partially_free_list_head_;
95 size_t first_unoccupied_index =
96 static_cast<size_t>(group->FindFirstUnallocatedFlag());
97 DCHECK(!group->flag_callbacks[first_unoccupied_index]);
98 group->flag_callbacks[first_unoccupied_index] = std::move(callback);
99
100 size_t flag_bit = size_t{1} << first_unoccupied_index;
101 group->allocated_flags |= flag_bit;
102
103 if (group->IsFull())
104 RemoveFromPartiallyFreeList(group);
105
106 return AtomicFlag(this, group, flag_bit);
107 }
108
RunActiveCallbacks() const109 void AtomicFlagSet::RunActiveCallbacks() const {
110 DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
111 for (Group* iter = alloc_list_head_.get(); iter; iter = iter->next.get()) {
112 // Acquire semantics are required to guarantee that all memory side-effects
113 // made by other threads that were allowed to perform operations are
114 // synchronized with this thread before it returns from this method.
115 size_t active_flags = std::atomic_exchange_explicit(
116 &iter->flags, size_t{0}, std::memory_order_acquire);
117 // This is O(number of bits set).
118 while (active_flags) {
119 int index = Group::IndexOfFirstFlagSet(active_flags);
120 // Clear the flag.
121 active_flags ^= size_t{1} << index;
122 iter->flag_callbacks[index].Run();
123 }
124 }
125 }
126
127 AtomicFlagSet::Group::Group() = default;
128
~Group()129 AtomicFlagSet::Group::~Group() {
130 DCHECK_EQ(allocated_flags, 0u);
131 DCHECK(!partially_free_list_prev);
132 DCHECK(!partially_free_list_next);
133 }
134
IsFull() const135 bool AtomicFlagSet::Group::IsFull() const {
136 return (~allocated_flags) == 0u;
137 }
138
IsEmpty() const139 bool AtomicFlagSet::Group::IsEmpty() const {
140 return allocated_flags == 0u;
141 }
142
FindFirstUnallocatedFlag() const143 int AtomicFlagSet::Group::FindFirstUnallocatedFlag() const {
144 size_t unallocated_flags = ~allocated_flags;
145 DCHECK_NE(unallocated_flags, 0u);
146 int index = IndexOfFirstFlagSet(unallocated_flags);
147 DCHECK_LT(index, kNumFlags);
148 return index;
149 }
150
151 // static
IndexOfFirstFlagSet(size_t flag)152 int AtomicFlagSet::Group::IndexOfFirstFlagSet(size_t flag) {
153 DCHECK_NE(flag, 0u);
154 return std::countr_zero(flag);
155 }
156
AddToAllocList(std::unique_ptr<Group> group)157 void AtomicFlagSet::AddToAllocList(std::unique_ptr<Group> group) {
158 DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
159 if (alloc_list_head_)
160 alloc_list_head_->prev = group.get();
161
162 group->next = std::move(alloc_list_head_);
163 alloc_list_head_ = std::move(group);
164 }
165
RemoveFromAllocList(Group * group)166 void AtomicFlagSet::RemoveFromAllocList(Group* group) {
167 DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
168 if (group->next)
169 group->next->prev = group->prev;
170
171 if (group->prev) {
172 group->prev->next = std::move(group->next);
173 } else {
174 alloc_list_head_ = std::move(group->next);
175 }
176 }
177
AddToPartiallyFreeList(Group * group)178 void AtomicFlagSet::AddToPartiallyFreeList(Group* group) {
179 DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
180 DCHECK_NE(partially_free_list_head_, group);
181 DCHECK(!group->partially_free_list_prev);
182 DCHECK(!group->partially_free_list_next);
183 if (partially_free_list_head_)
184 partially_free_list_head_->partially_free_list_prev = group;
185
186 group->partially_free_list_next = partially_free_list_head_;
187 partially_free_list_head_ = group;
188 }
189
RemoveFromPartiallyFreeList(Group * group)190 void AtomicFlagSet::RemoveFromPartiallyFreeList(Group* group) {
191 DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
192 DCHECK(partially_free_list_head_);
193 // Check |group| is in the list.
194 DCHECK(partially_free_list_head_ == group || group->partially_free_list_prev);
195 if (group->partially_free_list_next) {
196 group->partially_free_list_next->partially_free_list_prev =
197 group->partially_free_list_prev;
198 }
199
200 if (group->partially_free_list_prev) {
201 group->partially_free_list_prev->partially_free_list_next =
202 group->partially_free_list_next;
203 } else {
204 partially_free_list_head_ = group->partially_free_list_next;
205 }
206
207 group->partially_free_list_prev = nullptr;
208 group->partially_free_list_next = nullptr;
209 }
210
211 } // namespace internal
212 } // namespace sequence_manager
213 } // namespace base
214