// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_
#define BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_

#include <atomic>
#include <memory>

#include "base/base_export.h"
#include "base/functional/callback.h"
#include "base/memory/raw_ptr.h"
#include "base/task/sequence_manager/associated_thread_id.h"

namespace base {
namespace sequence_manager {
namespace internal {

// This class maintains a set of AtomicFlags which can be activated or
// deactivated at any time by any thread. When a flag is created, a callback
// is specified, and the RunActiveCallbacks method can be invoked to fire
// callbacks for all active flags. Creating, releasing, or destroying an
// AtomicFlag must be done on the associated thread, as must calling
// RunActiveCallbacks. This class is thread-affine.
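//
// A minimal usage sketch (the callback body and the variable names are
// illustrative, not part of this API):
//
//   AtomicFlagSet flag_set(associated_thread);
//   AtomicFlagSet::AtomicFlag flag =
//       flag_set.AddFlag(BindRepeating([] { /* react to the flag */ }));
//
//   // Any thread may mark the flag active:
//   flag.SetActive(true);
//
//   // On the associated thread, fire callbacks for all active flags and
//   // atomically reset them to inactive:
//   flag_set.RunActiveCallbacks();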
class BASE_EXPORT AtomicFlagSet {
 protected:
  struct Group;

 public:
  explicit AtomicFlagSet(
      scoped_refptr<const AssociatedThreadId> associated_thread);
  AtomicFlagSet(const AtomicFlagSet&) = delete;
  AtomicFlagSet& operator=(const AtomicFlagSet&) = delete;
  // AtomicFlags need to be released (or deleted) before this can be deleted.
  ~AtomicFlagSet();
  // This class is thread-affine; in addition, SetActive can be called
  // concurrently from any thread.
  class BASE_EXPORT AtomicFlag {
   public:
    AtomicFlag();

    // Automatically releases the AtomicFlag.
    ~AtomicFlag();

    AtomicFlag(const AtomicFlag&) = delete;
    AtomicFlag(AtomicFlag&& other);

    // Can be called on any thread. Marks whether the flag is active or not,
    // which controls whether RunActiveCallbacks() will fire the associated
    // callback or not. In the absence of external synchronization, the value
    // set by this call might not immediately be visible to a thread calling
    // RunActiveCallbacks(); the only guarantee is that the value will
    // eventually become visible to other threads due to cache coherency.
    // Release / acquire semantics are used on the underlying atomic
    // operations, so if RunActiveCallbacks() sees the value set by a call to
    // SetActive(), it will also see the memory changes that happened prior to
    // that SetActive() call.
    void SetActive(bool active);
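    //
    // A sketch of the publication pattern this enables (|shared_state| is
    // illustrative):
    //
    //   shared_state = ...;    // Plain write on some producer thread.
    //   flag.SetActive(true);  // Release store publishes the write.
    //
    //   // Later, if RunActiveCallbacks() on the associated thread observes
    //   // the flag (acquire), the callback also observes |shared_state|.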

    // Releases the flag. Must be called on the associated thread. SetActive
    // can't be called after this.
    void ReleaseAtomicFlag();

   private:
    friend AtomicFlagSet;

    AtomicFlag(AtomicFlagSet* outer, Group* element, size_t flag_bit);

    raw_ptr<AtomicFlagSet, DanglingUntriaged> outer_ = nullptr;
    raw_ptr<Group> group_ = nullptr;  // Null when AtomicFlag is invalid.
    size_t flag_bit_ = 0;  // This is 1 << index of this flag within the group.
  };

  // Adds a new flag to the set. The |callback| will be fired by
  // RunActiveCallbacks if the flag is active. Must be called on the associated
  // thread.
  AtomicFlag AddFlag(RepeatingClosure callback);

  // Runs the registered callback for all flags marked as active and atomically
  // resets all flags to inactive. Must be called on the associated thread.
  void RunActiveCallbacks() const;
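  //
  // A sketch of the reset-and-dispatch step this implies for each group (the
  // real implementation may differ): atomically exchange the group's bits to
  // zero with acquire ordering, then run the callback for every bit that was
  // set:
  //
  //   size_t active = group->flags.exchange(0, std::memory_order_acquire);
  //   while (active) {
  //     int index = Group::IndexOfFirstFlagSet(active);
  //     active &= ~(size_t{1} << index);
  //     group->flag_callbacks[index].Run();
  //   }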

 protected:
  Group* GetAllocListForTesting() const { return alloc_list_head_.get(); }

  Group* GetPartiallyFreeListForTesting() const {
    return partially_free_list_head_;
  }

  // Wraps a single std::atomic<size_t> which is shared by a number of
  // AtomicFlags, with one bit per flag.
  struct BASE_EXPORT Group {
    Group();
    Group(const Group&) = delete;
    Group& operator=(const Group&) = delete;
    ~Group();

    static constexpr int kNumFlags = sizeof(size_t) * 8;

    std::atomic<size_t> flags = {0};
    size_t allocated_flags = 0;
    RepeatingClosure flag_callbacks[kNumFlags];
    raw_ptr<Group> prev = nullptr;
    std::unique_ptr<Group> next;
    raw_ptr<Group> partially_free_list_prev = nullptr;
    raw_ptr<Group> partially_free_list_next = nullptr;

    bool IsFull() const;

    bool IsEmpty() const;
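    //
    // A sketch of the intended semantics in terms of |allocated_flags| (the
    // real implementation may differ): IsFull() checks that every bit is
    // allocated, IsEmpty() that none is:
    //
    //   bool IsFull() const { return allocated_flags == ~size_t{0}; }
    //   bool IsEmpty() const { return allocated_flags == 0; }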

    // Returns the index of the first unallocated flag. Must not be called
    // when all flags are allocated.
    int FindFirstUnallocatedFlag() const;

    // Computes the index into |flag_callbacks| based on the number of leading
    // zero bits in |flag|.
    static int IndexOfFirstFlagSet(size_t flag);
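    //
    // A sketch of one way to compute this, using C++20's std::countl_zero
    // (the real implementation may differ):
    //
    //   static int IndexOfFirstFlagSet(size_t flag) {
    //     DCHECK_NE(flag, 0u);
    //     return kNumFlags - std::countl_zero(flag) - 1;
    //   }
    //
    // E.g. with a 64-bit size_t, flag == 0b100 has 61 leading zeros, so the
    // index is 64 - 61 - 1 == 2, matching an AtomicFlag whose flag_bit_ is
    // 1 << 2.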
  };

 private:
  void AddToAllocList(std::unique_ptr<Group> element);

  // This deletes |element|.
  void RemoveFromAllocList(Group* element);

  void AddToPartiallyFreeList(Group* element);

  // This does not delete |element|.
  void RemoveFromPartiallyFreeList(Group* element);

  const scoped_refptr<const AssociatedThreadId> associated_thread_;
  std::unique_ptr<Group> alloc_list_head_;
  raw_ptr<Group> partially_free_list_head_ = nullptr;
};

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base

#endif  // BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_