xref: /aosp_15_r20/external/grpc-grpc/include/grpc/support/sync.h (revision cc02d7e222339f7a4f6ba5f422e6413f4bd931f2)
1 /*
2  *
3  * Copyright 2015 gRPC authors.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 #ifndef GRPC_SUPPORT_SYNC_H
20 #define GRPC_SUPPORT_SYNC_H
21 
22 /* Platform-specific type declarations of gpr_mu and gpr_cv.   */
23 #include <grpc/support/port_platform.h>
24 #include <grpc/support/time.h> /* for gpr_timespec */
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
30 /** Synchronization primitives for GPR.
31 
32    The type  gpr_mu              provides a non-reentrant mutex (lock).
33 
34    The type  gpr_cv              provides a condition variable.
35 
36    The type  gpr_once            provides for one-time initialization.
37 
38    The type gpr_event            provides one-time-setting, reading, and
39                                  waiting of a void*, with memory barriers.
40 
41    The type gpr_refcount         provides an object reference counter,
42                                  with memory barriers suitable to control
43                                  object lifetimes.
44 
45    The type gpr_stats_counter    provides an atomic statistics counter. It
46                                  provides no memory barriers.
47  */
48 
49 #include <grpc/support/sync_generic.h>  // IWYU pragma: export
50 
51 #if defined(GPR_CUSTOM_SYNC)
52 #include <grpc/support/sync_custom.h>  // IWYU pragma: export
53 #elif defined(GPR_ABSEIL_SYNC)
54 #include <grpc/support/sync_abseil.h>  // IWYU pragma: export
55 #elif defined(GPR_POSIX_SYNC)
56 #include <grpc/support/sync_posix.h>  // IWYU pragma: export
57 #elif defined(GPR_WINDOWS)
58 #include <grpc/support/sync_windows.h>  // IWYU pragma: export
59 #else
60 #error Unable to determine platform for sync
61 #endif
62 
63 /** --- Mutex interface ---
64 
65    At most one thread may hold an exclusive lock on a mutex at any given time.
66    Actions taken by a thread that holds a mutex exclusively happen after
67    actions taken by all previous holders of the mutex.  Variables of type
68    gpr_mu are uninitialized when first declared.  */
69 
70 /** Initialize *mu.  Requires:  *mu uninitialized.  */
71 GPRAPI void gpr_mu_init(gpr_mu* mu);
72 
73 /** Cause *mu no longer to be initialized, freeing any memory in use.  Requires:
74  *mu initialized; no other concurrent operation on *mu.  */
75 GPRAPI void gpr_mu_destroy(gpr_mu* mu);
76 
77 /** Wait until no thread has a lock on *mu, cause the calling thread to own an
78    exclusive lock on *mu, then return.  May block indefinitely or crash if the
79    calling thread has a lock on *mu.  Requires:  *mu initialized.  */
80 GPRAPI void gpr_mu_lock(gpr_mu* mu);
81 
82 /** Release an exclusive lock on *mu held by the calling thread.  Requires:  *mu
83    initialized; the calling thread holds an exclusive lock on *mu.  */
84 GPRAPI void gpr_mu_unlock(gpr_mu* mu);
85 
/** Without blocking, attempt to acquire an exclusive lock on *mu for the
   calling thread, then return non-zero iff success.  Fails if any thread
   holds the lock; succeeds with high probability if no thread holds the
   lock.  Requires:  *mu initialized.  */
90 GPRAPI int gpr_mu_trylock(gpr_mu* mu);
91 
92 /** --- Condition variable interface ---
93 
94    A while-loop should be used with gpr_cv_wait() when waiting for conditions
95    to become true.  See the example below.  Variables of type gpr_cv are
96    uninitialized when first declared.  */
97 
98 /** Initialize *cv.  Requires:  *cv uninitialized.  */
99 GPRAPI void gpr_cv_init(gpr_cv* cv);
100 
101 /** Cause *cv no longer to be initialized, freeing any memory in use.  Requires:
102  *cv initialized; no other concurrent operation on *cv.*/
103 GPRAPI void gpr_cv_destroy(gpr_cv* cv);
104 
105 /** Atomically release *mu and wait on *cv.  When the calling thread is woken
106    from *cv or the deadline abs_deadline is exceeded, execute gpr_mu_lock(mu)
107    and return whether the deadline was exceeded.  Use
108    abs_deadline==gpr_inf_future for no deadline.  abs_deadline can be either
109    an absolute deadline, or a GPR_TIMESPAN.  May return even when not
110    woken explicitly.  Requires:  *mu and *cv initialized; the calling thread
111    holds an exclusive lock on *mu.  */
112 GPRAPI int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline);
113 
114 /** If any threads are waiting on *cv, wake at least one.
115    Clients may treat this as an optimization of gpr_cv_broadcast()
116    for use in the case where waking more than one waiter is not useful.
117    Requires:  *cv initialized.  */
118 GPRAPI void gpr_cv_signal(gpr_cv* cv);
119 
120 /** Wake all threads waiting on *cv.  Requires:  *cv initialized.  */
121 GPRAPI void gpr_cv_broadcast(gpr_cv* cv);
122 
123 /** --- One-time initialization ---
124 
125    gpr_once must be declared with static storage class, and initialized with
126    GPR_ONCE_INIT.  e.g.,
127      static gpr_once once_var = GPR_ONCE_INIT;     */
128 
129 /** Ensure that (*init_function)() has been called exactly once (for the
130    specified gpr_once instance) and then return.
131    If multiple threads call gpr_once() on the same gpr_once instance, one of
132    them will call (*init_function)(), and the others will block until that call
133    finishes.*/
134 GPRAPI void gpr_once_init(gpr_once* once, void (*init_function)(void));
135 
136 /** --- One-time event notification ---
137 
138   These operations act on a gpr_event, which should be initialized with
139   gpr_ev_init(), or with GPR_EVENT_INIT if static, e.g.,
140        static gpr_event event_var = GPR_EVENT_INIT;
141   It requires no destruction.  */
142 
143 /** Initialize *ev. */
144 GPRAPI void gpr_event_init(gpr_event* ev);
145 
146 /** Set *ev so that gpr_event_get() and gpr_event_wait() will return value.
147    Requires:  *ev initialized; value != NULL; no prior or concurrent calls to
148    gpr_event_set(ev, ...) since initialization.  */
149 GPRAPI void gpr_event_set(gpr_event* ev, void* value);
150 
151 /** Return the value set by gpr_event_set(ev, ...), or NULL if no such call has
152    completed.  If the result is non-NULL, all operations that occurred prior to
153    the gpr_event_set(ev, ...) set will be visible after this call returns.
154    Requires:  *ev initialized.  This operation is faster than acquiring a mutex
155    on most platforms.  */
156 GPRAPI void* gpr_event_get(gpr_event* ev);
157 
158 /** Wait until *ev is set by gpr_event_set(ev, ...), or abs_deadline is
159    exceeded, then return gpr_event_get(ev).  Requires:  *ev initialized.  Use
160    abs_deadline==gpr_inf_future for no deadline.  When the event has been
161    signalled before the call, this operation is faster than acquiring a mutex
162    on most platforms.  */
163 GPRAPI void* gpr_event_wait(gpr_event* ev, gpr_timespec abs_deadline);
164 
165 /** --- Reference counting ---
166 
167    These calls act on the type gpr_refcount.  It requires no destruction.  */
168 
169 /** Initialize *r to value n.  */
170 GPRAPI void gpr_ref_init(gpr_refcount* r, int n);
171 
172 /** Increment the reference count *r.  Requires *r initialized. */
173 GPRAPI void gpr_ref(gpr_refcount* r);
174 
175 /** Increment the reference count *r.  Requires *r initialized.
176    Crashes if refcount is zero */
177 GPRAPI void gpr_ref_non_zero(gpr_refcount* r);
178 
179 /** Increment the reference count *r by n.  Requires *r initialized, n > 0. */
180 GPRAPI void gpr_refn(gpr_refcount* r, int n);
181 
/** Decrement the reference count *r and return non-zero iff it has reached
   zero.  Requires *r initialized. */
184 GPRAPI int gpr_unref(gpr_refcount* r);
185 
186 /** Return non-zero iff the reference count of *r is one, and thus is owned
187    by exactly one object. */
188 GPRAPI int gpr_ref_is_unique(gpr_refcount* r);
189 
190 /** --- Stats counters ---
191 
192    These calls act on the integral type gpr_stats_counter.  It requires no
193    destruction.  Static instances may be initialized with
194        gpr_stats_counter c = GPR_STATS_INIT;
195    Beware:  These operations do not imply memory barriers.  Do not use them to
196    synchronize other events.  */
197 
198 /** Initialize *c to the value n. */
199 GPRAPI void gpr_stats_init(gpr_stats_counter* c, intptr_t n);
200 
201 /** *c += inc.  Requires: *c initialized. */
202 GPRAPI void gpr_stats_inc(gpr_stats_counter* c, intptr_t inc);
203 
204 /** Return *c.  Requires: *c initialized. */
205 GPRAPI intptr_t gpr_stats_read(const gpr_stats_counter* c);
206 
207 /** ==================Example use of interface===================
208    A producer-consumer queue of up to N integers,
209    illustrating the use of the calls in this interface. */
210 #if 0
211 
212 #define N 4
213 
   /* A bounded FIFO of up to N ints, safe for concurrent producers and
      consumers.  Initialize with queue_init() before use. */
   typedef struct queue {
     gpr_cv non_empty;  /* Signalled when length becomes non-zero. */
     gpr_cv non_full;   /* Signalled when length becomes non-N. */
     gpr_mu mu;         /* Protects all fields below.
                            (That is, except during initialization or
                            destruction, the fields below should be accessed
                            only by a thread that holds mu.) */
     int head;           /* Index of head of queue 0..N-1. */
     int length;         /* Number of valid elements in queue 0..N. */
     int elem[N];        /* elem[head .. head+length-1] are queue elements. */
   } queue;
225 
226    /* Initialize *q. */
227    void queue_init(queue *q) {
228      gpr_mu_init(&q->mu);
229      gpr_cv_init(&q->non_empty);
230      gpr_cv_init(&q->non_full);
231      q->head = 0;
232      q->length = 0;
233    }
234 
235    /* Free storage associated with *q. */
236    void queue_destroy(queue *q) {
237      gpr_mu_destroy(&q->mu);
238      gpr_cv_destroy(&q->non_empty);
239      gpr_cv_destroy(&q->non_full);
240    }
241 
   /* Wait until there is room in *q, then append x to *q. */
   void queue_append(queue *q, int x) {
     gpr_mu_lock(&q->mu);
     /* To wait for a predicate without a deadline, loop on the negation of the
        predicate, and use gpr_cv_wait(..., gpr_inf_future) inside the loop
        to release the lock, wait, and reacquire on each iteration.  Code that
        makes the condition true should use gpr_cv_broadcast() on the
        corresponding condition variable.  The predicate must be on state
        protected by the lock.  */
     while (q->length == N) {
       gpr_cv_wait(&q->non_full, &q->mu, gpr_inf_future);
     }
     if (q->length == 0) {  /* Wake threads blocked in queue_remove(). */
       /* It's normal to use gpr_cv_broadcast() or gpr_cv_signal() while
          holding the lock. */
       gpr_cv_broadcast(&q->non_empty);
     }
     q->elem[(q->head + q->length) % N] = x;
     q->length++;
     gpr_mu_unlock(&q->mu);
   }
263 
264    /* If it can be done without blocking, append x to *q and return non-zero.
265       Otherwise return 0. */
266    int queue_try_append(queue *q, int x) {
267      int result = 0;
268      if (gpr_mu_trylock(&q->mu)) {
269        if (q->length != N) {
270          if (q->length == 0) {  /* Wake threads blocked in queue_remove(). */
271            gpr_cv_broadcast(&q->non_empty);
272          }
273          q->elem[(q->head + q->length) % N] = x;
274          q->length++;
275          result = 1;
276        }
277        gpr_mu_unlock(&q->mu);
278      }
279      return result;
280    }
281 
282    /* Wait until the *q is non-empty or deadline abs_deadline passes.  If the
283       queue is non-empty, remove its head entry, place it in *head, and return
284       non-zero.  Otherwise return 0.  */
285    int queue_remove(queue *q, int *head, gpr_timespec abs_deadline) {
286      int result = 0;
287      gpr_mu_lock(&q->mu);
288      /* To wait for a predicate with a deadline, loop on the negation of the
289         predicate or until gpr_cv_wait() returns true.  Code that makes
290         the condition true should use gpr_cv_broadcast() on the corresponding
291         condition variable.  The predicate must be on state protected by the
292         lock. */
293      while (q->length == 0 &&
294             !gpr_cv_wait(&q->non_empty, &q->mu, abs_deadline)) {
295      }
296      if (q->length != 0) {    /* Queue is non-empty. */
297        result = 1;
298        if (q->length == N) {  /* Wake threads blocked in queue_append(). */
299          gpr_cv_broadcast(&q->non_full);
300        }
301        *head = q->elem[q->head];
302        q->head = (q->head + 1) % N;
303        q->length--;
304      } /* else deadline exceeded */
305      gpr_mu_unlock(&q->mu);
306      return result;
307    }
308 #endif /* 0 */
309 
310 #ifdef __cplusplus
311 }  // extern "C"
312 #endif
313 
314 #endif /* GRPC_SUPPORT_SYNC_H */
315