1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __LINUX_SEQLOCK_H
3 #define __LINUX_SEQLOCK_H
4 
5 /*
6  * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
7  * lockless readers (read-only retry loops), and no writer starvation.
8  *
9  * See Documentation/locking/seqlock.rst
10  *
11  * Copyrights:
12  * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
13  * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
14  */
15 
16 #include <linux/compiler.h>
17 #include <linux/kcsan-checks.h>
18 #include <linux/lockdep.h>
19 #include <linux/mutex.h>
20 #include <linux/preempt.h>
21 #include <linux/seqlock_types.h>
22 #include <linux/spinlock.h>
23 
24 #include <asm/processor.h>
25 
26 /*
27  * The seqlock seqcount_t interface does not prescribe a precise sequence of
28  * read begin/retry/end. For readers, typically there is a call to
29  * read_seqcount_begin() and read_seqcount_retry(); however, there are more
30  * esoteric cases which do not follow this pattern.
31  *
32  * As a consequence, we take the following best-effort approach for raw usage
33  * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
34  * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
35  * atomics; if there is a matching read_seqcount_retry() call, no following
36  * memory operations are considered atomic. Usage of the seqlock_t interface
37  * is not affected.
38  */
39 #define KCSAN_SEQLOCK_REGION_MAX 1000
40 
41 static inline void __seqcount_init(seqcount_t *s, const char *name,
42 					  struct lock_class_key *key)
43 {
44 	/*
45 	 * Make sure we are not reinitializing a held lock:
46 	 */
47 	lockdep_init_map(&s->dep_map, name, key, 0);
48 	s->sequence = 0;
49 }
50 
51 #ifdef CONFIG_DEBUG_LOCK_ALLOC
52 
53 # define SEQCOUNT_DEP_MAP_INIT(lockname)				\
54 		.dep_map = { .name = #lockname }
55 
56 /**
57  * seqcount_init() - runtime initializer for seqcount_t
58  * @s: Pointer to the seqcount_t instance
59  */
60 # define seqcount_init(s)						\
61 	do {								\
62 		static struct lock_class_key __key;			\
63 		__seqcount_init((s), #s, &__key);			\
64 	} while (0)
65 
66 static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
67 {
68 	seqcount_t *l = (seqcount_t *)s;
69 	unsigned long flags;
70 
71 	local_irq_save(flags);
72 	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
73 	seqcount_release(&l->dep_map, _RET_IP_);
74 	local_irq_restore(flags);
75 }
76 
77 #else
78 # define SEQCOUNT_DEP_MAP_INIT(lockname)
79 # define seqcount_init(s) __seqcount_init(s, NULL, NULL)
80 # define seqcount_lockdep_reader_access(x)
81 #endif
82 
83 /**
84  * SEQCNT_ZERO() - static initializer for seqcount_t
85  * @name: Name of the seqcount_t instance
86  */
87 #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
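
/*
 * Illustrative sketch (hypothetical names, not part of this header): a plain
 * seqcount_t is initialized statically with SEQCNT_ZERO() or at runtime with
 * seqcount_init()::
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 *	struct foo {
 *		seqcount_t	seq;
 *	};
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		seqcount_init(&f->seq);
 *	}
 */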
88 
89 /*
90  * Sequence counters with associated locks (seqcount_LOCKNAME_t)
91  *
92  * A sequence counter which associates the lock used for writer
93  * serialization at initialization time. This enables lockdep to validate
94  * that the write side critical section is properly serialized.
95  *
96  * For associated locks which do not implicitly disable preemption,
97  * preemption protection is enforced in the write side function.
98  *
99  * Lockdep is never used in any of the raw write variants.
100  *
101  * See Documentation/locking/seqlock.rst
102  */
103 
104 /*
105  * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
106  * @seqcount:	The real sequence counter
107  * @lock:	Pointer to the associated lock
108  *
109  * A plain sequence counter with external writer synchronization by
110  * LOCKNAME @lock. The lock is associated to the sequence counter in the
111  * static initializer or init function. This enables lockdep to validate
112  * that the write side critical section is properly serialized.
113  *
114  * LOCKNAME:	raw_spinlock, spinlock, rwlock or mutex
115  */
116 
117 /*
118  * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
119  * @s:		Pointer to the seqcount_LOCKNAME_t instance
120  * @lock:	Pointer to the associated lock
121  */
122 
123 #define seqcount_LOCKNAME_init(s, _lock, lockname)			\
124 	do {								\
125 		seqcount_##lockname##_t *____s = (s);			\
126 		seqcount_init(&____s->seqcount);			\
127 		__SEQ_LOCK(____s->lock = (_lock));			\
128 	} while (0)
129 
130 #define seqcount_raw_spinlock_init(s, lock)	seqcount_LOCKNAME_init(s, lock, raw_spinlock)
131 #define seqcount_spinlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, spinlock)
132 #define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock)
133 #define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex)
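
/*
 * Illustrative sketch (hypothetical "struct my_data"): associating a sequence
 * counter with its serializing spinlock at runtime. The other
 * seqcount_LOCKNAME_init() variants above follow the same pattern with their
 * respective lock types::
 *
 *	struct my_data {
 *		spinlock_t		lock;
 *		seqcount_spinlock_t	seq;
 *	};
 *
 *	static void my_data_init(struct my_data *d)
 *	{
 *		spin_lock_init(&d->lock);
 *		seqcount_spinlock_init(&d->seq, &d->lock);
 *	}
 */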
134 
135 /*
136  * SEQCOUNT_LOCKNAME()	- Instantiate seqcount_LOCKNAME_t and helpers
137  * seqprop_LOCKNAME_*()	- Property accessors for seqcount_LOCKNAME_t
138  *
139  * @lockname:		"LOCKNAME" part of seqcount_LOCKNAME_t
140  * @locktype:		LOCKNAME canonical C data type
141  * @preemptible:	preemptibility of above locktype
142  * @lockbase:		prefix for associated lock/unlock
143  */
144 #define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase)	\
145 static __always_inline seqcount_t *					\
146 __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s)			\
147 {									\
148 	return &s->seqcount;						\
149 }									\
150 									\
151 static __always_inline const seqcount_t *				\
152 __seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s)	\
153 {									\
154 	return &s->seqcount;						\
155 }									\
156 									\
157 static __always_inline unsigned						\
158 __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)	\
159 {									\
160 	unsigned seq = smp_load_acquire(&s->seqcount.sequence);		\
161 									\
162 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
163 		return seq;						\
164 									\
165 	if (preemptible && unlikely(seq & 1)) {				\
166 		__SEQ_LOCK(lockbase##_lock(s->lock));			\
167 		__SEQ_LOCK(lockbase##_unlock(s->lock));			\
168 									\
169 		/*							\
170 		 * Re-read the sequence counter since the (possibly	\
171 		 * preempted) writer made progress.			\
172 		 */							\
173 		seq = smp_load_acquire(&s->seqcount.sequence);		\
174 	}								\
175 									\
176 	return seq;							\
177 }									\
178 									\
179 static __always_inline bool						\
180 __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s)	\
181 {									\
182 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
183 		return preemptible;					\
184 									\
185 	/* PREEMPT_RT relies on the above LOCK+UNLOCK */		\
186 	return false;							\
187 }									\
188 									\
189 static __always_inline void						\
190 __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s)		\
191 {									\
192 	__SEQ_LOCK(lockdep_assert_held(s->lock));			\
193 }
194 
195 /*
196  * __seqprop() for seqcount_t
197  */
198 
199 static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
200 {
201 	return s;
202 }
203 
204 static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
205 {
206 	return s;
207 }
208 
209 static inline unsigned __seqprop_sequence(const seqcount_t *s)
210 {
211 	return smp_load_acquire(&s->sequence);
212 }
213 
214 static inline bool __seqprop_preemptible(const seqcount_t *s)
215 {
216 	return false;
217 }
218 
219 static inline void __seqprop_assert(const seqcount_t *s)
220 {
221 	lockdep_assert_preemption_disabled();
222 }
223 
224 #define __SEQ_RT	IS_ENABLED(CONFIG_PREEMPT_RT)
225 
226 SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t,  false,    raw_spin)
227 SEQCOUNT_LOCKNAME(spinlock,     spinlock_t,      __SEQ_RT, spin)
228 SEQCOUNT_LOCKNAME(rwlock,       rwlock_t,        __SEQ_RT, read)
229 SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     mutex)
230 #undef SEQCOUNT_LOCKNAME
231 
232 /*
233  * SEQCOUNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
234  * @name:	Name of the seqcount_LOCKNAME_t instance
235  * @lock:	Pointer to the associated LOCKNAME
236  */
237 
238 #define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) {			\
239 	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
240 	__SEQ_LOCK(.lock	= (assoc_lock))				\
241 }
242 
243 #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
244 #define SEQCNT_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
245 #define SEQCNT_RWLOCK_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
246 #define SEQCNT_MUTEX_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
247 #define SEQCNT_WW_MUTEX_ZERO(name, lock) 	SEQCOUNT_LOCKNAME_ZERO(name, lock)
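
/*
 * Illustrative sketch (hypothetical names): static initialization of an
 * associated-lock sequence counter and its lock::
 *
 *	static spinlock_t foo_lock = __SPIN_LOCK_UNLOCKED(foo_lock);
 *	static seqcount_spinlock_t foo_seq = SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);
 */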
248 
249 #define __seqprop_case(s, lockname, prop)				\
250 	seqcount_##lockname##_t: __seqprop_##lockname##_##prop
251 
252 #define __seqprop(s, prop) _Generic(*(s),				\
253 	seqcount_t:		__seqprop_##prop,			\
254 	__seqprop_case((s),	raw_spinlock,	prop),			\
255 	__seqprop_case((s),	spinlock,	prop),			\
256 	__seqprop_case((s),	rwlock,		prop),			\
257 	__seqprop_case((s),	mutex,		prop))
258 
259 #define seqprop_ptr(s)			__seqprop(s, ptr)(s)
260 #define seqprop_const_ptr(s)		__seqprop(s, const_ptr)(s)
261 #define seqprop_sequence(s)		__seqprop(s, sequence)(s)
262 #define seqprop_preemptible(s)		__seqprop(s, preemptible)(s)
263 #define seqprop_assert(s)		__seqprop(s, assert)(s)
264 
265 /**
266  * __read_seqcount_begin() - begin a seqcount_t read section
267  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
268  *
269  * Return: count to be passed to read_seqcount_retry()
270  */
271 #define __read_seqcount_begin(s)					\
272 ({									\
273 	unsigned __seq;							\
274 									\
275 	while (unlikely((__seq = seqprop_sequence(s)) & 1))		\
276 		cpu_relax();						\
277 									\
278 	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
279 	__seq;								\
280 })
281 
282 /**
283  * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
284  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
285  *
286  * Return: count to be passed to read_seqcount_retry()
287  */
288 #define raw_read_seqcount_begin(s) __read_seqcount_begin(s)
289 
290 /**
291  * read_seqcount_begin() - begin a seqcount_t read critical section
292  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
293  *
294  * Return: count to be passed to read_seqcount_retry()
295  */
296 #define read_seqcount_begin(s)						\
297 ({									\
298 	seqcount_lockdep_reader_access(seqprop_const_ptr(s));		\
299 	raw_read_seqcount_begin(s);					\
300 })
301 
302 /**
303  * raw_read_seqcount() - read the raw seqcount_t counter value
304  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
305  *
306  * raw_read_seqcount opens a read critical section of the given
307  * seqcount_t, without any lockdep checking, and without checking or
308  * masking the sequence counter LSB. Calling code is responsible for
309  * handling that.
310  *
311  * Return: count to be passed to read_seqcount_retry()
312  */
313 #define raw_read_seqcount(s)						\
314 ({									\
315 	unsigned __seq = seqprop_sequence(s);				\
316 									\
317 	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
318 	__seq;								\
319 })
320 
321 /**
322  * raw_seqcount_try_begin() - begin a seqcount_t read critical section
323  *                            w/o lockdep and w/o counter stabilization
324  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
325  * @start: count to be passed to read_seqcount_retry()
326  *
327  * Similar to raw_seqcount_begin(), except it enables eliding the critical
328  * section entirely if odd, instead of doing the speculation knowing it will
329  * fail.
330  *
331  * Useful when counter stabilization is more or less equivalent to taking
332  * the lock and there is a slowpath that does that.
333  *
334  * If true, start will be set to the (even) sequence count read.
335  *
336  * Return: true when a read critical section is started.
337  */
338 #define raw_seqcount_try_begin(s, start)				\
339 ({									\
340 	start = raw_read_seqcount(s);					\
341 	!(start & 1);							\
342 })
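
/*
 * Illustrative sketch (hypothetical "foo" object and lock): an odd counter,
 * i.e. a write in progress, skips the speculative read entirely and falls
 * back to a locking slowpath::
 *
 *	unsigned seq;
 *
 *	if (raw_seqcount_try_begin(&foo->seq, seq)) {
 *		... speculative, read-only access to foo ...
 *		if (!read_seqcount_retry(&foo->seq, seq))
 *			return;
 *	}
 *
 *	spin_lock(&foo->lock);
 *	... stable access to foo under the lock ...
 *	spin_unlock(&foo->lock);
 */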
343 
344 /**
345  * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
346  *                        lockdep and w/o counter stabilization
347  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
348  *
349  * raw_seqcount_begin opens a read critical section of the given
350  * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
351  * for the count to stabilize. If a writer is active when it begins, it
352  * will fail the read_seqcount_retry() at the end of the read critical
353  * section instead of stabilizing at the beginning of it.
354  *
355  * Use this only in special kernel hot paths where the read section is
356  * small and has a high probability of success through other external
357  * means. It will save a single branching instruction.
358  *
359  * Return: count to be passed to read_seqcount_retry()
360  */
361 #define raw_seqcount_begin(s)						\
362 ({									\
363 	/*								\
364 	 * If the counter is odd, let read_seqcount_retry() fail	\
365 	 * by decrementing the counter.					\
366 	 */								\
367 	raw_read_seqcount(s) & ~1;					\
368 })
369 
370 /**
371  * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
372  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
373  * @start: count, from read_seqcount_begin()
374  *
375  * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
376  * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
377  * provided before actually loading any of the variables that are to be
378  * protected in this critical section.
379  *
380  * Use carefully, only in critical code, and comment how the barrier is
381  * provided.
382  *
383  * Return: true if a read section retry is required, else false
384  */
385 #define __read_seqcount_retry(s, start)					\
386 	do___read_seqcount_retry(seqprop_const_ptr(s), start)
387 
388 static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
389 {
390 	kcsan_atomic_next(0);
391 	return unlikely(READ_ONCE(s->sequence) != start);
392 }
393 
394 /**
395  * read_seqcount_retry() - end a seqcount_t read critical section
396  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
397  * @start: count, from read_seqcount_begin()
398  *
399  * read_seqcount_retry closes the read critical section of given
400  * seqcount_t.  If the critical section was invalid, it must be ignored
401  * (and typically retried).
402  *
403  * Return: true if a read section retry is required, else false
404  */
405 #define read_seqcount_retry(s, start)					\
406 	do_read_seqcount_retry(seqprop_const_ptr(s), start)
407 
408 static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
409 {
410 	smp_rmb();
411 	return do___read_seqcount_retry(s, start);
412 }
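
/*
 * Illustrative sketch (hypothetical "foo_seq" and data) of the canonical
 * lockless read loop; it re-runs until a consistent snapshot is observed::
 *
 *	unsigned seq;
 *
 *	do {
 *		seq = read_seqcount_begin(&foo_seq);
 *		... read/copy the seqcount-protected data ...
 *	} while (read_seqcount_retry(&foo_seq, seq));
 */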
413 
414 /**
415  * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
416  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
417  *
418  * Context: check write_seqcount_begin()
419  */
420 #define raw_write_seqcount_begin(s)					\
421 do {									\
422 	if (seqprop_preemptible(s))					\
423 		preempt_disable();					\
424 									\
425 	do_raw_write_seqcount_begin(seqprop_ptr(s));			\
426 } while (0)
427 
428 static inline void do_raw_write_seqcount_begin(seqcount_t *s)
429 {
430 	kcsan_nestable_atomic_begin();
431 	s->sequence++;
432 	smp_wmb();
433 }
434 
435 /**
436  * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
437  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
438  *
439  * Context: check write_seqcount_end()
440  */
441 #define raw_write_seqcount_end(s)					\
442 do {									\
443 	do_raw_write_seqcount_end(seqprop_ptr(s));			\
444 									\
445 	if (seqprop_preemptible(s))					\
446 		preempt_enable();					\
447 } while (0)
448 
449 static inline void do_raw_write_seqcount_end(seqcount_t *s)
450 {
451 	smp_wmb();
452 	s->sequence++;
453 	kcsan_nestable_atomic_end();
454 }
455 
456 /**
457  * write_seqcount_begin_nested() - start a seqcount_t write section with
458  *                                 custom lockdep nesting level
459  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
460  * @subclass: lockdep nesting level
461  *
462  * See Documentation/locking/lockdep-design.rst
463  * Context: check write_seqcount_begin()
464  */
465 #define write_seqcount_begin_nested(s, subclass)			\
466 do {									\
467 	seqprop_assert(s);						\
468 									\
469 	if (seqprop_preemptible(s))					\
470 		preempt_disable();					\
471 									\
472 	do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);	\
473 } while (0)
474 
475 static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
476 {
477 	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
478 	do_raw_write_seqcount_begin(s);
479 }
480 
481 /**
482  * write_seqcount_begin() - start a seqcount_t write side critical section
483  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
484  *
485  * Context: sequence counter write side sections must be serialized and
486  * non-preemptible. Preemption will be automatically disabled if and
487  * only if the seqcount write serialization lock is associated, and
488  * preemptible.  If readers can be invoked from hardirq or softirq
489  * context, interrupts or bottom halves must be respectively disabled.
490  */
491 #define write_seqcount_begin(s)						\
492 do {									\
493 	seqprop_assert(s);						\
494 									\
495 	if (seqprop_preemptible(s))					\
496 		preempt_disable();					\
497 									\
498 	do_write_seqcount_begin(seqprop_ptr(s));			\
499 } while (0)
500 
501 static inline void do_write_seqcount_begin(seqcount_t *s)
502 {
503 	do_write_seqcount_begin_nested(s, 0);
504 }
505 
506 /**
507  * write_seqcount_end() - end a seqcount_t write side critical section
508  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
509  *
510  * Context: Preemption will be automatically re-enabled if and only if
511  * the seqcount write serialization lock is associated, and preemptible.
512  */
513 #define write_seqcount_end(s)						\
514 do {									\
515 	do_write_seqcount_end(seqprop_ptr(s));				\
516 									\
517 	if (seqprop_preemptible(s))					\
518 		preempt_enable();					\
519 } while (0)
520 
521 static inline void do_write_seqcount_end(seqcount_t *s)
522 {
523 	seqcount_release(&s->dep_map, _RET_IP_);
524 	do_raw_write_seqcount_end(s);
525 }
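
/*
 * Illustrative sketch (hypothetical "foo_lock"/"foo_seq") of the matching
 * write side: the associated lock serializes writers, while the seqcount
 * section makes concurrent lockless readers retry::
 *
 *	spin_lock(&foo_lock);
 *	write_seqcount_begin(&foo_seq);
 *	... update the seqcount-protected data ...
 *	write_seqcount_end(&foo_seq);
 *	spin_unlock(&foo_lock);
 */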
526 
527 /**
528  * raw_write_seqcount_barrier() - do a seqcount_t write barrier
529  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
530  *
531  * This can be used to provide an ordering guarantee instead of the usual
532  * consistency guarantee. It is one wmb cheaper, because it can collapse
533  * the two back-to-back wmb()s.
534  *
535  * Note that writes surrounding the barrier should be declared atomic (e.g.
536  * via WRITE_ONCE): a) to ensure the writes become visible to other threads
537  * atomically, avoiding compiler optimizations; b) to document which writes are
538  * meant to propagate to the reader critical section. This is necessary because
539  * neither writes before nor after the barrier are enclosed in a seq-writer
540  * critical section that would ensure readers are aware of ongoing writes::
541  *
542  *	seqcount_t seq;
543  *	bool X = true, Y = false;
544  *
545  *	void read(void)
546  *	{
547  *		bool x, y;
548  *
549  *		do {
550  *			int s = read_seqcount_begin(&seq);
551  *
552  *			x = X; y = Y;
553  *
554  *		} while (read_seqcount_retry(&seq, s));
555  *
556  *		BUG_ON(!x && !y);
557  *      }
558  *
559  *      void write(void)
560  *      {
561  *		WRITE_ONCE(Y, true);
562  *
563  *		raw_write_seqcount_barrier(&seq);
564  *
565  *		WRITE_ONCE(X, false);
566  *      }
567  */
568 #define raw_write_seqcount_barrier(s)					\
569 	do_raw_write_seqcount_barrier(seqprop_ptr(s))
570 
571 static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
572 {
573 	kcsan_nestable_atomic_begin();
574 	s->sequence++;
575 	smp_wmb();
576 	s->sequence++;
577 	kcsan_nestable_atomic_end();
578 }
579 
580 /**
581  * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
582  *                               side operations
583  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
584  *
585  * After write_seqcount_invalidate, no seqcount_t read side operations
586  * will complete successfully and see data older than this.
587  */
588 #define write_seqcount_invalidate(s)					\
589 	do_write_seqcount_invalidate(seqprop_ptr(s))
590 
591 static inline void do_write_seqcount_invalidate(seqcount_t *s)
592 {
593 	smp_wmb();
594 	kcsan_nestable_atomic_begin();
595 	s->sequence += 2;
596 	kcsan_nestable_atomic_end();
597 }
598 
599 /*
600  * Latch sequence counters (seqcount_latch_t)
601  *
602  * A sequence counter variant where the counter even/odd value is used to
603  * switch between two copies of protected data. This allows the read path,
604  * typically NMIs, to safely interrupt the write side critical section.
605  *
606  * As the write sections are fully preemptible, no special handling for
607  * PREEMPT_RT is needed.
608  */
609 typedef struct {
610 	seqcount_t seqcount;
611 } seqcount_latch_t;
612 
613 /**
614  * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
615  * @seq_name: Name of the seqcount_latch_t instance
616  */
617 #define SEQCNT_LATCH_ZERO(seq_name) {					\
618 	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
619 }
620 
621 /**
622  * seqcount_latch_init() - runtime initializer for seqcount_latch_t
623  * @s: Pointer to the seqcount_latch_t instance
624  */
625 #define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
626 
627 /**
628  * raw_read_seqcount_latch() - pick even/odd latch data copy
629  * @s: Pointer to seqcount_latch_t
630  *
631  * See raw_write_seqcount_latch() for details and a full reader/writer
632  * usage example.
633  *
634  * Return: sequence counter raw value. Use the lowest bit as an index for
635  * picking which data copy to read. The full counter must then be checked
636  * with raw_read_seqcount_latch_retry().
637  */
638 static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
639 {
640 	/*
641 	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
642 	 * Due to the dependent load, a full smp_rmb() is not needed.
643 	 */
644 	return READ_ONCE(s->seqcount.sequence);
645 }
646 
647 /**
648  * read_seqcount_latch() - pick even/odd latch data copy
649  * @s: Pointer to seqcount_latch_t
650  *
651  * See write_seqcount_latch() for details and a full reader/writer usage
652  * example.
653  *
654  * Return: sequence counter raw value. Use the lowest bit as an index for
655  * picking which data copy to read. The full counter must then be checked
656  * with read_seqcount_latch_retry().
657  */
658 static __always_inline unsigned read_seqcount_latch(const seqcount_latch_t *s)
659 {
660 	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
661 	return raw_read_seqcount_latch(s);
662 }
663 
664 /**
665  * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
666  * @s:		Pointer to seqcount_latch_t
667  * @start:	count, from raw_read_seqcount_latch()
668  *
669  * Return: true if a read section retry is required, else false
670  */
671 static __always_inline int
672 raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
673 {
674 	smp_rmb();
675 	return unlikely(READ_ONCE(s->seqcount.sequence) != start);
676 }
677 
678 /**
679  * read_seqcount_latch_retry() - end a seqcount_latch_t read section
680  * @s:		Pointer to seqcount_latch_t
681  * @start:	count, from read_seqcount_latch()
682  *
683  * Return: true if a read section retry is required, else false
684  */
685 static __always_inline int
686 read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
687 {
688 	kcsan_atomic_next(0);
689 	return raw_read_seqcount_latch_retry(s, start);
690 }
691 
692 /**
693  * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
694  * @s: Pointer to seqcount_latch_t
695  */
696 static __always_inline void raw_write_seqcount_latch(seqcount_latch_t *s)
697 {
698 	smp_wmb();	/* prior stores before incrementing "sequence" */
699 	s->seqcount.sequence++;
700 	smp_wmb();      /* increment "sequence" before following stores */
701 }
702 
703 /**
704  * write_seqcount_latch_begin() - redirect latch readers to odd copy
705  * @s: Pointer to seqcount_latch_t
706  *
707  * The latch technique is a multiversion concurrency control method that allows
708  * queries during non-atomic modifications. If you can guarantee queries never
709  * interrupt the modification -- e.g. the concurrency is strictly between CPUs
710  * -- you most likely do not need this.
711  *
712  * Where the traditional RCU/lockless data structures rely on atomic
713  * modifications to ensure queries observe either the old or the new state the
714  * latch allows the same for non-atomic updates. The trade-off is doubling the
715  * cost of storage; we have to maintain two copies of the entire data
716  * structure.
717  *
718  * Very simply put: we first modify one copy and then the other. This ensures
719  * there is always one copy in a stable state, ready to give us an answer.
720  *
721  * The basic form is a data structure like::
722  *
723  *	struct latch_struct {
724  *		seqcount_latch_t	seq;
725  *		struct data_struct	data[2];
726  *	};
727  *
728  * Where a modification, which is assumed to be externally serialized, does the
729  * following::
730  *
731  *	void latch_modify(struct latch_struct *latch, ...)
732  *	{
733  *		write_seqcount_latch_begin(&latch->seq);
734  *		modify(latch->data[0], ...);
735  *		write_seqcount_latch(&latch->seq);
736  *		modify(latch->data[1], ...);
737  *		write_seqcount_latch_end(&latch->seq);
738  *	}
739  *
740  * The query will have a form like::
741  *
742  *	struct entry *latch_query(struct latch_struct *latch, ...)
743  *	{
744  *		struct entry *entry;
745  *		unsigned seq, idx;
746  *
747  *		do {
748  *			seq = read_seqcount_latch(&latch->seq);
749  *
750  *			idx = seq & 0x01;
751  *			entry = data_query(latch->data[idx], ...);
752  *
753  *		// This includes needed smp_rmb()
754  *		} while (read_seqcount_latch_retry(&latch->seq, seq));
755  *
756  *		return entry;
757  *	}
758  *
759  * So during the modification, queries are first redirected to data[1]. Then we
760  * modify data[0]. When that is complete, we redirect queries back to data[0]
761  * and we can modify data[1].
762  *
763  * NOTE:
764  *
765  *	The non-requirement for atomic modifications does _NOT_ include
766  *	the publishing of new entries in the case where data is a dynamic
767  *	data structure.
768  *
769  *	An iteration might start in data[0] and get suspended long enough
770  *	to miss an entire modification sequence, once it resumes it might
771  *	observe the new entry.
772  *
773  * NOTE2:
774  *
775  *	When data is a dynamic data structure, one should use regular RCU
776  *	patterns to manage the lifetimes of the objects within.
777  */
778 static __always_inline void write_seqcount_latch_begin(seqcount_latch_t *s)
779 {
780 	kcsan_nestable_atomic_begin();
781 	raw_write_seqcount_latch(s);
782 }
783 
784 /**
785  * write_seqcount_latch() - redirect latch readers to even copy
786  * @s: Pointer to seqcount_latch_t
787  */
788 static __always_inline void write_seqcount_latch(seqcount_latch_t *s)
789 {
790 	raw_write_seqcount_latch(s);
791 }
792 
793 /**
794  * write_seqcount_latch_end() - end a seqcount_latch_t write section
795  * @s:		Pointer to seqcount_latch_t
796  *
797  * Marks the end of a seqcount_latch_t writer section, after all copies of the
798  * latch-protected data have been updated.
799  */
800 static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
801 {
802 	kcsan_nestable_atomic_end();
803 }
804 
805 #define __SEQLOCK_UNLOCKED(lockname)					\
806 	{								\
807 		.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
808 		.lock =	__SPIN_LOCK_UNLOCKED(lockname)			\
809 	}
810 
811 /**
812  * seqlock_init() - dynamic initializer for seqlock_t
813  * @sl: Pointer to the seqlock_t instance
814  */
815 #define seqlock_init(sl)						\
816 	do {								\
817 		spin_lock_init(&(sl)->lock);				\
818 		seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);	\
819 	} while (0)
820 
821 /**
822  * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
823  * @sl: Name of the seqlock_t instance
824  */
825 #define DEFINE_SEQLOCK(sl) \
826 		seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
827 
828 /**
829  * read_seqbegin() - start a seqlock_t read side critical section
830  * @sl: Pointer to seqlock_t
831  *
832  * Return: count, to be passed to read_seqretry()
833  */
834 static inline unsigned read_seqbegin(const seqlock_t *sl)
835 {
836 	return read_seqcount_begin(&sl->seqcount);
837 }
838 
839 /**
840  * read_seqretry() - end a seqlock_t read side section
841  * @sl: Pointer to seqlock_t
842  * @start: count, from read_seqbegin()
843  *
844  * read_seqretry closes the read side critical section of given seqlock_t.
845  * If the critical section was invalid, it must be ignored (and typically
846  * retried).
847  *
848  * Return: true if a read section retry is required, else false
849  */
850 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
851 {
852 	return read_seqcount_retry(&sl->seqcount, start);
853 }
854 
855 /*
856  * For all seqlock_t write side functions, use the internal
857  * do_write_seqcount_begin() instead of generic write_seqcount_begin().
858  * This way, no redundant lockdep_assert_held() checks are added.
859  */
860 
861 /**
862  * write_seqlock() - start a seqlock_t write side critical section
863  * @sl: Pointer to seqlock_t
864  *
865  * write_seqlock opens a write side critical section for the given
866  * seqlock_t.  It also implicitly acquires the spinlock_t embedded inside
867  * that sequential lock. All seqlock_t write side sections are thus
868  * automatically serialized and non-preemptible.
869  *
870  * Context: if the seqlock_t read section, or other write side critical
871  * sections, can be invoked from hardirq or softirq contexts, use the
872  * _irqsave or _bh variants of this function instead.
873  */
874 static inline void write_seqlock(seqlock_t *sl)
875 {
876 	spin_lock(&sl->lock);
877 	do_write_seqcount_begin(&sl->seqcount.seqcount);
878 }
879 
880 /**
881  * write_sequnlock() - end a seqlock_t write side critical section
882  * @sl: Pointer to seqlock_t
883  *
884  * write_sequnlock closes the (serialized and non-preemptible) write side
885  * critical section of given seqlock_t.
886  */
887 static inline void write_sequnlock(seqlock_t *sl)
888 {
889 	do_write_seqcount_end(&sl->seqcount.seqcount);
890 	spin_unlock(&sl->lock);
891 }
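
/*
 * Illustrative sketch (hypothetical "foo_seqlock" and data) of basic seqlock_t
 * usage: writers serialize on the embedded spinlock, lockless readers retry
 * on concurrent updates::
 *
 *	static DEFINE_SEQLOCK(foo_seqlock);
 *
 *	// Reader
 *	unsigned seq;
 *	do {
 *		seq = read_seqbegin(&foo_seqlock);
 *		... read/copy the protected data ...
 *	} while (read_seqretry(&foo_seqlock, seq));
 *
 *	// Writer
 *	write_seqlock(&foo_seqlock);
 *	... update the protected data ...
 *	write_sequnlock(&foo_seqlock);
 */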
892 
893 /**
894  * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
895  * @sl: Pointer to seqlock_t
896  *
897  * _bh variant of write_seqlock(). Use only if the read side section, or
898  * other write side sections, can be invoked from softirq contexts.
899  */
900 static inline void write_seqlock_bh(seqlock_t *sl)
901 {
902 	spin_lock_bh(&sl->lock);
903 	do_write_seqcount_begin(&sl->seqcount.seqcount);
904 }
905 
906 /**
907  * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
908  * @sl: Pointer to seqlock_t
909  *
910  * write_sequnlock_bh closes the serialized, non-preemptible, and
911  * softirqs-disabled, seqlock_t write side critical section opened with
912  * write_seqlock_bh().
913  */
914 static inline void write_sequnlock_bh(seqlock_t *sl)
915 {
916 	do_write_seqcount_end(&sl->seqcount.seqcount);
917 	spin_unlock_bh(&sl->lock);
918 }
919 
920 /**
921  * write_seqlock_irq() - start a non-interruptible seqlock_t write section
922  * @sl: Pointer to seqlock_t
923  *
924  * _irq variant of write_seqlock(). Use only if the read side section, or
925  * other write sections, can be invoked from hardirq contexts.
926  */
927 static inline void write_seqlock_irq(seqlock_t *sl)
928 {
929 	spin_lock_irq(&sl->lock);
930 	do_write_seqcount_begin(&sl->seqcount.seqcount);
931 }
932 
933 /**
934  * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
935  * @sl: Pointer to seqlock_t
936  *
937  * write_sequnlock_irq closes the serialized and non-interruptible
938  * seqlock_t write side section opened with write_seqlock_irq().
939  */
940 static inline void write_sequnlock_irq(seqlock_t *sl)
941 {
942 	do_write_seqcount_end(&sl->seqcount.seqcount);
943 	spin_unlock_irq(&sl->lock);
944 }
945 
946 static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
947 {
948 	unsigned long flags;
949 
950 	spin_lock_irqsave(&sl->lock, flags);
951 	do_write_seqcount_begin(&sl->seqcount.seqcount);
952 	return flags;
953 }
954 
955 /**
956  * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
957  *                           section
958  * @lock:  Pointer to seqlock_t
959  * @flags: Stack-allocated storage for saving caller's local interrupt
960  *         state, to be passed to write_sequnlock_irqrestore().
961  *
962  * _irqsave variant of write_seqlock(). Use it only if the read side
963  * section, or other write sections, can be invoked from hardirq context.
964  */
965 #define write_seqlock_irqsave(lock, flags)				\
966 	do { flags = __write_seqlock_irqsave(lock); } while (0)
967 
968 /**
969  * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
970  *                                section
971  * @sl:    Pointer to seqlock_t
972  * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
973  *
974  * write_sequnlock_irqrestore closes the serialized and non-interruptible
975  * seqlock_t write section previously opened with write_seqlock_irqsave().
976  */
977 static inline void
978 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
979 {
980 	do_write_seqcount_end(&sl->seqcount.seqcount);
981 	spin_unlock_irqrestore(&sl->lock, flags);
982 }
983 
984 /**
985  * read_seqlock_excl() - begin a seqlock_t locking reader section
986  * @sl:	Pointer to seqlock_t
987  *
988  * read_seqlock_excl opens a seqlock_t locking reader critical section.  A
989  * locking reader exclusively locks out *both* other writers *and* other
990  * locking readers, but it does not update the embedded sequence number.
991  *
992  * Locking readers act like a normal spin_lock()/spin_unlock().
993  *
994  * Context: if the seqlock_t write section, *or other read sections*, can
995  * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
996  * variant of this function instead.
997  *
998  * The opened read section must be closed with read_sequnlock_excl().
999  */
1000 static inline void read_seqlock_excl(seqlock_t *sl)
1001 {
1002 	spin_lock(&sl->lock);
1003 }
1004 
1005 /**
1006  * read_sequnlock_excl() - end a seqlock_t locking reader critical section
1007  * @sl: Pointer to seqlock_t
1008  */
1009 static inline void read_sequnlock_excl(seqlock_t *sl)
1010 {
1011 	spin_unlock(&sl->lock);
1012 }
1013 
1014 /**
1015  * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
1016  *			    softirqs disabled
1017  * @sl: Pointer to seqlock_t
1018  *
1019  * _bh variant of read_seqlock_excl(). Use this variant only if the
1020  * seqlock_t write side section, *or other read sections*, can be invoked
1021  * from softirq contexts.
1022  */
1023 static inline void read_seqlock_excl_bh(seqlock_t *sl)
1024 {
1025 	spin_lock_bh(&sl->lock);
1026 }
1027 
1028 /**
1029  * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
1030  *			      reader section
1031  * @sl: Pointer to seqlock_t
1032  */
1033 static inline void read_sequnlock_excl_bh(seqlock_t *sl)
1034 {
1035 	spin_unlock_bh(&sl->lock);
1036 }
1037 
1038 /**
1039  * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
1040  *			     reader section
1041  * @sl: Pointer to seqlock_t
1042  *
1043  * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
1044  * write side section, *or other read sections*, can be invoked from a
1045  * hardirq context.
1046  */
1047 static inline void read_seqlock_excl_irq(seqlock_t *sl)
1048 {
1049 	spin_lock_irq(&sl->lock);
1050 }
1051 
1052 /**
1053  * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
1054  *                             locking reader section
1055  * @sl: Pointer to seqlock_t
1056  */
1057 static inline void read_sequnlock_excl_irq(seqlock_t *sl)
1058 {
1059 	spin_unlock_irq(&sl->lock);
1060 }
1061 
1062 static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
1063 {
1064 	unsigned long flags;
1065 
1066 	spin_lock_irqsave(&sl->lock, flags);
1067 	return flags;
1068 }
1069 
1070 /**
1071  * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
1072  *				 locking reader section
1073  * @lock:  Pointer to seqlock_t
1074  * @flags: Stack-allocated storage for saving caller's local interrupt
1075  *         state, to be passed to read_sequnlock_excl_irqrestore().
1076  *
1077  * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
1078  * write side section, *or other read sections*, can be invoked from a
1079  * hardirq context.
1080  */
1081 #define read_seqlock_excl_irqsave(lock, flags)				\
1082 	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
1083 
1084 /**
1085  * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
1086  *				      locking reader section
1087  * @sl:    Pointer to seqlock_t
1088  * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
1089  */
1090 static inline void
1091 read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
1092 {
1093 	spin_unlock_irqrestore(&sl->lock, flags);
1094 }
1095 
1096 /**
1097  * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
1098  * @lock: Pointer to seqlock_t
1099  * @seq : Marker and return parameter. If the passed value is even, the
1100  * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
1101  * If the passed value is odd, the reader will become a *locking* reader
1102  * as in read_seqlock_excl().  In the first call to this function, the
1103  * caller *must* initialize and pass an even value to @seq; this way, a
1104  * lockless read can be optimistically tried first.
1105  *
1106  * read_seqbegin_or_lock is an API designed to optimistically try a normal
1107  * lockless seqlock_t read section first.  If an odd counter is found, the
1108  * lockless read trial has failed, and the next read iteration transforms
1109  * itself into a full seqlock_t locking reader.
1110  *
1111  * This is typically used to avoid seqlock_t lockless readers starvation
1112  * (too many retry loops) in the case of a sharp spike in write side
1113  * activity.
1114  *
1115  * Context: if the seqlock_t write section, *or other read sections*, can
1116  * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
1117  * variant of this function instead.
1118  *
1119  * Check Documentation/locking/seqlock.rst for template example code.
1120  *
1121  * Return: the encountered sequence counter value, through the @seq
1122  * parameter, which is overloaded as a return parameter. This returned
1123  * value must be checked with need_seqretry(). If the read section needs to
1124  * be retried, this returned value must also be passed as the @seq
1125  * parameter of the next read_seqbegin_or_lock() iteration.
1126  */
1127 static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
1128 {
1129 	if (!(*seq & 1))	/* Even */
1130 		*seq = read_seqbegin(lock);
1131 	else			/* Odd */
1132 		read_seqlock_excl(lock);
1133 }
1134 
1135 /**
1136  * need_seqretry() - validate seqlock_t "locking or lockless" read section
1137  * @lock: Pointer to seqlock_t
1138  * @seq: sequence count, from read_seqbegin_or_lock()
1139  *
1140  * Return: true if a read section retry is required, false otherwise
1141  */
1142 static inline int need_seqretry(seqlock_t *lock, int seq)
1143 {
1144 	return !(seq & 1) && read_seqretry(lock, seq);
1145 }
1146 
1147 /**
1148  * done_seqretry() - end seqlock_t "locking or lockless" reader section
1149  * @lock: Pointer to seqlock_t
1150  * @seq: count, from read_seqbegin_or_lock()
1151  *
1152  * done_seqretry finishes the seqlock_t read side critical section started
1153  * with read_seqbegin_or_lock() and validated by need_seqretry().
1154  */
1155 static inline void done_seqretry(seqlock_t *lock, int seq)
1156 {
1157 	if (seq & 1)
1158 		read_sequnlock_excl(lock);
1159 }
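
/*
 * Illustrative sketch of the "lockless or locking" reader pattern, modelled on
 * existing in-tree users ("foo_seqlock" and the read-side body are
 * hypothetical). The first pass is lockless; if it must be retried, the
 * marker is made odd so the next pass becomes a locking reader::
 *
 *	int seq = 0, nextseq = 0;
 *
 *	do {
 *		seq = nextseq;
 *		read_seqbegin_or_lock(&foo_seqlock, &seq);
 *		... read the protected data ...
 *		// On retry, switch to a locking reader.
 *		nextseq = 1;
 *	} while (need_seqretry(&foo_seqlock, seq));
 *	done_seqretry(&foo_seqlock, seq);
 */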
1160 
1161 /**
1162  * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
1163  *                                   a non-interruptible locking reader
1164  * @lock: Pointer to seqlock_t
1165  * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
1166  *
1167  * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
1168  * the seqlock_t write section, *or other read sections*, can be invoked
1169  * from hardirq context.
1170  *
1171  * Note: Interrupts will be disabled only for "locking reader" mode.
1172  *
1173  * Return:
1174  *
1175  *   1. The saved local interrupts state in case of a locking reader, to
1176  *      be passed to done_seqretry_irqrestore().
1177  *
1178  *   2. The encountered sequence counter value, returned through @seq
1179  *      overloaded as a return parameter. Check read_seqbegin_or_lock().
1180  */
1181 static inline unsigned long
1182 read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
1183 {
1184 	unsigned long flags = 0;
1185 
1186 	if (!(*seq & 1))	/* Even */
1187 		*seq = read_seqbegin(lock);
1188 	else			/* Odd */
1189 		read_seqlock_excl_irqsave(lock, flags);
1190 
1191 	return flags;
1192 }
1193 
1194 /**
1195  * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
1196  *				non-interruptible locking reader section
1197  * @lock:  Pointer to seqlock_t
1198  * @seq:   Count, from read_seqbegin_or_lock_irqsave()
1199  * @flags: Caller's saved local interrupt state in case of a locking
1200  *	   reader, also from read_seqbegin_or_lock_irqsave()
1201  *
1202  * This is the _irqrestore variant of done_seqretry(). The read section
1203  * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
1204  * by need_seqretry().
1205  */
1206 static inline void
1207 done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
1208 {
1209 	if (seq & 1)
1210 		read_sequnlock_excl_irqrestore(lock, flags);
1211 }
1212 #endif /* __LINUX_SEQLOCK_H */
1213