Home
last modified time | relevance | path

Searched +full:lock +full:- +full:less (Results 1 – 25 of 1019) sorted by relevance

12345678910>>...41

/linux-6.14.4/include/linux/
Dllist.h1 /* SPDX-License-Identifier: GPL-2.0-only */
5 * Lock-less NULL terminated single linked list
15 * llist_del_first or llist_del_all used in other consumers, then a lock is
16 * needed. This is because llist_del_first depends on list->first->next not
17 * changing, but without lock protection, there's no way to be sure about that
19 * preempted back, the list->first is the same as before causing the cmpxchg in
28 * add | - | - | -
30 * del_all | | | -
33 * operation, with "-" being no lock needed, while "L" being lock is needed.
43 * architectures that don't have NMI-safe cmpxchg implementation, the
[all …]
Drbtree_latch.h1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Latched RB-trees
7 * Since RB-trees have non-atomic modifications they're not immediately suited
8 * for RCU/lockless queries. Even though we made RB-tree lookups non-fatal for
11 * The simplest solution is a seqlock + RB-tree, this will allow lockless
17 * employing the latch technique -- see @write_seqcount_latch_begin -- to
18 * implement a latched RB-tree which does allow for unconditional lookups by
26 * Therefore, this does require a lockless RB-tree iteration to be non-fatal;
28 * condition -- not seeing partial stores -- because the latch thing isolates
50 * latch_tree_ops - operators to define the tree order
[all …]
Dbit_spinlock.h1 /* SPDX-License-Identifier: GPL-2.0 */
11 * bit-based spin_lock()
19 * Assuming the lock is uncontended, this never enters in bit_spin_lock()
21 * within the inner loop a non-atomic test is used to in bit_spin_lock()
22 * busywait with less bus contention for a good time to in bit_spin_lock()
23 * attempt to acquire the lock bit. in bit_spin_lock()
55 * bit-based spin_unlock()
70 * bit-based spin_unlock()
71 * non-atomic version, which can be used eg. if the bit lock itself is
87 * Return true if the lock is held.
Dprocessor.h1 /* SPDX-License-Identifier: GPL-2.0 */
9 * spin_begin is used before beginning a busy-wait loop, and must be paired
16 * of these primitives. It should not lock or take any other resource.
21 * less than the cost of a context switch (and associated overhead).
23 * Detection of resource owner and decision to spin or sleep or guest-yield
24 * (e.g., spin lock holder vcpu preempted, or mutex owner not on CPU) can be
/linux-6.14.4/lib/
Dllist.c1 // SPDX-License-Identifier: GPL-2.0-only
3 * Lock-less NULL terminated single linked list
6 * architectures that don't have NMI-safe cmpxchg implementation, the
19 * llist_add_batch - add several linked entries in batch
22 * @head: the head for your lock-less list
29 struct llist_node *first = READ_ONCE(head->first); in llist_add_batch()
32 new_last->next = first; in llist_add_batch()
33 } while (!try_cmpxchg(&head->first, &first, new_first)); in llist_add_batch()
40 * llist_del_first - delete the first entry of lock-less list
41 * @head: the head for your lock-less list
[all …]
Dpercpu_counter.c1 // SPDX-License-Identifier: GPL-2.0
65 raw_spin_lock_irqsave(&fbc->lock, flags); in percpu_counter_set()
67 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_set()
70 fbc->count = amount; in percpu_counter_set()
71 raw_spin_unlock_irqrestore(&fbc->lock, flags); in percpu_counter_set()
83 * the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters),
90 * 1. the fast path uses local cmpxchg (note: no lock prefix)
98 count = this_cpu_read(*fbc->counters); in percpu_counter_add_batch()
101 raw_spin_lock_irqsave(&fbc->lock, flags); in percpu_counter_add_batch()
106 count = __this_cpu_read(*fbc->counters); in percpu_counter_add_batch()
[all …]
/linux-6.14.4/tools/testing/selftests/bpf/progs/
Drefcounted_kptr.c1 // SPDX-License-Identifier: GPL-2.0
41 private(A) struct bpf_spin_lock lock; variable
51 static bool less(struct bpf_rb_node *node_a, const struct bpf_rb_node *node_b) in less() function
59 return a->key < b->key; in less()
70 return node_a->key < node_b->key; in less_a()
75 struct bpf_spin_lock *lock) in __insert_in_tree_and_list() argument
81 return -1; in __insert_in_tree_and_list()
84 m->key = 123; in __insert_in_tree_and_list()
85 m->list_data = 456; in __insert_in_tree_and_list()
87 bpf_spin_lock(lock); in __insert_in_tree_and_list()
[all …]
Drefcounted_kptr_fail.c1 // SPDX-License-Identifier: GPL-2.0
23 static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) in less() function
31 return node_a->key < node_b->key; in less()
35 __failure __msg("Unreleased reference id=4 alloc_insn={{[0-9]+}}")
45 bpf_rbtree_add(&groot, &n->node, less); in rbtree_refcounted_node_ref_escapes()
52 m->key = 2; in rbtree_refcounted_node_ref_escapes()
76 __failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}")
87 m->key = 2; in rbtree_refcounted_node_ref_escapes_owning_input()
90 bpf_rbtree_add(&groot, &n->node, less); in rbtree_refcounted_node_ref_escapes_owning_input()
97 __failure __msg("function calls are not allowed while holding a lock")
[all …]
Drbtree_fail.c1 // SPDX-License-Identifier: GPL-2.0
20 static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) in less() function
28 return node_a->key < node_b->key; in less()
41 bpf_rbtree_add(&groot, &n->node, less); in rbtree_api_nolock_add()
56 bpf_rbtree_add(&groot, &n->node, less); in rbtree_api_nolock_remove()
59 bpf_rbtree_remove(&groot, &n->node); in rbtree_api_nolock_remove()
72 __failure __msg("rbtree_remove node input must be non-owning ref")
89 bpf_rbtree_add(&groot, &n->node, less); in rbtree_api_remove_unadded_node()
92 res = bpf_rbtree_remove(&groot, &n->node); in rbtree_api_remove_unadded_node()
96 res = bpf_rbtree_remove(&groot, &m->node); in rbtree_api_remove_unadded_node()
[all …]
Drbtree.c1 // SPDX-License-Identifier: GPL-2.0
25 long less_callback_ran = -1;
26 long removed_key = -1;
27 long first_data[2] = {-1, -1};
36 static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) in less() function
45 return node_a->key < node_b->key; in less()
48 static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock) in __add_three() argument
55 n->key = 5; in __add_three()
62 m->key = 1; in __add_three()
65 bpf_rbtree_add(&groot, &n->node, less); in __add_three()
[all …]
/linux-6.14.4/drivers/md/dm-vdo/
Dmemory-alloc.c1 // SPDX-License-Identifier: GPL-2.0-only
13 #include "memory-alloc.h"
38 * @flag_ptr: Location of the allocation-allowed flag
71 * performance-critical stage for us, so a linked list should be fine.
80 spinlock_t lock; member
101 spin_lock_irqsave(&memory_stats.lock, flags); in add_kmalloc_block()
105 spin_unlock_irqrestore(&memory_stats.lock, flags); in add_kmalloc_block()
112 spin_lock_irqsave(&memory_stats.lock, flags); in remove_kmalloc_block()
113 memory_stats.kmalloc_blocks--; in remove_kmalloc_block()
114 memory_stats.kmalloc_bytes -= size; in remove_kmalloc_block()
[all …]
Drecovery-journal.h1 /* SPDX-License-Identifier: GPL-2.0-only */
13 #include "admin-state.h"
19 #include "wait-queue.h"
30 * The concurrency guarantees of this single-threaded model allow the code to omit more
31 * fine-grained locking for recovery journal structures.
33 * The journal consists of a set of on-disk blocks arranged as a circular log with monotonically
36 * half-open interval containing the active blocks. 'active' is the number of the block actively
38 * = active + 1, and head may be any value in the interval [tail - size, active].
40 * The journal also contains a set of in-memory blocks which are used to buffer up entries until
41 * they can be committed. In general the number of in-memory blocks ('tail_buffer_count') will be
[all …]
/linux-6.14.4/drivers/net/ethernet/mellanox/mlx5/core/
Dirq_affinity.c1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
10 pool->irqs_per_cpu[cpu]--; in cpu_put()
15 pool->irqs_per_cpu[cpu]++; in cpu_get()
22 int best_cpu = -1; in cpu_get_least_loaded()
27 if (!pool->irqs_per_cpu[cpu]) { in cpu_get_least_loaded()
33 if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu]) in cpu_get_least_loaded()
36 if (best_cpu == -1) { in cpu_get_least_loaded()
38 mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n", in cpu_get_least_loaded()
42 pool->irqs_per_cpu[best_cpu]++; in cpu_get_least_loaded()
55 err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL); in irq_pool_request_irq()
[all …]
/linux-6.14.4/drivers/pwm/
Dpwm-microchip-core.c1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2021-2023 Microchip Corporation. All rights reserved.
8 * https://www.microsemi.com/document-portal/doc_download/1245275-corepwm-hb
11 * - If the IP block is configured without "shadow registers", all register
19 * - The IP block has no concept of a duty cycle, only rising/falling edges of
25 * If the duty cycle is 0%, and the requested period is less than the
28 * - The PWM period is set for the whole IP block not per channel. The driver
59 struct mutex lock; /* protects the shared period */ member
78 * 0-7 and the upper reg 8-15. Check if the pwm is in the upper reg in mchp_core_pwm_enable()
81 reg_offset = MCHPCOREPWM_EN(pwm->hwpwm >> 3); in mchp_core_pwm_enable()
[all …]
/linux-6.14.4/drivers/staging/media/atomisp/pci/runtime/tagger/interface/
Dia_css_tagger_common.h1 /* SPDX-License-Identifier: GPL-2.0 */
4 * Copyright (c) 2010 - 2015, Intel Corporation.
16 * Should be one less than NUM_CONTINUOUS_FRAMES in sh_css_internal.h
27 u8 lock; /* the lock on the element */ member
/linux-6.14.4/arch/x86/kernel/cpu/mce/
Dgenpool.c1 // SPDX-License-Identifier: GPL-2.0-only
15 * printk() is not safe in MCE context. This is a lock-less memory allocator
16 * used to save error information organized in a lock-less list.
37 err1 = &t->err; in is_duplicate_mce_record()
39 llist_for_each_entry(node, &l->llnode, llnode) { in is_duplicate_mce_record()
40 err2 = &node->err; in is_duplicate_mce_record()
42 if (!mce_cmp(&err1->m, &err2->m)) in is_duplicate_mce_record()
49 * The system has panicked - we'd like to peruse the list of MCE records
68 llist_add(&node->llnode, &new_head); in mce_gen_pool_prepare_records()
86 mce = &node->err.m; in mce_gen_pool_process()
[all …]
/linux-6.14.4/arch/mips/include/asm/octeon/
Dcvmx-bootmem.h7 * Copyright (c) 2003-2008 Cavium Networks
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
100 uint32_t lock; member
126 uint32_t lock;
151 * address. This is an allocate-only algorithm, so
158 * @alignment: Alignment required - must be power of 2
181 * @alignment: Alignment required - must be power of 2
182 * @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
201 * @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
[all …]
/linux-6.14.4/Documentation/mm/
Dsplit_page_table_lock.rst2 Split page table lock
5 Originally, mm->page_table_lock spinlock protected all page tables of the
7 multi-threaded applications due to high contention on the lock. To improve
8 scalability, split page table lock was introduced.
10 With split page table lock we have separate per-table lock to serialize
11 access to the table. At the moment we use split lock for PTE and PMD
12 tables. Access to higher level tables is protected by mm->page_table_lock.
14 There are helpers to lock/unlock a table and other accessor functions:
16 - pte_offset_map_lock()
17 maps PTE and takes PTE table lock, returns pointer to PTE with
[all …]
/linux-6.14.4/Documentation/bpf/
Dgraph_ds_impl.rst5 This document describes implementation details of new-style "graph" data
22 ------------
26 with the map API (HASH, ARRAY), others less so. Consequently, programs
31 no longer relevant. With the introduction of kfuncs, kptrs, and the any-context
35 Two such data structures - linked_list and rbtree - have many verification
44 ------------
47 helper functions - either standard map API helpers like ``bpf_map_update_elem``
48 or map-specific helpers. The new-style graph data structures instead use kfuncs
57 -------
59 The new-style data structures are intrusive and are defined similarly to their
[all …]
/linux-6.14.4/drivers/clk/samsung/
Dclk-cpu.c1 // SPDX-License-Identifier: GPL-2.0-only
37 #include <linux/clk-provider.h>
40 #include "clk-cpu.h"
48 * struct exynos_cpuclk_regs - Register offsets for CPU related clocks
71 * struct exynos_cpuclk_chip - Chip specific data for CPU clock
83 * struct exynos_cpuclk - information about clock supplied to a CPU core
88 * @lock: cpu clock domain register access lock
94 * @chip: chip-specific data for the CPU clock
103 spinlock_t *lock; member
111 /* ---- Common code --------------------------------------------------------- */
[all …]
/linux-6.14.4/kernel/locking/
Dww_mutex.h1 /* SPDX-License-Identifier: GPL-2.0-only */
9 __ww_waiter_first(struct mutex *lock) in __ww_waiter_first() argument
13 w = list_first_entry(&lock->wait_list, struct mutex_waiter, list); in __ww_waiter_first()
14 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_first()
21 __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w) in __ww_waiter_next() argument
24 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_next()
31 __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w) in __ww_waiter_prev() argument
34 if (list_entry_is_head(w, &lock->wait_list, list)) in __ww_waiter_prev()
41 __ww_waiter_last(struct mutex *lock) in __ww_waiter_last() argument
45 w = list_last_entry(&lock->wait_list, struct mutex_waiter, list); in __ww_waiter_last()
[all …]
/linux-6.14.4/arch/powerpc/kvm/
Dbook3s_xics.c1 // SPDX-License-Identifier: GPL-2.0-only
39 * Each ICS has a spin lock protecting the information about the IRQ
50 * - To speed up resends, keep a bitmap of "resend" set bits in the
53 * - Speed up server# -> ICP lookup (array ? hash table ?)
55 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
59 /* -- ICS routines -- */
81 return -EINVAL; in ics_deliver_irq()
83 state = &ics->irq_state[src]; in ics_deliver_irq()
84 if (!state->exists) in ics_deliver_irq()
85 return -EINVAL; in ics_deliver_irq()
[all …]
/linux-6.14.4/drivers/gpu/drm/i915/gt/
Dintel_rps_types.h1 /* SPDX-License-Identifier: MIT */
41 * struct intel_rps_freq_caps - rps freq capabilities
42 * @rp0_freq: non-overclocked max frequency
43 * @rp1_freq: "less than" RP0 power/frequency
56 struct mutex lock; /* protects enabling and the worker */ member
60 * i915->irq_lock
92 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
93 u8 rp1_freq; /* "less than" RP0 power/frequency */
94 u8 rp0_freq; /* Non-overclocked max frequency. */
/linux-6.14.4/drivers/usb/gadget/function/
Df_mass_storage.h1 /* SPDX-License-Identifier: GPL-2.0 */
36 "true to force read-only"); \
40 "true to simulate CD-ROM instead of disk"); \
83 struct mutex lock; member
105 const char *vendor_name; /* 8 characters or less */
106 const char *product_name; /* 16 characters or less */
/linux-6.14.4/Documentation/admin-guide/mm/
Dmultigen_lru.rst1 .. SPDX-License-Identifier: GPL-2.0
4 Multi-Gen LRU
6 The multi-gen LRU is an alternative LRU implementation that optimizes
26 -----------
38 0x0001 The main switch for the multi-gen LRU.
41 theoretically worsen lock contention (mmap_lock). If it is
42 disabled, the multi-gen LRU will suffer a minor performance
46 0x0004 Clearing the accessed bit in non-leaf page table entries as
49 disabled, the multi-gen LRU will suffer a negligible
65 --------------------
[all …]

12345678910>>...41