Lines Matching +full:line +full:- +full:orders
1 // SPDX-License-Identifier: GPL-2.0-or-later
66 u64 delta = sched_clock() - seen; in recently_sleepy()
107 return (val >> _Q_TAIL_CPU_OFFSET) - 1; in decode_tail_cpu()
141 " bne- 1b \n" in trylock_clean_tail()
145 : "r" (&lock->val), "r"(tail), "r" (newval), in trylock_clean_tail()
173 " bne- 1b \n" in publish_tail_cpu()
175 : "r" (&lock->val), "r" (tail), "r"(_Q_TAIL_CPU_MASK) in publish_tail_cpu()
189 " bne- 1b \n" in set_mustq()
191 : "r" (&lock->val), "r" (_Q_MUST_Q_VAL) in set_mustq()
205 " bne- 1b \n" in clear_mustq()
207 : "r" (&lock->val), "r" (_Q_MUST_Q_VAL) in clear_mustq()
224 " bne- 2f \n" in try_set_sleepy()
226 " bne- 1b \n" in try_set_sleepy()
229 : "r" (&lock->val), "r"(old), "r" (new) in try_set_sleepy()
268 * orders the release barrier in publish_tail_cpu performed by the in get_tail_qnode()
275 struct qnode *qnode = &qnodesp->nodes[idx]; in get_tail_qnode()
276 if (qnode->lock == lock) in get_tail_qnode()
318 if (READ_ONCE(lock->val) == val) { in __yield_to_locked_owner()
363 next = READ_ONCE(node->next); in propagate_sleepy()
367 if (next->sleepy) in propagate_sleepy()
372 next->sleepy = 1; in propagate_sleepy()
391 if (node->sleepy || vcpu_is_preempted(prev_cpu)) { in yield_to_prev()
392 u32 val = READ_ONCE(lock->val); in yield_to_prev()
395 if (node->next && !node->next->sleepy) { in yield_to_prev()
399 * to become "non-sleepy" if vCPU preemption in yield_to_prev()
404 node->next->sleepy = 1; in yield_to_prev()
411 node->sleepy = false; in yield_to_prev()
429 if (!READ_ONCE(node->locked)) { in yield_to_prev()
473 val = READ_ONCE(lock->val); in try_to_steal_lock()
510 * while the owner is preempted -- we won't interfere in try_to_steal_lock()
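
The try_to_steal_lock() hits reflect the stealing path: while the owner's vCPU looks preempted, a waiter may try to take the lock out of turn whenever the locked bit is clear rather than queueing behind a sleeping owner. A hedged sketch with invented names; the real function is considerably more careful about how long it tries and what ordering it uses.

#include <stdint.h>

#define SK_LOCKED 1u                    /* stand-in for _Q_LOCKED_VAL */

struct sk_lock { uint32_t val; };

/* One steal attempt: only possible while the locked bit is clear. */
static int sk_try_steal(struct sk_lock *lock)
{
	uint32_t val = __atomic_load_n(&lock->val, __ATOMIC_RELAXED);

	if (val & SK_LOCKED)
		return 0;               /* somebody holds it; cannot steal */

	/* Set the locked bit while leaving any queued tail bits intact. */
	return __atomic_compare_exchange_n(&lock->val, &val, val | SK_LOCKED,
					   0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
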
540 if (unlikely(qnodesp->count >= MAX_NODES)) { in queued_spin_lock_mcs_queue()
547 idx = qnodesp->count++; in queued_spin_lock_mcs_queue()
549 * Ensure that we increment the head node->count before initialising in queued_spin_lock_mcs_queue()
554 node = &qnodesp->nodes[idx]; in queued_spin_lock_mcs_queue()
555 node->next = NULL; in queued_spin_lock_mcs_queue()
556 node->lock = lock; in queued_spin_lock_mcs_queue()
557 node->cpu = smp_processor_id(); in queued_spin_lock_mcs_queue()
558 node->sleepy = 0; in queued_spin_lock_mcs_queue()
559 node->locked = 0; in queued_spin_lock_mcs_queue()
561 tail = encode_tail_cpu(node->cpu); in queued_spin_lock_mcs_queue()
579 WRITE_ONCE(prev->next, node); in queued_spin_lock_mcs_queue()
583 while (!READ_ONCE(node->locked)) { in queued_spin_lock_mcs_queue()
596 * like it could cause additional line transitions because in queued_spin_lock_mcs_queue()
600 next = READ_ONCE(node->next); in queued_spin_lock_mcs_queue()
612 val = READ_ONCE(lock->val); in queued_spin_lock_mcs_queue()
669 /* There is a next, must wait for node->next != NULL (MCS protocol) */ in queued_spin_lock_mcs_queue()
670 next = READ_ONCE(node->next); in queued_spin_lock_mcs_queue()
673 while (!(next = READ_ONCE(node->next))) in queued_spin_lock_mcs_queue()
682 * the acquire barrier we took the lock with orders that update vs in queued_spin_lock_mcs_queue()
687 int next_cpu = next->cpu; in queued_spin_lock_mcs_queue()
688 WRITE_ONCE(next->locked, 1); in queued_spin_lock_mcs_queue()
694 WRITE_ONCE(next->locked, 1); in queued_spin_lock_mcs_queue()
702 * values if an interrupt occurs after we increment qnodesp->count in queued_spin_lock_mcs_queue()
703 * but before node->lock is initialized. The barrier ensures that in queued_spin_lock_mcs_queue()
706 node->lock = NULL; in queued_spin_lock_mcs_queue()
708 qnodesp->count--; in queued_spin_lock_mcs_queue()