Selected lines from the kernel's generic vDSO getrandom() implementation, __cvdso_getrandom_data() in lib/vdso/getrandom.c.
// SPDX-License-Identifier: GPL-2.0
 * Copyright (C) 2022-2024 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
#define PAGE_MASK (~(PAGE_SIZE - 1))
		len -= sizeof(type); \
 * __cvdso_getrandom_data - Generic vDSO implementation of getrandom() syscall.
	params->size_of_opaque_state = sizeof(*state);
	params->mmap_prot = PROT_READ | PROT_WRITE;
	params->mmap_flags = MAP_DROPPABLE | MAP_ANONYMOUS;
	for (size_t i = 0; i < ARRAY_SIZE(params->reserved); ++i)
		params->reserved[i] = 0;
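
/*
 * Editor's illustration, not part of this file: the assignments above publish
 * the allocation parameters userspace needs before it can call into the vDSO.
 * A minimal sketch of the consumer side, assuming "vgetrandom" is already
 * resolved to the __vdso_getrandom symbol and that <sys/mman.h>, <sys/types.h>
 * and the uapi header defining struct vgetrandom_opaque_params are included;
 * field names follow the lines above.
 */
static void *example_alloc_vgetrandom_state(ssize_t (*vgetrandom)(void *, size_t,
						unsigned int, void *, size_t))
{
	struct vgetrandom_opaque_params params;
	void *state;

	/* Passing opaque_len == ~0UL asks the vDSO to fill in the parameters. */
	if (vgetrandom(NULL, 0, 0, &params, ~0UL) != 0)
		return NULL;

	/* Map one opaque state with exactly the prot/flags the kernel asked for. */
	state = mmap(NULL, params.size_of_opaque_state, params.mmap_prot,
		     params.mmap_flags, -1, 0);
	if (state == MAP_FAILED)
		return NULL;

	/* Later calls pass this as opaque_state, with size_of_opaque_state as opaque_len. */
	return state;
}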
		return -EFAULT;
	if (unlikely(!READ_ONCE(rng_info->is_ready)))
	 * This condition is checked after @rng_info->is_ready, because before the kernel's RNG is
	 * @state->in_use is basic reentrancy protection against this running in a signal handler
	 * level of reentrancy. If a signal interrupts this after reading @state->in_use, but before
	 * writing @state->in_use, there is still no race, because the signal handler will run to
	in_use = READ_ONCE(state->in_use);
	WRITE_ONCE(state->in_use, true);
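
	/*
	 * Editor's sketch, not part of this file: the guard described by the comment
	 * above. The excerpt omits the branch between the read and the write of
	 * @state->in_use; by my reading of the full file it behaves roughly as below,
	 * with a re-entering signal handler falling back to the plain syscall so that
	 * @state is never touched from two nesting levels at once.
	 */
	if (unlikely(in_use))
		goto fallback_syscall;	/* serve this request via getrandom(2) instead */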
	 * @rng_info->generation must always be read here, as it serializes @state->key with the
	current_generation = READ_ONCE(rng_info->generation);
	 * If @state->generation doesn't match the kernel RNG's generation, then it means the
	 * kernel's RNG has reseeded, and so @state->key is reseeded as well.
	if (unlikely(state->generation != current_generation)) {
		 * Write the generation before filling the key, in case of fork. If there is a fork
		 * generation counter, so the fork would not be detected. Therefore, write
		 * @state->generation before the call to the getrandom syscall.
		WRITE_ONCE(state->generation, current_generation);
		 * smp_store_release(&_vdso_rng_data.generation) in random.c.
		/* Reseed @state->key using fresh bytes from the kernel. */
		if (getrandom_syscall(state->key, sizeof(state->key), 0) != sizeof(state->key)) {
			 * If the syscall failed to refresh the key, then @state->key is now
			 * invalid, so invalidate the generation so that it is not used again, and
			WRITE_ONCE(state->generation, 0);
			 * Set @state->in_use to false only after the last write to @state in the
			WRITE_ONCE(state->in_use, false);
		 * Set @state->pos to beyond the end of the batch, so that the batch is refilled
		state->pos = sizeof(state->batch);
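
	/*
	 * Editor's sketch, not part of this file: the reseed path above, condensed.
	 * Writing @state->generation before the syscall avoids a fork leaving parent
	 * and child with the same key and the same generation counter (per the comment
	 * above), and by my reading of the full file a read barrier here pairs with
	 * the smp_store_release() mentioned in that comment. The barrier and label
	 * names come from that reading, not from the excerpt itself.
	 */
	WRITE_ONCE(state->generation, current_generation);
	smp_rmb();	/* pairs with smp_store_release(&_vdso_rng_data.generation) */
	if (getrandom_syscall(state->key, sizeof(state->key), 0) != sizeof(state->key)) {
		WRITE_ONCE(state->generation, 0);	/* poison: never reuse a stale key */
		WRITE_ONCE(state->in_use, false);	/* last touch of @state */
		goto fallback_syscall;			/* let getrandom(2) fill the buffer */
	}
	state->pos = sizeof(state->batch);		/* old batch is stale; force a refill */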
	 * First use bytes out of @state->batch, which may have been filled by the last call to this
	batch_len = min_t(size_t, sizeof(state->batch) - state->pos, len);
		memcpy_and_zero_src(buffer, state->batch + state->pos, batch_len);
		state->pos += batch_len;
		len -= batch_len;
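
/*
 * Editor's sketch, not part of this file: memcpy_and_zero_src() (whose helper
 * macro contributes the "len -= sizeof(type);" line near the top of this
 * excerpt) acts like a memcpy that erases the source as it copies, so batch
 * bytes already handed to the caller cannot later be recovered from @state.
 * A byte-at-a-time equivalent, ignoring the word-sized fast paths:
 */
static void example_copy_and_wipe(unsigned char *dst, unsigned char *src, size_t len)
{
	while (len--) {
		*dst++ = *src;	/* hand the byte to the caller... */
		*src++ = 0;	/* ...and erase it from the batch */
	}
}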
	/* Prevent the loop from being reordered wrt ->generation. */
	 * Since @rng_info->generation will never be 0, re-read @state->generation, rather
	if (unlikely(READ_ONCE(state->generation) != READ_ONCE(rng_info->generation))) {
		 * user force-reseeding the kernel's RNG using the ioctl.
		WRITE_ONCE(state->in_use, false);
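
		/*
		 * Editor's note, not part of this file: a mismatch here means the kernel
		 * reseeded (or was force-reseeded via the RNDRESEEDCRNG ioctl, or @state
		 * was dropped under memory pressure) while output was being generated.
		 * By my reading of the full file, the recovery regenerates the whole
		 * request from the original buffer under the new key, retrying at most
		 * once before giving up on the vDSO path and letting getrandom(2) fill
		 * the buffer, which is why @state->in_use is released above.
		 */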
	 * Set @state->in_use to false only when there will be no more reads or writes of
	WRITE_ONCE(state->in_use, false);
	__arch_chacha20_blocks_nostack(buffer, state->key, counter, nblocks);
	len -= nblocks * CHACHA_BLOCK_SIZE;
	BUILD_BUG_ON(sizeof(state->batch_key) % CHACHA_BLOCK_SIZE != 0);
	__arch_chacha20_blocks_nostack(state->batch_key, state->key, counter,
				       sizeof(state->batch_key) / CHACHA_BLOCK_SIZE);
	state->pos = 0;
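
	/*
	 * Editor's note, not part of this file: whole ChaCha20 blocks are written
	 * straight into the caller's buffer above, without bouncing through @state,
	 * and the final call refills @state->batch_key. By my reading of the full
	 * file, batch_key overlays both the batch and the key in a union, so this
	 * refill simultaneously provides the next batch and replaces the old key,
	 * preserving forward secrecy of prior outputs; resetting @state->pos to 0
	 * then marks the batch as full.
	 */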