lrng_interface_aux.c (forked from smuellerDD/lrng)
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* LRNG auxiliary interfaces
*
* Copyright (C) 2022 Stephan Mueller <[email protected]>
* Copyright (C) 2017 Jason A. Donenfeld <[email protected]>. All
* Rights Reserved.
* Copyright (C) 2016 Jason Cooper <[email protected]>
*/

#include <linux/lrng.h>
#include <linux/mm.h>
#include <linux/random.h>

#include "lrng_es_mgr.h"
#include "lrng_interface_random_kernel.h"

/*
 * Fill a buffer with random numbers and tokenize it to provide random numbers
 * to callers in fixed chunks. This approach is provided to be consistent with
 * the Linux kernel interface requirements. Yet, this approach violates the
 * backtracking resistance of the random number generator. Thus, the provided
 * random numbers are not considered to be as strong as those requested
 * directly from the LRNG.
 */
struct batched_entropy {
	union {
		u64 entropy_u64[LRNG_DRNG_BLOCKSIZE / sizeof(u64)];
		u32 entropy_u32[LRNG_DRNG_BLOCKSIZE / sizeof(u32)];
		u16 entropy_u16[LRNG_DRNG_BLOCKSIZE / sizeof(u16)];
		u8 entropy_u8[LRNG_DRNG_BLOCKSIZE / sizeof(u8)];
	};
	unsigned int position;
	spinlock_t batch_lock;
};

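/*
 * Sizing example (assuming LRNG_DRNG_BLOCKSIZE is 64 bytes, the block size
 * of the ChaCha20-based default DRNG): one batch then holds 8 u64, 16 u32,
 * 32 u16 or 64 u8 tokens per DRNG request. Each word size gets its own
 * per-CPU instance of this struct below, so ->position always walks a
 * single union member and the DRNG itself is only invoked on every 8th,
 * 16th, 32nd or 64th call per CPU, respectively.
 */
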
/*
 * Get a random word for internal kernel use only. The quality of the random
 * number is as good as /dev/urandom, but there is no backtrack protection,
 * with the goal of being quite fast and not depleting entropy.
 */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
};

u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;

	lrng_debug_report_seedlevel("get_random_u64");

	batch = raw_cpu_ptr(&batched_entropy_u64);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		lrng_get_random_bytes(batch->entropy_u64, LRNG_DRNG_BLOCKSIZE);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
};

u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;

	lrng_debug_report_seedlevel("get_random_u32");

	batch = raw_cpu_ptr(&batched_entropy_u32);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
		lrng_get_random_bytes(batch->entropy_u32, LRNG_DRNG_BLOCKSIZE);
		batch->position = 0;
	}
	ret = batch->entropy_u32[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u16) = {
	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u16.lock),
};

u16 get_random_u16(void)
{
	u16 ret;
	unsigned long flags;
	struct batched_entropy *batch;

	lrng_debug_report_seedlevel("get_random_u16");

	batch = raw_cpu_ptr(&batched_entropy_u16);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u16) == 0) {
		lrng_get_random_bytes(batch->entropy_u16, LRNG_DRNG_BLOCKSIZE);
		batch->position = 0;
	}
	ret = batch->entropy_u16[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u16);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u8) = {
	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u8.lock),
};

u8 get_random_u8(void)
{
	u8 ret;
	unsigned long flags;
	struct batched_entropy *batch;

	lrng_debug_report_seedlevel("get_random_u8");

	batch = raw_cpu_ptr(&batched_entropy_u8);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u8) == 0) {
		lrng_get_random_bytes(batch->entropy_u8, LRNG_DRNG_BLOCKSIZE);
		batch->position = 0;
	}
	ret = batch->entropy_u8[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u8);

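/*
 * Illustrative use of the batched interfaces (the call sites below are
 * hypothetical, not taken from this patch set):
 *
 *	u64 id    = get_random_u64();		random per-object identifier
 *	u32 delay = get_random_u32() & 0x3ff;	cheap jitter in [0, 1023]
 *
 * For an unbiased value below an arbitrary ceiling, use the helper below;
 * the get_random_u32_below() inline in <linux/random.h> is expected to fall
 * back to it whenever the ceiling is not a compile-time constant.
 */
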
/* Taken directly from random.c */
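/*
 * Informal sketch of the bounding trick: multiplying a uniform 32-bit value
 * by ceil gives a 64-bit product whose upper 32 bits already lie in
 * [0, ceil), but with a slight bias. The biased cases are exactly those
 * where the lower 32 bits of the product fall below 2^32 % ceil, which is
 * what "-ceil % ceil" evaluates to in 32-bit arithmetic; such draws are
 * rejected and redone. The outer "(u32)mult < ceil" test is merely a fast
 * path that skips computing the modulus in the common case, since the real
 * bound is always smaller than ceil. Example: for ceil = 3 the bound is
 * 2^32 % 3 = 1, so roughly one draw in 2^32 is retried and 0, 1 and 2 are
 * returned equally often.
 */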
u32 __get_random_u32_below(u32 ceil)
{
	u64 mult = (u64)ceil * get_random_u32();

	if (unlikely((u32)mult < ceil)) {
		u32 bound = -ceil % ceil;

		while (unlikely((u32)mult < bound))
			mult = (u64)ceil * get_random_u32();
	}
	return mult >> 32;
}
EXPORT_SYMBOL(__get_random_u32_below);

#ifdef CONFIG_SMP
/*
* This function is called when the CPU is coming up, with entry
* CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
*/
int random_prepare_cpu(unsigned int cpu)
{
	/*
	 * When the cpu comes back online, immediately invalidate all batches,
	 * so that we serve fresh randomness.
	 */
	per_cpu_ptr(&batched_entropy_u8, cpu)->position = 0;
	per_cpu_ptr(&batched_entropy_u16, cpu)->position = 0;
	per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
	per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
	return 0;
}

int random_online_cpu(unsigned int cpu)
{
	return 0;
}
#endif

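/*
 * These callbacks match the hooks random.c exposes to the CPU hotplug code;
 * they are presumably registered via the CPUHP_RANDOM_PREPARE and
 * CPUHP_RANDOM_ONLINE entries of the cpuhp state table in kernel/cpu.c.
 * random_online_cpu() has nothing to do in the LRNG case and simply
 * reports success.
 */
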
/*
* It's important to invalidate all potential batched entropy that might
* be stored before the crng is initialized, which we can do lazily by
* simply resetting the counter to zero so that it's re-extracted on the
* next usage.
*/
void invalidate_batched_entropy(void)
{
	int cpu;
	unsigned long flags;

	for_each_possible_cpu(cpu) {
		struct batched_entropy *batched_entropy;

		batched_entropy = per_cpu_ptr(&batched_entropy_u8, cpu);
		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
		batched_entropy->position = 0;
		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);

		batched_entropy = per_cpu_ptr(&batched_entropy_u16, cpu);
		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
		batched_entropy->position = 0;
		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);

		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
		batched_entropy->position = 0;
		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);

		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
		batched_entropy->position = 0;
		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
	}
}