author    Jason A. Donenfeld    2017-06-08 04:47:13 +0200
committer Theodore Ts'o         2017-06-20 04:06:28 +0200
commit    d48ad080ec0101c2cca92926bed64993ab565c3d (patch)
tree      85a3211479e495739ba9a2763cf2a8095e70f87f /lib/rhashtable.c
parent    ceph: ensure RNG is seeded before using (diff)
rhashtable: use get_random_u32 for hash_rnd
This is much faster and just as secure. It also has the added benefit of
probably returning better randomness at early-boot on systems with
architectural RNGs.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Thomas Graf <tgraf@suug.ch>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
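For illustration only (not part of the patch), a minimal kernel-C sketch of the
pattern the commit applies: when exactly one 32-bit seed is needed, a single
call to get_random_u32() replaces a byte-wise fill via get_random_bytes(). The
struct and function names below are hypothetical, chosen to mirror the
rhashtable case.

    #include <linux/random.h>
    #include <linux/types.h>

    /* Hypothetical table carrying a per-table hash seed. */
    struct example_table {
            u32 hash_rnd;
    };

    static void example_table_seed(struct example_table *tbl)
    {
            /* Before: fill the seed byte by byte. */
            /* get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); */

            /* After: fetch a single 32-bit word directly. */
            tbl->hash_rnd = get_random_u32();
    }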
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c  2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index d9e7274a04cd..a1eb7c947f46 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -235,7 +235,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 	INIT_LIST_HEAD(&tbl->walkers);
-	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
+	tbl->hash_rnd = get_random_u32();
 	for (i = 0; i < nbuckets; i++)
 		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);