Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--  drivers/char/random.c  297
 1 file changed, 142 insertions(+), 155 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index f43c89f7c44..0cf98bd4f2d 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -272,7 +272,7 @@ static int random_write_wakeup_thresh = 128;
static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
-static DEFINE_PER_CPU(int, trickle_count) = 0;
+static DEFINE_PER_CPU(int, trickle_count);
/*
* A pool of size .poolwords is stirred with a primitive polynomial
@@ -370,17 +370,19 @@ static struct poolinfo {
*/
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+static struct fasync_struct *fasync;
#if 0
-static int debug = 0;
+static int debug;
module_param(debug, bool, 0644);
-#define DEBUG_ENT(fmt, arg...) do { if (debug) \
- printk(KERN_DEBUG "random %04d %04d %04d: " \
- fmt,\
- input_pool.entropy_count,\
- blocking_pool.entropy_count,\
- nonblocking_pool.entropy_count,\
- ## arg); } while (0)
+#define DEBUG_ENT(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG "random %04d %04d %04d: " \
+ fmt,\
+ input_pool.entropy_count,\
+ blocking_pool.entropy_count,\
+ nonblocking_pool.entropy_count,\
+ ## arg); } while (0)
#else
#define DEBUG_ENT(fmt, arg...) do {} while (0)
#endif
@@ -394,7 +396,7 @@ module_param(debug, bool, 0644);
struct entropy_store;
struct entropy_store {
- /* mostly-read data: */
+ /* read-only data: */
struct poolinfo *poolinfo;
__u32 *pool;
const char *name;
@@ -402,7 +404,7 @@ struct entropy_store {
struct entropy_store *pull;
/* read-write data: */
- spinlock_t lock ____cacheline_aligned_in_smp;
+ spinlock_t lock;
unsigned add_ptr;
int entropy_count;
int input_rotate;
@@ -438,25 +440,26 @@ static struct entropy_store nonblocking_pool = {
};
/*
- * This function adds a byte into the entropy "pool". It does not
+ * This function adds bytes into the entropy "pool". It does not
* update the entropy estimate. The caller should call
- * credit_entropy_store if this is appropriate.
+ * credit_entropy_bits if this is appropriate.
*
* The pool is stirred with a primitive polynomial of the appropriate
* degree, and then twisted. We twist by three bits at a time because
* it's cheap to do so and helps slightly in the expected case where
* the entropy is concentrated in the low-order bits.
*/
-static void __add_entropy_words(struct entropy_store *r, const __u32 *in,
- int nwords, __u32 out[16])
+static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
{
static __u32 const twist_table[8] = {
0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
- unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5;
- int new_rotate, input_rotate;
+ unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+ int input_rotate;
int wordmask = r->poolinfo->poolwords - 1;
- __u32 w, next_w;
+ const char *bytes = in;
+ __u32 w;
unsigned long flags;
/* Taps are constant, so we can load them without holding r->lock. */
@@ -465,78 +468,76 @@ static void __add_entropy_words(struct entropy_store *r, const __u32 *in,
tap3 = r->poolinfo->tap3;
tap4 = r->poolinfo->tap4;
tap5 = r->poolinfo->tap5;
- next_w = *in++;
spin_lock_irqsave(&r->lock, flags);
- prefetch_range(r->pool, wordmask);
input_rotate = r->input_rotate;
- add_ptr = r->add_ptr;
+ i = r->add_ptr;
- while (nwords--) {
- w = rol32(next_w, input_rotate);
- if (nwords > 0)
- next_w = *in++;
- i = add_ptr = (add_ptr - 1) & wordmask;
- /*
- * Normally, we add 7 bits of rotation to the pool.
- * At the beginning of the pool, add an extra 7 bits
- * rotation, so that successive passes spread the
- * input bits across the pool evenly.
- */
- new_rotate = input_rotate + 14;
- if (i)
- new_rotate = input_rotate + 7;
- input_rotate = new_rotate & 31;
+ /* mix one byte at a time to simplify size handling and churn faster */
+ while (nbytes--) {
+ w = rol32(*bytes++, input_rotate & 31);
+ i = (i - 1) & wordmask;
/* XOR in the various taps */
+ w ^= r->pool[i];
w ^= r->pool[(i + tap1) & wordmask];
w ^= r->pool[(i + tap2) & wordmask];
w ^= r->pool[(i + tap3) & wordmask];
w ^= r->pool[(i + tap4) & wordmask];
w ^= r->pool[(i + tap5) & wordmask];
- w ^= r->pool[i];
+
+ /* Mix the result back in with a twist */
r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+
+ /*
+ * Normally, we add 7 bits of rotation to the pool.
+ * At the beginning of the pool, add an extra 7 bits
+ * rotation, so that successive passes spread the
+ * input bits across the pool evenly.
+ */
+ input_rotate += i ? 7 : 14;
}
r->input_rotate = input_rotate;
- r->add_ptr = add_ptr;
+ r->add_ptr = i;
- if (out) {
- for (i = 0; i < 16; i++) {
- out[i] = r->pool[add_ptr];
- add_ptr = (add_ptr - 1) & wordmask;
- }
- }
+ if (out)
+ for (j = 0; j < 16; j++)
+ ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
spin_unlock_irqrestore(&r->lock, flags);
}
-static inline void add_entropy_words(struct entropy_store *r, const __u32 *in,
- int nwords)
+static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
{
- __add_entropy_words(r, in, nwords, NULL);
+ mix_pool_bytes_extract(r, in, bytes, NULL);
}
/*
* Credit (or debit) the entropy store with n bits of entropy
*/
-static void credit_entropy_store(struct entropy_store *r, int nbits)
+static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
unsigned long flags;
+ if (!nbits)
+ return;
+
spin_lock_irqsave(&r->lock, flags);
- if (r->entropy_count + nbits < 0) {
- DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
- r->entropy_count, nbits);
+ DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+ r->entropy_count += nbits;
+ if (r->entropy_count < 0) {
+ DEBUG_ENT("negative entropy/overflow\n");
r->entropy_count = 0;
- } else if (r->entropy_count + nbits > r->poolinfo->POOLBITS) {
+ } else if (r->entropy_count > r->poolinfo->POOLBITS)
r->entropy_count = r->poolinfo->POOLBITS;
- } else {
- r->entropy_count += nbits;
- if (nbits)
- DEBUG_ENT("added %d entropy credits to %s\n",
- nbits, r->name);
+
+ /* should we wake readers? */
+ if (r == &input_pool &&
+ r->entropy_count >= random_read_wakeup_thresh) {
+ wake_up_interruptible(&random_read_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
}
spin_unlock_irqrestore(&r->lock, flags);
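The per-byte mixing step in the new mix_pool_bytes_extract() above is easier to follow in isolation. The following is a self-contained userspace rendition of that loop, not the driver itself: the pool size and taps are illustrative stand-ins rather than the driver's poolinfo values, and toy_mix/POOLWORDS are made-up names.

#include <stdint.h>
#include <stdio.h>

/* Illustrative parameters; the real driver takes pool size and taps
 * from its poolinfo table and holds r->lock while mixing. */
#define POOLWORDS 32			/* power of two, so a mask works */
#define WORDMASK  (POOLWORDS - 1)

static const uint32_t twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

static uint32_t rol32(uint32_t w, unsigned int shift)
{
	return (w << shift) | (w >> ((32 - shift) & 31));
}

/* Mix bytes the way mix_pool_bytes_extract() now does: rotate the
 * input byte, XOR it with the word under the add pointer plus a few
 * taps, then "twist" right by three bits through the CRC-style table
 * so entropy in the low-order bits spreads across the whole word. */
static void toy_mix(uint32_t pool[POOLWORDS], const void *in, int nbytes)
{
	static unsigned int input_rotate, add_ptr;
	const unsigned char *bytes = in;
	unsigned int i = add_ptr;

	while (nbytes--) {
		uint32_t w = rol32(*bytes++, input_rotate & 31);

		i = (i - 1) & WORDMASK;
		w ^= pool[i];
		w ^= pool[(i + 13) & WORDMASK];	/* illustrative taps */
		w ^= pool[(i + 7) & WORDMASK];
		pool[i] = (w >> 3) ^ twist_table[w & 7];

		/* 7 bits of extra rotation per byte, 14 when we wrap */
		input_rotate += i ? 7 : 14;
	}
	add_ptr = i;
}

int main(void)
{
	uint32_t pool[POOLWORDS] = { 0 };

	toy_mix(pool, "some sample input", 17);
	printf("pool[31] = %08x\n", pool[WORDMASK]);
	return 0;
}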
@@ -551,7 +552,7 @@ static void credit_entropy_store(struct entropy_store *r, int nbits)
/* There is one of these per entropy source */
struct timer_rand_state {
cycles_t last_time;
- long last_delta,last_delta2;
+ long last_delta, last_delta2;
unsigned dont_count_entropy:1;
};
@@ -586,7 +587,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
sample.jiffies = jiffies;
sample.cycles = get_cycles();
sample.num = num;
- add_entropy_words(&input_pool, (u32 *)&sample, sizeof(sample)/4);
+ mix_pool_bytes(&input_pool, &sample, sizeof(sample));
/*
* Calculate number of bits of randomness we probably added.
@@ -620,13 +621,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* Round down by 1 bit on general principles,
* and limit entropy estimate to 12 bits.
*/
- credit_entropy_store(&input_pool,
- min_t(int, fls(delta>>1), 11));
+ credit_entropy_bits(&input_pool,
+ min_t(int, fls(delta>>1), 11));
}
-
- if(input_pool.entropy_count >= random_read_wakeup_thresh)
- wake_up_interruptible(&random_read_wait);
-
out:
preempt_enable();
}
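The credit that add_timer_randomness() now hands to credit_entropy_bits() reduces to a small formula: take (roughly) the smallest of the successive jiffies deltas, drop one bit on general principles, and cap the estimate. A minimal sketch of just that calculation, with a plain-C stand-in for the kernel's fls() (timer_entropy_bits is a made-up name, and the driver also folds in second- and third-order deltas before this step):

#include <stdio.h>

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit, 0 for 0. */
static int fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Bits credited for one timer event, given the smallest interarrival delta. */
static int timer_entropy_bits(unsigned int delta)
{
	int bits = fls(delta >> 1);	/* round down by 1 bit */

	return bits < 11 ? bits : 11;	/* never claim more than 11 bits */
}

int main(void)
{
	printf("delta 1       -> %d bits\n", timer_entropy_bits(1));		/* 0 */
	printf("delta 100     -> %d bits\n", timer_entropy_bits(100));		/* 6 */
	printf("delta 1 << 20 -> %d bits\n", timer_entropy_bits(1 << 20));	/* capped: 11 */
	return 0;
}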
@@ -677,7 +674,7 @@ void add_disk_randomness(struct gendisk *disk)
*
*********************************************************************/
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
size_t nbytes, int min, int rsvd);
/*
@@ -704,10 +701,10 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
"(%d of %d requested)\n",
r->name, bytes * 8, nbytes * 8, r->entropy_count);
- bytes=extract_entropy(r->pull, tmp, bytes,
- random_read_wakeup_thresh / 8, rsvd);
- add_entropy_words(r, tmp, (bytes + 3) / 4);
- credit_entropy_store(r, bytes*8);
+ bytes = extract_entropy(r->pull, tmp, bytes,
+ random_read_wakeup_thresh / 8, rsvd);
+ mix_pool_bytes(r, tmp, bytes);
+ credit_entropy_bits(r, bytes*8);
}
}
@@ -744,13 +741,15 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
if (r->limit && nbytes + reserved >= r->entropy_count / 8)
nbytes = r->entropy_count/8 - reserved;
- if(r->entropy_count / 8 >= nbytes + reserved)
+ if (r->entropy_count / 8 >= nbytes + reserved)
r->entropy_count -= nbytes*8;
else
r->entropy_count = reserved;
- if (r->entropy_count < random_write_wakeup_thresh)
+ if (r->entropy_count < random_write_wakeup_thresh) {
wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
}
DEBUG_ENT("debiting %d entropy credits from %s%s\n",
@@ -764,45 +763,46 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
static void extract_buf(struct entropy_store *r, __u8 *out)
{
int i;
- __u32 data[16], buf[5 + SHA_WORKSPACE_WORDS];
+ __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+ __u8 extract[64];
+
+ /* Generate a hash across the pool, 16 words (512 bits) at a time */
+ sha_init(hash);
+ for (i = 0; i < r->poolinfo->poolwords; i += 16)
+ sha_transform(hash, (__u8 *)(r->pool + i), workspace);
- sha_init(buf);
/*
- * As we hash the pool, we mix intermediate values of
- * the hash back into the pool. This eliminates
- * backtracking attacks (where the attacker knows
- * the state of the pool plus the current outputs, and
- * attempts to find previous outputs), unless the hash
- * function can be inverted.
+ * We mix the hash back into the pool to prevent backtracking
+ * attacks (where the attacker knows the state of the pool
+ * plus the current outputs, and attempts to find previous
+ * outputs), unless the hash function can be inverted. By
+ * mixing at least a SHA1 worth of hash data back, we make
+ * brute-forcing the feedback as hard as brute-forcing the
+ * hash.
*/
- for (i = 0; i < r->poolinfo->poolwords; i += 16) {
- /* hash blocks of 16 words = 512 bits */
- sha_transform(buf, (__u8 *)(r->pool + i), buf + 5);
- /* feed back portion of the resulting hash */
- add_entropy_words(r, &buf[i % 5], 1);
- }
+ mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
/*
- * To avoid duplicates, we atomically extract a
- * portion of the pool while mixing, and hash one
- * final time.
+ * To avoid duplicates, we atomically extract a portion of the
+ * pool while mixing, and hash one final time.
*/
- __add_entropy_words(r, &buf[i % 5], 1, data);
- sha_transform(buf, (__u8 *)data, buf + 5);
+ sha_transform(hash, extract, workspace);
+ memset(extract, 0, sizeof(extract));
+ memset(workspace, 0, sizeof(workspace));
/*
- * In case the hash function has some recognizable
- * output pattern, we fold it in half.
+ * In case the hash function has some recognizable output
+ * pattern, we fold it in half. Thus, we always feed back
+ * twice as much data as we output.
*/
-
- buf[0] ^= buf[3];
- buf[1] ^= buf[4];
- buf[2] ^= rol32(buf[2], 16);
- memcpy(out, buf, EXTRACT_SIZE);
- memset(buf, 0, sizeof(buf));
+ hash[0] ^= hash[3];
+ hash[1] ^= hash[4];
+ hash[2] ^= rol32(hash[2], 16);
+ memcpy(out, hash, EXTRACT_SIZE);
+ memset(hash, 0, sizeof(hash));
}
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
size_t nbytes, int min, int reserved)
{
ssize_t ret = 0, i;
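The folding step that closes extract_buf() is worth seeing on its own: the 160-bit SHA-1 state is collapsed to EXTRACT_SIZE (10) bytes so a raw hash word never leaves the extractor, and twice as much hash material is fed back into the pool as is ever output. A standalone sketch (fold_hash is a made-up name):

#include <stdint.h>
#include <string.h>

#define EXTRACT_SIZE 10		/* as in drivers/char/random.c */

static uint32_t rol32(uint32_t w, unsigned int shift)
{
	return (w << shift) | (w >> ((32 - shift) & 31));
}

/* Fold the five-word SHA-1 result in half: the 10 output bytes depend
 * on all 20 hash bytes, but never expose a full hash word directly. */
static void fold_hash(const uint32_t hash_in[5], uint8_t out[EXTRACT_SIZE])
{
	uint32_t hash[5];

	memcpy(hash, hash_in, sizeof(hash));
	hash[0] ^= hash[3];
	hash[1] ^= hash[4];
	hash[2] ^= rol32(hash[2], 16);
	memcpy(out, hash, EXTRACT_SIZE);
	memset(hash, 0, sizeof(hash));	/* wipe the full hash from the stack */
}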
@@ -872,7 +872,6 @@ void get_random_bytes(void *buf, int nbytes)
{
extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
}
-
EXPORT_SYMBOL(get_random_bytes);
/*
@@ -894,12 +893,11 @@ static void init_std_data(struct entropy_store *r)
spin_unlock_irqrestore(&r->lock, flags);
now = ktime_get_real();
- add_entropy_words(r, (__u32 *)&now, sizeof(now)/4);
- add_entropy_words(r, (__u32 *)utsname(),
- sizeof(*(utsname()))/4);
+ mix_pool_bytes(r, &now, sizeof(now));
+ mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}
-static int __init rand_initialize(void)
+static int rand_initialize(void)
{
init_std_data(&input_pool);
init_std_data(&blocking_pool);
@@ -940,7 +938,7 @@ void rand_initialize_disk(struct gendisk *disk)
#endif
static ssize_t
-random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
ssize_t n, retval = 0, count = 0;
@@ -1002,8 +1000,7 @@ random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
}
static ssize_t
-urandom_read(struct file * file, char __user * buf,
- size_t nbytes, loff_t *ppos)
+urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
return extract_entropy_user(&nonblocking_pool, buf, nbytes);
}
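The user-visible contract is unchanged by the cleanups: /dev/random (random_read) can block until the input pool holds enough credited entropy, while /dev/urandom (urandom_read) never blocks. A minimal consumer, for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd = open("/dev/urandom", O_RDONLY);	/* never blocks */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("read %zd bytes; first is %02x\n", n, buf[0]);
	close(fd);
	return 0;
}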
@@ -1038,16 +1035,15 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
count -= bytes;
p += bytes;
- add_entropy_words(r, buf, (bytes + 3) / 4);
+ mix_pool_bytes(r, buf, bytes);
cond_resched();
}
return 0;
}
-static ssize_t
-random_write(struct file * file, const char __user * buffer,
- size_t count, loff_t *ppos)
+static ssize_t random_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
size_t ret;
struct inode *inode = file->f_path.dentry->d_inode;
@@ -1064,9 +1060,7 @@ random_write(struct file * file, const char __user * buffer,
return (ssize_t)count;
}
-static int
-random_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
+static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
int size, ent_count;
int __user *p = (int __user *)arg;
@@ -1074,8 +1068,8 @@ random_ioctl(struct inode * inode, struct file * file,
switch (cmd) {
case RNDGETENTCNT:
- ent_count = input_pool.entropy_count;
- if (put_user(ent_count, p))
+ /* inherently racy, no point locking */
+ if (put_user(input_pool.entropy_count, p))
return -EFAULT;
return 0;
case RNDADDTOENTCNT:
@@ -1083,13 +1077,7 @@ random_ioctl(struct inode * inode, struct file * file,
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- credit_entropy_store(&input_pool, ent_count);
- /*
- * Wake up waiting processes if we have enough
- * entropy.
- */
- if (input_pool.entropy_count >= random_read_wakeup_thresh)
- wake_up_interruptible(&random_read_wait);
+ credit_entropy_bits(&input_pool, ent_count);
return 0;
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
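RNDADDTOENTCNT (above) only adjusts the entropy estimate; the data itself is fed in with an ordinary write(), which mixes bytes into the pool but credits nothing. Roughly how a privileged seed-restoring program might combine the two (the seed contents here are a placeholder, and real code would decide how many bits the seed is actually worth):

#include <fcntl.h>
#include <linux/random.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char seed[32] = { 0 };	/* stand-in for bytes read from a saved seed file */
	int bits = 8 * (int)sizeof(seed);
	int fd = open("/dev/random", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/random");
		return 1;
	}
	/* Mixes the bytes into the pool without touching the estimate... */
	if (write(fd, seed, sizeof(seed)) != (ssize_t)sizeof(seed))
		perror("write");
	/* ...so the estimate is credited separately (needs CAP_SYS_ADMIN). */
	if (ioctl(fd, RNDADDTOENTCNT, &bits) < 0)
		perror("RNDADDTOENTCNT");
	close(fd);
	return 0;
}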
@@ -1104,39 +1092,45 @@ random_ioctl(struct inode * inode, struct file * file,
size);
if (retval < 0)
return retval;
- credit_entropy_store(&input_pool, ent_count);
- /*
- * Wake up waiting processes if we have enough
- * entropy.
- */
- if (input_pool.entropy_count >= random_read_wakeup_thresh)
- wake_up_interruptible(&random_read_wait);
+ credit_entropy_bits(&input_pool, ent_count);
return 0;
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/* Clear the entropy pool counters. */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- init_std_data(&input_pool);
- init_std_data(&blocking_pool);
- init_std_data(&nonblocking_pool);
+ rand_initialize();
return 0;
default:
return -EINVAL;
}
}
+static int random_fasync(int fd, struct file *filp, int on)
+{
+ return fasync_helper(fd, filp, on, &fasync);
+}
+
+static int random_release(struct inode *inode, struct file *filp)
+{
+ return fasync_helper(-1, filp, 0, &fasync);
+}
+
const struct file_operations random_fops = {
.read = random_read,
.write = random_write,
.poll = random_poll,
- .ioctl = random_ioctl,
+ .unlocked_ioctl = random_ioctl,
+ .fasync = random_fasync,
+ .release = random_release,
};
const struct file_operations urandom_fops = {
.read = urandom_read,
.write = random_write,
- .ioctl = random_ioctl,
+ .unlocked_ioctl = random_ioctl,
+ .fasync = random_fasync,
+ .release = random_release,
};
/***************************************************************
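The new random_fasync()/random_release() hooks plus the kill_fasync() calls are what make SIGIO delivery work: readers are signalled when the input pool reaches random_read_wakeup_thresh, writers when it drains below random_write_wakeup_thresh. A user-space sketch of asking for that signal:

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigio;

static void on_sigio(int sig)
{
	(void)sig;
	got_sigio = 1;
}

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd = open("/dev/random", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	signal(SIGIO, on_sigio);
	fcntl(fd, F_SETOWN, getpid());				/* deliver SIGIO to this process */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);	/* enable kill_fasync() notification */

	while (!got_sigio)
		pause();	/* returns once the pool crosses the read wakeup threshold */

	n = read(fd, buf, sizeof(buf));
	printf("SIGIO received, read %zd bytes\n", n);
	close(fd);
	return 0;
}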
@@ -1157,7 +1151,6 @@ void generate_random_uuid(unsigned char uuid_out[16])
/* Set the UUID variant to DCE */
uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
}
-
EXPORT_SYMBOL(generate_random_uuid);
/********************************************************************
@@ -1339,7 +1332,7 @@ ctl_table random_table[] = {
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static __u32 twothirdsMD4Transform (__u32 const buf[4], __u32 const in[12])
+static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
{
__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
@@ -1487,8 +1480,8 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
*/
memcpy(hash, saddr, 16);
- hash[4]=((__force u16)sport << 16) + (__force u16)dport;
- memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
+ hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
+ memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
seq += keyptr->count;
@@ -1538,10 +1531,10 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
* Note that the words are placed into the starting vector, which is
* then mixed with a partial MD4 over random data.
*/
- hash[0]=(__force u32)saddr;
- hash[1]=(__force u32)daddr;
- hash[2]=((__force u16)sport << 16) + (__force u16)dport;
- hash[3]=keyptr->secret[11];
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+ hash[3] = keyptr->secret[11];
seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
seq += keyptr->count;
@@ -1556,10 +1549,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
* Choosing a clock of 64 ns period is OK. (period of 274 s)
*/
seq += ktime_to_ns(ktime_get_real()) >> 6;
-#if 0
- printk("init_seq(%lx, %lx, %d, %d) = %d\n",
- saddr, daddr, sport, dport, seq);
-#endif
+
return seq;
}
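The 274 s figure in the comment above is just the wrap time of a 32-bit sequence space driven by that clock: shifting nanoseconds right by 6 gives a 64 ns tick, and 2^32 x 64 ns = 274,877,906,944 ns, roughly 274.9 s.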
@@ -1582,14 +1572,15 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport)
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport)
{
struct keydata *keyptr = get_keyptr();
u32 hash[12];
memcpy(hash, saddr, 16);
hash[4] = (__force u32)dport;
- memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
+ memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
return twothirdsMD4Transform((const __u32 *)daddr, hash);
}
@@ -1617,13 +1608,9 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
seq += ktime_to_ns(ktime_get_real());
seq &= (1ull << 48) - 1;
-#if 0
- printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
- saddr, daddr, sport, dport, seq);
-#endif
+
return seq;
}
-
EXPORT_SYMBOL(secure_dccp_sequence_number);
#endif