author     Jiri Kosina <jkosina@suse.cz>  2011-11-13 20:55:35 +0100
committer  Jiri Kosina <jkosina@suse.cz>  2011-11-13 20:55:53 +0100
commit     2290c0d06d82faee87b1ab2d9d4f7bf81ef64379 (patch)
tree       e075e4d5534193f28e6059904f61e5ca03958d3c /lib
parent     4da669a2e3e5bc70b30a0465f3641528681b5f77 (diff)
parent     52e4c2a05256cb83cda12f3c2137ab1533344edb (diff)
Merge branch 'master' into for-next
Sync with Linus' tree to have 157550ff ("mtd: add GPMI-NAND driver in the config and Makefile") as I have a patch depending on that one.
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig             |    3
-rw-r--r--  lib/Kconfig.debug       |   16
-rw-r--r--  lib/Makefile            |    4
-rw-r--r--  lib/atomic64.c          |   66
-rw-r--r--  lib/bitmap.c            |   10
-rw-r--r--  lib/dma-debug.c         |   69
-rw-r--r--  lib/dynamic_debug.c     |  173
-rw-r--r--  lib/fault-inject.c      |    5
-rw-r--r--  lib/idr.c               |   15
-rw-r--r--  lib/kobject_uevent.c    |    2
-rw-r--r--  lib/kstrtox.c           |   75
-rw-r--r--  lib/kstrtox.h           |    8
-rw-r--r--  lib/llist.c             |   74
-rw-r--r--  lib/nlattr.c            |    1
-rw-r--r--  lib/percpu_counter.c    |   20
-rw-r--r--  lib/proportions.c       |   12
-rw-r--r--  lib/radix-tree.c        |   10
-rw-r--r--  lib/raid6/algos.c       |    1
-rw-r--r--  lib/raid6/int.uc        |    2
-rw-r--r--  lib/raid6/mktables.c    |    1
-rw-r--r--  lib/raid6/recov.c       |    1
-rw-r--r--  lib/ratelimit.c         |    4
-rw-r--r--  lib/rwsem-spinlock.c    |   38
-rw-r--r--  lib/rwsem.c             |   14
-rw-r--r--  lib/smp_processor_id.c  |    2
-rw-r--r--  lib/spinlock_debug.c    |   19
-rw-r--r--  lib/string.c            |   57
-rw-r--r--  lib/vsprintf.c          |   47
28 files changed, 459 insertions(+), 290 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index abff69d4ff8..d971366b8de 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -275,7 +275,4 @@ config CORDIC
This option provides an implementation of the CORDIC algorithm;
calculations are in fixed point. Module will be called cordic.
-config LLIST
- bool
-
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 75330bd8756..82928f5ea04 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -248,8 +248,9 @@ config DEFAULT_HUNG_TASK_TIMEOUT
to determine when a task has become non-responsive and should
be considered hung.
- It can be adjusted at runtime via the kernel.hung_task_timeout
- sysctl or by writing a value to /proc/sys/kernel/hung_task_timeout.
+ It can be adjusted at runtime via the kernel.hung_task_timeout_secs
+ sysctl or by writing a value to
+ /proc/sys/kernel/hung_task_timeout_secs.
A timeout of 0 disables the check. The default is two minutes.
Keeping the default should be fine in most cases.
@@ -1070,6 +1071,17 @@ config FAIL_IO_TIMEOUT
Only works with drivers that use the generic timeout handling,
for others it won't do anything.
+config FAIL_MMC_REQUEST
+ bool "Fault-injection capability for MMC IO"
+ select DEBUG_FS
+ depends on FAULT_INJECTION && MMC
+ help
+ Provide fault-injection capability for MMC IO.
+ This will make the mmc core return data errors. This is
+ useful to test the error handling in the mmc block device
+ and to test how the mmc host driver handles retries from
+ the block device.
+
config FAULT_INJECTION_DEBUG_FS
bool "Debugfs entries for fault-injection capabilities"
depends on FAULT_INJECTION && SYSFS && DEBUG_FS
diff --git a/lib/Makefile b/lib/Makefile
index 3f5bc6d903e..a4da283f5dc 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
- bsearch.o find_last_bit.o find_next_bit.o
+ bsearch.o find_last_bit.o find_next_bit.o llist.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
@@ -115,8 +115,6 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
obj-$(CONFIG_CORDIC) += cordic.o
-obj-$(CONFIG_LLIST) += llist.o
-
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/atomic64.c b/lib/atomic64.c
index e12ae0dd08a..3975470caf4 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -29,11 +29,11 @@
* Ensure each lock is in a separate cacheline.
*/
static union {
- spinlock_t lock;
+ raw_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
-static inline spinlock_t *lock_addr(const atomic64_t *v)
+static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
unsigned long addr = (unsigned long) v;
@@ -45,12 +45,12 @@ static inline spinlock_t *lock_addr(const atomic64_t *v)
long long atomic64_read(const atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_read);
@@ -58,34 +58,34 @@ EXPORT_SYMBOL(atomic64_read);
void atomic64_set(atomic64_t *v, long long i)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
v->counter = i;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
void atomic64_add(long long a, atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
v->counter += a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);
long long atomic64_add_return(long long a, atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter += a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_add_return);
@@ -93,23 +93,23 @@ EXPORT_SYMBOL(atomic64_add_return);
void atomic64_sub(long long a, atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
v->counter -= a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);
long long atomic64_sub_return(long long a, atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter -= a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_sub_return);
@@ -117,14 +117,14 @@ EXPORT_SYMBOL(atomic64_sub_return);
long long atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter - 1;
if (val >= 0)
v->counter = val;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
@@ -132,14 +132,14 @@ EXPORT_SYMBOL(atomic64_dec_if_positive);
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter;
if (val == o)
v->counter = n;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
@@ -147,13 +147,13 @@ EXPORT_SYMBOL(atomic64_cmpxchg);
long long atomic64_xchg(atomic64_t *v, long long new)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter;
v->counter = new;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_xchg);
@@ -161,15 +161,15 @@ EXPORT_SYMBOL(atomic64_xchg);
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
int ret = 0;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
if (v->counter != u) {
v->counter += a;
ret = 1;
}
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
@@ -179,7 +179,7 @@ static int init_atomic64_lock(void)
int i;
for (i = 0; i < NR_LOCKS; ++i)
- spin_lock_init(&atomic64_lock[i].lock);
+ raw_spin_lock_init(&atomic64_lock[i].lock);
return 0;
}
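
The whole file above is a mechanical spinlock_t -> raw_spinlock_t conversion: the hashed-lock fallback must keep busy-waiting even on preempt-rt, where plain spinlocks become sleeping locks and could not be taken with interrupts disabled. A minimal user-space sketch of the same cacheline-padded hashed-lock scheme (hypothetical names; pthread mutexes stand in for raw spinlocks):

#include <pthread.h>
#include <stdint.h>

#define NR_LOCKS	16
#define CACHELINE	64

/* One lock per cacheline so contending CPUs don't false-share
 * (cf. atomic64_lock[] above). Initialise each .lock once at startup
 * with pthread_mutex_init(), as init_atomic64_lock() does. */
static union {
	pthread_mutex_t lock;
	char pad[CACHELINE];
} hashed_lock[NR_LOCKS] __attribute__((aligned(CACHELINE)));

/* Hash an object's address to one of NR_LOCKS locks (cf. lock_addr()). */
static pthread_mutex_t *lock_for(const void *v)
{
	uintptr_t addr = (uintptr_t)v;

	addr >>= 6;	/* ignore offsets within a cacheline */
	return &hashed_lock[addr % NR_LOCKS].lock;
}
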
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 2f4412e4d07..0d4a127dd9b 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
{
int c, old_c, totaldigits, ndigits, nchunks, nbits;
u32 chunk;
- const char __user *ubuf = buf;
+ const char __user __force *ubuf = (const char __user __force *)buf;
bitmap_zero(maskp, nmaskbits);
@@ -504,7 +504,9 @@ int bitmap_parse_user(const char __user *ubuf,
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
+ return __bitmap_parse((const char __force *)ubuf,
+ ulen, 1, maskp, nmaskbits);
+
}
EXPORT_SYMBOL(bitmap_parse_user);
@@ -594,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
{
unsigned a, b;
int c, old_c, totaldigits;
- const char __user *ubuf = buf;
+ const char __user __force *ubuf = (const char __user __force *)buf;
int exp_digit, in_range;
totaldigits = c = 0;
@@ -694,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
- return __bitmap_parselist((const char *)ubuf,
+ return __bitmap_parselist((const char __force *)ubuf,
ulen, 1, maskp, nmaskbits);
}
EXPORT_SYMBOL(bitmap_parselist_user);
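
All four hunks here are sparse-annotation fixes, not behaviour changes: __bitmap_parse() and __bitmap_parselist() accept either a kernel or a user buffer, so the pointers cross address spaces deliberately, and __force tells sparse not to warn about the cast. A stripped-down sketch of the pattern (the #else branch mirrors what the kernel defines when sparse isn't running):

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

/* Takes a kernel pointer; is_user flags when it really came from userspace. */
static int parse_buf(const char *buf, unsigned int len, int is_user)
{
	const char __user *ubuf = (const char __user __force *)buf;

	/* dereference buf directly, or copy_from_user() via ubuf when is_user */
	(void)ubuf; (void)len; (void)is_user;
	return 0;
}
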
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index db07bfd9298..74c6c7fce74 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -24,6 +24,7 @@
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
+#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
@@ -62,6 +63,8 @@ struct dma_debug_entry {
#endif
};
+typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
+
struct hash_bucket {
struct list_head list;
spinlock_t lock;
@@ -240,18 +243,37 @@ static void put_hash_bucket(struct hash_bucket *bucket,
spin_unlock_irqrestore(&bucket->lock, __flags);
}
+static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
+{
+ return ((a->dev_addr == b->dev_addr) &&
+ (a->dev == b->dev)) ? true : false;
+}
+
+static bool containing_match(struct dma_debug_entry *a,
+ struct dma_debug_entry *b)
+{
+ if (a->dev != b->dev)
+ return false;
+
+ if ((b->dev_addr <= a->dev_addr) &&
+ ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
+ return true;
+
+ return false;
+}
+
/*
* Search a given entry in the hash bucket list
*/
-static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
- struct dma_debug_entry *ref)
+static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
+ struct dma_debug_entry *ref,
+ match_fn match)
{
struct dma_debug_entry *entry, *ret = NULL;
int matches = 0, match_lvl, last_lvl = 0;
list_for_each_entry(entry, &bucket->list, list) {
- if ((entry->dev_addr != ref->dev_addr) ||
- (entry->dev != ref->dev))
+ if (!match(ref, entry))
continue;
/*
@@ -293,6 +315,39 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
return ret;
}
+static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
+ struct dma_debug_entry *ref)
+{
+ return __hash_bucket_find(bucket, ref, exact_match);
+}
+
+static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
+ struct dma_debug_entry *ref,
+ unsigned long *flags)
+{
+
+ unsigned int max_range = dma_get_max_seg_size(ref->dev);
+ struct dma_debug_entry *entry, index = *ref;
+ unsigned int range = 0;
+
+ while (range <= max_range) {
+ entry = __hash_bucket_find(*bucket, &index, containing_match);
+
+ if (entry)
+ return entry;
+
+ /*
+ * Nothing found, go back a hash bucket
+ */
+ put_hash_bucket(*bucket, flags);
+ range += (1 << HASH_FN_SHIFT);
+ index.dev_addr -= (1 << HASH_FN_SHIFT);
+ *bucket = get_hash_bucket(&index, flags);
+ }
+
+ return NULL;
+}
+
/*
* Add an entry to a hash bucket
*/
@@ -802,7 +857,7 @@ static void check_unmap(struct dma_debug_entry *ref)
}
bucket = get_hash_bucket(ref, &flags);
- entry = hash_bucket_find(bucket, ref);
+ entry = bucket_find_exact(bucket, ref);
if (!entry) {
err_printk(ref->dev, NULL, "DMA-API: device driver tries "
@@ -902,7 +957,7 @@ static void check_sync(struct device *dev,
bucket = get_hash_bucket(ref, &flags);
- entry = hash_bucket_find(bucket, ref);
+ entry = bucket_find_contain(&bucket, ref, &flags);
if (!entry) {
err_printk(dev, NULL, "DMA-API: device driver tries "
@@ -1060,7 +1115,7 @@ static int get_nr_mapped_entries(struct device *dev,
int mapped_ents;
bucket = get_hash_bucket(ref, &flags);
- entry = hash_bucket_find(bucket, ref);
+ entry = bucket_find_exact(bucket, ref);
mapped_ents = 0;
if (entry)
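
The split into bucket_find_exact() and bucket_find_contain() exists because the two callers have different semantics: an unmap must name the mapping's exact bus address, while a sync may legitimately target a sub-range of an existing mapping, so check_sync() now walks neighbouring buckets looking for a containing entry. A simplified sketch of the two predicates (flat types, hashing omitted):

#include <stdbool.h>
#include <stddef.h>

struct mapping {
	const void	*dev;
	unsigned long	 dev_addr;
	size_t		 size;
};

/* Unmap lookup: same device, same bus address (cf. exact_match()). */
static bool match_exact(const struct mapping *ref, const struct mapping *e)
{
	return ref->dev == e->dev && ref->dev_addr == e->dev_addr;
}

/* Sync lookup: [ref->dev_addr, +size) must lie inside [e->dev_addr, +size)
 * (cf. containing_match()). */
static bool match_contain(const struct mapping *ref, const struct mapping *e)
{
	return ref->dev == e->dev &&
	       e->dev_addr <= ref->dev_addr &&
	       e->dev_addr + e->size >= ref->dev_addr + ref->size;
}
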
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 79fc20b65e7..dcdade39e47 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -10,6 +10,8 @@
* Copyright (C) 2011 Bart Van Assche. All Rights Reserved.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -29,6 +31,8 @@
#include <linux/jump_label.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
extern struct _ddebug __start___verbose[];
extern struct _ddebug __stop___verbose[];
@@ -37,7 +41,6 @@ struct ddebug_table {
struct list_head link;
char *mod_name;
unsigned int num_ddebugs;
- unsigned int num_enabled;
struct _ddebug *ddebugs;
};
@@ -147,19 +150,13 @@ static void ddebug_change(const struct ddebug_query *query,
newflags = (dp->flags & mask) | flags;
if (newflags == dp->flags)
continue;
-
- if (!newflags)
- dt->num_enabled--;
- else if (!dp->flags)
- dt->num_enabled++;
dp->flags = newflags;
if (newflags)
dp->enabled = 1;
else
dp->enabled = 0;
if (verbose)
- printk(KERN_INFO
- "ddebug: changed %s:%d [%s]%s %s\n",
+ pr_info("changed %s:%d [%s]%s %s\n",
dp->filename, dp->lineno,
dt->mod_name, dp->function,
ddebug_describe_flags(dp, flagbuf,
@@ -169,7 +166,7 @@ static void ddebug_change(const struct ddebug_query *query,
mutex_unlock(&ddebug_lock);
if (!nfound && verbose)
- printk(KERN_INFO "ddebug: no matches for query\n");
+ pr_info("no matches for query\n");
}
/*
@@ -214,10 +211,10 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
if (verbose) {
int i;
- printk(KERN_INFO "%s: split into words:", __func__);
+ pr_info("split into words:");
for (i = 0 ; i < nwords ; i++)
- printk(" \"%s\"", words[i]);
- printk("\n");
+ pr_cont(" \"%s\"", words[i]);
+ pr_cont("\n");
}
return nwords;
@@ -329,16 +326,15 @@ static int ddebug_parse_query(char *words[], int nwords,
}
} else {
if (verbose)
- printk(KERN_ERR "%s: unknown keyword \"%s\"\n",
- __func__, words[i]);
+ pr_err("unknown keyword \"%s\"\n", words[i]);
return -EINVAL;
}
}
if (verbose)
- printk(KERN_INFO "%s: q->function=\"%s\" q->filename=\"%s\" "
- "q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n",
- __func__, query->function, query->filename,
+ pr_info("q->function=\"%s\" q->filename=\"%s\" "
+ "q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n",
+ query->function, query->filename,
query->module, query->format, query->first_lineno,
query->last_lineno);
@@ -367,7 +363,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
return -EINVAL;
}
if (verbose)
- printk(KERN_INFO "%s: op='%c'\n", __func__, op);
+ pr_info("op='%c'\n", op);
for ( ; *str ; ++str) {
for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
@@ -382,7 +378,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
if (flags == 0)
return -EINVAL;
if (verbose)
- printk(KERN_INFO "%s: flags=0x%x\n", __func__, flags);
+ pr_info("flags=0x%x\n", flags);
/* calculate final *flagsp, *maskp according to mask and op */
switch (op) {
@@ -400,8 +396,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
break;
}
if (verbose)
- printk(KERN_INFO "%s: *flagsp=0x%x *maskp=0x%x\n",
- __func__, *flagsp, *maskp);
+ pr_info("*flagsp=0x%x *maskp=0x%x\n", *flagsp, *maskp);
return 0;
}
@@ -426,40 +421,117 @@ static int ddebug_exec_query(char *query_string)
return 0;
}
+#define PREFIX_SIZE 64
+
+static int remaining(int wrote)
+{
+ if (PREFIX_SIZE - wrote > 0)
+ return PREFIX_SIZE - wrote;
+ return 0;
+}
+
+static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
+{
+ int pos_after_tid;
+ int pos = 0;
+
+ pos += snprintf(buf + pos, remaining(pos), "%s", KERN_DEBUG);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
+ if (in_interrupt())
+ pos += snprintf(buf + pos, remaining(pos), "%s ",
+ "<intr>");
+ else
+ pos += snprintf(buf + pos, remaining(pos), "[%d] ",
+ task_pid_vnr(current));
+ }
+ pos_after_tid = pos;
+ if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME)
+ pos += snprintf(buf + pos, remaining(pos), "%s:",
+ desc->modname);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
+ pos += snprintf(buf + pos, remaining(pos), "%s:",
+ desc->function);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
+ pos += snprintf(buf + pos, remaining(pos), "%d:", desc->lineno);
+ if (pos - pos_after_tid)
+ pos += snprintf(buf + pos, remaining(pos), " ");
+ if (pos >= PREFIX_SIZE)
+ buf[PREFIX_SIZE - 1] = '\0';
+
+ return buf;
+}
+
int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
{
va_list args;
int res;
+ struct va_format vaf;
+ char buf[PREFIX_SIZE];
BUG_ON(!descriptor);
BUG_ON(!fmt);
va_start(args, fmt);
- res = printk(KERN_DEBUG);
- if (descriptor->flags & _DPRINTK_FLAGS_INCL_TID) {
- if (in_interrupt())
- res += printk(KERN_CONT "<intr> ");
- else
- res += printk(KERN_CONT "[%d] ", task_pid_vnr(current));
- }
- if (descriptor->flags & _DPRINTK_FLAGS_INCL_MODNAME)
- res += printk(KERN_CONT "%s:", descriptor->modname);
- if (descriptor->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
- res += printk(KERN_CONT "%s:", descriptor->function);
- if (descriptor->flags & _DPRINTK_FLAGS_INCL_LINENO)
- res += printk(KERN_CONT "%d ", descriptor->lineno);
- res += vprintk(fmt, args);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ res = printk("%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
va_end(args);
return res;
}
EXPORT_SYMBOL(__dynamic_pr_debug);
+int __dynamic_dev_dbg(struct _ddebug *descriptor,
+ const struct device *dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int res;
+ char buf[PREFIX_SIZE];
+
+ BUG_ON(!descriptor);
+ BUG_ON(!fmt);
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ res = __dev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+ va_end(args);
+
+ return res;
+}
+EXPORT_SYMBOL(__dynamic_dev_dbg);
+
+#ifdef CONFIG_NET
+
+int __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int res;
+ char buf[PREFIX_SIZE];
+
+ BUG_ON(!descriptor);
+ BUG_ON(!fmt);
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ res = __netdev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+ va_end(args);
+
+ return res;
+}
+EXPORT_SYMBOL(__dynamic_netdev_dbg);
+
+#endif
+
static __initdata char ddebug_setup_string[1024];
static __init int ddebug_setup_query(char *str)
{
if (strlen(str) >= 1024) {
- pr_warning("ddebug boot param string too large\n");
+ pr_warn("ddebug boot param string too large\n");
return 0;
}
strcpy(ddebug_setup_string, str);
@@ -487,8 +559,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
return -EFAULT;
tmpbuf[len] = '\0';
if (verbose)
- printk(KERN_INFO "%s: read %d bytes from userspace\n",
- __func__, (int)len);
+ pr_info("read %d bytes from userspace\n", (int)len);
ret = ddebug_exec_query(tmpbuf);
if (ret)
@@ -551,8 +622,7 @@ static void *ddebug_proc_start(struct seq_file *m, loff_t *pos)
int n = *pos;
if (verbose)
- printk(KERN_INFO "%s: called m=%p *pos=%lld\n",
- __func__, m, (unsigned long long)*pos);
+ pr_info("called m=%p *pos=%lld\n", m, (unsigned long long)*pos);
mutex_lock(&ddebug_lock);
@@ -577,8 +647,8 @@ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
struct _ddebug *dp;
if (verbose)
- printk(KERN_INFO "%s: called m=%p p=%p *pos=%lld\n",
- __func__, m, p, (unsigned long long)*pos);
+ pr_info("called m=%p p=%p *pos=%lld\n",
+ m, p, (unsigned long long)*pos);
if (p == SEQ_START_TOKEN)
dp = ddebug_iter_first(iter);
@@ -601,8 +671,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
char flagsbuf[8];
if (verbose)
- printk(KERN_INFO "%s: called m=%p p=%p\n",
- __func__, m, p);
+ pr_info("called m=%p p=%p\n", m, p);
if (p == SEQ_START_TOKEN) {
seq_puts(m,
@@ -627,8 +696,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
static void ddebug_proc_stop(struct seq_file *m, void *p)
{
if (verbose)
- printk(KERN_INFO "%s: called m=%p p=%p\n",
- __func__, m, p);
+ pr_info("called m=%p p=%p\n", m, p);
mutex_unlock(&ddebug_lock);
}
@@ -651,7 +719,7 @@ static int ddebug_proc_open(struct inode *inode, struct file *file)
int err;
if (verbose)
- printk(KERN_INFO "%s: called\n", __func__);
+ pr_info("called\n");
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (iter == NULL)
@@ -695,7 +763,6 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
}
dt->mod_name = new_name;
dt->num_ddebugs = n;
- dt->num_enabled = 0;
dt->ddebugs = tab;
mutex_lock(&ddebug_lock);
@@ -703,8 +770,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
mutex_unlock(&ddebug_lock);
if (verbose)
- printk(KERN_INFO "%u debug prints in module %s\n",
- n, dt->mod_name);
+ pr_info("%u debug prints in module %s\n", n, dt->mod_name);
return 0;
}
EXPORT_SYMBOL_GPL(ddebug_add_module);
@@ -726,8 +792,7 @@ int ddebug_remove_module(const char *mod_name)
int ret = -ENOENT;
if (verbose)
- printk(KERN_INFO "%s: removing module \"%s\"\n",
- __func__, mod_name);
+ pr_info("removing module \"%s\"\n", mod_name);
mutex_lock(&ddebug_lock);
list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
@@ -803,8 +868,8 @@ static int __init dynamic_debug_init(void)
if (ddebug_setup_string[0] != '\0') {
ret = ddebug_exec_query(ddebug_setup_string);
if (ret)
- pr_warning("Invalid ddebug boot param %s",
- ddebug_setup_string);
+ pr_warn("Invalid ddebug boot param %s",
+ ddebug_setup_string);
else
pr_info("ddebug initialized with string %s",
ddebug_setup_string);
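
The biggest functional change in this file is dynamic_emit_prefix(): the old code issued one printk(KERN_CONT ...) per prefix element, which interleaves badly under SMP, while the new code assembles the whole prefix in a stack buffer and hands printk() a single "%s%pV" format (%pV being the kernel's va_format extension). A user-space sketch of the bounded-append idiom (hypothetical names; note snprintf() returns the length it *wanted* to write, hence the clamp in remaining()):

#include <stdio.h>

#define PREFIX_SIZE 64

/* Space left in the buffer; 0 once snprintf has (virtually) overrun it. */
static int remaining(int wrote)
{
	return wrote < PREFIX_SIZE ? PREFIX_SIZE - wrote : 0;
}

static char *emit_prefix(char buf[PREFIX_SIZE],
			 const char *mod, const char *func, int line)
{
	int pos = 0;

	/* pos may exceed PREFIX_SIZE; each snprintf then gets size 0. */
	pos += snprintf(buf + pos, remaining(pos), "%s:", mod);
	pos += snprintf(buf + pos, remaining(pos), "%s:", func);
	pos += snprintf(buf + pos, remaining(pos), "%d: ", line);
	if (pos >= PREFIX_SIZE)		/* truncated: ensure termination */
		buf[PREFIX_SIZE - 1] = '\0';
	return buf;
}

Usage would be along the lines of: char buf[PREFIX_SIZE]; printf("%smessage\n", emit_prefix(buf, "mymod", "probe", 42));
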
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index f193b779644..4f7554025e3 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -14,7 +14,7 @@
* setup_fault_attr() is a helper function for various __setup handlers, so it
* returns 0 on error, because that is what __setup handlers do.
*/
-int __init setup_fault_attr(struct fault_attr *attr, char *str)
+int setup_fault_attr(struct fault_attr *attr, char *str)
{
unsigned long probability;
unsigned long interval;
@@ -36,6 +36,7 @@ int __init setup_fault_attr(struct fault_attr *attr, char *str)
return 1;
}
+EXPORT_SYMBOL_GPL(setup_fault_attr);
static void fail_dump(struct fault_attr *attr)
{
@@ -130,6 +131,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
return true;
}
+EXPORT_SYMBOL_GPL(should_fail);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
@@ -243,5 +245,6 @@ fail:
return ERR_PTR(-ENOMEM);
}
+EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
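
Dropping __init from setup_fault_attr() and exporting these three symbols is what lets the new, modular FAIL_MMC_REQUEST code above use the framework at all: an __init function would be discarded before a module could call it. A hedged sketch of a client (hypothetical names, using only the helpers exported here):

#include <linux/fault-inject.h>
#include <linux/errno.h>

static DECLARE_FAULT_ATTR(fail_foo_io);	/* hypothetical attribute */

/* In the request path: let the attr decide whether to inject an error. */
static int foo_submit(size_t bytes)
{
	if (should_fail(&fail_foo_io, bytes))
		return -EIO;
	/* ... real submission ... */
	return 0;
}
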
diff --git a/lib/idr.c b/lib/idr.c
index 5acf9bb1096..ed055b297c8 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -767,8 +767,8 @@ EXPORT_SYMBOL(ida_pre_get);
* @starting_id: id to start search at
* @p_id: pointer to the allocated handle
*
- * Allocate new ID above or equal to @ida. It should be called with
- * any required locks.
+ * Allocate new ID above or equal to @starting_id. It should be called
+ * with any required locks.
*
* If memory is required, it will return %-EAGAIN, you should unlock
* and go back to the ida_pre_get() call. If the ida is full, it will
@@ -944,6 +944,7 @@ int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
{
int ret, id;
unsigned int max;
+ unsigned long flags;
BUG_ON((int)start < 0);
BUG_ON((int)end < 0);
@@ -959,7 +960,7 @@ again:
if (!ida_pre_get(ida, gfp_mask))
return -ENOMEM;
- spin_lock(&simple_ida_lock);
+ spin_lock_irqsave(&simple_ida_lock, flags);
ret = ida_get_new_above(ida, start, &id);
if (!ret) {
if (id > max) {
@@ -969,7 +970,7 @@ again:
ret = id;
}
}
- spin_unlock(&simple_ida_lock);
+ spin_unlock_irqrestore(&simple_ida_lock, flags);
if (unlikely(ret == -EAGAIN))
goto again;
@@ -985,10 +986,12 @@ EXPORT_SYMBOL(ida_simple_get);
*/
void ida_simple_remove(struct ida *ida, unsigned int id)
{
+ unsigned long flags;
+
BUG_ON((int)id < 0);
- spin_lock(&simple_ida_lock);
+ spin_lock_irqsave(&simple_ida_lock, flags);
ida_remove(ida, id);
- spin_unlock(&simple_ida_lock);
+ spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
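
The switch to spin_lock_irqsave() makes ida_simple_get()/ida_simple_remove() safe to call from interrupt context. With the plain spin_lock() version, this sequence self-deadlocks on one CPU (a sketch of the hazard, not from the source):

/*
 * Why _irqsave: with plain spin_lock() this self-deadlocks on one CPU.
 *
 *   process context                interrupt handler
 *   ---------------                -----------------
 *   ida_simple_get()
 *     spin_lock(&simple_ida_lock)
 *       <interrupt arrives>        ida_simple_remove()
 *                                    spin_lock(&simple_ida_lock)
 *                                      spins forever: the holder cannot
 *                                      resume until the handler returns.
 */

spin_lock_irqsave() disables local interrupts for the critical section, so the handler cannot preempt the lock holder on the same CPU.
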
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 70af0a7f97c..ad72a03ce5e 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -282,7 +282,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
kobj_bcast_filter,
kobj);
/* ENOBUFS should be handled in userspace */
- if (retval == -ENOBUFS)
+ if (retval == -ENOBUFS || retval == -ESRCH)
retval = 0;
} else
retval = -ENOMEM;
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 5e066759f55..7a94c8f14e2 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -18,26 +18,40 @@
#include <linux/module.h>
#include <linux/types.h>
#include <asm/uaccess.h>
+#include "kstrtox.h"
-static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
+const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
{
- unsigned long long acc;
- int ok;
-
- if (base == 0) {
+ if (*base == 0) {
if (s[0] == '0') {
if (_tolower(s[1]) == 'x' && isxdigit(s[2]))
- base = 16;
+ *base = 16;
else
- base = 8;
+ *base = 8;
} else
- base = 10;
+ *base = 10;
}
- if (base == 16 && s[0] == '0' && _tolower(s[1]) == 'x')
+ if (*base == 16 && s[0] == '0' && _tolower(s[1]) == 'x')
s += 2;
+ return s;
+}
- acc = 0;
- ok = 0;
+/*
+ * Convert a non-negative integer string representation in an explicitly
+ * given radix to an integer.
+ * Return the number of characters consumed, possibly or-ed with the
+ * overflow bit.
+ * If overflow occurs, the (incorrect) result is still returned.
+ *
+ * Don't you dare use this function.
+ */
+unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res)
+{
+ unsigned int rv;
+ int overflow;
+
+ *res = 0;
+ rv = 0;
+ overflow = 0;
while (*s) {
unsigned int val;
@@ -45,23 +59,40 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
val = *s - '0';
else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f')
val = _tolower(*s) - 'a' + 10;
- else if (*s == '\n' && *(s + 1) == '\0')
- break;
else
- return -EINVAL;
+ break;
if (val >= base)
- return -EINVAL;
- if (acc > div_u64(ULLONG_MAX - val, base))
- return -ERANGE;
- acc = acc * base + val;
- ok = 1;
-
+ break;
+ if (*res > div_u64(ULLONG_MAX - val, base))
+ overflow = 1;
+ *res = *res * base + val;
+ rv++;
s++;
}
- if (!ok)
+ if (overflow)
+ rv |= KSTRTOX_OVERFLOW;
+ return rv;
+}
+
+static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
+{
+ unsigned long long _res;
+ unsigned int rv;
+
+ s = _parse_integer_fixup_radix(s, &base);
+ rv = _parse_integer(s, base, &_res);
+ if (rv & KSTRTOX_OVERFLOW)
+ return -ERANGE;
+ rv &= ~KSTRTOX_OVERFLOW;
+ if (rv == 0)
+ return -EINVAL;
+ s += rv;
+ if (*s == '\n')
+ s++;
+ if (*s)
return -EINVAL;
- *res = acc;
+ *res = _res;
return 0;
}
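
After the refactor, _kstrtoull() is just radix fixup + digit parsing + strictness checks (one optional trailing newline, nothing else). The two helpers are kernel-internal, declared in the new lib/kstrtox.h below; a hedged sketch of calling them directly:

/* Hypothetical strict parser built on the new helpers. */
static int parse_u64_strict(const char *s, unsigned long long *out)
{
	unsigned int base = 0;	/* 0 = autodetect: 0x.. hex, 0.. octal, else decimal */
	unsigned int rv;

	s = _parse_integer_fixup_radix(s, &base);
	rv = _parse_integer(s, base, out);
	if (rv & KSTRTOX_OVERFLOW)
		return -ERANGE;
	if (rv == 0)		/* no digits at all */
		return -EINVAL;
	if (s[rv] != '\0')	/* trailing junk (unlike _kstrtoull, no '\n' allowance) */
		return -EINVAL;
	return 0;
}
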
diff --git a/lib/kstrtox.h b/lib/kstrtox.h
new file mode 100644
index 00000000000..f13eeeaf441
--- /dev/null
+++ b/lib/kstrtox.h
@@ -0,0 +1,8 @@
+#ifndef _LIB_KSTRTOX_H
+#define _LIB_KSTRTOX_H
+
+#define KSTRTOX_OVERFLOW (1U << 31)
+const char *_parse_integer_fixup_radix(const char *s, unsigned int *base);
+unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res);
+
+#endif
diff --git a/lib/llist.c b/lib/llist.c
index da445724fa1..700cff77a38 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -3,8 +3,8 @@
*
* The basic atomic operation of this list is cmpxchg on long. On
* architectures that don't have NMI-safe cmpxchg implementation, the
- * list can NOT be used in NMI handler. So code uses the list in NMI
- * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ * list can NOT be used in NMI handlers. So code that uses the list in
+ * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
*
* Copyright 2010,2011 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
@@ -30,48 +30,28 @@
#include <asm/system.h>
/**
- * llist_add - add a new entry
- * @new: new entry to be added
- * @head: the head for your lock-less list
- */
-void llist_add(struct llist_node *new, struct llist_head *head)
-{
- struct llist_node *entry, *old_entry;
-
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
-
- entry = head->first;
- do {
- old_entry = entry;
- new->next = entry;
- cpu_relax();
- } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
-}
-EXPORT_SYMBOL_GPL(llist_add);
-
-/**
* llist_add_batch - add several linked entries in batch
* @new_first: first entry in batch to be added
* @new_last: last entry in batch to be added
* @head: the head for your lock-less list
+ *
+ * Return whether the list was empty before adding.
*/
-void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
struct llist_head *head)
{
struct llist_node *entry, *old_entry;
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
-
entry = head->first;
- do {
+ for (;;) {
old_entry = entry;
new_last->next = entry;
- cpu_relax();
- } while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
+ entry = cmpxchg(&head->first, old_entry, new_first);
+ if (entry == old_entry)
+ break;
+ }
+
+ return old_entry == NULL;
}
EXPORT_SYMBOL_GPL(llist_add_batch);
@@ -93,37 +73,17 @@ struct llist_node *llist_del_first(struct llist_head *head)
{
struct llist_node *entry, *old_entry, *next;
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
-
entry = head->first;
- do {
+ for (;;) {
if (entry == NULL)
return NULL;
old_entry = entry;
next = entry->next;
- cpu_relax();
- } while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
+ entry = cmpxchg(&head->first, old_entry, next);
+ if (entry == old_entry)
+ break;
+ }
return entry;
}
EXPORT_SYMBOL_GPL(llist_del_first);
-
-/**
- * llist_del_all - delete all entries from lock-less list
- * @head: the head of lock-less list to delete all entries
- *
- * If list is empty, return NULL, otherwise, delete all entries and
- * return the pointer to the first entry. The order of entries
- * deleted is from the newest to the oldest added one.
- */
-struct llist_node *llist_del_all(struct llist_head *head)
-{
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
-
- return xchg(&head->first, NULL);
-}
-EXPORT_SYMBOL_GPL(llist_del_all);
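
llist_add() and llist_del_all() are not gone: this series moves them into <linux/llist.h> as inlines, so the cmpxchg loop lives in one place and a single-entry add is just a batch of one. A sketch of the inline wrappers (equivalent behaviour, not necessarily the exact header text):

/* Add one entry as a batch of one; true if the list was empty before. */
static inline bool llist_add(struct llist_node *new, struct llist_head *head)
{
	return llist_add_batch(new, new, head);
}

/* Delete the whole list in one atomic swap. */
static inline struct llist_node *llist_del_all(struct llist_head *head)
{
	return xchg(&head->first, NULL);
}
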
diff --git a/lib/nlattr.c b/lib/nlattr.c
index ac09f2226dc..a8408b6cacd 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -20,6 +20,7 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_U16] = sizeof(u16),
[NLA_U32] = sizeof(u32),
[NLA_U64] = sizeof(u64),
+ [NLA_MSECS] = sizeof(u64),
[NLA_NESTED] = NLA_HDRLEN,
};
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 28f2c33c6b5..f8a3f1a829b 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -10,8 +10,10 @@
#include <linux/module.h>
#include <linux/debugobjects.h>
+#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
+#endif
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
@@ -59,13 +61,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
int cpu;
- spin_lock(&fbc->lock);
+ raw_spin_lock(&fbc->lock);
for_each_possible_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
*pcount = 0;
}
fbc->count = amount;
- spin_unlock(&fbc->lock);
+ raw_spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
@@ -76,10 +78,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
preempt_disable();
count = __this_cpu_read(*fbc->counters) + amount;
if (count >= batch || count <= -batch) {
- spin_lock(&fbc->lock);
+ raw_spin_lock(&fbc->lock);
fbc->count += count;
__this_cpu_write(*fbc->counters, 0);
- spin_unlock(&fbc->lock);
+ raw_spin_unlock(&fbc->lock);
} else {
__this_cpu_write(*fbc->counters, count);
}
@@ -96,13 +98,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
s64 ret;
int cpu;
- spin_lock(&fbc->lock);
+ raw_spin_lock(&fbc->lock);
ret = fbc->count;
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
- spin_unlock(&fbc->lock);
+ raw_spin_unlock(&fbc->lock);
return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
@@ -110,7 +112,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
struct lock_class_key *key)
{
- spin_lock_init(&fbc->lock);
+ raw_spin_lock_init(&fbc->lock);
lockdep_set_class(&fbc->lock, key);
fbc->count = amount;
fbc->counters = alloc_percpu(s32);
@@ -173,11 +175,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
s32 *pcount;
unsigned long flags;
- spin_lock_irqsave(&fbc->lock, flags);
+ raw_spin_lock_irqsave(&fbc->lock, flags);
pcount = per_cpu_ptr(fbc->counters, cpu);
fbc->count += *pcount;
*pcount = 0;
- spin_unlock_irqrestore(&fbc->lock, flags);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
mutex_unlock(&percpu_counters_lock);
#endif
diff --git a/lib/proportions.c b/lib/proportions.c
index d50746a79de..05df84801b5 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
int prop_local_init_percpu(struct prop_local_percpu *pl)
{
- spin_lock_init(&pl->lock);
+ raw_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
if (pl->period == global_period)
return;
- spin_lock_irqsave(&pl->lock, flags);
+ raw_spin_lock_irqsave(&pl->lock, flags);
prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
/*
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
percpu_counter_set(&pl->events, 0);
pl->period = global_period;
- spin_unlock_irqrestore(&pl->lock, flags);
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/*
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd,
int prop_local_init_single(struct prop_local_single *pl)
{
- spin_lock_init(&pl->lock);
+ raw_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
pl->events = 0;
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
if (pl->period == global_period)
return;
- spin_lock_irqsave(&pl->lock, flags);
+ raw_spin_lock_irqsave(&pl->lock, flags);
prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
/*
* For each missed period, we half the local counter.
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
else
pl->events = 0;
pl->period = global_period;
- spin_unlock_irqrestore(&pl->lock, flags);
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/*
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index a2f9da59c19..d9df7454519 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -576,7 +576,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
{
unsigned int height, shift;
struct radix_tree_node *node;
- int saw_unset_tag = 0;
/* check the root's tag bit */
if (!root_tag_get(root, tag))
@@ -603,15 +602,10 @@ int radix_tree_tag_get(struct radix_tree_root *root,
return 0;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-
- /*
- * This is just a debug check. Later, we can bale as soon as
- * we see an unset tag.
- */
if (!tag_get(node, tag, offset))
- saw_unset_tag = 1;
+ return 0;
if (height == 1)
- return !!tag_get(node, tag, offset);
+ return 1;
node = rcu_dereference_raw(node->slots[offset]);
shift -= RADIX_TREE_MAP_SHIFT;
height--;
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index b595f560bee..8b02f60ffc8 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -17,6 +17,7 @@
*/
#include <linux/raid/pq.h>
+#include <linux/module.h>
#ifndef __KERNEL__
#include <sys/mman.h>
#include <stdio.h>
diff --git a/lib/raid6/int.uc b/lib/raid6/int.uc
index d1e276a14fa..5b50f8dfc5d 100644
--- a/lib/raid6/int.uc
+++ b/lib/raid6/int.uc
@@ -11,7 +11,7 @@
* ----------------------------------------------------------------------- */
/*
- * raid6int$#.c
+ * int$#.c
*
* $#-way unrolled portable integer math RAID-6 instruction set
*
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
index 3b1500843bb..8a3780902ce 100644
--- a/lib/raid6/mktables.c
+++ b/lib/raid6/mktables.c
@@ -60,6 +60,7 @@ int main(int argc, char *argv[])
uint8_t exptbl[256], invtbl[256];
printf("#include <linux/raid/pq.h>\n");
+ printf("#include <linux/export.h>\n");
/* Compute multiplication table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c
index 8590d19cf52..fe275d7b6b3 100644
--- a/lib/raid6/recov.c
+++ b/lib/raid6/recov.c
@@ -18,6 +18,7 @@
* the syndrome.)
*/
+#include <linux/export.h>
#include <linux/raid/pq.h>
/* Recover two failed data blocks. */
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 027a03f4c56..c96d500577d 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
* in addition to the one that will be printed by
* the entity that is holding the lock already:
*/
- if (!spin_trylock_irqsave(&rs->lock, flags))
+ if (!raw_spin_trylock_irqsave(&rs->lock, flags))
return 0;
if (!rs->begin)
@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
rs->missed++;
ret = 0;
}
- spin_unlock_irqrestore(&rs->lock, flags);
+ raw_spin_unlock_irqrestore(&rs->lock, flags);
return ret;
}
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index ffc9fc7f3b0..f2393c21fe8 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -22,9 +22,9 @@ int rwsem_is_locked(struct rw_semaphore *sem)
int ret = 1;
unsigned long flags;
- if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+ if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
ret = (sem->activity != 0);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
return ret;
}
@@ -44,7 +44,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->activity = 0;
- spin_lock_init(&sem->wait_lock);
+ raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
@@ -145,12 +145,12 @@ void __sched __down_read(struct rw_semaphore *sem)
struct task_struct *tsk;
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity++;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
goto out;
}
@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the lock */
for (;;) {
@@ -189,7 +189,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
int ret = 0;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
ret = 1;
}
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
}
@@ -212,12 +212,12 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
struct task_struct *tsk;
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity = -1;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
goto out;
}
@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the lock */
for (;;) {
@@ -260,7 +260,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
ret = 1;
}
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
}
@@ -280,12 +280,12 @@ void __up_read(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (--sem->activity == 0 && !list_empty(&sem->wait_list))
sem = __rwsem_wake_one_writer(sem);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
@@ -295,13 +295,13 @@ void __up_write(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 0;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
@@ -312,12 +312,12 @@ void __downgrade_write(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 1;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
diff --git a/lib/rwsem.c b/lib/rwsem.c
index aa7c3052261..410aa1189b1 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -22,7 +22,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
+ raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
- spin_lock_irq(&sem->wait_lock);
+ raw_spin_lock_irq(&sem->wait_lock);
waiter.task = tsk;
waiter.flags = flags;
get_task_struct(tsk);
@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
- spin_unlock_irq(&sem->wait_lock);
+ raw_spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
@@ -245,13 +245,13 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
@@ -265,13 +265,13 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 4689cb073da..503f087382a 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
- if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+ if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
goto out;
/*
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 4755b98b6df..5f3eacdd617 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -49,13 +49,10 @@ void __rwlock_init(rwlock_t *lock, const char *name,
EXPORT_SYMBOL(__rwlock_init);
-static void spin_bug(raw_spinlock_t *lock, const char *msg)
+static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
struct task_struct *owner = NULL;
- if (!debug_locks_off())
- return;
-
if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
owner = lock->owner;
printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
@@ -70,6 +67,14 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
dump_stack();
}
+static void spin_bug(raw_spinlock_t *lock, const char *msg)
+{
+ if (!debug_locks_off())
+ return;
+
+ spin_dump(lock, msg);
+}
+
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
static inline void
@@ -113,11 +118,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
/* lockup suspected: */
if (print_once) {
print_once = 0;
- printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
- "%s/%d, %p\n",
- raw_smp_processor_id(), current->comm,
- task_pid_nr(current), lock);
- dump_stack();
+ spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
trigger_all_cpu_backtrace();
#endif
diff --git a/lib/string.c b/lib/string.c
index 01fad9b203e..dc4a86341f9 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -360,7 +360,6 @@ char *strim(char *s)
size_t size;
char *end;
- s = skip_spaces(s);
size = strlen(s);
if (!size)
return s;
@@ -370,7 +369,7 @@ char *strim(char *s)
end--;
*(end + 1) = '\0';
- return s;
+ return skip_spaces(s);
}
EXPORT_SYMBOL(strim);
@@ -756,3 +755,57 @@ void *memchr(const void *s, int c, size_t n)
}
EXPORT_SYMBOL(memchr);
#endif
+
+static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
+{
+ while (bytes) {
+ if (*start != value)
+ return (void *)start;
+ start++;
+ bytes--;
+ }
+ return NULL;
+}
+
+/**
+ * memchr_inv - Find a character that doesn't match @c in an area of memory.
+ * @start: The memory area
+ * @c: Find a character other than c
+ * @bytes: The size of the area.
+ *
+ * returns the address of the first character other than @c, or %NULL
+ * if the whole buffer contains just @c.
+ */
+void *memchr_inv(const void *start, int c, size_t bytes)
+{
+ u8 value = c;
+ u64 value64;
+ unsigned int words, prefix;
+
+ if (bytes <= 16)
+ return check_bytes8(start, value, bytes);
+
+ value64 = value | value << 8 | value << 16 | value << 24;
+ value64 = (value64 & 0xffffffff) | value64 << 32;
+ prefix = 8 - ((unsigned long)start) % 8;
+
+ if (prefix) {
+ u8 *r = check_bytes8(start, value, prefix);
+ if (r)
+ return r;
+ start += prefix;
+ bytes -= prefix;
+ }
+
+ words = bytes / 8;
+
+ while (words) {
+ if (*(u64 *)start != value64)
+ return check_bytes8(start, value, 8);
+ start += 8;
+ words--;
+ }
+
+ return check_bytes8(start, value, bytes % 8);
+}
+EXPORT_SYMBOL(memchr_inv);
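
memchr_inv() is the complement of memchr(): rather than the first byte equal to @c it returns the first byte that differs, using 64-bit compares on the aligned middle of the buffer. The common use is verifying that a region is uniformly filled, e.g.:

#include <linux/string.h>

/* True if the first `len` bytes at `p` are all zero. */
static bool buf_is_zeroed(const void *p, size_t len)
{
	return memchr_inv(p, 0, len) == NULL;
}
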
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d7222a9c826..993599e66e5 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -31,17 +31,7 @@
#include <asm/div64.h>
#include <asm/sections.h> /* for dereference_function_descriptor() */
-static unsigned int simple_guess_base(const char *cp)
-{
- if (cp[0] == '0') {
- if (_tolower(cp[1]) == 'x' && isxdigit(cp[2]))
- return 16;
- else
- return 8;
- } else {
- return 10;
- }
-}
+#include "kstrtox.h"
/**
* simple_strtoull - convert a string to an unsigned long long
@@ -51,23 +41,14 @@ static unsigned int simple_guess_base(const char *cp)
*/
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
- unsigned long long result = 0;
+ unsigned long long result;
+ unsigned int rv;
- if (!base)
- base = simple_guess_base(cp);
+ cp = _parse_integer_fixup_radix(cp, &base);
+ rv = _parse_integer(cp, base, &result);
+ /* FIXME */
+ cp += (rv & ~KSTRTOX_OVERFLOW);
- if (base == 16 && cp[0] == '0' && _tolower(cp[1]) == 'x')
- cp += 2;
-
- while (isxdigit(*cp)) {
- unsigned int value;
-
- value = isdigit(*cp) ? *cp - '0' : _tolower(*cp) - 'a' + 10;
- if (value >= base)
- break;
- result = result * base + value;
- cp++;
- }
if (endp)
*endp = (char *)cp;
@@ -566,7 +547,7 @@ char *mac_address_string(char *buf, char *end, u8 *addr,
}
for (i = 0; i < 6; i++) {
- p = pack_hex_byte(p, addr[i]);
+ p = hex_byte_pack(p, addr[i]);
if (fmt[0] == 'M' && i != 5)
*p++ = separator;
}
@@ -686,13 +667,13 @@ char *ip6_compressed_string(char *p, const char *addr)
lo = word & 0xff;
if (hi) {
if (hi > 0x0f)
- p = pack_hex_byte(p, hi);
+ p = hex_byte_pack(p, hi);
else
*p++ = hex_asc_lo(hi);
- p = pack_hex_byte(p, lo);
+ p = hex_byte_pack(p, lo);
}
else if (lo > 0x0f)
- p = pack_hex_byte(p, lo);
+ p = hex_byte_pack(p, lo);
else
*p++ = hex_asc_lo(lo);
needcolon = true;
@@ -714,8 +695,8 @@ char *ip6_string(char *p, const char *addr, const char *fmt)
int i;
for (i = 0; i < 8; i++) {
- p = pack_hex_byte(p, *addr++);
- p = pack_hex_byte(p, *addr++);
+ p = hex_byte_pack(p, *addr++);
+ p = hex_byte_pack(p, *addr++);
if (fmt[0] == 'I' && i != 7)
*p++ = ':';
}
@@ -773,7 +754,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
}
for (i = 0; i < 16; i++) {
- p = pack_hex_byte(p, addr[index[i]]);
+ p = hex_byte_pack(p, addr[index[i]]);
switch (i) {
case 3:
case 5: